Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next
commit 4b0d1a0b1f
@@ -129,7 +129,6 @@
!Finclude/net/cfg80211.h cfg80211_pmksa
!Finclude/net/cfg80211.h cfg80211_send_rx_auth
!Finclude/net/cfg80211.h cfg80211_send_auth_timeout
!Finclude/net/cfg80211.h __cfg80211_auth_canceled
!Finclude/net/cfg80211.h cfg80211_send_rx_assoc
!Finclude/net/cfg80211.h cfg80211_send_assoc_timeout
!Finclude/net/cfg80211.h cfg80211_send_deauth
@@ -4914,8 +4914,6 @@ F: fs/ocfs2/

ORINOCO DRIVER
L: linux-wireless@vger.kernel.org
L: orinoco-users@lists.sourceforge.net
L: orinoco-devel@lists.sourceforge.net
W: http://linuxwireless.org/en/users/Drivers/orinoco
W: http://www.nongnu.org/orinoco/
S: Orphan
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/ssb/ssb.h>
#include <linux/bcma/bcma.h>
#include <bcm47xx.h>

int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -32,15 +33,12 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return 0;
}

int pcibios_plat_dev_init(struct pci_dev *dev)
{
#ifdef CONFIG_BCM47XX_SSB
static int bcm47xx_pcibios_plat_dev_init_ssb(struct pci_dev *dev)
{
int res;
u8 slot, pin;

if (bcm47xx_bus_type != BCM47XX_BUS_TYPE_SSB)
return 0;

res = ssb_pcibios_plat_dev_init(dev);
if (res < 0) {
printk(KERN_ALERT "PCI: Failed to init device %s\n",
@@ -60,6 +58,47 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
}

dev->irq = res;
#endif
return 0;
}
#endif

#ifdef CONFIG_BCM47XX_BCMA
static int bcm47xx_pcibios_plat_dev_init_bcma(struct pci_dev *dev)
{
int res;

res = bcma_core_pci_plat_dev_init(dev);
if (res < 0) {
printk(KERN_ALERT "PCI: Failed to init device %s\n",
pci_name(dev));
return res;
}

res = bcma_core_pci_pcibios_map_irq(dev);

/* IRQ-0 and IRQ-1 are software interrupts. */
if (res < 2) {
printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n",
pci_name(dev));
return res;
}

dev->irq = res;
return 0;
}
#endif

int pcibios_plat_dev_init(struct pci_dev *dev)
{
#ifdef CONFIG_BCM47XX_SSB
if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_SSB)
return bcm47xx_pcibios_plat_dev_init_ssb(dev);
else
#endif
#ifdef CONFIG_BCM47XX_BCMA
if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA)
return bcm47xx_pcibios_plat_dev_init_bcma(dev);
else
#endif
return 0;
}
@@ -13,7 +13,7 @@
struct bcma_bus;

/* main.c */
int bcma_bus_register(struct bcma_bus *bus);
int __devinit bcma_bus_register(struct bcma_bus *bus);
void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
@@ -48,8 +48,12 @@ extern int __init bcma_host_pci_init(void);
extern void __exit bcma_host_pci_exit(void);
#endif /* CONFIG_BCMA_HOST_PCI */

/* driver_pci.c */
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);

#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */

#endif
@@ -2,8 +2,9 @@
* Broadcom specific AMBA
* PCI Core
*
* Copyright 2005, Broadcom Corporation
* Copyright 2005, 2011, Broadcom Corporation
* Copyright 2006, 2007, Michael Buesch <m@bues.ch>
* Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
@@ -16,40 +17,41 @@
* R/W ops.
**************************************************/

static u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
pcicore_write32(pc, 0x130, address);
pcicore_read32(pc, 0x130);
return pcicore_read32(pc, 0x134);
pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}

#if 0
static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
pcicore_write32(pc, 0x130, address);
pcicore_read32(pc, 0x130);
pcicore_write32(pc, 0x134, data);
pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
#endif

static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
{
const u16 mdio_control = 0x128;
const u16 mdio_data = 0x12C;
u32 v;
int i;

v = (1 << 30); /* Start of Transaction */
v |= (1 << 28); /* Write Transaction */
v |= (1 << 17); /* Turnaround */
v |= (0x1F << 18);
v = BCMA_CORE_PCI_MDIODATA_START;
v |= BCMA_CORE_PCI_MDIODATA_WRITE;
v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
v |= BCMA_CORE_PCI_MDIODATA_TA;
v |= (phy << 4);
pcicore_write32(pc, mdio_data, v);
pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

udelay(10);
for (i = 0; i < 200; i++) {
v = pcicore_read32(pc, mdio_control);
if (v & 0x100 /* Trans complete */)
v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
break;
msleep(1);
}
@@ -57,79 +59,84 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)

static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
{
const u16 mdio_control = 0x128;
const u16 mdio_data = 0x12C;
int max_retries = 10;
u16 ret = 0;
u32 v;
int i;

v = 0x80; /* Enable Preamble Sequence */
v |= 0x2; /* MDIO Clock Divisor */
pcicore_write32(pc, mdio_control, v);
/* enable mdio access to SERDES */
v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

if (pc->core->id.rev >= 10) {
max_retries = 200;
bcma_pcie_mdio_set_phy(pc, device);
v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
} else {
v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
}

v = (1 << 30); /* Start of Transaction */
v |= (1 << 29); /* Read Transaction */
v |= (1 << 17); /* Turnaround */
if (pc->core->id.rev < 10)
v |= (u32)device << 22;
v |= (u32)address << 18;
pcicore_write32(pc, mdio_data, v);
v = BCMA_CORE_PCI_MDIODATA_START;
v |= BCMA_CORE_PCI_MDIODATA_READ;
v |= BCMA_CORE_PCI_MDIODATA_TA;

pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
/* Wait for the device to complete the transaction */
udelay(10);
for (i = 0; i < max_retries; i++) {
v = pcicore_read32(pc, mdio_control);
if (v & 0x100 /* Trans complete */) {
v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
udelay(10);
ret = pcicore_read32(pc, mdio_data);
ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
break;
}
msleep(1);
}
pcicore_write32(pc, mdio_control, 0);
pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
return ret;
}

static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
u8 address, u16 data)
{
const u16 mdio_control = 0x128;
const u16 mdio_data = 0x12C;
int max_retries = 10;
u32 v;
int i;

v = 0x80; /* Enable Preamble Sequence */
v |= 0x2; /* MDIO Clock Divisor */
pcicore_write32(pc, mdio_control, v);
/* enable mdio access to SERDES */
v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

if (pc->core->id.rev >= 10) {
max_retries = 200;
bcma_pcie_mdio_set_phy(pc, device);
v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
} else {
v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
}

v = (1 << 30); /* Start of Transaction */
v |= (1 << 28); /* Write Transaction */
v |= (1 << 17); /* Turnaround */
if (pc->core->id.rev < 10)
v |= (u32)device << 22;
v |= (u32)address << 18;
v = BCMA_CORE_PCI_MDIODATA_START;
v |= BCMA_CORE_PCI_MDIODATA_WRITE;
v |= BCMA_CORE_PCI_MDIODATA_TA;
v |= data;
pcicore_write32(pc, mdio_data, v);
pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
/* Wait for the device to complete the transaction */
udelay(10);
for (i = 0; i < max_retries; i++) {
v = pcicore_read32(pc, mdio_control);
if (v & 0x100 /* Trans complete */)
v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
break;
msleep(1);
}
pcicore_write32(pc, mdio_control, 0);
pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}

/**************************************************
@@ -138,72 +145,53 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,

static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
{
return (bcma_pcie_read(pc, 0x204) & 0x10) ? 0xC0 : 0x80;
u32 tmp;

tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
else
return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
}

static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
{
const u8 serdes_pll_device = 0x1D;
const u8 serdes_rx_device = 0x1F;
u16 tmp;

bcma_pcie_mdio_write(pc, serdes_rx_device, 1 /* Control */,
bcma_pcicore_polarity_workaround(pc));
tmp = bcma_pcie_mdio_read(pc, serdes_pll_device, 1 /* Control */);
if (tmp & 0x4000)
bcma_pcie_mdio_write(pc, serdes_pll_device, 1, tmp & ~0x4000);
bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
BCMA_CORE_PCI_SERDES_RX_CTRL,
bcma_pcicore_polarity_workaround(pc));
tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
BCMA_CORE_PCI_SERDES_PLL_CTRL);
if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
BCMA_CORE_PCI_SERDES_PLL_CTRL,
tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}

/**************************************************
* Init.
**************************************************/

static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
static void __devinit bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
bcma_pcicore_serdes_workaround(pc);
}

static bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
struct bcma_bus *bus = pc->core->bus;
u16 chipid_top;

chipid_top = (bus->chipinfo.id & 0xFF00);
if (chipid_top != 0x4700 &&
chipid_top != 0x5300)
return false;

#ifdef CONFIG_SSB_DRIVER_PCICORE
if (bus->sprom.boardflags_lo & SSB_BFL_NOPCI)
return false;
#endif /* CONFIG_SSB_DRIVER_PCICORE */

#if 0
/* TODO: on BCMA we use address from EROM instead of magic formula */
u32 tmp;
return !mips_busprobe32(tmp, (bus->mmio +
(pc->core->core_index * BCMA_CORE_SIZE)));
#endif

return true;
}

void bcma_core_pci_init(struct bcma_drv_pci *pc)
void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
{
if (pc->setup_done)
return;

if (bcma_core_pci_is_in_hostmode(pc)) {
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
if (pc->hostmode)
bcma_core_pci_hostmode_init(pc);
#else
pr_err("Driver compiled without support for hostmode PCI\n");
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
} else {
bcma_core_pci_clientmode_init(pc);
}

pc->setup_done = true;
if (!pc->hostmode)
bcma_core_pci_clientmode_init(pc);
}

int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
@@ -2,13 +2,587 @@
* Broadcom specific AMBA
* PCI Core in hostmode
*
* Copyright 2005 - 2011, Broadcom Corporation
* Copyright 2006, 2007, Michael Buesch <m@bues.ch>
* Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>

void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
/* Probe a 32bit value on the bus and catch bus exceptions.
* Returns nonzero on a bus exception.
* This is MIPS specific */
#define mips_busprobe32(val, addr) get_dbe((val), ((u32 *)(addr)))

/* Assume one-hot slot wiring */
#define BCMA_PCI_SLOT_MAX 16
#define PCI_CONFIG_SPACE_SIZE 256

bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
pr_err("No support for PCI core in hostmode yet\n");
struct bcma_bus *bus = pc->core->bus;
u16 chipid_top;
u32 tmp;

chipid_top = (bus->chipinfo.id & 0xFF00);
if (chipid_top != 0x4700 &&
chipid_top != 0x5300)
return false;

if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
pr_info("This PCI core is disabled and not working\n");
return false;
}

bcma_core_enable(pc->core, 0);

return !mips_busprobe32(tmp, pc->core->io_addr);
}

static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
{
pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
}

static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
u32 data)
{
pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
}

static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
unsigned int func, unsigned int off)
{
u32 addr = 0;

/* Issue config commands only when the data link is up (atleast
* one external pcie device is present).
*/
if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
& BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
goto out;

/* Type 0 transaction */
/* Slide the PCI window to the appropriate slot */
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
/* Calculate the address */
addr = pc->host_controller->host_cfg_addr;
addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
addr |= (off & ~3);

out:
return addr;
}

static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
unsigned int func, unsigned int off,
void *buf, int len)
{
int err = -EINVAL;
u32 addr, val;
void __iomem *mmio = 0;

WARN_ON(!pc->hostmode);
if (unlikely(len != 1 && len != 2 && len != 4))
goto out;
if (dev == 0) {
/* we support only two functions on device 0 */
if (func > 1)
return -EINVAL;

/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
if (off >= PCI_CONFIG_SPACE_SIZE) {
addr = (func << 12);
addr |= (off & 0x0FFF);
val = bcma_pcie_read_config(pc, addr);
} else {
addr = BCMA_CORE_PCI_PCICFG0;
addr |= (func << 8);
addr |= (off & 0xfc);
val = pcicore_read32(pc, addr);
}
} else {
addr = bcma_get_cfgspace_addr(pc, dev, func, off);
if (unlikely(!addr))
goto out;
err = -ENOMEM;
mmio = ioremap_nocache(addr, len);
if (!mmio)
goto out;

if (mips_busprobe32(val, mmio)) {
val = 0xffffffff;
goto unmap;
}

val = readl(mmio);
}
val >>= (8 * (off & 3));

switch (len) {
case 1:
*((u8 *)buf) = (u8)val;
break;
case 2:
*((u16 *)buf) = (u16)val;
break;
case 4:
*((u32 *)buf) = (u32)val;
break;
}
err = 0;
unmap:
if (mmio)
iounmap(mmio);
out:
return err;
}

static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
unsigned int func, unsigned int off,
const void *buf, int len)
{
int err = -EINVAL;
u32 addr = 0, val = 0;
void __iomem *mmio = 0;
u16 chipid = pc->core->bus->chipinfo.id;

WARN_ON(!pc->hostmode);
if (unlikely(len != 1 && len != 2 && len != 4))
goto out;
if (dev == 0) {
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
if (off < PCI_CONFIG_SPACE_SIZE) {
addr = pc->core->addr + BCMA_CORE_PCI_PCICFG0;
addr |= (func << 8);
addr |= (off & 0xfc);
mmio = ioremap_nocache(addr, len);
if (!mmio)
goto out;
}
} else {
addr = bcma_get_cfgspace_addr(pc, dev, func, off);
if (unlikely(!addr))
goto out;
err = -ENOMEM;
mmio = ioremap_nocache(addr, len);
if (!mmio)
goto out;

if (mips_busprobe32(val, mmio)) {
val = 0xffffffff;
goto unmap;
}
}

switch (len) {
case 1:
val = readl(mmio);
val &= ~(0xFF << (8 * (off & 3)));
val |= *((const u8 *)buf) << (8 * (off & 3));
break;
case 2:
val = readl(mmio);
val &= ~(0xFFFF << (8 * (off & 3)));
val |= *((const u16 *)buf) << (8 * (off & 3));
break;
case 4:
val = *((const u32 *)buf);
break;
}
if (dev == 0 && !addr) {
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
addr = (func << 12);
addr |= (off & 0x0FFF);
bcma_pcie_write_config(pc, addr, val);
} else {
writel(val, mmio);

if (chipid == 0x4716 || chipid == 0x4748)
readl(mmio);
}

err = 0;
unmap:
if (mmio)
iounmap(mmio);
out:
return err;
}

static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
unsigned int devfn,
int reg, int size, u32 *val)
{
unsigned long flags;
int err;
struct bcma_drv_pci *pc;
struct bcma_drv_pci_host *pc_host;

pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
pc = pc_host->pdev;

spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
PCI_FUNC(devfn), reg, val, size);
spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
unsigned int devfn,
int reg, int size, u32 val)
{
unsigned long flags;
int err;
struct bcma_drv_pci *pc;
struct bcma_drv_pci_host *pc_host;

pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
pc = pc_host->pdev;

spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
PCI_FUNC(devfn), reg, &val, size);
spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

/* return cap_offset if requested capability exists in the PCI config space */
static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
unsigned int dev,
unsigned int func, u8 req_cap_id,
unsigned char *buf, u32 *buflen)
{
u8 cap_id;
u8 cap_ptr = 0;
u32 bufsize;
u8 byte_val;

/* check for Header type 0 */
bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
sizeof(u8));
if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
return cap_ptr;

/* check if the capability pointer field exists */
bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
sizeof(u8));
if (!(byte_val & PCI_STATUS_CAP_LIST))
return cap_ptr;

/* check if the capability pointer is 0x00 */
bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
sizeof(u8));
if (cap_ptr == 0x00)
return cap_ptr;

/* loop thr'u the capability list and see if the requested capabilty
* exists */
bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
while (cap_id != req_cap_id) {
bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
sizeof(u8));
if (cap_ptr == 0x00)
return cap_ptr;
bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
sizeof(u8));
}

/* found the caller requested capability */
if ((buf != NULL) && (buflen != NULL)) {
u8 cap_data;

bufsize = *buflen;
if (!bufsize)
return cap_ptr;

*buflen = 0;

/* copy the cpability data excluding cap ID and next ptr */
cap_data = cap_ptr + 2;
if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
*buflen = bufsize;
while (bufsize--) {
bcma_extpci_read_config(pc, dev, func, cap_data, buf,
sizeof(u8));
cap_data++;
buf++;
}
}

return cap_ptr;
}

/* If the root port is capable of returning Config Request
* Retry Status (CRS) Completion Status to software then
* enable the feature.
*/
static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
u8 cap_ptr, root_ctrl, root_cap, dev;
u16 val16;
int i;

cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL,
NULL);
root_cap = cap_ptr + PCI_EXP_RTCAP;
bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
/* Enable CRS software visibility */
root_ctrl = cap_ptr + PCI_EXP_RTCTL;
val16 = PCI_EXP_RTCTL_CRSSVE;
bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
sizeof(u16));

/* Initiate a configuration request to read the vendor id
* field of the device function's config space header after
* 100 ms wait time from the end of Reset. If the device is
* not done with its internal initialization, it must at
* least return a completion TLP, with a completion status
* of "Configuration Request Retry Status (CRS)". The root
* complex must complete the request to the host by returning
* a read-data value of 0001h for the Vendor ID field and
* all 1s for any additional bytes included in the request.
* Poll using the config reads for max wait time of 1 sec or
* until we receive the successful completion status. Repeat
* the procedure for all the devices.
*/
for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) {
for (i = 0; i < 100000; i++) {
bcma_extpci_read_config(pc, dev, 0,
PCI_VENDOR_ID, &val16,
sizeof(val16));
if (val16 != 0x1)
break;
udelay(10);
}
if (val16 == 0x1)
pr_err("PCI: Broken device in slot %d\n", dev);
}
}
}

void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
struct bcma_bus *bus = pc->core->bus;
struct bcma_drv_pci_host *pc_host;
u32 tmp;
u32 pci_membase_1G;
unsigned long io_map_base;

pr_info("PCIEcore in host mode found\n");

pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
if (!pc_host) {
pr_err("can not allocate memory");
return;
}

pc->host_controller = pc_host;
pc_host->pci_controller.io_resource = &pc_host->io_resource;
pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
pc_host->pdev = pc;

pci_membase_1G = BCMA_SOC_PCI_DMA;
pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;

pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;

pc_host->mem_resource.name = "BCMA PCIcore external memory",
pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;

pc_host->io_resource.name = "BCMA PCIcore external I/O",
pc_host->io_resource.start = 0x100;
pc_host->io_resource.end = 0x7FF;
pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;

/* Reset RC */
udelay(3000);
pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
udelay(1000);
pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
BCMA_CORE_PCI_CTL_RST_OE);

/* 64 MB I/O access window. On 4716, use
* sbtopcie0 to access the device registers. We
* can't use address match 2 (1 GB window) region
* as mips can't generate 64-bit address on the
* backplane.
*/
if (bus->chipinfo.id == 0x4716 || bus->chipinfo.id == 0x4748) {
pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
BCMA_SOC_PCI_MEM_SZ - 1;
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
} else if (bus->chipinfo.id == 0x5300) {
tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
if (pc->core->core_unit == 0) {
pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
BCMA_SOC_PCI_MEM_SZ - 1;
pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
tmp | BCMA_SOC_PCI_MEM);
} else if (pc->core->core_unit == 1) {
pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
BCMA_SOC_PCI_MEM_SZ - 1;
pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
tmp | BCMA_SOC_PCI1_MEM);
}
} else
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
BCMA_CORE_PCI_SBTOPCI_IO);

/* 64 MB configuration access window */
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);

/* 1 GB memory access window */
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);


/* As per PCI Express Base Spec 1.1 we need to wait for
* at least 100 ms from the end of a reset (cold/warm/hot)
* before issuing configuration requests to PCI Express
* devices.
*/
udelay(100000);

bcma_core_pci_enable_crs(pc);

/* Enable PCI bridge BAR0 memory & master access */
tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));

/* Enable PCI interrupts */
pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);

/* Ok, ready to run, register it to the system.
* The following needs change, if we want to port hostmode
* to non-MIPS platform. */
io_map_base = (unsigned long)ioremap_nocache(BCMA_SOC_PCI_MEM,
0x04000000);
pc_host->pci_controller.io_map_base = io_map_base;
set_io_port_base(pc_host->pci_controller.io_map_base);
/* Give some time to the PCI controller to configure itself with the new
* values. Not waiting at this point causes crashes of the machine. */
mdelay(10);
register_pci_controller(&pc_host->pci_controller);
return;
}

/* Early PCI fixup for a device on the PCI-core bridge. */
static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
{
if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
/* This is not a device on the PCI-core bridge. */
return;
}
if (PCI_SLOT(dev->devfn) != 0)
return;

pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));

/* Enable PCI bridge bus mastering and memory space */
pci_set_master(dev);
if (pcibios_enable_device(dev, ~0) < 0) {
pr_err("PCI: BCMA bridge enable failed\n");
return;
}

/* Enable PCI bridge BAR1 prefetch and burst */
pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);

/* Early PCI fixup for all PCI-cores to set the correct memory address. */
static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
{
struct resource *res;
int pos;

if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
/* This is not a device on the PCI-core bridge. */
return;
}
if (PCI_SLOT(dev->devfn) == 0)
return;

pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));

for (pos = 0; pos < 6; pos++) {
res = &dev->resource[pos];
if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM))
pci_assign_resource(dev, pos);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);

/* This function is called when doing a pci_enable_device().
* We must first check if the device is a device on the PCI-core bridge. */
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
struct bcma_drv_pci_host *pc_host;

if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
/* This is not a device on the PCI-core bridge. */
return -ENODEV;
}
pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
pci_ops);

pr_info("PCI: Fixing up device %s\n", pci_name(dev));

/* Fix up interrupt lines */
dev->irq = bcma_core_mips_irq(pc_host->pdev->core) + 2;
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);

return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);

/* PCI device IRQ mapping. */
int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
{
struct bcma_drv_pci_host *pc_host;

if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
/* This is not a device on the PCI-core bridge. */
return -ENODEV;
}

pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
pci_ops);
return bcma_core_mips_irq(pc_host->pdev->core) + 2;
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);
@@ -154,8 +154,8 @@ const struct bcma_host_ops bcma_host_pci_ops = {
.awrite32 = bcma_host_pci_awrite32,
};

static int bcma_host_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct bcma_bus *bus;
int err = -ENOMEM;
@@ -13,6 +13,12 @@
MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");

/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
@@ -93,7 +99,7 @@ static int bcma_register_cores(struct bcma_bus *bus)

core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", 0/*bus->num*/, dev_id);
dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id);

switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
@@ -132,11 +138,15 @@ static void bcma_unregister_cores(struct bcma_bus *bus)
}
}

int bcma_bus_register(struct bcma_bus *bus)
int __devinit bcma_bus_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;

mutex_lock(&bcma_buses_mutex);
bus->num = bcma_bus_next_num++;
mutex_unlock(&bcma_buses_mutex);

/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
if (err) {
@@ -212,6 +212,17 @@ static struct bcma_device *bcma_find_core_by_index(struct bcma_bus *bus,
return NULL;
}

static struct bcma_device *bcma_find_core_reverse(struct bcma_bus *bus, u16 coreid)
{
struct bcma_device *core;

list_for_each_entry_reverse(core, &bus->cores, list) {
if (core->id.id == coreid)
return core;
}
return NULL;
}

static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
struct bcma_device_id *match, int core_num,
struct bcma_device *core)
@@ -353,6 +364,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
void bcma_init_bus(struct bcma_bus *bus)
{
s32 tmp;
struct bcma_chipinfo *chipinfo = &(bus->chipinfo);

if (bus->init_done)
return;
@@ -363,9 +375,12 @@ void bcma_init_bus(struct bcma_bus *bus)
bcma_scan_switch_core(bus, BCMA_ADDR_BASE);

tmp = bcma_scan_read32(bus, 0, BCMA_CC_ID);
bus->chipinfo.id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
bus->chipinfo.rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
bus->chipinfo.pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
pr_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
chipinfo->id, chipinfo->rev, chipinfo->pkg);

bus->init_done = true;
}

@@ -392,6 +407,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
bcma_scan_switch_core(bus, erombase);

while (eromptr < eromend) {
struct bcma_device *other_core;
struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL);
if (!core)
return -ENOMEM;
@@ -414,6 +430,8 @@ int bcma_bus_scan(struct bcma_bus *bus)

core->core_index = core_num++;
bus->nr_cores++;
other_core = bcma_find_core_reverse(bus, core->id.id);
core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1;

pr_info("Core %d found: %s "
"(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
@@ -250,6 +250,7 @@ int bcma_sprom_get(struct bcma_bus *bus)
{
u16 offset;
u16 *sprom;
u32 sromctrl;
int err = 0;

if (!bus->drv_cc.core)
@@ -258,6 +259,12 @@ int bcma_sprom_get(struct bcma_bus *bus)
if (!(bus->drv_cc.capabilities & BCMA_CC_CAP_SPROM))
return -ENOENT;

if (bus->drv_cc.core->id.rev >= 32) {
sromctrl = bcma_read32(bus->drv_cc.core, BCMA_CC_SROM_CONTROL);
if (!(sromctrl & BCMA_CC_SROM_CONTROL_PRESENT))
return -ENOENT;
}

sprom = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
GFP_KERNEL);
if (!sprom)
@@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[] = {

/* Broadcom BCM20702A0 */
{ USB_DEVICE(0x0a5c, 0x21e3) },
{ USB_DEVICE(0x0a5c, 0x21f3) },
{ USB_DEVICE(0x413c, 0x8197) },

{ } /* Terminating entry */
@@ -726,9 +727,6 @@ static int btusb_send_frame(struct sk_buff *skb)
usb_fill_bulk_urb(urb, data->udev, pipe,
skb->data, skb->len, btusb_tx_complete, skb);

if (skb->priority >= HCI_PRIO_MAX - 1)
urb->transfer_flags = URB_ISO_ASAP;

hdev->stat.acl_tx++;
break;
@@ -1,12 +1,29 @@
config ATH6KL
tristate "Atheros ath6kl support"
tristate "Atheros mobile chipsets support"

config ATH6KL_SDIO
tristate "Atheros ath6kl SDIO support"
depends on ATH6KL
depends on MMC
depends on CFG80211
---help---
This module adds support for wireless adapters based on
Atheros AR6003 chipset running over SDIO. If you choose to
build it as a module, it will be called ath6kl. Pls note
that AR6002 and AR6001 are not supported by this driver.
Atheros AR6003 and AR6004 chipsets running over SDIO. If you
choose to build it as a module, it will be called ath6kl_sdio.
Please note that AR6002 and AR6001 are not supported by this
driver.

config ATH6KL_USB
tristate "Atheros ath6kl USB support"
depends on ATH6KL
depends on USB
depends on CFG80211
depends on EXPERIMENTAL
---help---
This module adds support for wireless adapters based on
Atheros AR6004 chipset running over USB. This is still under
implementation and it isn't functional. If you choose to
build it as a module, it will be called ath6kl_usb.

config ATH6KL_DEBUG
bool "Atheros ath6kl debugging"
@@ -21,17 +21,21 @@
# Author(s): ="Atheros"
#------------------------------------------------------------------------------

obj-$(CONFIG_ATH6KL) := ath6kl.o
ath6kl-y += debug.o
ath6kl-y += hif.o
ath6kl-y += htc.o
ath6kl-y += bmi.o
ath6kl-y += cfg80211.o
ath6kl-y += init.o
ath6kl-y += main.o
ath6kl-y += txrx.o
ath6kl-y += wmi.o
ath6kl-y += sdio.o
ath6kl-$(CONFIG_NL80211_TESTMODE) += testmode.o
obj-$(CONFIG_ATH6KL) += ath6kl_core.o
ath6kl_core-y += debug.o
ath6kl_core-y += hif.o
ath6kl_core-y += htc.o
ath6kl_core-y += bmi.o
ath6kl_core-y += cfg80211.o
ath6kl_core-y += init.o
ath6kl_core-y += main.o
ath6kl_core-y += txrx.o
ath6kl_core-y += wmi.o
ath6kl_core-y += core.o
ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o

ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
ath6kl_sdio-y += sdio.o

obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o
ath6kl_usb-y += usb.o
@@ -57,8 +57,14 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
return ret;
}

ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
sizeof(targ_info->version));
if (ar->hif_type == ATH6KL_HIF_TYPE_USB) {
ret = ath6kl_hif_bmi_read(ar, (u8 *)targ_info,
sizeof(*targ_info));
} else {
ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
sizeof(targ_info->version));
}

if (ret) {
ath6kl_err("Unable to recv target info: %d\n", ret);
return ret;
@@ -15,6 +15,8 @@
*/

#include <linux/moduleparam.h>
#include <linux/inetdevice.h>
#include <linux/export.h>

#include "core.h"
#include "cfg80211.h"
@@ -22,10 +24,6 @@
#include "hif-ops.h"
#include "testmode.h"

static unsigned int ath6kl_p2p;

module_param(ath6kl_p2p, uint, 0644);

#define RATETAB_ENT(_rate, _rateid, _flags) { \
.bitrate = (_rate), \
.flags = (_flags), \
@@ -196,7 +194,7 @@ static int ath6kl_set_auth_type(struct ath6kl_vif *vif,
break;

default:
ath6kl_err("%s: 0x%x not spported\n", __func__, auth_type);
ath6kl_err("%s: 0x%x not supported\n", __func__, auth_type);
return -ENOTSUPP;
}

@@ -461,13 +459,13 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
}

if (sme->ie && (sme->ie_len > 0)) {
status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
if (status) {
up(&ar->sem);
return status;
}
} else
status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
if (status) {
up(&ar->sem);
return status;
}

if (sme->ie == NULL || sme->ie_len == 0)
ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;

if (test_bit(CONNECTED, &vif->flags) &&
@@ -523,8 +521,7 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
(vif->prwise_crypto == WEP_CRYPT)) {
struct ath6kl_key *key = NULL;

if (sme->key_idx < WMI_MIN_KEY_INDEX ||
sme->key_idx > WMI_MAX_KEY_INDEX) {
if (sme->key_idx > WMI_MAX_KEY_INDEX) {
ath6kl_err("key index %d out of bounds\n",
sme->key_idx);
up(&ar->sem);
@@ -605,11 +602,13 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return 0;
}

static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
enum network_type nw_type,
const u8 *bssid,
struct ieee80211_channel *chan,
const u8 *beacon_ie, size_t beacon_ie_len)
static struct cfg80211_bss *
ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
enum network_type nw_type,
const u8 *bssid,
struct ieee80211_channel *chan,
const u8 *beacon_ie,
size_t beacon_ie_len)
{
struct ath6kl *ar = vif->ar;
struct cfg80211_bss *bss;
@@ -638,7 +637,7 @@ static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
*/
ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL);
if (ie == NULL)
return -ENOMEM;
return NULL;
ie[0] = WLAN_EID_SSID;
ie[1] = vif->ssid_len;
memcpy(ie + 2, vif->ssid, vif->ssid_len);
@@ -652,15 +651,9 @@ static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
"cfg80211\n", bssid);
kfree(ie);
} else
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss "
"entry\n");
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");

if (bss == NULL)
return -ENOMEM;

cfg80211_put_bss(bss);

return 0;
return bss;
}

void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
@@ -672,6 +665,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
{
struct ieee80211_channel *chan;
struct ath6kl *ar = vif->ar;
struct cfg80211_bss *bss;

/* capinfo + listen interval */
u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
@@ -712,8 +706,9 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,

chan = ieee80211_get_channel(ar->wiphy, (int) channel);

if (ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan, assoc_info,
beacon_ie_len) < 0) {
bss = ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan,
assoc_info, beacon_ie_len);
if (!bss) {
ath6kl_err("could not add cfg80211 bss entry\n");
return;
}
@@ -722,6 +717,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
nw_type & ADHOC_CREATOR ? "creator" : "joiner");
cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
cfg80211_put_bss(bss);
return;
}

@@ -732,11 +728,11 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len,
WLAN_STATUS_SUCCESS, GFP_KERNEL);
cfg80211_put_bss(bss);
} else if (vif->sme_state == SME_CONNECTED) {
/* inform roam event to cfg80211 */
cfg80211_roamed(vif->ndev, chan, bssid,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
}
}

@@ -984,6 +980,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
struct ath6kl *ar = ath6kl_priv(ndev);
struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
int seq_len;
u8 key_usage;
u8 key_type;

@@ -997,7 +994,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
params->key);
}

if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
if (key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n", __func__,
key_index);
@@ -1012,23 +1009,21 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
else
key_usage = GROUP_USAGE;

if (params) {
int seq_len = params->seq_len;
if (params->cipher == WLAN_CIPHER_SUITE_SMS4 &&
seq_len > ATH6KL_KEY_SEQ_LEN) {
/* Only first half of the WPI PN is configured */
seq_len = ATH6KL_KEY_SEQ_LEN;
}
if (params->key_len > WLAN_MAX_KEY_LEN ||
seq_len > sizeof(key->seq))
return -EINVAL;

key->key_len = params->key_len;
memcpy(key->key, params->key, key->key_len);
key->seq_len = seq_len;
memcpy(key->seq, params->seq, key->seq_len);
key->cipher = params->cipher;
seq_len = params->seq_len;
if (params->cipher == WLAN_CIPHER_SUITE_SMS4 &&
seq_len > ATH6KL_KEY_SEQ_LEN) {
/* Only first half of the WPI PN is configured */
seq_len = ATH6KL_KEY_SEQ_LEN;
}
if (params->key_len > WLAN_MAX_KEY_LEN ||
seq_len > sizeof(key->seq))
return -EINVAL;

key->key_len = params->key_len;
memcpy(key->key, params->key, key->key_len);
key->seq_len = seq_len;
memcpy(key->seq, params->seq, key->seq_len);
key->cipher = params->cipher;

switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -1115,7 +1110,7 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
if (!ath6kl_cfg80211_ready(vif))
return -EIO;

if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
if (key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n", __func__,
key_index);
@@ -1148,7 +1143,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
if (!ath6kl_cfg80211_ready(vif))
return -EIO;

if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
if (key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n", __func__,
key_index);
@@ -1184,7 +1179,7 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
if (!ath6kl_cfg80211_ready(vif))
return -EIO;

if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
if (key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n",
__func__, key_index);
@@ -1403,7 +1398,7 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,

ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));

ath6kl_deinit_if_data(vif);
ath6kl_cfg80211_vif_cleanup(vif);

return 0;
}
@@ -1728,29 +1723,14 @@ static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
return 0;
}

static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
struct cfg80211_wowlan *wow, u32 *filter)
{
struct ath6kl_vif *vif;
int ret, pos, left;
u32 filter = 0;
u16 i;
int ret, pos;
u8 mask[WOW_MASK_SIZE];
u16 i;

vif = ath6kl_vif_first(ar);
if (!vif)
return -EIO;

if (!ath6kl_cfg80211_ready(vif))
return -EIO;

if (!test_bit(CONNECTED, &vif->flags))
return -EINVAL;

/* Clear existing WOW patterns */
for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
WOW_LIST_ID, i);
/* Configure new WOW patterns */
/* Configure the patterns that we received from the user. */
for (i = 0; i < wow->n_patterns; i++) {

/*
@ -1773,29 +1753,221 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
|
||||
* matched from the first byte of received pkt in the firmware.
|
||||
*/
|
||||
ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
|
||||
vif->fw_vif_idx, WOW_LIST_ID,
|
||||
wow->patterns[i].pattern_len,
|
||||
0 /* pattern offset */,
|
||||
wow->patterns[i].pattern, mask);
|
||||
vif->fw_vif_idx, WOW_LIST_ID,
|
||||
wow->patterns[i].pattern_len,
|
||||
0 /* pattern offset */,
|
||||
wow->patterns[i].pattern, mask);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (wow->disconnect)
|
||||
filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
|
||||
*filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
|
||||
|
||||
if (wow->magic_pkt)
|
||||
filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
|
||||
*filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
|
||||
|
||||
if (wow->gtk_rekey_failure)
|
||||
filter |= WOW_FILTER_OPTION_GTK_ERROR;
|
||||
*filter |= WOW_FILTER_OPTION_GTK_ERROR;
|
||||
|
||||
if (wow->eap_identity_req)
|
||||
filter |= WOW_FILTER_OPTION_EAP_REQ;
|
||||
*filter |= WOW_FILTER_OPTION_EAP_REQ;
|
||||
|
||||
if (wow->four_way_handshake)
|
||||
filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
|
||||
*filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_wow_ap(struct ath6kl *ar, struct ath6kl_vif *vif)
|
||||
{
|
||||
static const u8 unicst_pattern[] = { 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x08 };
|
||||
static const u8 unicst_mask[] = { 0x01, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x7f };
|
||||
u8 unicst_offset = 0;
|
||||
static const u8 arp_pattern[] = { 0x08, 0x06 };
|
||||
static const u8 arp_mask[] = { 0xff, 0xff };
|
||||
u8 arp_offset = 20;
|
||||
static const u8 discvr_pattern[] = { 0xe0, 0x00, 0x00, 0xf8 };
|
||||
static const u8 discvr_mask[] = { 0xf0, 0x00, 0x00, 0xf8 };
|
||||
u8 discvr_offset = 38;
|
||||
static const u8 dhcp_pattern[] = { 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x43 /* port 67 */ };
|
||||
static const u8 dhcp_mask[] = { 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0xff, 0xff /* port 67 */ };
|
||||
u8 dhcp_offset = 0;
|
||||
int ret;
|
||||
|
||||
/* Setup unicast IP, EAPOL-like and ARP pkt pattern */
|
||||
ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
|
||||
vif->fw_vif_idx, WOW_LIST_ID,
|
||||
sizeof(unicst_pattern), unicst_offset,
|
||||
unicst_pattern, unicst_mask);
|
||||
if (ret) {
|
||||
ath6kl_err("failed to add WOW unicast IP pattern\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Setup all ARP pkt pattern */
|
||||
ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
|
||||
vif->fw_vif_idx, WOW_LIST_ID,
|
||||
sizeof(arp_pattern), arp_offset,
|
||||
arp_pattern, arp_mask);
|
||||
if (ret) {
|
||||
ath6kl_err("failed to add WOW ARP pattern\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup multicast pattern for mDNS 224.0.0.251,
|
||||
* SSDP 239.255.255.250 and LLMNR 224.0.0.252
|
||||
*/
|
||||
ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
|
||||
vif->fw_vif_idx, WOW_LIST_ID,
|
||||
sizeof(discvr_pattern), discvr_offset,
|
||||
discvr_pattern, discvr_mask);
|
||||
if (ret) {
|
||||
ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR pattern\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Setup all DHCP broadcast pkt pattern */
|
||||
ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
|
||||
vif->fw_vif_idx, WOW_LIST_ID,
|
||||
sizeof(dhcp_pattern), dhcp_offset,
|
||||
dhcp_pattern, dhcp_mask);
|
||||
if (ret) {
|
||||
ath6kl_err("failed to add WOW DHCP broadcast pattern\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
{
	struct net_device *ndev = vif->ndev;
	static const u8 discvr_pattern[] = { 0xe0, 0x00, 0x00, 0xf8 };
	static const u8 discvr_mask[] = { 0xf0, 0x00, 0x00, 0xf8 };
	u8 discvr_offset = 38;
	u8 mac_mask[ETH_ALEN];
	int ret;

	/* Setup unicast pkt pattern */
	memset(mac_mask, 0xff, ETH_ALEN);
	ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
			vif->fw_vif_idx, WOW_LIST_ID,
			ETH_ALEN, 0, ndev->dev_addr,
			mac_mask);
	if (ret) {
		ath6kl_err("failed to add WOW unicast pattern\n");
		return ret;
	}

	/*
	 * Setup multicast pattern for mDNS 224.0.0.251,
	 * SSDP 239.255.255.250 and LLMNR 224.0.0.252
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (ndev->flags & IFF_MULTICAST && netdev_mc_count(ndev) > 0)) {
		ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
				vif->fw_vif_idx, WOW_LIST_ID,
				sizeof(discvr_pattern), discvr_offset,
				discvr_pattern, discvr_mask);
		if (ret) {
			ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR "
				   "pattern\n");
			return ret;
		}
	}

	return 0;
}

static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	struct ath6kl_vif *vif;
	int ret, left;
	u32 filter = 0;
	u16 i;
	u8 index = 0;
	__be32 ips[MAX_IP_ADDRS];

	vif = ath6kl_vif_first(ar);
	if (!vif)
		return -EIO;

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (!test_bit(CONNECTED, &vif->flags))
		return -ENOTCONN;

	if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
		return -EINVAL;

	/* Clear existing WOW patterns */
	for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
		ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
					       WOW_LIST_ID, i);

	/*
	 * Skip the default WOW pattern configuration
	 * if the driver receives any WOW patterns from
	 * the user.
	 */
	if (wow)
		ret = ath6kl_wow_usr(ar, vif, wow, &filter);
	else if (vif->nw_type == AP_NETWORK)
		ret = ath6kl_wow_ap(ar, vif);
	else
		ret = ath6kl_wow_sta(ar, vif);

	if (ret)
		return ret;

	/* Setup own IP addr for ARP agent. */
	in_dev = __in_dev_get_rtnl(vif->ndev);
	if (!in_dev)
		goto skip_arp;

	ifa = in_dev->ifa_list;
	memset(&ips, 0, sizeof(ips));

	/* Configure IP addr only if IP address count < MAX_IP_ADDRS */
	while (index < MAX_IP_ADDRS && ifa) {
		ips[index] = ifa->ifa_local;
		ifa = ifa->ifa_next;
		index++;
	}

	if (ifa) {
		ath6kl_err("total IP addr count is exceeding fw limit\n");
		return -EINVAL;
	}

	ret = ath6kl_wmi_set_ip_cmd(ar->wmi, vif->fw_vif_idx, ips[0], ips[1]);
	if (ret) {
		ath6kl_err("fail to setup ip for arp agent\n");
		return ret;
	}

skip_arp:
	ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
					  ATH6KL_WOW_MODE_ENABLE,
					  filter,
@ -1803,11 +1975,26 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
	if (ret)
		return ret;

	clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);

	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
						 ATH6KL_HOST_MODE_ASLEEP);
	if (ret)
		return ret;

	left = wait_event_interruptible_timeout(ar->event_wq,
			test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags),
			WMI_TIMEOUT);
	if (left == 0) {
		ath6kl_warn("timeout, didn't get host sleep cmd "
			    "processed event\n");
		ret = -ETIMEDOUT;
	} else if (left < 0) {
		ath6kl_warn("error while waiting for host sleep cmd "
			    "processed event %d\n", left);
		ret = left;
	}

	if (ar->tx_pending[ar->ctrl_ep]) {
		left = wait_event_interruptible_timeout(ar->event_wq,
				ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
@ -1911,6 +2098,7 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,

	return 0;
}
EXPORT_SYMBOL(ath6kl_cfg80211_suspend);

int ath6kl_cfg80211_resume(struct ath6kl *ar)
{
@ -1962,6 +2150,7 @@ int ath6kl_cfg80211_resume(struct ath6kl *ar)

	return 0;
}
EXPORT_SYMBOL(ath6kl_cfg80211_resume);

#ifdef CONFIG_PM

@ -2014,7 +2203,18 @@ static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
			      struct ieee80211_channel *chan,
			      enum nl80211_channel_type channel_type)
{
	struct ath6kl_vif *vif = netdev_priv(dev);
	struct ath6kl_vif *vif;

	/*
	 * 'dev' could be NULL if a channel change is required for the hardware
	 * device itself, instead of a particular VIF.
	 *
	 * FIXME: To be handled properly when monitor mode is supported.
	 */
	if (!dev)
		return -EBUSY;

	vif = netdev_priv(dev);

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;
@ -2214,6 +2414,11 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
	p.dot11_auth_mode = vif->dot11_auth_mode;
	p.ch = cpu_to_le16(vif->next_chan);

	/* Enable uAPSD support by default */
	res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
	if (res < 0)
		return res;

	if (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
		p.nw_subtype = SUBTYPE_P2PGO;
	} else {
@ -2259,6 +2464,19 @@ static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
	return 0;
}

static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
			      u8 *mac)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);
	const u8 *addr = mac ? mac : bcast_addr;

	return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, WMI_AP_DEAUTH,
				      addr, WLAN_REASON_PREV_AUTH_NOT_VALID);
}

static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
				 u8 *mac, struct station_parameters *params)
{
@ -2518,6 +2736,12 @@ ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
|
||||
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
|
||||
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
|
||||
},
|
||||
[NL80211_IFTYPE_AP] = {
|
||||
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
|
||||
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
|
||||
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
|
||||
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
|
||||
},
|
||||
[NL80211_IFTYPE_P2P_CLIENT] = {
|
||||
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
|
||||
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
|
||||
@ -2562,6 +2786,7 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
|
||||
.add_beacon = ath6kl_add_beacon,
|
||||
.set_beacon = ath6kl_set_beacon,
|
||||
.del_beacon = ath6kl_del_beacon,
|
||||
.del_station = ath6kl_del_station,
|
||||
.change_station = ath6kl_change_station,
|
||||
.remain_on_channel = ath6kl_remain_on_channel,
|
||||
.cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
|
||||
@ -2629,69 +2854,108 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
|
||||
ath6kl_cfg80211_stop(vif);
|
||||
}
|
||||
|
||||
struct ath6kl *ath6kl_core_alloc(struct device *dev)
|
||||
static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
|
||||
{
|
||||
struct ath6kl *ar;
|
||||
struct wiphy *wiphy;
|
||||
u8 ctr;
|
||||
|
||||
/* create a new wiphy for use with cfg80211 */
|
||||
wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
|
||||
|
||||
if (!wiphy) {
|
||||
ath6kl_err("couldn't allocate wiphy device\n");
|
||||
return NULL;
|
||||
vif->aggr_cntxt = aggr_init(vif);
|
||||
if (!vif->aggr_cntxt) {
|
||||
ath6kl_err("failed to initialize aggr\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ar = wiphy_priv(wiphy);
|
||||
ar->p2p = !!ath6kl_p2p;
|
||||
ar->wiphy = wiphy;
|
||||
ar->dev = dev;
|
||||
setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
|
||||
(unsigned long) vif->ndev);
|
||||
setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer,
|
||||
(unsigned long) vif);
|
||||
|
||||
ar->vif_max = 1;
|
||||
set_bit(WMM_ENABLED, &vif->flags);
|
||||
spin_lock_init(&vif->if_lock);
|
||||
|
||||
ar->max_norm_iface = 1;
|
||||
INIT_LIST_HEAD(&vif->mc_filter);
|
||||
|
||||
spin_lock_init(&ar->lock);
|
||||
spin_lock_init(&ar->mcastpsq_lock);
|
||||
spin_lock_init(&ar->list_lock);
|
||||
|
||||
init_waitqueue_head(&ar->event_wq);
|
||||
sema_init(&ar->sem, 1);
|
||||
|
||||
INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
|
||||
INIT_LIST_HEAD(&ar->vif_list);
|
||||
|
||||
clear_bit(WMI_ENABLED, &ar->flag);
|
||||
clear_bit(SKIP_SCAN, &ar->flag);
|
||||
clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
|
||||
|
||||
ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
|
||||
ar->listen_intvl_b = 0;
|
||||
ar->tx_pwr = 0;
|
||||
|
||||
ar->intra_bss = 1;
|
||||
ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
|
||||
|
||||
ar->state = ATH6KL_STATE_OFF;
|
||||
|
||||
memset((u8 *)ar->sta_list, 0,
|
||||
AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
|
||||
|
||||
/* Init the PS queues */
|
||||
for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
|
||||
spin_lock_init(&ar->sta_list[ctr].psq_lock);
|
||||
skb_queue_head_init(&ar->sta_list[ctr].psq);
|
||||
}
|
||||
|
||||
skb_queue_head_init(&ar->mcastpsq);
|
||||
|
||||
memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
|
||||
|
||||
return ar;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ath6kl_register_ieee80211_hw(struct ath6kl *ar)
|
||||
void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
|
||||
{
|
||||
struct ath6kl *ar = vif->ar;
|
||||
struct ath6kl_mc_filter *mc_filter, *tmp;
|
||||
|
||||
aggr_module_destroy(vif->aggr_cntxt);
|
||||
|
||||
ar->avail_idx_map |= BIT(vif->fw_vif_idx);
|
||||
|
||||
if (vif->nw_type == ADHOC_NETWORK)
|
||||
ar->ibss_if_active = false;
|
||||
|
||||
list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
|
||||
list_del(&mc_filter->list);
|
||||
kfree(mc_filter);
|
||||
}
|
||||
|
||||
unregister_netdevice(vif->ndev);
|
||||
|
||||
ar->num_vif--;
|
||||
}
|
||||
|
||||
struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
|
||||
enum nl80211_iftype type, u8 fw_vif_idx,
|
||||
u8 nw_type)
|
||||
{
|
||||
struct net_device *ndev;
|
||||
struct ath6kl_vif *vif;
|
||||
|
||||
ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
|
||||
if (!ndev)
|
||||
return NULL;
|
||||
|
||||
vif = netdev_priv(ndev);
|
||||
ndev->ieee80211_ptr = &vif->wdev;
|
||||
vif->wdev.wiphy = ar->wiphy;
|
||||
vif->ar = ar;
|
||||
vif->ndev = ndev;
|
||||
SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
|
||||
vif->wdev.netdev = ndev;
|
||||
vif->wdev.iftype = type;
|
||||
vif->fw_vif_idx = fw_vif_idx;
|
||||
vif->nw_type = vif->next_mode = nw_type;
|
||||
|
||||
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
|
||||
if (fw_vif_idx != 0)
|
||||
ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
|
||||
0x2;
|
||||
|
||||
init_netdev(ndev);
|
||||
|
||||
ath6kl_init_control_info(vif);
|
||||
|
||||
if (ath6kl_cfg80211_vif_init(vif))
|
||||
goto err;
|
||||
|
||||
if (register_netdevice(ndev))
|
||||
goto err;
|
||||
|
||||
ar->avail_idx_map &= ~BIT(fw_vif_idx);
|
||||
vif->sme_state = SME_DISCONNECTED;
|
||||
set_bit(WLAN_ENABLED, &vif->flags);
|
||||
ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
|
||||
set_bit(NETDEV_REGISTERED, &vif->flags);
|
||||
|
||||
if (type == NL80211_IFTYPE_ADHOC)
|
||||
ar->ibss_if_active = true;
|
||||
|
||||
spin_lock_bh(&ar->list_lock);
|
||||
list_add_tail(&vif->list, &ar->vif_list);
|
||||
spin_unlock_bh(&ar->list_lock);
|
||||
|
||||
return ndev;
|
||||
|
||||
err:
|
||||
aggr_module_destroy(vif->aggr_cntxt);
|
||||
free_netdev(ndev);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int ath6kl_cfg80211_init(struct ath6kl *ar)
|
||||
{
|
||||
struct wiphy *wiphy = ar->wiphy;
|
||||
int ret;
|
||||
@ -2742,102 +3006,38 @@ int ath6kl_register_ieee80211_hw(struct ath6kl *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_init_if_data(struct ath6kl_vif *vif)
|
||||
{
|
||||
vif->aggr_cntxt = aggr_init(vif->ndev);
|
||||
if (!vif->aggr_cntxt) {
|
||||
ath6kl_err("failed to initialize aggr\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
|
||||
(unsigned long) vif->ndev);
|
||||
setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer,
|
||||
(unsigned long) vif);
|
||||
|
||||
set_bit(WMM_ENABLED, &vif->flags);
|
||||
spin_lock_init(&vif->if_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ath6kl_deinit_if_data(struct ath6kl_vif *vif)
|
||||
{
|
||||
struct ath6kl *ar = vif->ar;
|
||||
|
||||
aggr_module_destroy(vif->aggr_cntxt);
|
||||
|
||||
ar->avail_idx_map |= BIT(vif->fw_vif_idx);
|
||||
|
||||
if (vif->nw_type == ADHOC_NETWORK)
|
||||
ar->ibss_if_active = false;
|
||||
|
||||
unregister_netdevice(vif->ndev);
|
||||
|
||||
ar->num_vif--;
|
||||
}
|
||||
|
||||
struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
|
||||
enum nl80211_iftype type, u8 fw_vif_idx,
|
||||
u8 nw_type)
|
||||
{
|
||||
struct net_device *ndev;
|
||||
struct ath6kl_vif *vif;
|
||||
|
||||
ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
|
||||
if (!ndev)
|
||||
return NULL;
|
||||
|
||||
vif = netdev_priv(ndev);
|
||||
ndev->ieee80211_ptr = &vif->wdev;
|
||||
vif->wdev.wiphy = ar->wiphy;
|
||||
vif->ar = ar;
|
||||
vif->ndev = ndev;
|
||||
SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
|
||||
vif->wdev.netdev = ndev;
|
||||
vif->wdev.iftype = type;
|
||||
vif->fw_vif_idx = fw_vif_idx;
|
||||
vif->nw_type = vif->next_mode = nw_type;
|
||||
|
||||
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
|
||||
if (fw_vif_idx != 0)
|
||||
ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
|
||||
0x2;
|
||||
|
||||
init_netdev(ndev);
|
||||
|
||||
ath6kl_init_control_info(vif);
|
||||
|
||||
/* TODO: Pass interface specific pointer instead of ar */
|
||||
if (ath6kl_init_if_data(vif))
|
||||
goto err;
|
||||
|
||||
if (register_netdevice(ndev))
|
||||
goto err;
|
||||
|
||||
ar->avail_idx_map &= ~BIT(fw_vif_idx);
|
||||
vif->sme_state = SME_DISCONNECTED;
|
||||
set_bit(WLAN_ENABLED, &vif->flags);
|
||||
ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
|
||||
set_bit(NETDEV_REGISTERED, &vif->flags);
|
||||
|
||||
if (type == NL80211_IFTYPE_ADHOC)
|
||||
ar->ibss_if_active = true;
|
||||
|
||||
spin_lock_bh(&ar->list_lock);
|
||||
list_add_tail(&vif->list, &ar->vif_list);
|
||||
spin_unlock_bh(&ar->list_lock);
|
||||
|
||||
return ndev;
|
||||
|
||||
err:
|
||||
aggr_module_destroy(vif->aggr_cntxt);
|
||||
free_netdev(ndev);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar)
|
||||
void ath6kl_cfg80211_cleanup(struct ath6kl *ar)
|
||||
{
|
||||
wiphy_unregister(ar->wiphy);
|
||||
}
|
||||
|
||||
struct ath6kl *ath6kl_cfg80211_create(void)
|
||||
{
|
||||
struct ath6kl *ar;
|
||||
struct wiphy *wiphy;
|
||||
|
||||
/* create a new wiphy for use with cfg80211 */
|
||||
wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
|
||||
|
||||
if (!wiphy) {
|
||||
ath6kl_err("couldn't allocate wiphy device\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ar = wiphy_priv(wiphy);
|
||||
ar->wiphy = wiphy;
|
||||
|
||||
return ar;
|
||||
}
|
||||
|
||||
/* Note: ar variable must not be accessed after calling this! */
|
||||
void ath6kl_cfg80211_destroy(struct ath6kl *ar)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < AP_MAX_NUM_STA; i++)
|
||||
kfree(ar->sta_list[i].aggr_conn);
|
||||
|
||||
wiphy_free(ar->wiphy);
|
||||
}
|
||||
|
||||
|
@ -27,10 +27,6 @@ enum ath6kl_cfg_suspend_mode {
|
||||
struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
|
||||
enum nl80211_iftype type,
|
||||
u8 fw_vif_idx, u8 nw_type);
|
||||
int ath6kl_register_ieee80211_hw(struct ath6kl *ar);
|
||||
struct ath6kl *ath6kl_core_alloc(struct device *dev);
|
||||
void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar);
|
||||
|
||||
void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
|
||||
|
||||
void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
|
||||
@ -53,7 +49,15 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
|
||||
|
||||
int ath6kl_cfg80211_resume(struct ath6kl *ar);
|
||||
|
||||
void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif);
|
||||
|
||||
void ath6kl_cfg80211_stop(struct ath6kl_vif *vif);
|
||||
void ath6kl_cfg80211_stop_all(struct ath6kl *ar);
|
||||
|
||||
int ath6kl_cfg80211_init(struct ath6kl *ar);
|
||||
void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
|
||||
|
||||
struct ath6kl *ath6kl_cfg80211_create(void);
|
||||
void ath6kl_cfg80211_destroy(struct ath6kl *ar);
|
||||
|
||||
#endif /* ATH6KL_CFG80211_H */
|
||||
|
@ -79,8 +79,5 @@ struct ath6kl;
|
||||
enum htc_credit_dist_reason;
|
||||
struct ath6kl_htc_credit_info;
|
||||
|
||||
struct ath6kl *ath6kl_core_alloc(struct device *sdev);
|
||||
int ath6kl_core_init(struct ath6kl *ar);
|
||||
void ath6kl_core_cleanup(struct ath6kl *ar);
|
||||
struct sk_buff *ath6kl_buf_alloc(int size);
|
||||
#endif /* COMMON_H */
|
||||
|
316
drivers/net/wireless/ath/ath6kl/core.c
Normal file
@ -0,0 +1,316 @@
|
||||
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/export.h>

#include "debug.h"
#include "hif-ops.h"
#include "cfg80211.h"

unsigned int debug_mask;
static unsigned int suspend_mode;
static unsigned int uart_debug;
static unsigned int ath6kl_p2p;
static unsigned int testmode;

module_param(debug_mask, uint, 0644);
module_param(suspend_mode, uint, 0644);
module_param(uart_debug, uint, 0644);
module_param(ath6kl_p2p, uint, 0644);
module_param(testmode, uint, 0644);

int ath6kl_core_init(struct ath6kl *ar)
{
	struct ath6kl_bmi_target_info targ_info;
	struct net_device *ndev;
	int ret = 0, i;

	ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
	if (!ar->ath6kl_wq)
		return -ENOMEM;

	ret = ath6kl_bmi_init(ar);
	if (ret)
		goto err_wq;

	/*
	 * Turn on power to get hardware (target) version and leave power
	 * on deliberately as we will boot the hardware anyway within a few
	 * seconds.
	 */
	ret = ath6kl_hif_power_on(ar);
	if (ret)
		goto err_bmi_cleanup;
|
||||
|
||||
ret = ath6kl_bmi_get_target_info(ar, &targ_info);
|
||||
if (ret)
|
||||
goto err_power_off;
|
||||
|
||||
ar->version.target_ver = le32_to_cpu(targ_info.version);
|
||||
ar->target_type = le32_to_cpu(targ_info.type);
|
||||
ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
|
||||
|
||||
ret = ath6kl_init_hw_params(ar);
|
||||
if (ret)
|
||||
goto err_power_off;
|
||||
|
||||
ar->htc_target = ath6kl_htc_create(ar);
|
||||
|
||||
if (!ar->htc_target) {
|
||||
ret = -ENOMEM;
|
||||
goto err_power_off;
|
||||
}
|
||||
|
||||
ar->testmode = testmode;
|
||||
|
||||
ret = ath6kl_init_fetch_firmwares(ar);
|
||||
if (ret)
|
||||
goto err_htc_cleanup;
|
||||
|
||||
/* FIXME: we should free all firmwares in the error cases below */
|
||||
|
||||
/* Indicate that WMI is enabled (although not ready yet) */
|
||||
set_bit(WMI_ENABLED, &ar->flag);
|
||||
ar->wmi = ath6kl_wmi_init(ar);
|
||||
if (!ar->wmi) {
|
||||
ath6kl_err("failed to initialize wmi\n");
|
||||
ret = -EIO;
|
||||
goto err_htc_cleanup;
|
||||
}
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
|
||||
|
||||
ret = ath6kl_cfg80211_init(ar);
|
||||
if (ret)
|
||||
goto err_node_cleanup;
|
||||
|
||||
ret = ath6kl_debug_init(ar);
|
||||
if (ret) {
|
||||
wiphy_unregister(ar->wiphy);
|
||||
goto err_node_cleanup;
|
||||
}
|
||||
|
||||
for (i = 0; i < ar->vif_max; i++)
|
||||
ar->avail_idx_map |= BIT(i);
|
||||
|
||||
rtnl_lock();
|
||||
|
||||
/* Add an initial station interface */
|
||||
ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
|
||||
INFRA_NETWORK);
|
||||
|
||||
rtnl_unlock();
|
||||
|
||||
if (!ndev) {
|
||||
ath6kl_err("Failed to instantiate a network device\n");
|
||||
ret = -ENOMEM;
|
||||
wiphy_unregister(ar->wiphy);
|
||||
goto err_debug_init;
|
||||
}
|
||||
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
|
||||
__func__, ndev->name, ndev, ar);
|
||||
|
||||
/* setup access class priority mappings */
|
||||
ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
|
||||
ar->ac_stream_pri_map[WMM_AC_BE] = 1;
|
||||
ar->ac_stream_pri_map[WMM_AC_VI] = 2;
|
||||
ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
|
||||
|
||||
/* give our connected endpoints some buffers */
|
||||
ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
|
||||
ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
|
||||
|
||||
/* allocate some buffers that handle larger AMSDU frames */
|
||||
ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
|
||||
|
||||
ath6kl_cookie_init(ar);
|
||||
|
||||
ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
|
||||
ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
|
||||
|
||||
if (suspend_mode &&
|
||||
suspend_mode >= WLAN_POWER_STATE_CUT_PWR &&
|
||||
suspend_mode <= WLAN_POWER_STATE_WOW)
|
||||
ar->suspend_mode = suspend_mode;
|
||||
else
|
||||
ar->suspend_mode = 0;
|
||||
|
||||
if (uart_debug)
|
||||
ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;
|
||||
|
||||
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
|
||||
WIPHY_FLAG_HAVE_AP_SME |
|
||||
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
|
||||
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
|
||||
|
||||
if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
|
||||
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
|
||||
|
||||
ar->wiphy->probe_resp_offload =
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
|
||||
|
||||
set_bit(FIRST_BOOT, &ar->flag);
|
||||
|
||||
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
|
||||
|
||||
ret = ath6kl_init_hw_start(ar);
|
||||
if (ret) {
|
||||
ath6kl_err("Failed to start hardware: %d\n", ret);
|
||||
goto err_rxbuf_cleanup;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set mac address which is received in ready event
|
||||
* FIXME: Move to ath6kl_interface_add()
|
||||
*/
|
||||
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
|
||||
|
||||
return ret;
|
||||
|
||||
err_rxbuf_cleanup:
|
||||
ath6kl_htc_flush_rx_buf(ar->htc_target);
|
||||
ath6kl_cleanup_amsdu_rxbufs(ar);
|
||||
rtnl_lock();
|
||||
ath6kl_cfg80211_vif_cleanup(netdev_priv(ndev));
|
||||
rtnl_unlock();
|
||||
wiphy_unregister(ar->wiphy);
|
||||
err_debug_init:
|
||||
ath6kl_debug_cleanup(ar);
|
||||
err_node_cleanup:
|
||||
ath6kl_wmi_shutdown(ar->wmi);
|
||||
clear_bit(WMI_ENABLED, &ar->flag);
|
||||
ar->wmi = NULL;
|
||||
err_htc_cleanup:
|
||||
ath6kl_htc_cleanup(ar->htc_target);
|
||||
err_power_off:
|
||||
ath6kl_hif_power_off(ar);
|
||||
err_bmi_cleanup:
|
||||
ath6kl_bmi_cleanup(ar);
|
||||
err_wq:
|
||||
destroy_workqueue(ar->ath6kl_wq);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_core_init);
|
||||
|
||||
struct ath6kl *ath6kl_core_create(struct device *dev)
|
||||
{
|
||||
struct ath6kl *ar;
|
||||
u8 ctr;
|
||||
|
||||
ar = ath6kl_cfg80211_create();
|
||||
if (!ar)
|
||||
return NULL;
|
||||
|
||||
ar->p2p = !!ath6kl_p2p;
|
||||
ar->dev = dev;
|
||||
|
||||
ar->vif_max = 1;
|
||||
|
||||
ar->max_norm_iface = 1;
|
||||
|
||||
spin_lock_init(&ar->lock);
|
||||
spin_lock_init(&ar->mcastpsq_lock);
|
||||
spin_lock_init(&ar->list_lock);
|
||||
|
||||
init_waitqueue_head(&ar->event_wq);
|
||||
sema_init(&ar->sem, 1);
|
||||
|
||||
INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
|
||||
INIT_LIST_HEAD(&ar->vif_list);
|
||||
|
||||
clear_bit(WMI_ENABLED, &ar->flag);
|
||||
clear_bit(SKIP_SCAN, &ar->flag);
|
||||
clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
|
||||
|
||||
ar->listen_intvl_b = A_DEFAULT_LISTEN_INTERVAL;
|
||||
ar->tx_pwr = 0;
|
||||
|
||||
ar->intra_bss = 1;
|
||||
ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
|
||||
|
||||
ar->state = ATH6KL_STATE_OFF;
|
||||
|
||||
memset((u8 *)ar->sta_list, 0,
|
||||
AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
|
||||
|
||||
/* Init the PS queues */
|
||||
for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
|
||||
spin_lock_init(&ar->sta_list[ctr].psq_lock);
|
||||
skb_queue_head_init(&ar->sta_list[ctr].psq);
|
||||
skb_queue_head_init(&ar->sta_list[ctr].apsdq);
|
||||
ar->sta_list[ctr].aggr_conn =
|
||||
kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
|
||||
if (!ar->sta_list[ctr].aggr_conn) {
|
||||
ath6kl_err("Failed to allocate memory for sta aggregation information\n");
|
||||
ath6kl_core_destroy(ar);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
skb_queue_head_init(&ar->mcastpsq);
|
||||
|
||||
memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
|
||||
|
||||
return ar;
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_core_create);
|
||||
|
||||
void ath6kl_core_cleanup(struct ath6kl *ar)
|
||||
{
|
||||
ath6kl_hif_power_off(ar);
|
||||
|
||||
destroy_workqueue(ar->ath6kl_wq);
|
||||
|
||||
if (ar->htc_target)
|
||||
ath6kl_htc_cleanup(ar->htc_target);
|
||||
|
||||
ath6kl_cookie_cleanup(ar);
|
||||
|
||||
ath6kl_cleanup_amsdu_rxbufs(ar);
|
||||
|
||||
ath6kl_bmi_cleanup(ar);
|
||||
|
||||
ath6kl_debug_cleanup(ar);
|
||||
|
||||
kfree(ar->fw_board);
|
||||
kfree(ar->fw_otp);
|
||||
kfree(ar->fw);
|
||||
kfree(ar->fw_patch);
|
||||
kfree(ar->fw_testscript);
|
||||
|
||||
ath6kl_cfg80211_cleanup(ar);
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_core_cleanup);
|
||||
|
||||
void ath6kl_core_destroy(struct ath6kl *ar)
|
||||
{
|
||||
ath6kl_cfg80211_destroy(ar);
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_core_destroy);
|
||||
|
||||
MODULE_AUTHOR("Qualcomm Atheros");
|
||||
MODULE_DESCRIPTION("Core module for AR600x SDIO and USB devices.");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
@ -44,6 +44,10 @@
|
||||
#define ATH6KL_MAX_ENDPOINTS 4
|
||||
#define MAX_NODE_NUM 15
|
||||
|
||||
#define ATH6KL_APSD_ALL_FRAME 0xFFFF
|
||||
#define ATH6KL_APSD_NUM_OF_AC 0x4
|
||||
#define ATH6KL_APSD_FRAME_MASK 0xF
|
||||
|
||||
/* Extra bytes for htc header alignment */
|
||||
#define ATH6KL_HTC_ALIGN_BYTES 3
|
||||
|
||||
@ -55,7 +59,7 @@
|
||||
#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
|
||||
|
||||
#define DISCON_TIMER_INTVAL 10000 /* in msec */
|
||||
#define A_DEFAULT_LISTEN_INTERVAL 100
|
||||
#define A_DEFAULT_LISTEN_INTERVAL 1 /* beacon intervals */
|
||||
#define A_MAX_WOW_LISTEN_INTERVAL 1000
|
||||
|
||||
/* includes also the null byte */
|
||||
@ -97,45 +101,49 @@ struct ath6kl_fw_ie {
|
||||
u8 data[0];
|
||||
};
|
||||
|
||||
#define ATH6KL_FW_API2_FILE "fw-2.bin"
|
||||
#define ATH6KL_FW_API3_FILE "fw-3.bin"
|
||||
|
||||
/* AR6003 1.0 definitions */
|
||||
#define AR6003_HW_1_0_VERSION 0x300002ba
|
||||
|
||||
/* AR6003 2.0 definitions */
|
||||
#define AR6003_HW_2_0_VERSION 0x30000384
|
||||
#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910
|
||||
#define AR6003_HW_2_0_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
|
||||
#define AR6003_HW_2_0_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
|
||||
#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
|
||||
#define AR6003_HW_2_0_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
|
||||
#define AR6003_HW_2_0_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin"
|
||||
#define AR6003_HW_2_0_FW_DIR "ath6k/AR6003/hw2.0"
|
||||
#define AR6003_HW_2_0_OTP_FILE "otp.bin.z77"
|
||||
#define AR6003_HW_2_0_FIRMWARE_FILE "athwlan.bin.z77"
|
||||
#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "athtcmd_ram.bin"
|
||||
#define AR6003_HW_2_0_PATCH_FILE "data.patch.bin"
|
||||
#define AR6003_HW_2_0_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
|
||||
#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \
|
||||
"ath6k/AR6003/hw2.0/bdata.SD31.bin"
|
||||
|
||||
/* AR6003 3.0 definitions */
|
||||
#define AR6003_HW_2_1_1_VERSION 0x30000582
|
||||
#define AR6003_HW_2_1_1_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
|
||||
#define AR6003_HW_2_1_1_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
|
||||
#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE \
|
||||
"ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
|
||||
#define AR6003_HW_2_1_1_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
|
||||
#define AR6003_HW_2_1_1_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin"
|
||||
#define AR6003_HW_2_1_1_FW_DIR "ath6k/AR6003/hw2.1.1"
|
||||
#define AR6003_HW_2_1_1_OTP_FILE "otp.bin"
|
||||
#define AR6003_HW_2_1_1_FIRMWARE_FILE "athwlan.bin"
|
||||
#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE "athtcmd_ram.bin"
|
||||
#define AR6003_HW_2_1_1_UTF_FIRMWARE_FILE "utf.bin"
|
||||
#define AR6003_HW_2_1_1_TESTSCRIPT_FILE "nullTestFlow.bin"
|
||||
#define AR6003_HW_2_1_1_PATCH_FILE "data.patch.bin"
|
||||
#define AR6003_HW_2_1_1_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
|
||||
#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \
|
||||
"ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
|
||||
|
||||
/* AR6004 1.0 definitions */
|
||||
#define AR6004_HW_1_0_VERSION 0x30000623
|
||||
#define AR6004_HW_1_0_FIRMWARE_2_FILE "ath6k/AR6004/hw1.0/fw-2.bin"
|
||||
#define AR6004_HW_1_0_FIRMWARE_FILE "ath6k/AR6004/hw1.0/fw.ram.bin"
|
||||
#define AR6004_HW_1_0_FW_DIR "ath6k/AR6004/hw1.0"
|
||||
#define AR6004_HW_1_0_FIRMWARE_FILE "fw.ram.bin"
|
||||
#define AR6004_HW_1_0_BOARD_DATA_FILE "ath6k/AR6004/hw1.0/bdata.bin"
|
||||
#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \
|
||||
"ath6k/AR6004/hw1.0/bdata.DB132.bin"
|
||||
|
||||
/* AR6004 1.1 definitions */
|
||||
#define AR6004_HW_1_1_VERSION 0x30000001
|
||||
#define AR6004_HW_1_1_FIRMWARE_2_FILE "ath6k/AR6004/hw1.1/fw-2.bin"
|
||||
#define AR6004_HW_1_1_FIRMWARE_FILE "ath6k/AR6004/hw1.1/fw.ram.bin"
|
||||
#define AR6004_HW_1_1_FW_DIR "ath6k/AR6004/hw1.1"
|
||||
#define AR6004_HW_1_1_FIRMWARE_FILE "fw.ram.bin"
|
||||
#define AR6004_HW_1_1_BOARD_DATA_FILE "ath6k/AR6004/hw1.1/bdata.bin"
|
||||
#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \
|
||||
"ath6k/AR6004/hw1.1/bdata.DB132.bin"
|
||||
@ -144,6 +152,8 @@ struct ath6kl_fw_ie {
|
||||
#define STA_PS_AWAKE BIT(0)
|
||||
#define STA_PS_SLEEP BIT(1)
|
||||
#define STA_PS_POLLED BIT(2)
|
||||
#define STA_PS_APSD_TRIGGER BIT(3)
|
||||
#define STA_PS_APSD_EOSP BIT(4)
|
||||
|
||||
/* HTC TX packet tagging definitions */
|
||||
#define ATH6KL_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED
|
||||
@ -186,7 +196,7 @@ struct ath6kl_fw_ie {
|
||||
#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
|
||||
#define ATH6KL_CONF_ENABLE_11N BIT(2)
|
||||
#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
|
||||
#define ATH6KL_CONF_SUSPEND_CUTPOWER BIT(4)
|
||||
#define ATH6KL_CONF_UART_DEBUG BIT(4)
|
||||
|
||||
enum wlan_low_pwr_state {
|
||||
WLAN_POWER_STATE_ON,
|
||||
@ -231,14 +241,19 @@ struct rxtid_stats {
|
||||
u32 num_bar;
|
||||
};
|
||||
|
||||
struct aggr_info {
|
||||
struct aggr_info_conn {
|
||||
u8 aggr_sz;
|
||||
u8 timer_scheduled;
|
||||
struct timer_list timer;
|
||||
struct net_device *dev;
|
||||
struct rxtid rx_tid[NUM_OF_TIDS];
|
||||
struct sk_buff_head free_q;
|
||||
struct rxtid_stats stat[NUM_OF_TIDS];
|
||||
struct aggr_info *aggr_info;
|
||||
};
|
||||
|
||||
struct aggr_info {
|
||||
struct aggr_info_conn *aggr_conn;
|
||||
struct sk_buff_head rx_amsdu_freeq;
|
||||
};
|
||||
|
||||
struct ath6kl_wep_key {
|
||||
@ -280,6 +295,9 @@ struct ath6kl_sta {
|
||||
u8 wpa_ie[ATH6KL_MAX_IE];
|
||||
struct sk_buff_head psq;
|
||||
spinlock_t psq_lock;
|
||||
u8 apsd_info;
|
||||
struct sk_buff_head apsdq;
|
||||
struct aggr_info_conn *aggr_conn;
|
||||
};
|
||||
|
||||
struct ath6kl_version {
|
||||
@ -408,6 +426,13 @@ enum ath6kl_hif_type {
|
||||
ATH6KL_HIF_TYPE_USB,
|
||||
};
|
||||
|
||||
/* Max number of filters that hw supports */
|
||||
#define ATH6K_MAX_MC_FILTERS_PER_LIST 7
|
||||
struct ath6kl_mc_filter {
|
||||
struct list_head list;
|
||||
char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
|
||||
};
|
||||
|
||||
/*
|
||||
* Driver's maximum limit, note that some firmwares support only one vif
|
||||
* and the runtime (current) limit must be checked from ar->vif_max.
|
||||
@ -426,6 +451,7 @@ enum ath6kl_vif_state {
|
||||
DTIM_PERIOD_AVAIL,
|
||||
WLAN_ENABLED,
|
||||
STATS_UPDATE_PEND,
|
||||
HOST_SLEEP_MODE_CMD_PROCESSED,
|
||||
};
|
||||
|
||||
struct ath6kl_vif {
|
||||
@ -471,6 +497,8 @@ struct ath6kl_vif {
|
||||
u8 assoc_bss_dtim_period;
|
||||
struct net_device_stats net_stats;
|
||||
struct target_stats target_stats;
|
||||
|
||||
struct list_head mc_filter;
|
||||
};
|
||||
|
||||
#define WOW_LIST_ID 0
|
||||
@ -504,6 +532,7 @@ struct ath6kl {
|
||||
struct wiphy *wiphy;
|
||||
|
||||
enum ath6kl_state state;
|
||||
unsigned int testmode;
|
||||
|
||||
struct ath6kl_bmi bmi;
|
||||
const struct ath6kl_hif_ops *hif_ops;
|
||||
@ -523,7 +552,6 @@ struct ath6kl {
|
||||
spinlock_t lock;
|
||||
struct semaphore sem;
|
||||
u16 listen_intvl_b;
|
||||
u16 listen_intvl_t;
|
||||
u8 lrssi_roam_threshold;
|
||||
struct ath6kl_version version;
|
||||
u32 target_type;
|
||||
@ -574,17 +602,24 @@ struct ath6kl {
|
||||
u32 board_addr;
|
||||
u32 refclk_hz;
|
||||
u32 uarttx_pin;
|
||||
u32 testscript_addr;
|
||||
|
||||
struct ath6kl_hw_fw {
|
||||
const char *dir;
|
||||
const char *otp;
|
||||
const char *fw;
|
||||
const char *tcmd;
|
||||
const char *patch;
|
||||
const char *utf;
|
||||
const char *testscript;
|
||||
} fw;
|
||||
|
||||
const char *fw_otp;
|
||||
const char *fw;
|
||||
const char *fw_tcmd;
|
||||
const char *fw_patch;
|
||||
const char *fw_api2;
|
||||
const char *fw_board;
|
||||
const char *fw_default_board;
|
||||
} hw;
|
||||
|
||||
u16 conf_flags;
|
||||
u16 suspend_mode;
|
||||
wait_queue_head_t event_wq;
|
||||
struct ath6kl_mbox_info mbox_info;
|
||||
|
||||
@ -603,6 +638,10 @@ struct ath6kl {
|
||||
u8 *fw_patch;
|
||||
size_t fw_patch_len;
|
||||
|
||||
u8 *fw_testscript;
|
||||
size_t fw_testscript_len;
|
||||
|
||||
unsigned int fw_api;
|
||||
unsigned long fw_capabilities[ATH6KL_CAPABILITY_LEN];
|
||||
|
||||
struct workqueue_struct *ath6kl_wq;
|
||||
@ -676,7 +715,9 @@ struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
|
||||
void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
|
||||
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);
|
||||
|
||||
struct aggr_info *aggr_init(struct net_device *dev);
|
||||
struct aggr_info *aggr_init(struct ath6kl_vif *vif);
|
||||
void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
|
||||
struct aggr_info_conn *aggr_conn);
|
||||
void ath6kl_rx_refill(struct htc_target *target,
|
||||
enum htc_endpoint_id endpoint);
|
||||
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count);
|
||||
@ -684,7 +725,7 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
|
||||
enum htc_endpoint_id endpoint,
|
||||
int len);
|
||||
void aggr_module_destroy(struct aggr_info *aggr_info);
|
||||
void aggr_reset_state(struct aggr_info *aggr_info);
|
||||
void aggr_reset_state(struct aggr_info_conn *aggr_conn);
|
||||
|
||||
struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
|
||||
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
|
||||
@ -700,7 +741,7 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
|
||||
void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
|
||||
void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
|
||||
u8 keymgmt, u8 ucipher, u8 auth,
|
||||
u8 assoc_req_len, u8 *assoc_info);
|
||||
u8 assoc_req_len, u8 *assoc_info, u8 apsd_info);
|
||||
void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
|
||||
u8 *bssid, u8 assoc_resp_len,
|
||||
u8 *assoc_info, u16 prot_reason_status);
|
||||
@ -723,12 +764,18 @@ void ath6kl_wakeup_event(void *dev);
|
||||
void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
|
||||
bool wait_fot_compltn, bool cold_reset);
|
||||
void ath6kl_init_control_info(struct ath6kl_vif *vif);
|
||||
void ath6kl_deinit_if_data(struct ath6kl_vif *vif);
|
||||
void ath6kl_core_free(struct ath6kl *ar);
|
||||
struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
|
||||
void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
|
||||
int ath6kl_init_hw_start(struct ath6kl *ar);
|
||||
int ath6kl_init_hw_stop(struct ath6kl *ar);
|
||||
int ath6kl_init_fetch_firmwares(struct ath6kl *ar);
|
||||
int ath6kl_init_hw_params(struct ath6kl *ar);
|
||||
|
||||
void ath6kl_check_wow_status(struct ath6kl *ar);
|
||||
|
||||
struct ath6kl *ath6kl_core_create(struct device *dev);
|
||||
int ath6kl_core_init(struct ath6kl *ar);
|
||||
void ath6kl_core_cleanup(struct ath6kl *ar);
|
||||
void ath6kl_core_destroy(struct ath6kl *ar);
|
||||
|
||||
#endif /* CORE_H */
|
||||
|
@ -54,9 +54,42 @@ int ath6kl_printk(const char *level, const char *fmt, ...)
|
||||
|
||||
return rtn;
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_printk);
|
||||
|
||||
#ifdef CONFIG_ATH6KL_DEBUG
|
||||
|
||||
void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
|
||||
{
|
||||
struct va_format vaf;
|
||||
va_list args;
|
||||
|
||||
if (!(debug_mask & mask))
|
||||
return;
|
||||
|
||||
va_start(args, fmt);
|
||||
|
||||
vaf.fmt = fmt;
|
||||
vaf.va = &args;
|
||||
|
||||
ath6kl_printk(KERN_DEBUG, "%pV", &vaf);
|
||||
|
||||
va_end(args);
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_dbg);
|
||||
|
||||
void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
|
||||
const char *msg, const char *prefix,
|
||||
const void *buf, size_t len)
|
||||
{
|
||||
if (debug_mask & mask) {
|
||||
if (msg)
|
||||
ath6kl_dbg(mask, "%s\n", msg);
|
||||
|
||||
print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_dbg_dump);
|
||||
|
||||
#define REG_OUTPUT_LEN_PER_LINE 25
|
||||
#define REGTYPE_STR_LEN 100
|
||||
|
||||
@ -82,31 +115,31 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
|
||||
struct ath6kl_irq_enable_reg *irq_enable_reg)
|
||||
{
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY, ("<------- Register Table -------->\n"));
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
|
||||
|
||||
if (irq_proc_reg != NULL) {
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"Host Int status: 0x%x\n",
|
||||
irq_proc_reg->host_int_status);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"CPU Int status: 0x%x\n",
|
||||
irq_proc_reg->cpu_int_status);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"Error Int status: 0x%x\n",
|
||||
irq_proc_reg->error_int_status);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"Counter Int status: 0x%x\n",
|
||||
irq_proc_reg->counter_int_status);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"Mbox Frame: 0x%x\n",
|
||||
irq_proc_reg->mbox_frame);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"Rx Lookahead Valid: 0x%x\n",
|
||||
irq_proc_reg->rx_lkahd_valid);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"Rx Lookahead 0: 0x%x\n",
|
||||
irq_proc_reg->rx_lkahd[0]);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"Rx Lookahead 1: 0x%x\n",
|
||||
irq_proc_reg->rx_lkahd[1]);
|
||||
|
||||
@ -115,16 +148,16 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
|
||||
* If the target supports GMBOX hardware, dump some
|
||||
* additional state.
|
||||
*/
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"GMBOX Host Int status 2: 0x%x\n",
|
||||
irq_proc_reg->host_int_status2);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"GMBOX RX Avail: 0x%x\n",
|
||||
irq_proc_reg->gmbox_rx_avail);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"GMBOX lookahead alias 0: 0x%x\n",
|
||||
irq_proc_reg->rx_gmbox_lkahd_alias[0]);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"GMBOX lookahead alias 1: 0x%x\n",
|
||||
irq_proc_reg->rx_gmbox_lkahd_alias[1]);
|
||||
}
|
||||
@ -132,13 +165,13 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
|
||||
}
|
||||
|
||||
if (irq_enable_reg != NULL) {
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY,
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ,
|
||||
"Int status Enable: 0x%x\n",
|
||||
irq_enable_reg->int_status_en);
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY, "Counter Int status Enable: 0x%x\n",
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ, "Counter Int status Enable: 0x%x\n",
|
||||
irq_enable_reg->cntr_int_status_en);
|
||||
}
|
||||
ath6kl_dbg(ATH6KL_DBG_ANY, "<------------------------------->\n");
|
||||
ath6kl_dbg(ATH6KL_DBG_IRQ, "<------------------------------->\n");
|
||||
}
|
||||
|
||||
static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
|
||||
@ -175,9 +208,6 @@ void dump_cred_dist_stats(struct htc_target *target)
|
||||
{
|
||||
struct htc_endpoint_credit_dist *ep_list;
|
||||
|
||||
if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT))
|
||||
return;
|
||||
|
||||
list_for_each_entry(ep_list, &target->cred_dist_list, list)
|
||||
dump_cred_dist(ep_list);
|
||||
|
||||
@ -1411,6 +1441,8 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
|
||||
return -EINVAL;
|
||||
pstream.medium_time = cpu_to_le32(val32);
|
||||
|
||||
pstream.nominal_phy = le32_to_cpu(pstream.min_phy_rate) / 1000000;
|
||||
|
||||
ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream);
|
||||
|
||||
return count;
|
||||
@ -1505,57 +1537,46 @@ static const struct file_operations fops_bgscan_int = {
|
||||
};
|
||||
|
||||
static ssize_t ath6kl_listen_int_write(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath6kl *ar = file->private_data;
|
||||
u16 listen_int_t, listen_int_b;
|
||||
struct ath6kl_vif *vif;
|
||||
u16 listen_interval;
|
||||
char buf[32];
|
||||
char *sptr, *token;
|
||||
ssize_t len;
|
||||
|
||||
vif = ath6kl_vif_first(ar);
|
||||
if (!vif)
|
||||
return -EIO;
|
||||
|
||||
len = min(count, sizeof(buf) - 1);
|
||||
if (copy_from_user(buf, user_buf, len))
|
||||
return -EFAULT;
|
||||
|
||||
buf[len] = '\0';
|
||||
sptr = buf;
|
||||
|
||||
token = strsep(&sptr, " ");
|
||||
if (!token)
|
||||
if (kstrtou16(buf, 0, &listen_interval))
|
||||
return -EINVAL;
|
||||
|
||||
if (kstrtou16(token, 0, &listen_int_t))
|
||||
if ((listen_interval < 1) || (listen_interval > 50))
|
||||
return -EINVAL;
|
||||
|
||||
if (kstrtou16(sptr, 0, &listen_int_b))
|
||||
return -EINVAL;
|
||||
|
||||
if ((listen_int_t < 15) || (listen_int_t > 5000))
|
||||
return -EINVAL;
|
||||
|
||||
if ((listen_int_b < 1) || (listen_int_b > 50))
|
||||
return -EINVAL;
|
||||
|
||||
ar->listen_intvl_t = listen_int_t;
|
||||
ar->listen_intvl_b = listen_int_b;
|
||||
|
||||
ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t,
|
||||
ar->listen_intvl_b = listen_interval;
|
||||
ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, 0,
|
||||
ar->listen_intvl_b);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t ath6kl_listen_int_read(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath6kl *ar = file->private_data;
|
||||
char buf[32];
|
||||
int len;
|
||||
|
||||
len = scnprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t,
|
||||
ar->listen_intvl_b);
|
||||
len = scnprintf(buf, sizeof(buf), "%u\n", ar->listen_intvl_b);
|
||||
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
||||
}
|
||||
@ -1710,6 +1731,9 @@ int ath6kl_debug_init(struct ath6kl *ar)
|
||||
debugfs_create_file("bgscan_interval", S_IWUSR,
|
||||
ar->debugfs_phy, ar, &fops_bgscan_int);
|
||||
|
||||
debugfs_create_file("listen_interval", S_IRUSR | S_IWUSR,
|
||||
ar->debugfs_phy, ar, &fops_listen_int);
|
||||
|
||||
debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
|
||||
&fops_power_params);
|
||||
|
||||
|
@ -41,6 +41,7 @@ enum ATH6K_DEBUG_MASK {
|
||||
ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */
|
||||
ATH6KL_DBG_WMI_DUMP = BIT(19),
|
||||
ATH6KL_DBG_SUSPEND = BIT(20),
|
||||
ATH6KL_DBG_USB = BIT(21),
|
||||
ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
|
||||
};
|
||||
|
||||
@ -55,35 +56,16 @@ int ath6kl_printk(const char *level, const char *fmt, ...);
#define ath6kl_warn(fmt, ...) \
	ath6kl_printk(KERN_WARNING, fmt, ##__VA_ARGS__)

#define AR_DBG_LVL_CHECK(mask) (debug_mask & mask)

enum ath6kl_war {
	ATH6KL_WAR_INVALID_RATE,
};

#ifdef CONFIG_ATH6KL_DEBUG
#define ath6kl_dbg(mask, fmt, ...) \
	({ \
	 int rtn; \
	 if (debug_mask & mask) \
		rtn = ath6kl_printk(KERN_DEBUG, fmt, ##__VA_ARGS__); \
	 else \
		rtn = 0; \
	 \
	 rtn; \
	 })

static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
				   const char *msg, const char *prefix,
				   const void *buf, size_t len)
{
	if (debug_mask & mask) {
		if (msg)
			ath6kl_dbg(mask, "%s\n", msg);

		print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
	}
}
void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...);
void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
		     const char *msg, const char *prefix,
		     const void *buf, size_t len);
|
||||
|
||||
void ath6kl_dump_registers(struct ath6kl_device *dev,
|
||||
struct ath6kl_irq_proc_registers *irq_proc_reg,
|
||||
|
@ -15,6 +15,8 @@
|
||||
*/
|
||||
#include "hif.h"
|
||||
|
||||
#include <linux/export.h>
|
||||
|
||||
#include "core.h"
|
||||
#include "target.h"
|
||||
#include "hif-ops.h"
|
||||
@ -59,6 +61,8 @@ int ath6kl_hif_rw_comp_handler(void *context, int status)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler);
|
||||
|
||||
#define REG_DUMP_COUNT_AR6003 60
|
||||
#define REGISTER_DUMP_LEN_MAX 60
|
||||
|
||||
@ -429,9 +433,8 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
|
||||
if (status)
|
||||
goto out;
|
||||
|
||||
if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
|
||||
ath6kl_dump_registers(dev, &dev->irq_proc_reg,
|
||||
&dev->irq_en_reg);
|
||||
ath6kl_dump_registers(dev, &dev->irq_proc_reg,
|
||||
&dev->irq_en_reg);
|
||||
|
||||
/* Update only those registers that are enabled */
|
||||
host_int_status = dev->irq_proc_reg.host_int_status &
|
||||
@ -561,6 +564,7 @@ int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
|
||||
|
||||
return status;
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_hif_intr_bh_handler);
|
||||
|
||||
static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
|
||||
{
|
||||
@ -689,6 +693,11 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
|
||||
ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
|
||||
dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
|
||||
|
||||
/* usb doesn't support enabling interrupts */
|
||||
/* FIXME: remove check once USB support is implemented */
|
||||
if (dev->ar->hif_type == ATH6KL_HIF_TYPE_USB)
|
||||
return 0;
|
||||
|
||||
status = ath6kl_hif_disable_intrs(dev);
|
||||
|
||||
fail_setup:
|
||||
|
@ -2062,6 +2062,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
|
||||
enum htc_endpoint_id id;
|
||||
int n_fetched = 0;
|
||||
|
||||
INIT_LIST_HEAD(&comp_pktq);
|
||||
*num_pkts = 0;
|
||||
|
||||
/*
|
||||
@ -2543,6 +2544,12 @@ int ath6kl_htc_wait_target(struct htc_target *target)
|
||||
struct htc_service_connect_resp resp;
|
||||
int status;
|
||||
|
||||
/* FIXME: remove once USB support is implemented */
|
||||
if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
|
||||
ath6kl_err("HTC doesn't support USB yet. Patience!\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
/* we should be getting 1 control message that the target is ready */
|
||||
packet = htc_wait_for_ctrl_msg(target);
|
||||
|
||||
@ -2772,7 +2779,9 @@ void ath6kl_htc_cleanup(struct htc_target *target)
|
||||
{
|
||||
struct htc_packet *packet, *tmp_packet;
|
||||
|
||||
ath6kl_hif_cleanup_scatter(target->dev->ar);
|
||||
/* FIXME: remove check once USB support is implemented */
|
||||
if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB)
|
||||
ath6kl_hif_cleanup_scatter(target->dev->ar);
|
||||
|
||||
list_for_each_entry_safe(packet, tmp_packet,
|
||||
&target->free_ctrl_txbuf, list) {
|
||||
|
@ -17,22 +17,16 @@
|
||||
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/mmc/sdio_func.h>
|
||||
|
||||
#include "core.h"
|
||||
#include "cfg80211.h"
|
||||
#include "target.h"
|
||||
#include "debug.h"
|
||||
#include "hif-ops.h"
|
||||
|
||||
unsigned int debug_mask;
|
||||
static unsigned int testmode;
|
||||
static bool suspend_cutpower;
|
||||
|
||||
module_param(debug_mask, uint, 0644);
|
||||
module_param(testmode, uint, 0644);
|
||||
module_param(suspend_cutpower, bool, 0444);
|
||||
|
||||
static const struct ath6kl_hw hw_list[] = {
|
||||
{
|
||||
.id = AR6003_HW_2_0_VERSION,
|
||||
@ -47,11 +41,14 @@ static const struct ath6kl_hw hw_list[] = {
|
||||
/* hw2.0 needs override address hardcoded */
|
||||
.app_start_override_addr = 0x944C00,
|
||||
|
||||
.fw_otp = AR6003_HW_2_0_OTP_FILE,
|
||||
.fw = AR6003_HW_2_0_FIRMWARE_FILE,
|
||||
.fw_tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE,
|
||||
.fw_patch = AR6003_HW_2_0_PATCH_FILE,
|
||||
.fw_api2 = AR6003_HW_2_0_FIRMWARE_2_FILE,
|
||||
.fw = {
|
||||
.dir = AR6003_HW_2_0_FW_DIR,
|
||||
.otp = AR6003_HW_2_0_OTP_FILE,
|
||||
.fw = AR6003_HW_2_0_FIRMWARE_FILE,
|
||||
.tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE,
|
||||
.patch = AR6003_HW_2_0_PATCH_FILE,
|
||||
},
|
||||
|
||||
.fw_board = AR6003_HW_2_0_BOARD_DATA_FILE,
|
||||
.fw_default_board = AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE,
|
||||
},
|
||||
@ -64,12 +61,18 @@ static const struct ath6kl_hw hw_list[] = {
|
||||
.reserved_ram_size = 512,
|
||||
.refclk_hz = 26000000,
|
||||
.uarttx_pin = 8,
|
||||
.testscript_addr = 0x57ef74,
|
||||
|
||||
.fw = {
|
||||
.dir = AR6003_HW_2_1_1_FW_DIR,
|
||||
.otp = AR6003_HW_2_1_1_OTP_FILE,
|
||||
.fw = AR6003_HW_2_1_1_FIRMWARE_FILE,
|
||||
.tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE,
|
||||
.patch = AR6003_HW_2_1_1_PATCH_FILE,
|
||||
.utf = AR6003_HW_2_1_1_UTF_FIRMWARE_FILE,
|
||||
.testscript = AR6003_HW_2_1_1_TESTSCRIPT_FILE,
|
||||
},
|
||||
|
||||
.fw_otp = AR6003_HW_2_1_1_OTP_FILE,
|
||||
.fw = AR6003_HW_2_1_1_FIRMWARE_FILE,
|
||||
.fw_tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE,
|
||||
.fw_patch = AR6003_HW_2_1_1_PATCH_FILE,
|
||||
.fw_api2 = AR6003_HW_2_1_1_FIRMWARE_2_FILE,
|
||||
.fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE,
|
||||
.fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
|
||||
},
|
||||
@ -84,8 +87,11 @@ static const struct ath6kl_hw hw_list[] = {
|
||||
.refclk_hz = 26000000,
|
||||
.uarttx_pin = 11,
|
||||
|
||||
.fw = AR6004_HW_1_0_FIRMWARE_FILE,
|
||||
.fw_api2 = AR6004_HW_1_0_FIRMWARE_2_FILE,
|
||||
.fw = {
|
||||
.dir = AR6004_HW_1_0_FW_DIR,
|
||||
.fw = AR6004_HW_1_0_FIRMWARE_FILE,
|
||||
},
|
||||
|
||||
.fw_board = AR6004_HW_1_0_BOARD_DATA_FILE,
|
||||
.fw_default_board = AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE,
|
||||
},
|
||||
@ -100,8 +106,11 @@ static const struct ath6kl_hw hw_list[] = {
|
||||
.refclk_hz = 40000000,
|
||||
.uarttx_pin = 11,
|
||||
|
||||
.fw = AR6004_HW_1_1_FIRMWARE_FILE,
|
||||
.fw_api2 = AR6004_HW_1_1_FIRMWARE_2_FILE,
|
||||
.fw = {
|
||||
.dir = AR6004_HW_1_1_FW_DIR,
|
||||
.fw = AR6004_HW_1_1_FIRMWARE_FILE,
|
||||
},
|
||||
|
||||
.fw_board = AR6004_HW_1_1_BOARD_DATA_FILE,
|
||||
.fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE,
|
||||
},
|
||||
@ -452,6 +461,13 @@ int ath6kl_configure_target(struct ath6kl *ar)
|
||||
u8 fw_iftype, fw_mode = 0, fw_submode = 0;
|
||||
int i, status;
|
||||
|
||||
param = !!(ar->conf_flags & ATH6KL_CONF_UART_DEBUG);
|
||||
if (ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar,
|
||||
HI_ITEM(hi_serial_enable)), (u8 *)¶m, 4)) {
|
||||
ath6kl_err("bmi_write_memory for uart debug failed\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: Even though the firmware interface type is
|
||||
* chosen as BSS_STA for all three interfaces, can
|
||||
@ -573,36 +589,6 @@ int ath6kl_configure_target(struct ath6kl *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ath6kl_core_free(struct ath6kl *ar)
|
||||
{
|
||||
wiphy_free(ar->wiphy);
|
||||
}
|
||||
|
||||
void ath6kl_core_cleanup(struct ath6kl *ar)
|
||||
{
|
||||
ath6kl_hif_power_off(ar);
|
||||
|
||||
destroy_workqueue(ar->ath6kl_wq);
|
||||
|
||||
if (ar->htc_target)
|
||||
ath6kl_htc_cleanup(ar->htc_target);
|
||||
|
||||
ath6kl_cookie_cleanup(ar);
|
||||
|
||||
ath6kl_cleanup_amsdu_rxbufs(ar);
|
||||
|
||||
ath6kl_bmi_cleanup(ar);
|
||||
|
||||
ath6kl_debug_cleanup(ar);
|
||||
|
||||
kfree(ar->fw_board);
|
||||
kfree(ar->fw_otp);
|
||||
kfree(ar->fw);
|
||||
kfree(ar->fw_patch);
|
||||
|
||||
ath6kl_deinit_ieee80211_hw(ar);
|
||||
}
|
||||
|
||||
/* firmware upload */
|
||||
static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
|
||||
u8 **fw, size_t *fw_len)
|
||||
@ -626,21 +612,6 @@ static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
static const char *get_target_ver_dir(const struct ath6kl *ar)
|
||||
{
|
||||
switch (ar->version.target_ver) {
|
||||
case AR6003_HW_1_0_VERSION:
|
||||
return "ath6k/AR6003/hw1.0";
|
||||
case AR6003_HW_2_0_VERSION:
|
||||
return "ath6k/AR6003/hw2.0";
|
||||
case AR6003_HW_2_1_1_VERSION:
|
||||
return "ath6k/AR6003/hw2.1.1";
|
||||
}
|
||||
ath6kl_warn("%s: unsupported target version 0x%x.\n", __func__,
|
||||
ar->version.target_ver);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check the device tree for a board-id and use it to construct
|
||||
* the pathname to the firmware file. Used (for now) to find a
|
||||
@ -663,7 +634,7 @@ static bool check_device_tree(struct ath6kl *ar)
|
||||
continue;
|
||||
}
|
||||
snprintf(board_filename, sizeof(board_filename),
|
||||
"%s/bdata.%s.bin", get_target_ver_dir(ar), board_id);
|
||||
"%s/bdata.%s.bin", ar->hw.fw.dir, board_id);
|
||||
|
||||
ret = ath6kl_get_fw(ar, board_filename, &ar->fw_board,
|
||||
&ar->fw_board_len);
|
||||
@ -730,19 +701,20 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar)
|
||||
|
||||
static int ath6kl_fetch_otp_file(struct ath6kl *ar)
|
||||
{
|
||||
const char *filename;
|
||||
char filename[100];
|
||||
int ret;
|
||||
|
||||
if (ar->fw_otp != NULL)
|
||||
return 0;
|
||||
|
||||
if (ar->hw.fw_otp == NULL) {
|
||||
if (ar->hw.fw.otp == NULL) {
|
||||
ath6kl_dbg(ATH6KL_DBG_BOOT,
|
||||
"no OTP file configured for this hw\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
filename = ar->hw.fw_otp;
|
||||
snprintf(filename, sizeof(filename), "%s/%s",
|
||||
ar->hw.fw.dir, ar->hw.fw.otp);
|
||||
|
||||
ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
|
||||
&ar->fw_otp_len);
|
||||
@ -755,33 +727,61 @@ static int ath6kl_fetch_otp_file(struct ath6kl *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_fetch_testmode_file(struct ath6kl *ar)
|
||||
{
|
||||
char filename[100];
|
||||
int ret;
|
||||
|
||||
if (ar->testmode == 0)
|
||||
return 0;
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_BOOT, "testmode %d\n", ar->testmode);
|
||||
|
||||
if (ar->testmode == 2) {
|
||||
if (ar->hw.fw.utf == NULL) {
|
||||
ath6kl_warn("testmode 2 not supported\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
snprintf(filename, sizeof(filename), "%s/%s",
|
||||
ar->hw.fw.dir, ar->hw.fw.utf);
|
||||
} else {
|
||||
if (ar->hw.fw.tcmd == NULL) {
|
||||
ath6kl_warn("testmode 1 not supported\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
snprintf(filename, sizeof(filename), "%s/%s",
|
||||
ar->hw.fw.dir, ar->hw.fw.tcmd);
|
||||
}
|
||||
|
||||
set_bit(TESTMODE, &ar->flag);
|
||||
|
||||
ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
|
||||
if (ret) {
|
||||
ath6kl_err("Failed to get testmode %d firmware file %s: %d\n",
|
||||
ar->testmode, filename, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
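The testmode value consumed above normally comes from a driver-level knob (the older code below still reads a module-scope "testmode" variable); a minimal sketch of how such a knob is typically wired up as a module parameter, with the exact name and permissions being assumptions rather than something this patch defines:

    /* sketch only: hypothetical parameter feeding ar->testmode */
    static unsigned int testmode;
    module_param(testmode, uint, 0644);
    MODULE_PARM_DESC(testmode, "Firmware testmode: 1 = tcmd, 2 = utf");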
|
||||
|
||||
static int ath6kl_fetch_fw_file(struct ath6kl *ar)
|
||||
{
|
||||
const char *filename;
|
||||
char filename[100];
|
||||
int ret;
|
||||
|
||||
if (ar->fw != NULL)
|
||||
return 0;
|
||||
|
||||
if (testmode) {
|
||||
if (ar->hw.fw_tcmd == NULL) {
|
||||
ath6kl_warn("testmode not supported\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
filename = ar->hw.fw_tcmd;
|
||||
|
||||
set_bit(TESTMODE, &ar->flag);
|
||||
|
||||
goto get_fw;
|
||||
}
|
||||
|
||||
if (WARN_ON(ar->hw.fw == NULL))
|
||||
/* FIXME: remove WARN_ON() as we won't support FW API 1 for long */
|
||||
if (WARN_ON(ar->hw.fw.fw == NULL))
|
||||
return -EINVAL;
|
||||
|
||||
filename = ar->hw.fw;
|
||||
snprintf(filename, sizeof(filename), "%s/%s",
|
||||
ar->hw.fw.dir, ar->hw.fw.fw);
|
||||
|
||||
get_fw:
|
||||
ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
|
||||
if (ret) {
|
||||
ath6kl_err("Failed to get firmware file %s: %d\n",
|
||||
@ -794,16 +794,17 @@ get_fw:
|
||||
|
||||
static int ath6kl_fetch_patch_file(struct ath6kl *ar)
|
||||
{
|
||||
const char *filename;
|
||||
char filename[100];
|
||||
int ret;
|
||||
|
||||
if (ar->fw_patch != NULL)
|
||||
return 0;
|
||||
|
||||
if (ar->hw.fw_patch == NULL)
|
||||
if (ar->hw.fw.patch == NULL)
|
||||
return 0;
|
||||
|
||||
filename = ar->hw.fw_patch;
|
||||
snprintf(filename, sizeof(filename), "%s/%s",
|
||||
ar->hw.fw.dir, ar->hw.fw.patch);
|
||||
|
||||
ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
|
||||
&ar->fw_patch_len);
|
||||
@ -816,6 +817,34 @@ static int ath6kl_fetch_patch_file(struct ath6kl *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_fetch_testscript_file(struct ath6kl *ar)
|
||||
{
|
||||
char filename[100];
|
||||
int ret;
|
||||
|
||||
if (ar->testmode != 2)
|
||||
return 0;
|
||||
|
||||
if (ar->fw_testscript != NULL)
|
||||
return 0;
|
||||
|
||||
if (ar->hw.fw.testscript == NULL)
|
||||
return 0;
|
||||
|
||||
snprintf(filename, sizeof(filename), "%s/%s",
|
||||
ar->hw.fw.dir, ar->hw.fw.testscript);
|
||||
|
||||
ret = ath6kl_get_fw(ar, filename, &ar->fw_testscript,
|
||||
&ar->fw_testscript_len);
|
||||
if (ret) {
|
||||
ath6kl_err("Failed to get testscript file %s: %d\n",
|
||||
filename, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_fetch_fw_api1(struct ath6kl *ar)
|
||||
{
|
||||
int ret;
|
||||
@ -832,23 +861,24 @@ static int ath6kl_fetch_fw_api1(struct ath6kl *ar)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = ath6kl_fetch_testscript_file(ar);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
|
||||
static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
|
||||
{
|
||||
size_t magic_len, len, ie_len;
|
||||
const struct firmware *fw;
|
||||
struct ath6kl_fw_ie *hdr;
|
||||
const char *filename;
|
||||
char filename[100];
|
||||
const u8 *data;
|
||||
int ret, ie_id, i, index, bit;
|
||||
__le32 *val;
|
||||
|
||||
if (ar->hw.fw_api2 == NULL)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
filename = ar->hw.fw_api2;
|
||||
snprintf(filename, sizeof(filename), "%s/%s", ar->hw.fw.dir, name);
|
||||
|
||||
ret = request_firmware(&fw, filename, ar->dev);
|
||||
if (ret)
|
||||
@ -907,6 +937,10 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
|
||||
ath6kl_dbg(ATH6KL_DBG_BOOT, "found fw image ie (%zd B)\n",
|
||||
ie_len);
|
||||
|
||||
/* in testmode we might already have a fw file */
|
||||
if (ar->fw != NULL)
|
||||
break;
|
||||
|
||||
ar->fw = kmemdup(data, ie_len, GFP_KERNEL);
|
||||
|
||||
if (ar->fw == NULL) {
|
||||
@ -1010,7 +1044,7 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath6kl_fetch_firmwares(struct ath6kl *ar)
|
||||
int ath6kl_init_fetch_firmwares(struct ath6kl *ar)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -1018,17 +1052,30 @@ static int ath6kl_fetch_firmwares(struct ath6kl *ar)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = ath6kl_fetch_fw_api2(ar);
|
||||
ret = ath6kl_fetch_testmode_file(ar);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API3_FILE);
if (ret == 0) {
ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api 2\n");
return 0;
ar->fw_api = 3;
goto out;
}

ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API2_FILE);
if (ret == 0) {
ar->fw_api = 2;
goto out;
}

ret = ath6kl_fetch_fw_api1(ar);
if (ret)
return ret;

ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api 1\n");
ar->fw_api = 1;

out:
ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api %d\n", ar->fw_api);

return 0;
}
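For orientation, the fallback above tries the newest firmware API first and only then the older ones, so the per-hardware firmware directory may carry any of the supported layouts. Assuming the usual file names (fw-3.bin for API 3, fw-2.bin for API 2, plus the separate API 1 pieces), a populated tree might look like:

    /lib/firmware/ath6k/AR6003/hw2.1.1/fw-3.bin    (FW API 3, tried first)
    /lib/firmware/ath6k/AR6003/hw2.1.1/fw-2.bin    (FW API 2 fallback)
    /lib/firmware/ath6k/AR6003/hw2.1.1/otp.bin     (part of the FW API 1 fallback)

The file names here are illustrative assumptions; only the lookup order is taken from the code above.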
|
||||
@ -1249,6 +1296,50 @@ static int ath6kl_upload_patch(struct ath6kl *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_upload_testscript(struct ath6kl *ar)
|
||||
{
|
||||
u32 address, param;
|
||||
int ret;
|
||||
|
||||
if (ar->testmode != 2)
|
||||
return 0;
|
||||
|
||||
if (ar->fw_testscript == NULL)
|
||||
return 0;
|
||||
|
||||
address = ar->hw.testscript_addr;
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_BOOT, "writing testscript to 0x%x (%zd B)\n",
|
||||
address, ar->fw_testscript_len);
|
||||
|
||||
ret = ath6kl_bmi_write(ar, address, ar->fw_testscript,
|
||||
ar->fw_testscript_len);
|
||||
if (ret) {
|
||||
ath6kl_err("Failed to write testscript file: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
param = address;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_ota_testscript)),
(unsigned char *) &param, 4);

param = 4096;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_end_ram_reserve_sz)),
(unsigned char *) &param, 4);

param = 1;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_test_apps_related)),
(unsigned char *) &param, 4);

return 0;
}
|
||||
|
||||
static int ath6kl_init_upload(struct ath6kl *ar)
|
||||
{
|
||||
u32 param, options, sleep, address;
|
||||
@ -1357,6 +1448,11 @@ static int ath6kl_init_upload(struct ath6kl *ar)
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
/* Download the test script */
|
||||
status = ath6kl_upload_testscript(ar);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
/* Restore system sleep */
|
||||
address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
|
||||
status = ath6kl_bmi_reg_write(ar, address, sleep);
|
||||
@ -1372,9 +1468,9 @@ static int ath6kl_init_upload(struct ath6kl *ar)
|
||||
return status;
|
||||
}
|
||||
|
||||
static int ath6kl_init_hw_params(struct ath6kl *ar)
|
||||
int ath6kl_init_hw_params(struct ath6kl *ar)
|
||||
{
|
||||
const struct ath6kl_hw *hw;
|
||||
const struct ath6kl_hw *uninitialized_var(hw);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
|
||||
@ -1481,10 +1577,11 @@ int ath6kl_init_hw_start(struct ath6kl *ar)
|
||||
|
||||
|
||||
if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
|
||||
ath6kl_info("%s %s fw %s%s\n",
|
||||
ath6kl_info("%s %s fw %s api %d%s\n",
|
||||
ar->hw.name,
|
||||
ath6kl_init_get_hif_name(ar->hif_type),
|
||||
ar->wiphy->fw_version,
|
||||
ar->fw_api,
|
||||
test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
|
||||
}
|
||||
|
||||
@ -1549,173 +1646,7 @@ int ath6kl_init_hw_stop(struct ath6kl *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ath6kl_core_init(struct ath6kl *ar)
|
||||
{
|
||||
struct ath6kl_bmi_target_info targ_info;
|
||||
struct net_device *ndev;
|
||||
int ret = 0, i;
|
||||
|
||||
ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
|
||||
if (!ar->ath6kl_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = ath6kl_bmi_init(ar);
|
||||
if (ret)
|
||||
goto err_wq;
|
||||
|
||||
/*
|
||||
* Turn on power to get hardware (target) version and leave power
|
||||
* on deliberately as we will boot the hardware anyway within a few
|
||||
* seconds.
|
||||
*/
|
||||
ret = ath6kl_hif_power_on(ar);
|
||||
if (ret)
|
||||
goto err_bmi_cleanup;
|
||||
|
||||
ret = ath6kl_bmi_get_target_info(ar, &targ_info);
|
||||
if (ret)
|
||||
goto err_power_off;
|
||||
|
||||
ar->version.target_ver = le32_to_cpu(targ_info.version);
|
||||
ar->target_type = le32_to_cpu(targ_info.type);
|
||||
ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
|
||||
|
||||
ret = ath6kl_init_hw_params(ar);
|
||||
if (ret)
|
||||
goto err_power_off;
|
||||
|
||||
ar->htc_target = ath6kl_htc_create(ar);
|
||||
|
||||
if (!ar->htc_target) {
|
||||
ret = -ENOMEM;
|
||||
goto err_power_off;
|
||||
}
|
||||
|
||||
ret = ath6kl_fetch_firmwares(ar);
|
||||
if (ret)
|
||||
goto err_htc_cleanup;
|
||||
|
||||
/* FIXME: we should free all firmwares in the error cases below */
|
||||
|
||||
/* Indicate that WMI is enabled (although not ready yet) */
|
||||
set_bit(WMI_ENABLED, &ar->flag);
|
||||
ar->wmi = ath6kl_wmi_init(ar);
|
||||
if (!ar->wmi) {
|
||||
ath6kl_err("failed to initialize wmi\n");
|
||||
ret = -EIO;
|
||||
goto err_htc_cleanup;
|
||||
}
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
|
||||
|
||||
ret = ath6kl_register_ieee80211_hw(ar);
|
||||
if (ret)
|
||||
goto err_node_cleanup;
|
||||
|
||||
ret = ath6kl_debug_init(ar);
|
||||
if (ret) {
|
||||
wiphy_unregister(ar->wiphy);
|
||||
goto err_node_cleanup;
|
||||
}
|
||||
|
||||
for (i = 0; i < ar->vif_max; i++)
|
||||
ar->avail_idx_map |= BIT(i);
|
||||
|
||||
rtnl_lock();
|
||||
|
||||
/* Add an initial station interface */
|
||||
ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
|
||||
INFRA_NETWORK);
|
||||
|
||||
rtnl_unlock();
|
||||
|
||||
if (!ndev) {
|
||||
ath6kl_err("Failed to instantiate a network device\n");
|
||||
ret = -ENOMEM;
|
||||
wiphy_unregister(ar->wiphy);
|
||||
goto err_debug_init;
|
||||
}
|
||||
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
|
||||
__func__, ndev->name, ndev, ar);
|
||||
|
||||
/* setup access class priority mappings */
|
||||
ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
|
||||
ar->ac_stream_pri_map[WMM_AC_BE] = 1;
|
||||
ar->ac_stream_pri_map[WMM_AC_VI] = 2;
|
||||
ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
|
||||
|
||||
/* give our connected endpoints some buffers */
|
||||
ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
|
||||
ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
|
||||
|
||||
/* allocate some buffers that handle larger AMSDU frames */
|
||||
ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
|
||||
|
||||
ath6kl_cookie_init(ar);
|
||||
|
||||
ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
|
||||
ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
|
||||
|
||||
if (suspend_cutpower)
|
||||
ar->conf_flags |= ATH6KL_CONF_SUSPEND_CUTPOWER;
|
||||
|
||||
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
|
||||
WIPHY_FLAG_HAVE_AP_SME |
|
||||
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
|
||||
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
|
||||
|
||||
if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
|
||||
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
|
||||
|
||||
ar->wiphy->probe_resp_offload =
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
|
||||
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
|
||||
|
||||
set_bit(FIRST_BOOT, &ar->flag);
|
||||
|
||||
ret = ath6kl_init_hw_start(ar);
|
||||
if (ret) {
|
||||
ath6kl_err("Failed to start hardware: %d\n", ret);
|
||||
goto err_rxbuf_cleanup;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set mac address which is received in ready event
|
||||
* FIXME: Move to ath6kl_interface_add()
|
||||
*/
|
||||
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
|
||||
|
||||
return ret;
|
||||
|
||||
err_rxbuf_cleanup:
|
||||
ath6kl_htc_flush_rx_buf(ar->htc_target);
|
||||
ath6kl_cleanup_amsdu_rxbufs(ar);
|
||||
rtnl_lock();
|
||||
ath6kl_deinit_if_data(netdev_priv(ndev));
|
||||
rtnl_unlock();
|
||||
wiphy_unregister(ar->wiphy);
|
||||
err_debug_init:
|
||||
ath6kl_debug_cleanup(ar);
|
||||
err_node_cleanup:
|
||||
ath6kl_wmi_shutdown(ar->wmi);
|
||||
clear_bit(WMI_ENABLED, &ar->flag);
|
||||
ar->wmi = NULL;
|
||||
err_htc_cleanup:
|
||||
ath6kl_htc_cleanup(ar->htc_target);
|
||||
err_power_off:
|
||||
ath6kl_hif_power_off(ar);
|
||||
err_bmi_cleanup:
|
||||
ath6kl_bmi_cleanup(ar);
|
||||
err_wq:
|
||||
destroy_workqueue(ar->ath6kl_wq);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */
|
||||
void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
|
||||
{
|
||||
static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
|
||||
@ -1747,6 +1678,7 @@ void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
|
||||
void ath6kl_stop_txrx(struct ath6kl *ar)
|
||||
{
|
||||
struct ath6kl_vif *vif, *tmp_vif;
|
||||
int i;
|
||||
|
||||
set_bit(DESTROY_IN_PROGRESS, &ar->flag);
|
||||
|
||||
@ -1755,13 +1687,16 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < AP_MAX_NUM_STA; i++)
|
||||
aggr_reset_state(ar->sta_list[i].aggr_conn);
|
||||
|
||||
spin_lock_bh(&ar->list_lock);
|
||||
list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
|
||||
list_del(&vif->list);
|
||||
spin_unlock_bh(&ar->list_lock);
|
||||
ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
|
||||
rtnl_lock();
|
||||
ath6kl_deinit_if_data(vif);
|
||||
ath6kl_cfg80211_vif_cleanup(vif);
|
||||
rtnl_unlock();
|
||||
spin_lock_bh(&ar->list_lock);
|
||||
}
|
||||
@ -1796,3 +1731,4 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
|
||||
|
||||
clear_bit(WLAN_ENABLED, &ar->flag);
|
||||
}
|
||||
EXPORT_SYMBOL(ath6kl_stop_txrx);
|
||||
|
@ -52,9 +52,11 @@ struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
|
||||
return conn;
|
||||
}
|
||||
|
||||
static void ath6kl_add_new_sta(struct ath6kl *ar, u8 *mac, u16 aid, u8 *wpaie,
|
||||
u8 ielen, u8 keymgmt, u8 ucipher, u8 auth)
|
||||
static void ath6kl_add_new_sta(struct ath6kl_vif *vif, u8 *mac, u16 aid,
|
||||
u8 *wpaie, size_t ielen, u8 keymgmt,
|
||||
u8 ucipher, u8 auth, u8 apsd_info)
|
||||
{
|
||||
struct ath6kl *ar = vif->ar;
|
||||
struct ath6kl_sta *sta;
|
||||
u8 free_slot;
|
||||
|
||||
@ -68,9 +70,11 @@ static void ath6kl_add_new_sta(struct ath6kl *ar, u8 *mac, u16 aid, u8 *wpaie,
|
||||
sta->keymgmt = keymgmt;
|
||||
sta->ucipher = ucipher;
|
||||
sta->auth = auth;
|
||||
sta->apsd_info = apsd_info;
|
||||
|
||||
ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
|
||||
ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
|
||||
aggr_conn_init(vif, vif->aggr_cntxt, sta->aggr_conn);
|
||||
}
|
||||
|
||||
static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
|
||||
@ -80,6 +84,7 @@ static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
|
||||
/* empty the queued pkts in the PS queue if any */
|
||||
spin_lock_bh(&sta->psq_lock);
|
||||
skb_queue_purge(&sta->psq);
|
||||
skb_queue_purge(&sta->apsdq);
|
||||
spin_unlock_bh(&sta->psq_lock);
|
||||
|
||||
memset(&ar->ap_stats.sta[sta->aid - 1], 0,
|
||||
@ -90,7 +95,7 @@ static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
|
||||
sta->sta_flags = 0;
|
||||
|
||||
ar->sta_list_index = ar->sta_list_index & ~(1 << i);
|
||||
|
||||
aggr_reset_state(sta->aggr_conn);
|
||||
}
|
||||
|
||||
static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
|
||||
@ -252,7 +257,7 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
|
||||
struct ath6kl_dbglog_hdr debug_hdr;
|
||||
struct ath6kl_dbglog_buf debug_buf;
|
||||
u32 address, length, dropped, firstbuf, debug_hdr_addr;
|
||||
int ret = 0, loop;
|
||||
int ret, loop;
|
||||
u8 *buf;
|
||||
|
||||
buf = kmalloc(ATH6KL_FWLOG_PAYLOAD_SIZE, GFP_KERNEL);
|
||||
@ -347,9 +352,6 @@ void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
|
||||
case TARGET_TYPE_AR6004:
|
||||
address = AR6004_RESET_CONTROL_ADDRESS;
|
||||
break;
|
||||
default:
|
||||
address = AR6003_RESET_CONTROL_ADDRESS;
|
||||
break;
|
||||
}
|
||||
|
||||
status = ath6kl_diag_write32(ar, address, data);
|
||||
@ -363,7 +365,7 @@ static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
|
||||
u8 index;
|
||||
u8 keyusage;
|
||||
|
||||
for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
|
||||
for (index = 0; index <= WMI_MAX_KEY_INDEX; index++) {
|
||||
if (vif->wep_key_list[index].key_len) {
|
||||
keyusage = GROUP_USAGE;
|
||||
if (index == vif->def_txkey_index)
|
||||
@ -428,9 +430,8 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
|
||||
|
||||
void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
|
||||
u8 keymgmt, u8 ucipher, u8 auth,
|
||||
u8 assoc_req_len, u8 *assoc_info)
|
||||
u8 assoc_req_len, u8 *assoc_info, u8 apsd_info)
|
||||
{
|
||||
struct ath6kl *ar = vif->ar;
|
||||
u8 *ies = NULL, *wpa_ie = NULL, *pos;
|
||||
size_t ies_len = 0;
|
||||
struct station_info sinfo;
|
||||
@ -484,9 +485,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
|
||||
pos += 2 + pos[1];
|
||||
}
|
||||
|
||||
ath6kl_add_new_sta(ar, mac_addr, aid, wpa_ie,
|
||||
ath6kl_add_new_sta(vif, mac_addr, aid, wpa_ie,
|
||||
wpa_ie ? 2 + wpa_ie[1] : 0,
|
||||
keymgmt, ucipher, auth);
|
||||
keymgmt, ucipher, auth, apsd_info);
|
||||
|
||||
/* send event to application */
|
||||
memset(&sinfo, 0, sizeof(sinfo));
|
||||
@ -587,10 +588,11 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
|
||||
memcpy(vif->bssid, bssid, sizeof(vif->bssid));
|
||||
vif->bss_ch = channel;
|
||||
|
||||
if ((vif->nw_type == INFRA_NETWORK))
|
||||
if ((vif->nw_type == INFRA_NETWORK)) {
|
||||
ar->listen_intvl_b = listen_int;
|
||||
ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
|
||||
ar->listen_intvl_t,
|
||||
ar->listen_intvl_b);
|
||||
0, ar->listen_intvl_b);
|
||||
}
|
||||
|
||||
netif_wake_queue(vif->ndev);
|
||||
|
||||
@ -601,7 +603,7 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
|
||||
netif_carrier_on(vif->ndev);
|
||||
spin_unlock_bh(&vif->if_lock);
|
||||
|
||||
aggr_reset_state(vif->aggr_cntxt);
|
||||
aggr_reset_state(vif->aggr_cntxt->aggr_conn);
|
||||
vif->reconnect_flag = 0;
|
||||
|
||||
if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
|
||||
@ -923,7 +925,7 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
|
||||
assoc_resp_len, assoc_info,
|
||||
prot_reason_status);
|
||||
|
||||
aggr_reset_state(vif->aggr_cntxt);
|
||||
aggr_reset_state(vif->aggr_cntxt->aggr_conn);
|
||||
|
||||
del_timer(&vif->disconnect_timer);
|
||||
|
||||
@ -1020,11 +1022,155 @@ static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
|
||||
return &vif->net_stats;
|
||||
}
|
||||
|
||||
static struct net_device_ops ath6kl_netdev_ops = {
|
||||
static int ath6kl_set_features(struct net_device *dev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
struct ath6kl_vif *vif = netdev_priv(dev);
|
||||
struct ath6kl *ar = vif->ar;
|
||||
int err = 0;
|
||||
|
||||
if ((features & NETIF_F_RXCSUM) &&
|
||||
(ar->rx_meta_ver != WMI_META_VERSION_2)) {
|
||||
ar->rx_meta_ver = WMI_META_VERSION_2;
|
||||
err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
|
||||
vif->fw_vif_idx,
|
||||
ar->rx_meta_ver, 0, 0);
|
||||
if (err) {
|
||||
dev->features = features & ~NETIF_F_RXCSUM;
|
||||
return err;
|
||||
}
|
||||
} else if (!(features & NETIF_F_RXCSUM) &&
|
||||
(ar->rx_meta_ver == WMI_META_VERSION_2)) {
|
||||
ar->rx_meta_ver = 0;
|
||||
err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
|
||||
vif->fw_vif_idx,
|
||||
ar->rx_meta_ver, 0, 0);
|
||||
if (err) {
|
||||
dev->features = features | NETIF_F_RXCSUM;
|
||||
return err;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return err;
|
||||
}
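A usage note for the handler above: the RXCSUM feature it reacts to is the one user space flips with ethtool, for example (interface name assumed):

    ethtool -K wlan0 rx off    # clears NETIF_F_RXCSUM, rx meta version drops back to 0
    ethtool -K wlan0 rx on     # sets NETIF_F_RXCSUM, switches to WMI_META_VERSION_2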
|
||||
|
||||
static void ath6kl_set_multicast_list(struct net_device *ndev)
|
||||
{
|
||||
struct ath6kl_vif *vif = netdev_priv(ndev);
|
||||
bool mc_all_on = false, mc_all_off = false;
|
||||
int mc_count = netdev_mc_count(ndev);
|
||||
struct netdev_hw_addr *ha;
|
||||
bool found;
|
||||
struct ath6kl_mc_filter *mc_filter, *tmp;
|
||||
struct list_head mc_filter_new;
|
||||
int ret;
|
||||
|
||||
if (!test_bit(WMI_READY, &vif->ar->flag) ||
|
||||
!test_bit(WLAN_ENABLED, &vif->flags))
|
||||
return;
|
||||
|
||||
mc_all_on = !!(ndev->flags & IFF_PROMISC) ||
|
||||
!!(ndev->flags & IFF_ALLMULTI) ||
|
||||
!!(mc_count > ATH6K_MAX_MC_FILTERS_PER_LIST);
|
||||
|
||||
mc_all_off = !(ndev->flags & IFF_MULTICAST) || mc_count == 0;
|
||||
|
||||
if (mc_all_on || mc_all_off) {
|
||||
/* Enable/disable all multicast */
|
||||
ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast filter\n",
|
||||
mc_all_on ? "enabling" : "disabling");
|
||||
ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx,
|
||||
mc_all_on);
|
||||
if (ret)
|
||||
ath6kl_warn("Failed to %s multicast receive\n",
|
||||
mc_all_on ? "enable" : "disable");
|
||||
return;
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
|
||||
found = false;
|
||||
netdev_for_each_mc_addr(ha, ndev) {
|
||||
if (memcmp(ha->addr, mc_filter->hw_addr,
|
||||
ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
/*
|
||||
* Delete the filter which was previously set
|
||||
* but not in the new request.
|
||||
*/
|
||||
ath6kl_dbg(ATH6KL_DBG_TRC,
|
||||
"Removing %pM from multicast filter\n",
|
||||
mc_filter->hw_addr);
|
||||
ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
|
||||
vif->fw_vif_idx, mc_filter->hw_addr,
|
||||
false);
|
||||
if (ret) {
|
||||
ath6kl_warn("Failed to remove multicast filter:%pM\n",
|
||||
mc_filter->hw_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
list_del(&mc_filter->list);
|
||||
kfree(mc_filter);
|
||||
}
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&mc_filter_new);
|
||||
|
||||
netdev_for_each_mc_addr(ha, ndev) {
|
||||
found = false;
|
||||
list_for_each_entry(mc_filter, &vif->mc_filter, list) {
|
||||
if (memcmp(ha->addr, mc_filter->hw_addr,
|
||||
ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
mc_filter = kzalloc(sizeof(struct ath6kl_mc_filter),
|
||||
GFP_ATOMIC);
|
||||
if (!mc_filter) {
|
||||
WARN_ON(1);
|
||||
goto out;
|
||||
}
|
||||
|
||||
memcpy(mc_filter->hw_addr, ha->addr,
|
||||
ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE);
|
||||
/* Set the multicast filter */
|
||||
ath6kl_dbg(ATH6KL_DBG_TRC,
|
||||
"Adding %pM to multicast filter list\n",
|
||||
mc_filter->hw_addr);
|
||||
ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
|
||||
vif->fw_vif_idx, mc_filter->hw_addr,
|
||||
true);
|
||||
if (ret) {
|
||||
ath6kl_warn("Failed to add multicast filter :%pM\n",
|
||||
mc_filter->hw_addr);
|
||||
kfree(mc_filter);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_add_tail(&mc_filter->list, &mc_filter_new);
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
list_splice_tail(&mc_filter_new, &vif->mc_filter);
|
||||
}
|
||||
|
||||
static const struct net_device_ops ath6kl_netdev_ops = {
|
||||
.ndo_open = ath6kl_open,
|
||||
.ndo_stop = ath6kl_close,
|
||||
.ndo_start_xmit = ath6kl_data_tx,
|
||||
.ndo_get_stats = ath6kl_get_stats,
|
||||
.ndo_set_features = ath6kl_set_features,
|
||||
.ndo_set_rx_mode = ath6kl_set_multicast_list,
|
||||
};
|
||||
|
||||
void init_netdev(struct net_device *dev)
|
||||
|
@ -49,11 +49,13 @@ struct ath6kl_sdio {
|
||||
/* scatter request list head */
|
||||
struct list_head scat_req;
|
||||
|
||||
/* Avoids disabling irq while the interrupts being handled */
|
||||
struct mutex mtx_irq;
|
||||
|
||||
spinlock_t scat_lock;
|
||||
bool scatter_enabled;
|
||||
|
||||
bool is_disabled;
|
||||
atomic_t irq_handling;
|
||||
const struct sdio_device_id *id;
|
||||
struct work_struct wr_async_work;
|
||||
struct list_head wr_asyncq;
|
||||
@ -460,8 +462,7 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
|
||||
ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");
|
||||
|
||||
ar_sdio = sdio_get_drvdata(func);
|
||||
atomic_set(&ar_sdio->irq_handling, 1);
|
||||
|
||||
mutex_lock(&ar_sdio->mtx_irq);
|
||||
/*
|
||||
* Release the host during interrupts so we can pick it back up when
|
||||
* we process commands.
|
||||
@ -470,7 +471,7 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
|
||||
|
||||
status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
|
||||
sdio_claim_host(ar_sdio->func);
|
||||
atomic_set(&ar_sdio->irq_handling, 0);
|
||||
mutex_unlock(&ar_sdio->mtx_irq);
|
||||
WARN_ON(status && status != -ECANCELED);
|
||||
}
|
||||
|
||||
@ -578,17 +579,14 @@ static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
|
||||
|
||||
sdio_claim_host(ar_sdio->func);
|
||||
|
||||
/* Mask our function IRQ */
|
||||
while (atomic_read(&ar_sdio->irq_handling)) {
|
||||
sdio_release_host(ar_sdio->func);
|
||||
schedule_timeout(HZ / 10);
|
||||
sdio_claim_host(ar_sdio->func);
|
||||
}
|
||||
mutex_lock(&ar_sdio->mtx_irq);
|
||||
|
||||
ret = sdio_release_irq(ar_sdio->func);
|
||||
if (ret)
|
||||
ath6kl_err("Failed to release sdio irq: %d\n", ret);
|
||||
|
||||
mutex_unlock(&ar_sdio->mtx_irq);
|
||||
|
||||
sdio_release_host(ar_sdio->func);
|
||||
}
|
||||
|
||||
@ -772,7 +770,6 @@ static int ath6kl_sdio_config(struct ath6kl *ar)
|
||||
if (ret) {
|
||||
ath6kl_err("Set sdio block size %d failed: %d)\n",
|
||||
HIF_MBOX_BLOCK_SIZE, ret);
|
||||
sdio_release_host(func);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -782,6 +779,35 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
|
||||
{
|
||||
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
|
||||
struct sdio_func *func = ar_sdio->func;
|
||||
mmc_pm_flag_t flags;
|
||||
int ret;
|
||||
|
||||
flags = sdio_get_host_pm_caps(func);
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
|
||||
|
||||
if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
|
||||
!(flags & MMC_PM_KEEP_POWER))
|
||||
return -EINVAL;
|
||||
|
||||
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
|
||||
if (ret) {
|
||||
ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* sdio irq wakes up host */
|
||||
ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
|
||||
if (ret)
|
||||
ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
|
||||
{
|
||||
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
|
||||
@ -789,64 +815,70 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
|
||||
mmc_pm_flag_t flags;
|
||||
int ret;
|
||||
|
||||
flags = sdio_get_host_pm_caps(func);
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
|
||||
|
||||
if (!(flags & MMC_PM_KEEP_POWER) ||
|
||||
(ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) {
|
||||
/* as host doesn't support keep power we need to cut power */
|
||||
return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER,
|
||||
NULL);
|
||||
}
|
||||
|
||||
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "ath6kl: set sdio pm flags failed: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!(flags & MMC_PM_WAKE_SDIO_IRQ))
|
||||
goto deepsleep;
|
||||
|
||||
/* sdio irq wakes up host */
|
||||
|
||||
if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
|
||||
ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n");
|
||||
|
||||
ret = ath6kl_set_sdio_pm_caps(ar);
|
||||
if (ret)
|
||||
goto cut_pwr;
|
||||
|
||||
ret = ath6kl_cfg80211_suspend(ar,
|
||||
ATH6KL_CFG_SUSPEND_SCHED_SCAN,
|
||||
NULL);
|
||||
if (ret) {
|
||||
ath6kl_warn("Schedule scan suspend failed: %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
|
||||
if (ret)
|
||||
ath6kl_warn("set sdio wake irq flag failed: %d\n", ret);
|
||||
goto cut_pwr;
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (wow) {
|
||||
/*
|
||||
* The host sdio controller is capable of keep power and
|
||||
* sdio irq wake up at this point. It's fine to continue
|
||||
* wow suspend operation.
|
||||
*/
|
||||
if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
|
||||
(!ar->suspend_mode && wow)) {
|
||||
|
||||
ret = ath6kl_set_sdio_pm_caps(ar);
|
||||
if (ret)
|
||||
goto cut_pwr;
|
||||
|
||||
ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto cut_pwr;
|
||||
|
||||
ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
|
||||
if (ret)
|
||||
ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
deepsleep:
|
||||
return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL);
|
||||
if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
|
||||
!ar->suspend_mode) {
|
||||
|
||||
flags = sdio_get_host_pm_caps(func);
|
||||
if (!(flags & MMC_PM_KEEP_POWER))
|
||||
goto cut_pwr;
|
||||
|
||||
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
|
||||
if (ret)
|
||||
goto cut_pwr;
|
||||
|
||||
/*
|
||||
* Workaround to support Deep Sleep with MSM, set the host pm
|
||||
* flag as MMC_PM_WAKE_SDIO_IRQ to allow the SDCC driver to disable
|
||||
* the sdc2_clock and internally allows MSM to enter
|
||||
* TCXO shutdown properly.
|
||||
*/
|
||||
if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
|
||||
ret = sdio_set_host_pm_flags(func,
|
||||
MMC_PM_WAKE_SDIO_IRQ);
|
||||
if (ret)
|
||||
goto cut_pwr;
|
||||
}
|
||||
|
||||
ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
|
||||
NULL);
|
||||
if (ret)
|
||||
goto cut_pwr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
cut_pwr:
|
||||
return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
|
||||
}
|
||||
|
||||
static int ath6kl_sdio_resume(struct ath6kl *ar)
|
||||
@ -1253,6 +1285,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
|
||||
spin_lock_init(&ar_sdio->scat_lock);
|
||||
spin_lock_init(&ar_sdio->wr_async_lock);
|
||||
mutex_init(&ar_sdio->dma_buffer_mutex);
|
||||
mutex_init(&ar_sdio->mtx_irq);
|
||||
|
||||
INIT_LIST_HEAD(&ar_sdio->scat_req);
|
||||
INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
|
||||
@ -1263,7 +1296,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
|
||||
for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
|
||||
ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
|
||||
|
||||
ar = ath6kl_core_alloc(&ar_sdio->func->dev);
|
||||
ar = ath6kl_core_create(&ar_sdio->func->dev);
|
||||
if (!ar) {
|
||||
ath6kl_err("Failed to alloc ath6kl core\n");
|
||||
ret = -ENOMEM;
|
||||
@ -1293,7 +1326,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
|
||||
return ret;
|
||||
|
||||
err_core_alloc:
|
||||
ath6kl_core_free(ar_sdio->ar);
|
||||
ath6kl_core_destroy(ar_sdio->ar);
|
||||
err_dma:
|
||||
kfree(ar_sdio->dma_buffer);
|
||||
err_hif:
|
||||
@ -1316,6 +1349,7 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
|
||||
cancel_work_sync(&ar_sdio->wr_async_work);
|
||||
|
||||
ath6kl_core_cleanup(ar_sdio->ar);
|
||||
ath6kl_core_destroy(ar_sdio->ar);
|
||||
|
||||
kfree(ar_sdio->dma_buffer);
|
||||
kfree(ar_sdio);
|
||||
@ -1332,7 +1366,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
|
||||
MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
|
||||
|
||||
static struct sdio_driver ath6kl_sdio_driver = {
|
||||
.name = "ath6kl",
|
||||
.name = "ath6kl_sdio",
|
||||
.id_table = ath6kl_sdio_devices,
|
||||
.probe = ath6kl_sdio_probe,
|
||||
.remove = ath6kl_sdio_remove,
|
||||
@ -1362,19 +1396,19 @@ MODULE_AUTHOR("Atheros Communications, Inc.");
|
||||
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
|
||||
MODULE_FIRMWARE(AR6003_HW_2_0_OTP_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_0_FIRMWARE_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_0_PATCH_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_1_1_OTP_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_1_1_FIRMWARE_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_1_1_PATCH_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
|
||||
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
|
||||
|
@ -15,6 +15,7 @@
|
||||
*/
|
||||
|
||||
#include "testmode.h"
|
||||
#include "debug.h"
|
||||
|
||||
#include <net/netlink.h>
|
||||
|
||||
@ -30,7 +31,7 @@ enum ath6kl_tm_attr {
|
||||
|
||||
enum ath6kl_tm_cmd {
|
||||
ATH6KL_TM_CMD_TCMD = 0,
|
||||
ATH6KL_TM_CMD_RX_REPORT = 1,
|
||||
ATH6KL_TM_CMD_RX_REPORT = 1, /* not used anymore */
|
||||
};
|
||||
|
||||
#define ATH6KL_TM_DATA_MAX_LEN 5000
|
||||
@ -41,84 +42,33 @@ static const struct nla_policy ath6kl_tm_policy[ATH6KL_TM_ATTR_MAX + 1] = {
|
||||
.len = ATH6KL_TM_DATA_MAX_LEN },
|
||||
};
|
||||
|
||||
void ath6kl_tm_rx_report_event(struct ath6kl *ar, void *buf, size_t buf_len)
|
||||
void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len)
|
||||
{
|
||||
if (down_interruptible(&ar->sem))
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!buf || buf_len == 0)
|
||||
return;
|
||||
|
||||
kfree(ar->tm.rx_report);
|
||||
|
||||
ar->tm.rx_report = kmemdup(buf, buf_len, GFP_KERNEL);
|
||||
ar->tm.rx_report_len = buf_len;
|
||||
|
||||
up(&ar->sem);
|
||||
|
||||
wake_up(&ar->event_wq);
|
||||
}
|
||||
|
||||
static int ath6kl_tm_rx_report(struct ath6kl *ar, void *buf, size_t buf_len,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
int ret = 0;
|
||||
long left;
|
||||
|
||||
if (down_interruptible(&ar->sem))
|
||||
return -ERESTARTSYS;
|
||||
|
||||
if (!test_bit(WMI_READY, &ar->flag)) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
skb = cfg80211_testmode_alloc_event_skb(ar->wiphy, buf_len, GFP_KERNEL);
|
||||
if (!skb) {
|
||||
ath6kl_warn("failed to allocate testmode rx skb!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ath6kl_wmi_test_cmd(ar->wmi, buf, buf_len) < 0) {
|
||||
up(&ar->sem);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
left = wait_event_interruptible_timeout(ar->event_wq,
|
||||
ar->tm.rx_report != NULL,
|
||||
WMI_TIMEOUT);
|
||||
|
||||
if (left == 0) {
|
||||
ret = -ETIMEDOUT;
|
||||
goto out;
|
||||
} else if (left < 0) {
|
||||
ret = left;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ar->tm.rx_report == NULL || ar->tm.rx_report_len == 0) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, ar->tm.rx_report_len,
|
||||
ar->tm.rx_report);
|
||||
|
||||
kfree(ar->tm.rx_report);
|
||||
ar->tm.rx_report = NULL;
|
||||
|
||||
out:
|
||||
up(&ar->sem);
|
||||
|
||||
return ret;
|
||||
NLA_PUT_U32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD);
|
||||
NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf);
|
||||
cfg80211_testmode_event(skb, GFP_KERNEL);
|
||||
return;
|
||||
|
||||
nla_put_failure:
|
||||
ret = -ENOBUFS;
|
||||
goto out;
|
||||
kfree_skb(skb);
|
||||
ath6kl_warn("nla_put failed on testmode rx skb!\n");
|
||||
}
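For reference, NLA_PUT and NLA_PUT_U32 above are the older netlink helper macros that jump to the local nla_put_failure label when the skb runs out of room; the same logic written against the function-style helpers (a sketch, not part of this patch) would be:

    if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) ||
        nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf))
            goto nla_put_failure;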
|
||||
|
||||
int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
|
||||
{
|
||||
struct ath6kl *ar = wiphy_priv(wiphy);
|
||||
struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1];
|
||||
int err, buf_len, reply_len;
|
||||
struct sk_buff *skb;
|
||||
int err, buf_len;
|
||||
void *buf;
|
||||
|
||||
err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len,
|
||||
@ -143,24 +93,6 @@ int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
|
||||
|
||||
break;
|
||||
case ATH6KL_TM_CMD_RX_REPORT:
|
||||
if (!tb[ATH6KL_TM_ATTR_DATA])
|
||||
return -EINVAL;
|
||||
|
||||
buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]);
|
||||
buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]);
|
||||
|
||||
reply_len = nla_total_size(ATH6KL_TM_DATA_MAX_LEN);
|
||||
skb = cfg80211_testmode_alloc_reply_skb(wiphy, reply_len);
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
err = ath6kl_tm_rx_report(ar, buf, buf_len, skb);
|
||||
if (err < 0) {
|
||||
kfree_skb(skb);
|
||||
return err;
|
||||
}
|
||||
|
||||
return cfg80211_testmode_reply(skb);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
@ -18,13 +18,13 @@
|
||||
|
||||
#ifdef CONFIG_NL80211_TESTMODE
|
||||
|
||||
void ath6kl_tm_rx_report_event(struct ath6kl *ar, void *buf, size_t buf_len);
|
||||
void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len);
|
||||
int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len);
|
||||
|
||||
#else
|
||||
|
||||
static inline void ath6kl_tm_rx_report_event(struct ath6kl *ar, void *buf,
|
||||
size_t buf_len)
|
||||
static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf,
|
||||
size_t buf_len)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -17,6 +17,23 @@
|
||||
#include "core.h"
|
||||
#include "debug.h"
|
||||
|
||||
/*
|
||||
* tid - tid_mux0..tid_mux3
|
||||
* aid - tid_mux4..tid_mux7
|
||||
*/
|
||||
#define ATH6KL_TID_MASK 0xf
|
||||
#define ATH6KL_AID_SHIFT 4
|
||||
|
||||
static inline u8 ath6kl_get_tid(u8 tid_mux)
|
||||
{
|
||||
return tid_mux & ATH6KL_TID_MASK;
|
||||
}
|
||||
|
||||
static inline u8 ath6kl_get_aid(u8 tid_mux)
|
||||
{
|
||||
return tid_mux >> ATH6KL_AID_SHIFT;
|
||||
}
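The two helpers above unpack a byte that multiplexes the TID into bits 0-3 and the AID into bits 4-7; the matching encode is not part of this hunk, but as a sketch it would look like:

    /* hypothetical counterpart to ath6kl_get_tid()/ath6kl_get_aid() */
    static inline u8 ath6kl_mux_tid_aid(u8 tid, u8 aid)
    {
        return (aid << ATH6KL_AID_SHIFT) | (tid & ATH6KL_TID_MASK);
    }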
|
||||
|
||||
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
|
||||
u32 *map_no)
|
||||
{
|
||||
@ -77,12 +94,118 @@ static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
|
||||
return ar->node_map[ep_map].ep_id;
|
||||
}
|
||||
|
||||
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
|
||||
struct ath6kl_vif *vif,
|
||||
struct sk_buff *skb,
|
||||
u32 *flags)
|
||||
{
|
||||
struct ath6kl *ar = vif->ar;
|
||||
bool is_apsdq_empty = false;
|
||||
struct ethhdr *datap = (struct ethhdr *) skb->data;
|
||||
u8 up = 0, traffic_class, *ip_hdr;
|
||||
u16 ether_type;
|
||||
struct ath6kl_llc_snap_hdr *llc_hdr;
|
||||
|
||||
if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
|
||||
/*
|
||||
* This tx is because of a uAPSD trigger; determine
* the More and EOSP bits. Set EOSP if the queue is empty
* or sufficient frames are delivered for this trigger.
*/
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
if (!skb_queue_empty(&conn->apsdq))
|
||||
*flags |= WMI_DATA_HDR_FLAGS_MORE;
|
||||
else if (conn->sta_flags & STA_PS_APSD_EOSP)
|
||||
*flags |= WMI_DATA_HDR_FLAGS_EOSP;
|
||||
*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
return false;
|
||||
} else if (!conn->apsd_info)
|
||||
return false;
|
||||
|
||||
if (test_bit(WMM_ENABLED, &vif->flags)) {
|
||||
ether_type = be16_to_cpu(datap->h_proto);
|
||||
if (is_ethertype(ether_type)) {
|
||||
/* packet is in DIX format */
|
||||
ip_hdr = (u8 *)(datap + 1);
|
||||
} else {
|
||||
/* packet is in 802.3 format */
|
||||
llc_hdr = (struct ath6kl_llc_snap_hdr *)
|
||||
(datap + 1);
|
||||
ether_type = be16_to_cpu(llc_hdr->eth_type);
|
||||
ip_hdr = (u8 *)(llc_hdr + 1);
|
||||
}
|
||||
|
||||
if (ether_type == IP_ETHERTYPE)
|
||||
up = ath6kl_wmi_determine_user_priority(
|
||||
ip_hdr, 0);
|
||||
}
|
||||
|
||||
traffic_class = ath6kl_wmi_get_traffic_class(up);
|
||||
|
||||
if ((conn->apsd_info & (1 << traffic_class)) == 0)
|
||||
return false;
|
||||
|
||||
/* Queue the frames if the STA is sleeping */
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
is_apsdq_empty = skb_queue_empty(&conn->apsdq);
|
||||
skb_queue_tail(&conn->apsdq, skb);
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
|
||||
/*
|
||||
* If this is the first pkt getting queued
|
||||
* for this STA, update the PVB for this STA
|
||||
*/
|
||||
if (is_apsdq_empty) {
|
||||
ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
|
||||
vif->fw_vif_idx,
|
||||
conn->aid, 1, 0);
|
||||
}
|
||||
*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
|
||||
struct ath6kl_vif *vif,
|
||||
struct sk_buff *skb,
|
||||
u32 *flags)
|
||||
{
|
||||
bool is_psq_empty = false;
|
||||
struct ath6kl *ar = vif->ar;
|
||||
|
||||
if (conn->sta_flags & STA_PS_POLLED) {
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
if (!skb_queue_empty(&conn->psq))
|
||||
*flags |= WMI_DATA_HDR_FLAGS_MORE;
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Queue the frames if the STA is sleeping */
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
is_psq_empty = skb_queue_empty(&conn->psq);
|
||||
skb_queue_tail(&conn->psq, skb);
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
|
||||
/*
|
||||
* If this is the first pkt getting queued
|
||||
* for this STA, update the PVB for this
|
||||
* STA.
|
||||
*/
|
||||
if (is_psq_empty)
|
||||
ath6kl_wmi_set_pvb_cmd(ar->wmi,
|
||||
vif->fw_vif_idx,
|
||||
conn->aid, 1);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
|
||||
bool *more_data)
|
||||
u32 *flags)
|
||||
{
|
||||
struct ethhdr *datap = (struct ethhdr *) skb->data;
|
||||
struct ath6kl_sta *conn = NULL;
|
||||
bool ps_queued = false, is_psq_empty = false;
|
||||
bool ps_queued = false;
|
||||
struct ath6kl *ar = vif->ar;
|
||||
|
||||
if (is_multicast_ether_addr(datap->h_dest)) {
|
||||
@ -128,7 +251,7 @@ static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
|
||||
*/
|
||||
spin_lock_bh(&ar->mcastpsq_lock);
|
||||
if (!skb_queue_empty(&ar->mcastpsq))
|
||||
*more_data = true;
|
||||
*flags |= WMI_DATA_HDR_FLAGS_MORE;
|
||||
spin_unlock_bh(&ar->mcastpsq_lock);
|
||||
}
|
||||
}
|
||||
@ -142,37 +265,13 @@ static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
|
||||
}
|
||||
|
||||
if (conn->sta_flags & STA_PS_SLEEP) {
|
||||
if (!(conn->sta_flags & STA_PS_POLLED)) {
|
||||
/* Queue the frames if the STA is sleeping */
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
is_psq_empty = skb_queue_empty(&conn->psq);
|
||||
skb_queue_tail(&conn->psq, skb);
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
|
||||
/*
|
||||
* If this is the first pkt getting queued
|
||||
* for this STA, update the PVB for this
|
||||
* STA.
|
||||
*/
|
||||
if (is_psq_empty)
|
||||
ath6kl_wmi_set_pvb_cmd(ar->wmi,
|
||||
vif->fw_vif_idx,
|
||||
conn->aid, 1);
|
||||
|
||||
ps_queued = true;
|
||||
} else {
|
||||
/*
|
||||
* This tx is because of a PsPoll.
|
||||
* Determine if MoreData bit has to be set.
|
||||
*/
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
if (!skb_queue_empty(&conn->psq))
|
||||
*more_data = true;
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
}
|
||||
ps_queued = ath6kl_process_uapsdq(conn,
|
||||
vif, skb, flags);
|
||||
if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
|
||||
ps_queued = ath6kl_process_psq(conn,
|
||||
vif, skb, flags);
|
||||
}
|
||||
}
|
||||
|
||||
return ps_queued;
|
||||
}
|
||||
|
||||
@ -242,8 +341,13 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
u32 map_no = 0;
|
||||
u16 htc_tag = ATH6KL_DATA_PKT_TAG;
|
||||
u8 ac = 99 ; /* initialize to unmapped ac */
|
||||
bool chk_adhoc_ps_mapping = false, more_data = false;
|
||||
bool chk_adhoc_ps_mapping = false;
|
||||
int ret;
|
||||
struct wmi_tx_meta_v2 meta_v2;
|
||||
void *meta;
|
||||
u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
|
||||
u8 meta_ver = 0;
|
||||
u32 flags = 0;
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
|
||||
"%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
|
||||
@ -260,11 +364,19 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
/* AP mode Power saving processing */
|
||||
if (vif->nw_type == AP_NETWORK) {
|
||||
if (ath6kl_powersave_ap(vif, skb, &more_data))
|
||||
if (ath6kl_powersave_ap(vif, skb, &flags))
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (test_bit(WMI_ENABLED, &ar->flag)) {
|
||||
if ((dev->features & NETIF_F_IP_CSUM) &&
|
||||
(csum == CHECKSUM_PARTIAL)) {
|
||||
csum_start = skb->csum_start -
|
||||
(skb_network_header(skb) - skb->head) +
|
||||
sizeof(struct ath6kl_llc_snap_hdr);
|
||||
csum_dest = skb->csum_offset + csum_start;
|
||||
}
|
||||
|
||||
if (skb_headroom(skb) < dev->needed_headroom) {
|
||||
struct sk_buff *tmp_skb = skb;
|
||||
|
||||
@ -281,10 +393,28 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
goto fail_tx;
|
||||
}
|
||||
|
||||
if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
|
||||
more_data, 0, 0, NULL,
|
||||
vif->fw_vif_idx)) {
|
||||
ath6kl_err("wmi_data_hdr_add failed\n");
|
||||
if ((dev->features & NETIF_F_IP_CSUM) &&
|
||||
(csum == CHECKSUM_PARTIAL)) {
|
||||
meta_v2.csum_start = csum_start;
|
||||
meta_v2.csum_dest = csum_dest;
|
||||
|
||||
/* instruct target to calculate checksum */
|
||||
meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
|
||||
meta_ver = WMI_META_VERSION_2;
|
||||
meta = &meta_v2;
|
||||
} else {
|
||||
meta_ver = 0;
|
||||
meta = NULL;
|
||||
}
|
||||
|
||||
ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
|
||||
DATA_MSGTYPE, flags, 0,
|
||||
meta_ver,
|
||||
meta, vif->fw_vif_idx);
|
||||
|
||||
if (ret) {
|
||||
ath6kl_warn("failed to add wmi data header:%d\n"
|
||||
, ret);
|
||||
goto fail_tx;
|
||||
}
|
||||
|
||||
@ -449,9 +579,7 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
|
||||
* WMI queue with too many commands the only exception to
|
||||
* this is during testing using endpointping.
|
||||
*/
|
||||
spin_lock_bh(&ar->lock);
|
||||
set_bit(WMI_CTRL_EP_FULL, &ar->flag);
|
||||
spin_unlock_bh(&ar->lock);
|
||||
ath6kl_err("wmi ctrl ep is full\n");
|
||||
return action;
|
||||
}
|
||||
@ -479,9 +607,7 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
|
||||
action != HTC_SEND_FULL_DROP) {
|
||||
spin_unlock_bh(&ar->list_lock);
|
||||
|
||||
spin_lock_bh(&vif->if_lock);
|
||||
set_bit(NETQ_STOPPED, &vif->flags);
|
||||
spin_unlock_bh(&vif->if_lock);
|
||||
netif_stop_queue(vif->ndev);
|
||||
|
||||
return action;
|
||||
@ -710,10 +836,12 @@ static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
|
||||
{
|
||||
struct sk_buff *skb = NULL;
|
||||
|
||||
if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
|
||||
ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
|
||||
if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
|
||||
(AGGR_NUM_OF_FREE_NETBUFS >> 2))
|
||||
ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
|
||||
AGGR_NUM_OF_FREE_NETBUFS);
|
||||
|
||||
skb = skb_dequeue(&p_aggr->free_q);
|
||||
skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);
|
||||
|
||||
return skb;
|
||||
}
|
||||
@ -881,7 +1009,7 @@ static void aggr_slice_amsdu(struct aggr_info *p_aggr,
|
||||
dev_kfree_skb(skb);
|
||||
}
|
||||
|
||||
static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
|
||||
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
|
||||
u16 seq_no, u8 order)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
@ -890,11 +1018,8 @@ static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
|
||||
u16 idx, idx_end, seq_end;
|
||||
struct rxtid_stats *stats;
|
||||
|
||||
if (!p_aggr)
|
||||
return;
|
||||
|
||||
rxtid = &p_aggr->rx_tid[tid];
|
||||
stats = &p_aggr->stat[tid];
|
||||
rxtid = &agg_conn->rx_tid[tid];
|
||||
stats = &agg_conn->stat[tid];
|
||||
|
||||
idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
|
||||
|
||||
@ -923,7 +1048,8 @@ static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
|
||||
|
||||
if (node->skb) {
|
||||
if (node->is_amsdu)
|
||||
aggr_slice_amsdu(p_aggr, rxtid, node->skb);
|
||||
aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
|
||||
node->skb);
|
||||
else
|
||||
skb_queue_tail(&rxtid->q, node->skb);
|
||||
node->skb = NULL;
|
||||
@ -939,10 +1065,10 @@ static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
|
||||
stats->num_delivered += skb_queue_len(&rxtid->q);
|
||||
|
||||
while ((skb = skb_dequeue(&rxtid->q)))
|
||||
ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
|
||||
ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
|
||||
}
|
||||
|
||||
static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
|
||||
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
|
||||
u16 seq_no,
|
||||
bool is_amsdu, struct sk_buff *frame)
|
||||
{
|
||||
@ -954,18 +1080,18 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
|
||||
bool is_queued = false;
|
||||
u16 extended_end;
|
||||
|
||||
rxtid = &agg_info->rx_tid[tid];
|
||||
stats = &agg_info->stat[tid];
|
||||
rxtid = &agg_conn->rx_tid[tid];
|
||||
stats = &agg_conn->stat[tid];
|
||||
|
||||
stats->num_into_aggr++;
|
||||
|
||||
if (!rxtid->aggr) {
|
||||
if (is_amsdu) {
|
||||
aggr_slice_amsdu(agg_info, rxtid, frame);
|
||||
aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
|
||||
is_queued = true;
|
||||
stats->num_amsdu++;
|
||||
while ((skb = skb_dequeue(&rxtid->q)))
|
||||
ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
|
||||
ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
|
||||
skb);
|
||||
}
|
||||
return is_queued;
|
||||
@ -985,7 +1111,7 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
|
||||
(cur < end || cur > extended_end)) ||
|
||||
((end > extended_end) && (cur > extended_end) &&
|
||||
(cur < end))) {
|
||||
aggr_deque_frms(agg_info, tid, 0, 0);
|
||||
aggr_deque_frms(agg_conn, tid, 0, 0);
|
||||
if (cur >= rxtid->hold_q_sz - 1)
|
||||
rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
|
||||
else
|
||||
@ -1002,7 +1128,7 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
|
||||
st = ATH6KL_MAX_SEQ_NO -
|
||||
(rxtid->hold_q_sz - 2 - cur);
|
||||
|
||||
aggr_deque_frms(agg_info, tid, st, 0);
|
||||
aggr_deque_frms(agg_conn, tid, st, 0);
|
||||
}
|
||||
|
||||
stats->num_oow++;
|
||||
@ -1041,9 +1167,9 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
|
||||
|
||||
spin_unlock_bh(&rxtid->lock);
|
||||
|
||||
aggr_deque_frms(agg_info, tid, 0, 1);
|
||||
aggr_deque_frms(agg_conn, tid, 0, 1);
|
||||
|
||||
if (agg_info->timer_scheduled)
|
||||
if (agg_conn->timer_scheduled)
|
||||
rxtid->progress = true;
|
||||
else
|
||||
for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
|
||||
@ -1054,8 +1180,8 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
|
||||
* the frame doesn't remain stuck
|
||||
* forever.
|
||||
*/
|
||||
agg_info->timer_scheduled = true;
|
||||
mod_timer(&agg_info->timer,
|
||||
agg_conn->timer_scheduled = true;
|
||||
mod_timer(&agg_conn->timer,
|
||||
(jiffies +
|
||||
HZ * (AGGR_RX_TIMEOUT) / 1000));
|
||||
rxtid->progress = false;
|
||||
@ -1067,6 +1193,76 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
|
||||
return is_queued;
|
||||
}
|
||||
|
||||
static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
|
||||
struct ath6kl_sta *conn)
|
||||
{
|
||||
struct ath6kl *ar = vif->ar;
|
||||
bool is_apsdq_empty, is_apsdq_empty_at_start;
|
||||
u32 num_frames_to_deliver, flags;
|
||||
struct sk_buff *skb = NULL;
|
||||
|
||||
/*
|
||||
* If the APSD q for this STA is not empty, dequeue and
|
||||
* send a pkt from the head of the q. Also update the
|
||||
* More data bit in the WMI_DATA_HDR if there are
|
||||
* more pkts for this STA in the APSD q.
|
||||
* If there are no more pkts for this STA,
|
||||
* update the APSD bitmap for this STA.
|
||||
*/
|
||||
|
||||
num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
|
||||
ATH6KL_APSD_FRAME_MASK;
|
||||
/*
|
||||
* Number of frames to send in a service period is
|
||||
* indicated by the station
|
||||
* in the QOS_INFO of the association request
|
||||
* If it is zero, send all frames
|
||||
*/
|
||||
if (!num_frames_to_deliver)
|
||||
num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;
|
||||
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
is_apsdq_empty = skb_queue_empty(&conn->apsdq);
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
is_apsdq_empty_at_start = is_apsdq_empty;
|
||||
|
||||
while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
|
||||
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
skb = skb_dequeue(&conn->apsdq);
|
||||
is_apsdq_empty = skb_queue_empty(&conn->apsdq);
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
|
||||
/*
|
||||
* Set the STA flag to Trigger delivery,
|
||||
* so that the frame will go out
|
||||
*/
|
||||
conn->sta_flags |= STA_PS_APSD_TRIGGER;
|
||||
num_frames_to_deliver--;
|
||||
|
||||
/* Last frame in the service period, set EOSP or queue empty */
|
||||
if ((is_apsdq_empty) || (!num_frames_to_deliver))
|
||||
conn->sta_flags |= STA_PS_APSD_EOSP;
|
||||
|
||||
ath6kl_data_tx(skb, vif->ndev);
|
||||
conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
|
||||
conn->sta_flags &= ~(STA_PS_APSD_EOSP);
|
||||
}
|
||||
|
||||
if (is_apsdq_empty) {
|
||||
if (is_apsdq_empty_at_start)
|
||||
flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
|
||||
else
|
||||
flags = 0;
|
||||
|
||||
ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
|
||||
vif->fw_vif_idx,
|
||||
conn->aid, 0, flags);
|
||||
}
|
||||
|
||||
return;
|
||||
}
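As a quick check of the bitfield decode at the top of this function: apsd_info carries the per-AC delivery-enabled bitmap in its low bits and the station's requested service-period length above them, so the shift-and-mask extraction behaves like the following standalone illustration (field widths assumed from the WMM QoS info layout):

    u8 apsd_info = 0x2f;    /* all four ACs enabled, max SP length field = 2 */
    u8 n = (apsd_info >> ATH6KL_APSD_NUM_OF_AC) & ATH6KL_APSD_FRAME_MASK;
    /* if ATH6KL_APSD_NUM_OF_AC == 4 and ATH6KL_APSD_FRAME_MASK == 0x3, n == 2;
     * n == 0 means "deliver all buffered frames", as handled above */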
|
||||
|
||||
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
|
||||
{
|
||||
struct ath6kl *ar = target->dev->ar;
|
||||
@ -1078,10 +1274,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
|
||||
int status = packet->status;
|
||||
enum htc_endpoint_id ept = packet->endpoint;
|
||||
bool is_amsdu, prev_ps, ps_state = false;
|
||||
bool trig_state = false;
|
||||
struct ath6kl_sta *conn = NULL;
|
||||
struct sk_buff *skb1 = NULL;
|
||||
struct ethhdr *datap = NULL;
|
||||
struct ath6kl_vif *vif;
|
||||
struct aggr_info_conn *aggr_conn;
|
||||
u16 seq_no, offset;
|
||||
u8 tid, if_idx;
|
||||
|
||||
@ -1171,6 +1369,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
|
||||
WMI_DATA_HDR_PS_MASK);
|
||||
|
||||
offset = sizeof(struct wmi_data_hdr);
|
||||
trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
|
||||
|
||||
switch (meta_type) {
|
||||
case 0:
|
||||
@ -1209,18 +1408,36 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
|
||||
else
|
||||
conn->sta_flags &= ~STA_PS_SLEEP;
|
||||
|
||||
/* Accept trigger only when the station is in sleep */
|
||||
if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
|
||||
ath6kl_uapsd_trigger_frame_rx(vif, conn);
|
||||
|
||||
if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
|
||||
if (!(conn->sta_flags & STA_PS_SLEEP)) {
|
||||
struct sk_buff *skbuff = NULL;
|
||||
bool is_apsdq_empty;
|
||||
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
while ((skbuff = skb_dequeue(&conn->psq))
|
||||
!= NULL) {
|
||||
while ((skbuff = skb_dequeue(&conn->psq))) {
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
ath6kl_data_tx(skbuff, vif->ndev);
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
}
|
||||
|
||||
is_apsdq_empty = skb_queue_empty(&conn->apsdq);
|
||||
while ((skbuff = skb_dequeue(&conn->apsdq))) {
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
ath6kl_data_tx(skbuff, vif->ndev);
|
||||
spin_lock_bh(&conn->psq_lock);
|
||||
}
|
||||
spin_unlock_bh(&conn->psq_lock);
|
||||
|
||||
if (!is_apsdq_empty)
|
||||
ath6kl_wmi_set_apsd_bfrd_traf(
|
||||
ar->wmi,
|
||||
vif->fw_vif_idx,
|
||||
conn->aid, 0, 0);
|
||||
|
||||
/* Clear the PVB for this STA */
|
||||
ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
|
||||
conn->aid, 0);
|
||||
@ -1314,11 +1531,21 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
|
||||
|
||||
datap = (struct ethhdr *) skb->data;
|
||||
|
||||
if (is_unicast_ether_addr(datap->h_dest) &&
|
||||
aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
|
||||
is_amsdu, skb))
|
||||
/* aggregation code will handle the skb */
|
||||
return;
|
||||
if (is_unicast_ether_addr(datap->h_dest)) {
|
||||
if (vif->nw_type == AP_NETWORK) {
|
||||
conn = ath6kl_find_sta(vif, datap->h_source);
|
||||
if (!conn)
|
||||
return;
|
||||
aggr_conn = conn->aggr_conn;
|
||||
} else
|
||||
aggr_conn = vif->aggr_cntxt->aggr_conn;
|
||||
|
||||
if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
|
||||
is_amsdu, skb)) {
|
||||
/* aggregation code will handle the skb */
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
|
||||
}
|
||||
@ -1326,13 +1553,13 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
|
||||
static void aggr_timeout(unsigned long arg)
|
||||
{
|
||||
u8 i, j;
|
||||
struct aggr_info *p_aggr = (struct aggr_info *) arg;
|
||||
struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
|
||||
struct rxtid *rxtid;
|
||||
struct rxtid_stats *stats;
|
||||
|
||||
for (i = 0; i < NUM_OF_TIDS; i++) {
|
||||
rxtid = &p_aggr->rx_tid[i];
|
||||
stats = &p_aggr->stat[i];
|
||||
rxtid = &aggr_conn->rx_tid[i];
|
||||
stats = &aggr_conn->stat[i];
|
||||
|
||||
if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
|
||||
continue;
|
||||
@ -1343,18 +1570,18 @@ static void aggr_timeout(unsigned long arg)
|
||||
rxtid->seq_next,
|
||||
((rxtid->seq_next + rxtid->hold_q_sz-1) &
|
||||
ATH6KL_MAX_SEQ_NO));
|
||||
aggr_deque_frms(p_aggr, i, 0, 0);
|
||||
aggr_deque_frms(aggr_conn, i, 0, 0);
|
||||
}
|
||||
|
||||
p_aggr->timer_scheduled = false;
|
||||
aggr_conn->timer_scheduled = false;
|
||||
|
||||
for (i = 0; i < NUM_OF_TIDS; i++) {
|
||||
rxtid = &p_aggr->rx_tid[i];
|
||||
rxtid = &aggr_conn->rx_tid[i];
|
||||
|
||||
if (rxtid->aggr && rxtid->hold_q) {
|
||||
for (j = 0; j < rxtid->hold_q_sz; j++) {
|
||||
if (rxtid->hold_q[j].skb) {
|
||||
p_aggr->timer_scheduled = true;
|
||||
aggr_conn->timer_scheduled = true;
|
||||
rxtid->timer_mon = true;
|
||||
rxtid->progress = false;
|
||||
break;
|
||||
@ -1366,24 +1593,24 @@ static void aggr_timeout(unsigned long arg)
|
||||
}
|
||||
}
|
||||
|
||||
if (p_aggr->timer_scheduled)
|
||||
mod_timer(&p_aggr->timer,
|
||||
if (aggr_conn->timer_scheduled)
|
||||
mod_timer(&aggr_conn->timer,
|
||||
jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
|
||||
}
|
||||
|
||||
static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
|
||||
static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
|
||||
{
|
||||
struct rxtid *rxtid;
|
||||
struct rxtid_stats *stats;
|
||||
|
||||
if (!p_aggr || tid >= NUM_OF_TIDS)
|
||||
if (!aggr_conn || tid >= NUM_OF_TIDS)
|
||||
return;
|
||||
|
||||
rxtid = &p_aggr->rx_tid[tid];
|
||||
stats = &p_aggr->stat[tid];
|
||||
rxtid = &aggr_conn->rx_tid[tid];
|
||||
stats = &aggr_conn->stat[tid];
|
||||
|
||||
if (rxtid->aggr)
|
||||
aggr_deque_frms(p_aggr, tid, 0, 0);
|
||||
aggr_deque_frms(aggr_conn, tid, 0, 0);
|
||||
|
||||
rxtid->aggr = false;
|
||||
rxtid->progress = false;
|
||||
@ -1398,26 +1625,40 @@ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
|
||||
memset(stats, 0, sizeof(struct rxtid_stats));
|
||||
}
|
||||
|
||||
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
|
||||
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
|
||||
u8 win_sz)
|
||||
{
|
||||
struct aggr_info *p_aggr = vif->aggr_cntxt;
|
||||
struct ath6kl_sta *sta;
|
||||
struct aggr_info_conn *aggr_conn = NULL;
|
||||
struct rxtid *rxtid;
|
||||
struct rxtid_stats *stats;
|
||||
u16 hold_q_size;
|
||||
u8 tid, aid;
|
||||
|
||||
if (!p_aggr)
|
||||
if (vif->nw_type == AP_NETWORK) {
|
||||
aid = ath6kl_get_aid(tid_mux);
|
||||
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
|
||||
if (sta)
|
||||
aggr_conn = sta->aggr_conn;
|
||||
} else
|
||||
aggr_conn = vif->aggr_cntxt->aggr_conn;
|
||||
|
||||
if (!aggr_conn)
|
||||
return;
|
||||
|
||||
rxtid = &p_aggr->rx_tid[tid];
|
||||
stats = &p_aggr->stat[tid];
|
||||
tid = ath6kl_get_tid(tid_mux);
|
||||
if (tid >= NUM_OF_TIDS)
|
||||
return;
|
||||
|
||||
rxtid = &aggr_conn->rx_tid[tid];
|
||||
stats = &aggr_conn->stat[tid];
|
||||
|
||||
if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
|
||||
ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
|
||||
__func__, win_sz, tid);
|
||||
|
||||
if (rxtid->aggr)
|
||||
aggr_delete_tid_state(p_aggr, tid);
|
||||
aggr_delete_tid_state(aggr_conn, tid);
|
||||
|
||||
rxtid->seq_next = seq_no;
|
||||
hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
|
||||
@ -1433,31 +1674,23 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
|
||||
rxtid->aggr = true;
|
||||
}
|
||||
|
||||
struct aggr_info *aggr_init(struct net_device *dev)
|
||||
void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
|
||||
struct aggr_info_conn *aggr_conn)
|
||||
{
|
||||
struct aggr_info *p_aggr = NULL;
|
||||
struct rxtid *rxtid;
|
||||
u8 i;
|
||||
|
||||
p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
|
||||
if (!p_aggr) {
|
||||
ath6kl_err("failed to alloc memory for aggr_node\n");
|
||||
return NULL;
|
||||
}
|
||||
aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
|
||||
aggr_conn->dev = vif->ndev;
|
||||
init_timer(&aggr_conn->timer);
|
||||
aggr_conn->timer.function = aggr_timeout;
|
||||
aggr_conn->timer.data = (unsigned long) aggr_conn;
|
||||
aggr_conn->aggr_info = aggr_info;
|
||||
|
||||
p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
|
||||
p_aggr->dev = dev;
|
||||
init_timer(&p_aggr->timer);
|
||||
p_aggr->timer.function = aggr_timeout;
|
||||
p_aggr->timer.data = (unsigned long) p_aggr;
|
||||
|
||||
p_aggr->timer_scheduled = false;
|
||||
skb_queue_head_init(&p_aggr->free_q);
|
||||
|
||||
ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
|
||||
aggr_conn->timer_scheduled = false;
|
||||
|
||||
for (i = 0; i < NUM_OF_TIDS; i++) {
|
||||
rxtid = &p_aggr->rx_tid[i];
|
||||
rxtid = &aggr_conn->rx_tid[i];
|
||||
rxtid->aggr = false;
|
||||
rxtid->progress = false;
|
||||
rxtid->timer_mon = false;
|
||||
@ -1465,29 +1698,75 @@ struct aggr_info *aggr_init(struct net_device *dev)
|
||||
spin_lock_init(&rxtid->lock);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
|
||||
{
|
||||
struct aggr_info *p_aggr = NULL;
|
||||
|
||||
p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
|
||||
if (!p_aggr) {
|
||||
ath6kl_err("failed to alloc memory for aggr_node\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
|
||||
if (!p_aggr->aggr_conn) {
|
||||
ath6kl_err("failed to alloc memory for connection specific aggr info\n");
|
||||
kfree(p_aggr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);
|
||||
|
||||
skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
|
||||
ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);
|
||||
|
||||
return p_aggr;
|
||||
}
|
||||
|
||||
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
|
||||
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
|
||||
{
|
||||
struct aggr_info *p_aggr = vif->aggr_cntxt;
|
||||
struct ath6kl_sta *sta;
|
||||
struct rxtid *rxtid;
|
||||
struct aggr_info_conn *aggr_conn = NULL;
|
||||
u8 tid, aid;
|
||||
|
||||
if (!p_aggr)
|
||||
if (vif->nw_type == AP_NETWORK) {
|
||||
aid = ath6kl_get_aid(tid_mux);
|
||||
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
|
||||
if (sta)
|
||||
aggr_conn = sta->aggr_conn;
|
||||
} else
|
||||
aggr_conn = vif->aggr_cntxt->aggr_conn;
|
||||
|
||||
if (!aggr_conn)
|
||||
return;
|
||||
|
||||
rxtid = &p_aggr->rx_tid[tid];
|
||||
tid = ath6kl_get_tid(tid_mux);
|
||||
if (tid >= NUM_OF_TIDS)
|
||||
return;
|
||||
|
||||
rxtid = &aggr_conn->rx_tid[tid];
|
||||
|
||||
if (rxtid->aggr)
|
||||
aggr_delete_tid_state(p_aggr, tid);
|
||||
aggr_delete_tid_state(aggr_conn, tid);
|
||||
}
|
||||
|
||||
void aggr_reset_state(struct aggr_info *aggr_info)
|
||||
void aggr_reset_state(struct aggr_info_conn *aggr_conn)
|
||||
{
|
||||
u8 tid;
|
||||
|
||||
if (!aggr_conn)
|
||||
return;
|
||||
|
||||
if (aggr_conn->timer_scheduled) {
|
||||
del_timer(&aggr_conn->timer);
|
||||
aggr_conn->timer_scheduled = false;
|
||||
}
|
||||
|
||||
for (tid = 0; tid < NUM_OF_TIDS; tid++)
|
||||
aggr_delete_tid_state(aggr_info, tid);
|
||||
aggr_delete_tid_state(aggr_conn, tid);
|
||||
}
|
||||
|
||||
/* clean up our amsdu buffer list */
|
||||
@ -1514,28 +1793,11 @@ void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
|
||||
|
||||
void aggr_module_destroy(struct aggr_info *aggr_info)
|
||||
{
|
||||
struct rxtid *rxtid;
|
||||
u8 i, k;
|
||||
|
||||
if (!aggr_info)
|
||||
return;
|
||||
|
||||
if (aggr_info->timer_scheduled) {
|
||||
del_timer(&aggr_info->timer);
|
||||
aggr_info->timer_scheduled = false;
|
||||
}
|
||||
|
||||
for (i = 0; i < NUM_OF_TIDS; i++) {
|
||||
rxtid = &aggr_info->rx_tid[i];
|
||||
if (rxtid->hold_q) {
|
||||
for (k = 0; k < rxtid->hold_q_sz; k++)
|
||||
dev_kfree_skb(rxtid->hold_q[k].skb);
|
||||
kfree(rxtid->hold_q);
|
||||
}
|
||||
|
||||
skb_queue_purge(&rxtid->q);
|
||||
}
|
||||
|
||||
skb_queue_purge(&aggr_info->free_q);
|
||||
aggr_reset_state(aggr_info->aggr_conn);
|
||||
skb_queue_purge(&aggr_info->rx_amsdu_freeq);
|
||||
kfree(aggr_info->aggr_conn);
|
||||
kfree(aggr_info);
|
||||
}
|
||||
|
431
drivers/net/wireless/ath/ath6kl/usb.c
Normal file
@ -0,0 +1,431 @@
|
||||
/*
|
||||
* Copyright (c) 2007-2011 Atheros Communications Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/usb.h>
|
||||
|
||||
#include "debug.h"
|
||||
#include "core.h"
|
||||
|
||||
/* usb device object */
|
||||
struct ath6kl_usb {
|
||||
struct usb_device *udev;
|
||||
struct usb_interface *interface;
|
||||
u8 *diag_cmd_buffer;
|
||||
u8 *diag_resp_buffer;
|
||||
struct ath6kl *ar;
|
||||
};
|
||||
|
||||
/* diagnostic command definitions */
|
||||
#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1
|
||||
#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2
|
||||
#define ATH6KL_USB_CONTROL_REQ_DIAG_CMD 3
|
||||
#define ATH6KL_USB_CONTROL_REQ_DIAG_RESP 4
|
||||
|
||||
#define ATH6KL_USB_CTRL_DIAG_CC_READ 0
|
||||
#define ATH6KL_USB_CTRL_DIAG_CC_WRITE 1
|
||||
|
||||
struct ath6kl_usb_ctrl_diag_cmd_write {
|
||||
__le32 cmd;
|
||||
__le32 address;
|
||||
__le32 value;
|
||||
__le32 _pad[1];
|
||||
} __packed;
|
||||
|
||||
struct ath6kl_usb_ctrl_diag_cmd_read {
|
||||
__le32 cmd;
|
||||
__le32 address;
|
||||
} __packed;
|
||||
|
||||
struct ath6kl_usb_ctrl_diag_resp_read {
|
||||
__le32 value;
|
||||
} __packed;
|
||||
|
||||
#define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write))
|
||||
#define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read))
|
||||
|
||||
static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
|
||||
{
|
||||
usb_set_intfdata(ar_usb->interface, NULL);
|
||||
|
||||
kfree(ar_usb->diag_cmd_buffer);
|
||||
kfree(ar_usb->diag_resp_buffer);
|
||||
|
||||
kfree(ar_usb);
|
||||
}
|
||||
|
||||
static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
|
||||
{
|
||||
struct ath6kl_usb *ar_usb = NULL;
|
||||
struct usb_device *dev = interface_to_usbdev(interface);
|
||||
int status = 0;
|
||||
|
||||
ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL);
|
||||
if (ar_usb == NULL)
|
||||
goto fail_ath6kl_usb_create;
|
||||
|
||||
memset(ar_usb, 0, sizeof(struct ath6kl_usb));
|
||||
usb_set_intfdata(interface, ar_usb);
|
||||
ar_usb->udev = dev;
|
||||
ar_usb->interface = interface;
|
||||
|
||||
ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL);
|
||||
if (ar_usb->diag_cmd_buffer == NULL) {
|
||||
status = -ENOMEM;
|
||||
goto fail_ath6kl_usb_create;
|
||||
}
|
||||
|
||||
ar_usb->diag_resp_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_RESP,
|
||||
GFP_KERNEL);
|
||||
if (ar_usb->diag_resp_buffer == NULL) {
|
||||
status = -ENOMEM;
|
||||
goto fail_ath6kl_usb_create;
|
||||
}
|
||||
|
||||
fail_ath6kl_usb_create:
|
||||
if (status != 0) {
|
||||
ath6kl_usb_destroy(ar_usb);
|
||||
ar_usb = NULL;
|
||||
}
|
||||
return ar_usb;
|
||||
}
|
||||
|
||||
static void ath6kl_usb_device_detached(struct usb_interface *interface)
|
||||
{
|
||||
struct ath6kl_usb *ar_usb;
|
||||
|
||||
ar_usb = usb_get_intfdata(interface);
|
||||
if (ar_usb == NULL)
|
||||
return;
|
||||
|
||||
ath6kl_stop_txrx(ar_usb->ar);
|
||||
|
||||
ath6kl_core_cleanup(ar_usb->ar);
|
||||
|
||||
ath6kl_usb_destroy(ar_usb);
|
||||
}
|
||||
|
||||
static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
|
||||
u8 req, u16 value, u16 index, void *data,
|
||||
u32 size)
|
||||
{
|
||||
u8 *buf = NULL;
|
||||
int ret;
|
||||
|
||||
if (size > 0) {
|
||||
buf = kmalloc(size, GFP_KERNEL);
|
||||
if (buf == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(buf, data, size);
|
||||
}
|
||||
|
||||
/* note: if successful returns number of bytes transferred */
|
||||
ret = usb_control_msg(ar_usb->udev,
|
||||
usb_sndctrlpipe(ar_usb->udev, 0),
|
||||
req,
|
||||
USB_DIR_OUT | USB_TYPE_VENDOR |
|
||||
USB_RECIP_DEVICE, value, index, buf,
|
||||
size, 1000);
|
||||
|
||||
if (ret < 0) {
|
||||
ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n",
|
||||
__func__, ret);
|
||||
}
|
||||
|
||||
kfree(buf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
|
||||
u8 req, u16 value, u16 index, void *data,
|
||||
u32 size)
|
||||
{
|
||||
u8 *buf = NULL;
|
||||
int ret;
|
||||
|
||||
if (size > 0) {
|
||||
buf = kmalloc(size, GFP_KERNEL);
|
||||
if (buf == NULL)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* note: if successful returns number of bytes transferred */
|
||||
ret = usb_control_msg(ar_usb->udev,
|
||||
usb_rcvctrlpipe(ar_usb->udev, 0),
|
||||
req,
|
||||
USB_DIR_IN | USB_TYPE_VENDOR |
|
||||
USB_RECIP_DEVICE, value, index, buf,
|
||||
size, 2 * HZ);
|
||||
|
||||
if (ret < 0) {
|
||||
ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n",
|
||||
__func__, ret);
|
||||
}
|
||||
|
||||
memcpy((u8 *) data, buf, size);
|
||||
|
||||
kfree(buf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_usb_ctrl_msg_exchange(struct ath6kl_usb *ar_usb,
|
||||
u8 req_val, u8 *req_buf, u32 req_len,
|
||||
u8 resp_val, u8 *resp_buf, u32 *resp_len)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* send command */
|
||||
ret = ath6kl_usb_submit_ctrl_out(ar_usb, req_val, 0, 0,
|
||||
req_buf, req_len);
|
||||
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
if (resp_buf == NULL) {
|
||||
/* no expected response */
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* get response */
|
||||
ret = ath6kl_usb_submit_ctrl_in(ar_usb, resp_val, 0, 0,
|
||||
resp_buf, *resp_len);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
|
||||
{
|
||||
struct ath6kl_usb *ar_usb = ar->hif_priv;
|
||||
struct ath6kl_usb_ctrl_diag_resp_read *resp;
|
||||
struct ath6kl_usb_ctrl_diag_cmd_read *cmd;
|
||||
u32 resp_len;
|
||||
int ret;
|
||||
|
||||
cmd = (struct ath6kl_usb_ctrl_diag_cmd_read *) ar_usb->diag_cmd_buffer;
|
||||
|
||||
memset(cmd, 0, sizeof(*cmd));
|
||||
cmd->cmd = ATH6KL_USB_CTRL_DIAG_CC_READ;
|
||||
cmd->address = cpu_to_le32(address);
|
||||
resp_len = sizeof(*resp);
|
||||
|
||||
ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
|
||||
ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
|
||||
(u8 *) cmd,
|
||||
sizeof(struct ath6kl_usb_ctrl_diag_cmd_write),
|
||||
ATH6KL_USB_CONTROL_REQ_DIAG_RESP,
|
||||
ar_usb->diag_resp_buffer, &resp_len);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
resp = (struct ath6kl_usb_ctrl_diag_resp_read *)
|
||||
ar_usb->diag_resp_buffer;
|
||||
|
||||
*data = le32_to_cpu(resp->value);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
|
||||
{
|
||||
struct ath6kl_usb *ar_usb = ar->hif_priv;
|
||||
struct ath6kl_usb_ctrl_diag_cmd_write *cmd;
|
||||
|
||||
cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer;
|
||||
|
||||
memset(cmd, 0, sizeof(struct ath6kl_usb_ctrl_diag_cmd_write));
|
||||
cmd->cmd = cpu_to_le32(ATH6KL_USB_CTRL_DIAG_CC_WRITE);
|
||||
cmd->address = cpu_to_le32(address);
|
||||
cmd->value = data;
|
||||
|
||||
return ath6kl_usb_ctrl_msg_exchange(ar_usb,
|
||||
ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
|
||||
(u8 *) cmd,
|
||||
sizeof(*cmd),
|
||||
0, NULL, NULL);
|
||||
|
||||
}
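
A hypothetical usage sketch for the diag accessors added above; the helper name and the idea of toggling a scratch bit are assumptions made for illustration, not part of this commit.

/*
 * Hypothetical usage sketch: read-modify-write of a register through
 * the diag accessors above.  Helper name and purpose are made up.
 */
static int ath6kl_usb_toggle_scratch_bit(struct ath6kl *ar, u32 addr, u32 bit)
{
	u32 val;
	int ret;

	ret = ath6kl_usb_diag_read32(ar, addr, &val);
	if (ret)
		return ret;

	val ^= bit;

	return ath6kl_usb_diag_write32(ar, addr, cpu_to_le32(val));
}
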
|
||||
|
||||
static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
|
||||
{
|
||||
struct ath6kl_usb *ar_usb = ar->hif_priv;
|
||||
int ret;
|
||||
|
||||
/* get response */
|
||||
ret = ath6kl_usb_submit_ctrl_in(ar_usb,
|
||||
ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP,
|
||||
0, 0, buf, len);
|
||||
if (ret != 0) {
|
||||
ath6kl_err("Unable to read the bmi data from the device: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
|
||||
{
|
||||
struct ath6kl_usb *ar_usb = ar->hif_priv;
|
||||
int ret;
|
||||
|
||||
/* send command */
|
||||
ret = ath6kl_usb_submit_ctrl_out(ar_usb,
|
||||
ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD,
|
||||
0, 0, buf, len);
|
||||
if (ret != 0) {
|
||||
ath6kl_err("unable to send the bmi data to the device: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_usb_power_on(struct ath6kl *ar)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_usb_power_off(struct ath6kl *ar)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct ath6kl_hif_ops ath6kl_usb_ops = {
|
||||
.diag_read32 = ath6kl_usb_diag_read32,
|
||||
.diag_write32 = ath6kl_usb_diag_write32,
|
||||
.bmi_read = ath6kl_usb_bmi_read,
|
||||
.bmi_write = ath6kl_usb_bmi_write,
|
||||
.power_on = ath6kl_usb_power_on,
|
||||
.power_off = ath6kl_usb_power_off,
|
||||
};
|
||||
|
||||
/* ath6kl usb driver registered functions */
|
||||
static int ath6kl_usb_probe(struct usb_interface *interface,
|
||||
const struct usb_device_id *id)
|
||||
{
|
||||
struct usb_device *dev = interface_to_usbdev(interface);
|
||||
struct ath6kl *ar;
|
||||
struct ath6kl_usb *ar_usb = NULL;
|
||||
int vendor_id, product_id;
|
||||
int ret = 0;
|
||||
|
||||
usb_get_dev(dev);
|
||||
|
||||
vendor_id = le16_to_cpu(dev->descriptor.idVendor);
|
||||
product_id = le16_to_cpu(dev->descriptor.idProduct);
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_USB, "vendor_id = %04x\n", vendor_id);
|
||||
ath6kl_dbg(ATH6KL_DBG_USB, "product_id = %04x\n", product_id);
|
||||
|
||||
if (interface->cur_altsetting)
|
||||
ath6kl_dbg(ATH6KL_DBG_USB, "USB Interface %d\n",
|
||||
interface->cur_altsetting->desc.bInterfaceNumber);
|
||||
|
||||
|
||||
if (dev->speed == USB_SPEED_HIGH)
|
||||
ath6kl_dbg(ATH6KL_DBG_USB, "USB 2.0 Host\n");
|
||||
else
|
||||
ath6kl_dbg(ATH6KL_DBG_USB, "USB 1.1 Host\n");
|
||||
|
||||
ar_usb = ath6kl_usb_create(interface);
|
||||
|
||||
if (ar_usb == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto err_usb_put;
|
||||
}
|
||||
|
||||
ar = ath6kl_core_create(&ar_usb->udev->dev);
|
||||
if (ar == NULL) {
|
||||
ath6kl_err("Failed to alloc ath6kl core\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_usb_destroy;
|
||||
}
|
||||
|
||||
ar->hif_priv = ar_usb;
|
||||
ar->hif_type = ATH6KL_HIF_TYPE_USB;
|
||||
ar->hif_ops = &ath6kl_usb_ops;
|
||||
ar->mbox_info.block_size = 16;
|
||||
ar->bmi.max_data_size = 252;
|
||||
|
||||
ar_usb->ar = ar;
|
||||
|
||||
ret = ath6kl_core_init(ar);
|
||||
if (ret) {
|
||||
ath6kl_err("Failed to init ath6kl core: %d\n", ret);
|
||||
goto err_core_free;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
err_core_free:
|
||||
ath6kl_core_destroy(ar);
|
||||
err_usb_destroy:
|
||||
ath6kl_usb_destroy(ar_usb);
|
||||
err_usb_put:
|
||||
usb_put_dev(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ath6kl_usb_remove(struct usb_interface *interface)
|
||||
{
|
||||
usb_put_dev(interface_to_usbdev(interface));
|
||||
ath6kl_usb_device_detached(interface);
|
||||
}
|
||||
|
||||
/* table of devices that work with this driver */
|
||||
static struct usb_device_id ath6kl_usb_ids[] = {
|
||||
{USB_DEVICE(0x0cf3, 0x9374)},
|
||||
{ /* Terminating entry */ },
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
|
||||
|
||||
static struct usb_driver ath6kl_usb_driver = {
|
||||
.name = "ath6kl_usb",
|
||||
.probe = ath6kl_usb_probe,
|
||||
.disconnect = ath6kl_usb_remove,
|
||||
.id_table = ath6kl_usb_ids,
|
||||
};
|
||||
|
||||
static int ath6kl_usb_init(void)
|
||||
{
|
||||
usb_register(&ath6kl_usb_driver);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ath6kl_usb_exit(void)
|
||||
{
|
||||
usb_deregister(&ath6kl_usb_driver);
|
||||
}
|
||||
|
||||
module_init(ath6kl_usb_init);
|
||||
module_exit(ath6kl_usb_exit);
|
||||
|
||||
MODULE_AUTHOR("Atheros Communications, Inc.");
|
||||
MODULE_DESCRIPTION("Driver support for Atheros AR600x USB devices");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
|
||||
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
|
@ -180,7 +180,7 @@ static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
|
||||
}
|
||||
|
||||
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
|
||||
u8 msg_type, bool more_data,
|
||||
u8 msg_type, u32 flags,
|
||||
enum wmi_data_hdr_data_type data_type,
|
||||
u8 meta_ver, void *tx_meta_info, u8 if_idx)
|
||||
{
|
||||
@ -204,17 +204,19 @@ int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
|
||||
data_hdr->info = msg_type << WMI_DATA_HDR_MSG_TYPE_SHIFT;
|
||||
data_hdr->info |= data_type << WMI_DATA_HDR_DATA_TYPE_SHIFT;
|
||||
|
||||
if (more_data)
|
||||
data_hdr->info |=
|
||||
WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
|
||||
if (flags & WMI_DATA_HDR_FLAGS_MORE)
|
||||
data_hdr->info |= WMI_DATA_HDR_MORE;
|
||||
|
||||
data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
|
||||
data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
|
||||
if (flags & WMI_DATA_HDR_FLAGS_EOSP)
|
||||
data_hdr->info3 |= cpu_to_le16(WMI_DATA_HDR_EOSP);
|
||||
|
||||
data_hdr->info2 |= cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
|
||||
data_hdr->info3 |= cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
|
||||
u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
|
||||
{
|
||||
struct iphdr *ip_hdr = (struct iphdr *) pkt;
|
||||
u8 ip_pri;
|
||||
@ -236,6 +238,11 @@ static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
|
||||
return ip_pri;
|
||||
}
|
||||
|
||||
u8 ath6kl_wmi_get_traffic_class(u8 user_priority)
|
||||
{
|
||||
return up_to_ac[user_priority & 0x7];
|
||||
}
|
||||
|
||||
int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
|
||||
struct sk_buff *skb,
|
||||
u32 layer2_priority, bool wmm_enabled,
|
||||
@ -419,9 +426,6 @@ static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
|
||||
ath6kl_dbg(ATH6KL_DBG_WMI, "comp: %d %d %d\n",
|
||||
evt->num_msg, evt->msg_len, evt->msg_type);
|
||||
|
||||
if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_WMI))
|
||||
return 0;
|
||||
|
||||
for (index = 0; index < evt->num_msg; index++) {
|
||||
size = sizeof(struct wmi_tx_complete_event) +
|
||||
(index * sizeof(struct tx_complete_msg_v1));
|
||||
@ -786,12 +790,14 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
|
||||
ev->u.ap_sta.keymgmt,
|
||||
le16_to_cpu(ev->u.ap_sta.cipher),
|
||||
ev->u.ap_sta.apsd_info);
|
||||
|
||||
ath6kl_connect_ap_mode_sta(
|
||||
vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
|
||||
ev->u.ap_sta.keymgmt,
|
||||
le16_to_cpu(ev->u.ap_sta.cipher),
|
||||
ev->u.ap_sta.auth, ev->assoc_req_len,
|
||||
ev->assoc_info + ev->beacon_ie_len);
|
||||
ev->assoc_info + ev->beacon_ie_len,
|
||||
ev->u.ap_sta.apsd_info);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -1145,9 +1151,9 @@ static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath6kl_wmi_tcmd_test_report_rx(struct wmi *wmi, u8 *datap, int len)
|
||||
static int ath6kl_wmi_test_rx(struct wmi *wmi, u8 *datap, int len)
|
||||
{
|
||||
ath6kl_tm_rx_report_event(wmi->parent_dev, datap, len);
|
||||
ath6kl_tm_rx_event(wmi->parent_dev, datap, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2479,15 +2485,16 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
|
||||
int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
|
||||
__be32 ips0, __be32 ips1)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct wmi_set_ip_cmd *cmd;
|
||||
int ret;
|
||||
|
||||
/* Multicast address are not valid */
|
||||
if ((*((u8 *) &ip_cmd->ips[0]) >= 0xE0) ||
|
||||
(*((u8 *) &ip_cmd->ips[1]) >= 0xE0))
|
||||
if (ipv4_is_multicast(ips0) ||
|
||||
ipv4_is_multicast(ips1))
|
||||
return -EINVAL;
|
||||
|
||||
skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_ip_cmd));
|
||||
@ -2495,9 +2502,10 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
cmd = (struct wmi_set_ip_cmd *) skb->data;
|
||||
memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
|
||||
cmd->ips[0] = ips0;
|
||||
cmd->ips[1] = ips1;
|
||||
|
||||
ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_IP_CMDID,
|
||||
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IP_CMDID,
|
||||
NO_SYNC_WMIFLAG);
|
||||
return ret;
|
||||
}
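
An assumed call site for the reworked prototype, not part of this commit: the two IPv4 addresses are now passed directly as __be32 values in network byte order, for example straight from an in_ifaddr notifier.

/*
 * Illustrative caller (assumed): push one IPv4 address, leaving the
 * second slot at 0.0.0.0 (unused).
 */
static int example_push_ip(struct ath6kl_vif *vif, __be32 addr)
{
	return ath6kl_wmi_set_ip_cmd(vif->ar->wmi, vif->fw_vif_idx,
				     addr, 0);
}
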
|
||||
@ -2582,6 +2590,18 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* This command has zero length payload */
|
||||
static int ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx(struct wmi *wmi,
|
||||
struct ath6kl_vif *vif)
|
||||
{
|
||||
struct ath6kl *ar = wmi->parent_dev;
|
||||
|
||||
set_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
|
||||
wake_up(&ar->event_wq);
|
||||
|
||||
return 0;
|
||||
}
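
The handler above only sets a flag and wakes the wait queue; a hedged sketch of the waiting side follows. The suspend path presumably blocks like this after sending the host sleep mode command; WMI_TIMEOUT and the helper name are assumptions.

/*
 * Sketch of the waiting side (assumed, not shown in this hunk): block
 * until the firmware acknowledges the host sleep mode command via the
 * "processed" event handled above.
 */
static int example_wait_host_sleep_ack(struct ath6kl *ar,
				       struct ath6kl_vif *vif)
{
	long left;

	left = wait_event_interruptible_timeout(ar->event_wq,
			test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags),
			WMI_TIMEOUT);
	if (left == 0)
		return -ETIMEDOUT;
	if (left < 0)
		return left;
	return 0;
}
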
|
||||
|
||||
int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
|
||||
enum ath6kl_wow_mode wow_mode,
|
||||
u32 filter, u16 host_req_delay)
|
||||
@ -2612,7 +2632,8 @@ int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
|
||||
|
||||
int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
|
||||
u8 list_id, u8 filter_size,
|
||||
u8 filter_offset, u8 *filter, u8 *mask)
|
||||
u8 filter_offset, const u8 *filter,
|
||||
const u8 *mask)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct wmi_add_wow_pattern_cmd *cmd;
|
||||
@ -2853,6 +2874,51 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct wmi_mcast_filter_cmd *cmd;
|
||||
int ret;
|
||||
|
||||
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
cmd = (struct wmi_mcast_filter_cmd *) skb->data;
|
||||
cmd->mcast_all_enable = mc_all_on;
|
||||
|
||||
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_MCAST_FILTER_CMDID,
|
||||
NO_SYNC_WMIFLAG);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
|
||||
u8 *filter, bool add_filter)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct wmi_mcast_filter_add_del_cmd *cmd;
|
||||
int ret;
|
||||
|
||||
if ((filter[0] != 0x33 || filter[1] != 0x33) &&
|
||||
(filter[0] != 0x01 || filter[1] != 0x00 ||
|
||||
filter[2] != 0x5e || filter[3] > 0x7f)) {
|
||||
ath6kl_warn("invalid multicast filter address\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
cmd = (struct wmi_mcast_filter_add_del_cmd *) skb->data;
|
||||
memcpy(cmd->mcast_mac, filter, ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE);
|
||||
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
|
||||
add_filter ? WMI_SET_MCAST_FILTER_CMDID :
|
||||
WMI_DEL_MCAST_FILTER_CMDID,
|
||||
NO_SYNC_WMIFLAG);
|
||||
|
||||
return ret;
|
||||
}
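
The prefix check above accepts only the MAC ranges that multicast groups map to. As background, a standalone sketch of the standard mapping (RFC 1112 for IPv4, RFC 2464 for IPv6), which is where the 01:00:5e (with bit 23 clear, hence the 0x7f test) and 33:33 prefixes come from; this is not driver code.

/*
 * Background sketch (standard mapping, not driver code): IPv4 groups map
 * to 01:00:5e plus the low 23 bits; IPv6 groups map to 33:33 plus the
 * low 32 bits.
 */
#include <stdint.h>

static void ipv4_mc_to_mac(uint32_t group_host_order, uint8_t mac[6])
{
	mac[0] = 0x01;
	mac[1] = 0x00;
	mac[2] = 0x5e;
	mac[3] = (group_host_order >> 16) & 0x7f;	/* only 23 bits copied */
	mac[4] = (group_host_order >> 8) & 0xff;
	mac[5] = group_host_order & 0xff;
}

static void ipv6_mc_to_mac(const uint8_t group[16], uint8_t mac[6])
{
	mac[0] = 0x33;
	mac[1] = 0x33;
	mac[2] = group[12];	/* low 32 bits of the IPv6 group address */
	mac[3] = group[13];
	mac[4] = group[14];
	mac[5] = group[15];
}
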
|
||||
|
||||
s32 ath6kl_wmi_get_rate(s8 rate_index)
|
||||
{
|
||||
@ -2946,6 +3012,43 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
|
||||
NO_SYNC_WMIFLAG);
|
||||
}
|
||||
|
||||
/* This command will be used to enable/disable AP uAPSD feature */
|
||||
int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable)
|
||||
{
|
||||
struct wmi_ap_set_apsd_cmd *cmd;
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
cmd = (struct wmi_ap_set_apsd_cmd *)skb->data;
|
||||
cmd->enable = enable;
|
||||
|
||||
return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_APSD_CMDID,
|
||||
NO_SYNC_WMIFLAG);
|
||||
}
|
||||
|
||||
int ath6kl_wmi_set_apsd_bfrd_traf(struct wmi *wmi, u8 if_idx,
|
||||
u16 aid, u16 bitmap, u32 flags)
|
||||
{
|
||||
struct wmi_ap_apsd_buffered_traffic_cmd *cmd;
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
cmd = (struct wmi_ap_apsd_buffered_traffic_cmd *)skb->data;
|
||||
cmd->aid = cpu_to_le16(aid);
|
||||
cmd->bitmap = cpu_to_le16(bitmap);
|
||||
cmd->flags = cpu_to_le32(flags);
|
||||
|
||||
return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
|
||||
WMI_AP_APSD_BUFFERED_TRAFFIC_CMDID,
|
||||
NO_SYNC_WMIFLAG);
|
||||
}
|
||||
|
||||
static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len,
|
||||
struct ath6kl_vif *vif)
|
||||
{
|
||||
@ -3400,7 +3503,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
|
||||
break;
|
||||
case WMI_TEST_EVENTID:
|
||||
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TEST_EVENTID\n");
|
||||
ret = ath6kl_wmi_tcmd_test_report_rx(wmi, datap, len);
|
||||
ret = ath6kl_wmi_test_rx(wmi, datap, len);
|
||||
break;
|
||||
case WMI_GET_FIXRATES_CMDID:
|
||||
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_FIXRATES_CMDID\n");
|
||||
@ -3465,6 +3568,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
|
||||
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_COMPLETE_EVENTID\n");
|
||||
ret = ath6kl_wmi_tx_complete_event_rx(datap, len);
|
||||
break;
|
||||
case WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID:
|
||||
ath6kl_dbg(ATH6KL_DBG_WMI,
|
||||
"WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID");
|
||||
ret = ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx(wmi, vif);
|
||||
break;
|
||||
case WMI_REMAIN_ON_CHNL_EVENTID:
|
||||
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
|
||||
ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
|
||||
|
@ -149,8 +149,7 @@ enum wmi_msg_type {
|
||||
#define WMI_DATA_HDR_PS_MASK 0x1
|
||||
#define WMI_DATA_HDR_PS_SHIFT 5
|
||||
|
||||
#define WMI_DATA_HDR_MORE_MASK 0x1
|
||||
#define WMI_DATA_HDR_MORE_SHIFT 5
|
||||
#define WMI_DATA_HDR_MORE 0x20
|
||||
|
||||
enum wmi_data_hdr_data_type {
|
||||
WMI_DATA_HDR_DATA_TYPE_802_3 = 0,
|
||||
@ -160,6 +159,13 @@ enum wmi_data_hdr_data_type {
|
||||
WMI_DATA_HDR_DATA_TYPE_ACL,
|
||||
};
|
||||
|
||||
/* Bitmap of data header flags */
|
||||
enum wmi_data_hdr_flags {
|
||||
WMI_DATA_HDR_FLAGS_MORE = 0x1,
|
||||
WMI_DATA_HDR_FLAGS_EOSP = 0x2,
|
||||
WMI_DATA_HDR_FLAGS_UAPSD = 0x4,
|
||||
};
|
||||
|
||||
#define WMI_DATA_HDR_DATA_TYPE_MASK 0x3
|
||||
#define WMI_DATA_HDR_DATA_TYPE_SHIFT 6
|
||||
|
||||
@ -173,8 +179,12 @@ enum wmi_data_hdr_data_type {
|
||||
#define WMI_DATA_HDR_META_MASK 0x7
|
||||
#define WMI_DATA_HDR_META_SHIFT 13
|
||||
|
||||
/* Macros for operating on WMI_DATA_HDR (info3) field */
|
||||
#define WMI_DATA_HDR_IF_IDX_MASK 0xF
|
||||
|
||||
#define WMI_DATA_HDR_TRIG 0x10
|
||||
#define WMI_DATA_HDR_EOSP 0x10
|
||||
|
||||
struct wmi_data_hdr {
|
||||
s8 rssi;
|
||||
|
||||
@ -203,7 +213,8 @@ struct wmi_data_hdr {
|
||||
/*
|
||||
* usage of info3, 16-bit:
|
||||
* b3:b0 - Interface index
|
||||
* b15:b4 - Reserved
|
||||
* b4 - uAPSD trigger in rx & EOSP in tx
|
||||
* b15:b5 - Reserved
|
||||
*/
|
||||
__le16 info3;
|
||||
} __packed;
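
A simplified sketch of the bit packing implied by the definitions above, mirroring the ath6kl_wmi_data_hdr_add() change earlier in this patch; the helper name is illustrative and the data-type bits are omitted for brevity.

/*
 * Illustrative packing of info/info2/info3 using the masks and flag
 * bits defined in this header (simplified; not the driver function).
 */
static void example_pack_data_hdr(struct wmi_data_hdr *hdr, u8 msg_type,
				  u8 meta_ver, u8 if_idx, u32 flags)
{
	hdr->info = msg_type << WMI_DATA_HDR_MSG_TYPE_SHIFT;

	if (flags & WMI_DATA_HDR_FLAGS_MORE)
		hdr->info |= WMI_DATA_HDR_MORE;		/* bit 5 of info */

	hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);

	hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
	if (flags & WMI_DATA_HDR_FLAGS_EOSP)
		hdr->info3 |= cpu_to_le16(WMI_DATA_HDR_EOSP);	/* bit 4 */
}
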
|
||||
@ -257,6 +268,9 @@ static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr)
|
||||
#define WMI_META_VERSION_1 0x01
|
||||
#define WMI_META_VERSION_2 0x02
|
||||
|
||||
/* Flag to signal to FW to calculate TCP checksum */
|
||||
#define WMI_META_V2_FLAG_CSUM_OFFLOAD 0x01
|
||||
|
||||
struct wmi_tx_meta_v1 {
|
||||
/* packet ID to identify the tx request */
|
||||
u8 pkt_id;
|
||||
@ -646,7 +660,6 @@ enum auth_mode {
|
||||
WPA2_AUTH_CCKM = 0x40,
|
||||
};
|
||||
|
||||
#define WMI_MIN_KEY_INDEX 0
|
||||
#define WMI_MAX_KEY_INDEX 3
|
||||
|
||||
#define WMI_MAX_KEY_LEN 32
|
||||
@ -1237,6 +1250,15 @@ enum target_event_report_config {
|
||||
NO_DISCONN_EVT_IN_RECONN
|
||||
};
|
||||
|
||||
struct wmi_mcast_filter_cmd {
|
||||
u8 mcast_all_enable;
|
||||
} __packed;
|
||||
|
||||
#define ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE 6
|
||||
struct wmi_mcast_filter_add_del_cmd {
|
||||
u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
|
||||
} __packed;
|
||||
|
||||
/* Command Replies */
|
||||
|
||||
/* WMI_GET_CHANNEL_LIST_CMDID reply */
|
||||
@ -1335,6 +1357,8 @@ enum wmi_event_id {
|
||||
WMI_P2P_START_SDPD_EVENTID,
|
||||
WMI_P2P_SDPD_RX_EVENTID,
|
||||
|
||||
WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID = 0x1047,
|
||||
|
||||
WMI_THIN_RESERVED_START_EVENTID = 0x8000,
|
||||
/* Events in this range are reserved for thinmode */
|
||||
WMI_THIN_RESERVED_END_EVENTID = 0x8fff,
|
||||
@ -1903,7 +1927,7 @@ struct wow_filter {
|
||||
|
||||
struct wmi_set_ip_cmd {
|
||||
/* IP in network byte order */
|
||||
__le32 ips[MAX_IP_ADDRS];
|
||||
__be32 ips[MAX_IP_ADDRS];
|
||||
} __packed;
|
||||
|
||||
enum ath6kl_wow_filters {
|
||||
@ -2105,6 +2129,19 @@ struct wmi_rx_frame_format_cmd {
|
||||
} __packed;
|
||||
|
||||
/* AP mode events */
|
||||
struct wmi_ap_set_apsd_cmd {
|
||||
u8 enable;
|
||||
} __packed;
|
||||
|
||||
enum wmi_ap_apsd_buffered_traffic_flags {
|
||||
WMI_AP_APSD_NO_DELIVERY_FRAMES = 0x1,
|
||||
};
|
||||
|
||||
struct wmi_ap_apsd_buffered_traffic_cmd {
|
||||
__le16 aid;
|
||||
__le16 bitmap;
|
||||
__le32 flags;
|
||||
} __packed;
|
||||
|
||||
/* WMI_PS_POLL_EVENT */
|
||||
struct wmi_pspoll_event {
|
||||
@ -2321,7 +2358,7 @@ enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi);
|
||||
void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id);
|
||||
int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
|
||||
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
|
||||
u8 msg_type, bool more_data,
|
||||
u8 msg_type, u32 flags,
|
||||
enum wmi_data_hdr_data_type data_type,
|
||||
u8 meta_ver, void *tx_meta_info, u8 if_idx);
|
||||
|
||||
@ -2417,7 +2454,8 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
|
||||
|
||||
s32 ath6kl_wmi_get_rate(s8 rate_index);
|
||||
|
||||
int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
|
||||
int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
|
||||
__be32 ips0, __be32 ips1);
|
||||
int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
|
||||
enum ath6kl_host_mode host_mode);
|
||||
int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
|
||||
@ -2425,13 +2463,26 @@ int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
|
||||
u32 filter, u16 host_req_delay);
|
||||
int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
|
||||
u8 list_id, u8 filter_size,
|
||||
u8 filter_offset, u8 *filter, u8 *mask);
|
||||
u8 filter_offset, const u8 *filter,
|
||||
const u8 *mask);
|
||||
int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
|
||||
u16 list_id, u16 filter_id);
|
||||
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
|
||||
int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
|
||||
int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
|
||||
int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
|
||||
int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
|
||||
u8 *filter, bool add_filter);
|
||||
/* AP mode uAPSD */
|
||||
int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
|
||||
|
||||
int ath6kl_wmi_set_apsd_bfrd_traf(struct wmi *wmi,
|
||||
u8 if_idx, u16 aid,
|
||||
u16 bitmap, u32 flags);
|
||||
|
||||
u8 ath6kl_wmi_get_traffic_class(u8 user_priority);
|
||||
|
||||
u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri);
|
||||
/* AP mode */
|
||||
int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
|
||||
struct wmi_connect_cmd *p);
|
||||
|
@ -981,7 +981,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
|
||||
return -ENOMEM;
|
||||
|
||||
while (len) {
|
||||
transfer = min_t(int, len, 4096);
|
||||
transfer = min_t(size_t, len, 4096);
|
||||
memcpy(buf, data, transfer);
|
||||
|
||||
err = usb_control_msg(hif_dev->udev,
|
||||
|
@ -1346,7 +1346,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
|
||||
fc = hdr->frame_control;
|
||||
for (i = 0; i < sc->hw->max_rates; i++) {
|
||||
struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
|
||||
if (!rate->count)
|
||||
if (rate->idx < 0 || !rate->count)
|
||||
break;
|
||||
|
||||
final_ts_idx = i;
|
||||
|
@ -172,7 +172,7 @@ libipw_rx_frame_mgmt(struct libipw_device *ieee, struct sk_buff *skb,
|
||||
u16 stype)
|
||||
{
|
||||
if (ieee->iw_mode == IW_MODE_MASTER) {
|
||||
printk(KERN_DEBUG "%s: Master mode not yet suppported.\n",
|
||||
printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
|
||||
ieee->dev->name);
|
||||
return 0;
|
||||
/*
|
||||
|
@ -140,7 +140,7 @@ il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
|
||||
key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
|
||||
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
|
||||
|
||||
if (sta_id == il->ctx.bcast_sta_id)
|
||||
if (sta_id == il->hw_params.bcast_id)
|
||||
key_flags |= STA_KEY_MULTICAST_MSK;
|
||||
|
||||
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
|
||||
@ -341,7 +341,7 @@ il3945_send_beacon_cmd(struct il_priv *il)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rate = il_get_lowest_plcp(il, &il->ctx);
|
||||
rate = il_get_lowest_plcp(il);
|
||||
|
||||
frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
|
||||
|
||||
@ -512,7 +512,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
|
||||
hdr_len = ieee80211_hdrlen(fc);
|
||||
|
||||
/* Find idx into station table for destination station */
|
||||
sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
|
||||
sta_id = il_sta_id_or_broadcast(il, info->control.sta);
|
||||
if (sta_id == IL_INVALID_STATION) {
|
||||
D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
|
||||
goto drop;
|
||||
@ -538,10 +538,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
|
||||
|
||||
idx = il_get_cmd_idx(q, q->write_ptr, 0);
|
||||
|
||||
/* Set up driver data for this TFD */
|
||||
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
|
||||
txq->txb[q->write_ptr].skb = skb;
|
||||
txq->txb[q->write_ptr].ctx = &il->ctx;
|
||||
txq->skbs[q->write_ptr] = skb;
|
||||
|
||||
/* Init first empty entry in queue's array of Tx/cmd buffers */
|
||||
out_cmd = txq->cmd[idx];
|
||||
@ -619,8 +616,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
|
||||
|
||||
/* Add buffer containing Tx command and MAC(!) header to TFD's
|
||||
* first entry */
|
||||
il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
|
||||
0);
|
||||
il->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
|
||||
|
||||
/* Set up TFD's 2nd entry to point directly to remainder of skb,
|
||||
* if any (802.11 null frames have no payload). */
|
||||
@ -629,8 +625,8 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
|
||||
phys_addr =
|
||||
pci_map_single(il->pci_dev, skb->data + hdr_len, len,
|
||||
PCI_DMA_TODEVICE);
|
||||
il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
|
||||
len, 0, U32_PAD(len));
|
||||
il->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0,
|
||||
U32_PAD(len));
|
||||
}
|
||||
|
||||
/* Tell device the write idx *just past* this latest filled TFD */
|
||||
@ -672,15 +668,13 @@ il3945_get_measurement(struct il_priv *il,
|
||||
int rc;
|
||||
int spectrum_resp_status;
|
||||
int duration = le16_to_cpu(params->duration);
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
if (il_is_associated(il))
|
||||
add_time =
|
||||
il_usecs_to_beacons(il,
|
||||
le64_to_cpu(params->start_time) -
|
||||
il->_3945.last_tsf,
|
||||
le16_to_cpu(ctx->timing.
|
||||
beacon_interval));
|
||||
le16_to_cpu(il->timing.beacon_interval));
|
||||
|
||||
memset(&spectrum, 0, sizeof(spectrum));
|
||||
|
||||
@ -694,15 +688,14 @@ il3945_get_measurement(struct il_priv *il,
|
||||
if (il_is_associated(il))
|
||||
spectrum.start_time =
|
||||
il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
|
||||
le16_to_cpu(ctx->timing.
|
||||
beacon_interval));
|
||||
le16_to_cpu(il->timing.beacon_interval));
|
||||
else
|
||||
spectrum.start_time = 0;
|
||||
|
||||
spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
|
||||
spectrum.channels[0].channel = params->channel;
|
||||
spectrum.channels[0].type = type;
|
||||
if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
|
||||
if (il->active.flags & RXON_FLG_BAND_24G_MSK)
|
||||
spectrum.flags |=
|
||||
RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
|
||||
RXON_FLG_TGG_PROTECT_MSK;
|
||||
@ -2150,7 +2143,6 @@ il3945_alive_start(struct il_priv *il)
|
||||
{
|
||||
int thermal_spin = 0;
|
||||
u32 rfkill;
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
D_INFO("Runtime Alive received.\n");
|
||||
|
||||
@ -2206,13 +2198,13 @@ il3945_alive_start(struct il_priv *il)
|
||||
|
||||
if (il_is_associated(il)) {
|
||||
struct il3945_rxon_cmd *active_rxon =
|
||||
(struct il3945_rxon_cmd *)(&ctx->active);
|
||||
(struct il3945_rxon_cmd *)(&il->active);
|
||||
|
||||
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
|
||||
il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
|
||||
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
} else {
|
||||
/* Initialize our rx_config data */
|
||||
il_connection_init_rx_config(il, ctx);
|
||||
il_connection_init_rx_config(il);
|
||||
}
|
||||
|
||||
/* Configure Bluetooth device coexistence support */
|
||||
@ -2221,7 +2213,7 @@ il3945_alive_start(struct il_priv *il)
|
||||
set_bit(S_READY, &il->status);
|
||||
|
||||
/* Configure the adapter for unassociated operation */
|
||||
il3945_commit_rxon(il, ctx);
|
||||
il3945_commit_rxon(il);
|
||||
|
||||
il3945_reg_txpower_periodic(il);
|
||||
|
||||
@ -2253,7 +2245,7 @@ __il3945_down(struct il_priv *il)
|
||||
del_timer_sync(&il->watchdog);
|
||||
|
||||
/* Station information will now be cleared in device */
|
||||
il_clear_ucode_stations(il, NULL);
|
||||
il_clear_ucode_stations(il);
|
||||
il_dealloc_bcast_stations(il);
|
||||
il_clear_driver_stations(il);
|
||||
|
||||
@ -2339,12 +2331,11 @@ il3945_down(struct il_priv *il)
|
||||
static int
|
||||
il3945_alloc_bcast_station(struct il_priv *il)
|
||||
{
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
unsigned long flags;
|
||||
u8 sta_id;
|
||||
|
||||
spin_lock_irqsave(&il->sta_lock, flags);
|
||||
sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
|
||||
sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
|
||||
if (sta_id == IL_INVALID_STATION) {
|
||||
IL_ERR("Unable to prepare broadcast station\n");
|
||||
spin_unlock_irqrestore(&il->sta_lock, flags);
|
||||
@ -2422,7 +2413,7 @@ __il3945_up(struct il_priv *il)
|
||||
/* load bootstrap state machine,
|
||||
* load bootstrap program into processor's memory,
|
||||
* prepare to load the "initialize" uCode */
|
||||
rc = il->cfg->ops->lib->load_ucode(il);
|
||||
rc = il->ops->lib->load_ucode(il);
|
||||
|
||||
if (rc) {
|
||||
IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
|
||||
@ -2602,7 +2593,7 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
|
||||
/* We don't build a direct scan probe request; the uCode will do
|
||||
* that based on the direct_mask added to each channel entry */
|
||||
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
|
||||
scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
|
||||
scan->tx_cmd.sta_id = il->hw_params.bcast_id;
|
||||
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
|
||||
|
||||
/* flags + rate selection */
|
||||
@ -2664,14 +2655,12 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
|
||||
void
|
||||
il3945_post_scan(struct il_priv *il)
|
||||
{
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
/*
|
||||
* Since setting the RXON may have been deferred while
|
||||
* performing the scan, fire one off if needed
|
||||
*/
|
||||
if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
|
||||
il3945_commit_rxon(il, ctx);
|
||||
if (memcmp(&il->staging, &il->active, sizeof(il->staging)))
|
||||
il3945_commit_rxon(il);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -2684,7 +2673,8 @@ il3945_bg_restart(struct work_struct *data)
|
||||
|
||||
if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
|
||||
mutex_lock(&il->mutex);
|
||||
il->ctx.vif = NULL;
|
||||
/* FIXME: vif can be dereferenced */
|
||||
il->vif = NULL;
|
||||
il->is_open = 0;
|
||||
mutex_unlock(&il->mutex);
|
||||
il3945_down(il);
|
||||
@ -2722,13 +2712,12 @@ il3945_post_associate(struct il_priv *il)
|
||||
{
|
||||
int rc = 0;
|
||||
struct ieee80211_conf *conf = NULL;
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
if (!ctx->vif || !il->is_open)
|
||||
if (!il->vif || !il->is_open)
|
||||
return;
|
||||
|
||||
D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
|
||||
ctx->active.bssid_addr);
|
||||
D_ASSOC("Associated as %d to: %pM\n", il->vif->bss_conf.aid,
|
||||
il->active.bssid_addr);
|
||||
|
||||
if (test_bit(S_EXIT_PENDING, &il->status))
|
||||
return;
|
||||
@ -2737,35 +2726,35 @@ il3945_post_associate(struct il_priv *il)
|
||||
|
||||
conf = &il->hw->conf;
|
||||
|
||||
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
il3945_commit_rxon(il, ctx);
|
||||
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
il3945_commit_rxon(il);
|
||||
|
||||
rc = il_send_rxon_timing(il, ctx);
|
||||
rc = il_send_rxon_timing(il);
|
||||
if (rc)
|
||||
IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");
|
||||
|
||||
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
|
||||
il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
|
||||
|
||||
ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
|
||||
il->staging.assoc_id = cpu_to_le16(il->vif->bss_conf.aid);
|
||||
|
||||
D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
|
||||
ctx->vif->bss_conf.beacon_int);
|
||||
D_ASSOC("assoc id %d beacon interval %d\n", il->vif->bss_conf.aid,
|
||||
il->vif->bss_conf.beacon_int);
|
||||
|
||||
if (ctx->vif->bss_conf.use_short_preamble)
|
||||
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
if (il->vif->bss_conf.use_short_preamble)
|
||||
il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
else
|
||||
ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
|
||||
if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
|
||||
if (ctx->vif->bss_conf.use_short_slot)
|
||||
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
|
||||
if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
|
||||
if (il->vif->bss_conf.use_short_slot)
|
||||
il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
|
||||
else
|
||||
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
|
||||
il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
|
||||
}
|
||||
|
||||
il3945_commit_rxon(il, ctx);
|
||||
il3945_commit_rxon(il);
|
||||
|
||||
switch (ctx->vif->type) {
|
||||
switch (il->vif->type) {
|
||||
case NL80211_IFTYPE_STATION:
|
||||
il3945_rate_scale_init(il->hw, IL_AP_ID);
|
||||
break;
|
||||
@ -2774,7 +2763,7 @@ il3945_post_associate(struct il_priv *il)
|
||||
break;
|
||||
default:
|
||||
IL_ERR("%s Should not be called in %d mode\n", __func__,
|
||||
ctx->vif->type);
|
||||
il->vif->type);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -2891,8 +2880,7 @@ il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
|
||||
void
|
||||
il3945_config_ap(struct il_priv *il)
|
||||
{
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
struct ieee80211_vif *vif = ctx->vif;
|
||||
struct ieee80211_vif *vif = il->vif;
|
||||
int rc = 0;
|
||||
|
||||
if (test_bit(S_EXIT_PENDING, &il->status))
|
||||
@ -2902,31 +2890,31 @@ il3945_config_ap(struct il_priv *il)
|
||||
if (!(il_is_associated(il))) {
|
||||
|
||||
/* RXON - unassoc (to set timing command) */
|
||||
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
il3945_commit_rxon(il, ctx);
|
||||
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
il3945_commit_rxon(il);
|
||||
|
||||
/* RXON Timing */
|
||||
rc = il_send_rxon_timing(il, ctx);
|
||||
rc = il_send_rxon_timing(il);
|
||||
if (rc)
|
||||
IL_WARN("C_RXON_TIMING failed - "
|
||||
"Attempting to continue.\n");
|
||||
|
||||
ctx->staging.assoc_id = 0;
|
||||
il->staging.assoc_id = 0;
|
||||
|
||||
if (vif->bss_conf.use_short_preamble)
|
||||
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
else
|
||||
ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
|
||||
if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
|
||||
if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
|
||||
if (vif->bss_conf.use_short_slot)
|
||||
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
|
||||
il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
|
||||
else
|
||||
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
|
||||
il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
|
||||
}
|
||||
/* restore RXON assoc */
|
||||
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
|
||||
il3945_commit_rxon(il, ctx);
|
||||
il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
|
||||
il3945_commit_rxon(il);
|
||||
}
|
||||
il3945_send_beacon_cmd(il);
|
||||
}
|
||||
@ -2959,7 +2947,7 @@ il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
static_key = !il_is_associated(il);
|
||||
|
||||
if (!static_key) {
|
||||
sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
|
||||
sta_id = il_sta_id_or_broadcast(il, sta);
|
||||
if (sta_id == IL_INVALID_STATION)
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -3007,8 +2995,7 @@ il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
D_INFO("proceeding to add station %pM\n", sta->addr);
|
||||
sta_priv->common.sta_id = IL_INVALID_STATION;
|
||||
|
||||
ret =
|
||||
il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
|
||||
ret = il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
|
||||
if (ret) {
|
||||
IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
|
||||
/* Should we return success if return code is EEXIST ? */
|
||||
@ -3032,7 +3019,6 @@ il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
||||
{
|
||||
struct il_priv *il = hw->priv;
|
||||
__le32 filter_or = 0, filter_nand = 0;
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
#define CHK(test, flag) do { \
|
||||
if (*total_flags & (test)) \
|
||||
@ -3052,8 +3038,8 @@ il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
||||
|
||||
mutex_lock(&il->mutex);
|
||||
|
||||
ctx->staging.filter_flags &= ~filter_nand;
|
||||
ctx->staging.filter_flags |= filter_or;
|
||||
il->staging.filter_flags &= ~filter_nand;
|
||||
il->staging.filter_flags |= filter_or;
|
||||
|
||||
/*
|
||||
* Not committing directly because hardware can perform a scan,
|
||||
@ -3170,9 +3156,8 @@ static ssize_t
|
||||
il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct il_priv *il = dev_get_drvdata(d);
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
return sprintf(buf, "0x%04X\n", ctx->active.flags);
|
||||
return sprintf(buf, "0x%04X\n", il->active.flags);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
@ -3181,17 +3166,16 @@ il3945_store_flags(struct device *d, struct device_attribute *attr,
|
||||
{
|
||||
struct il_priv *il = dev_get_drvdata(d);
|
||||
u32 flags = simple_strtoul(buf, NULL, 0);
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
mutex_lock(&il->mutex);
|
||||
if (le32_to_cpu(ctx->staging.flags) != flags) {
|
||||
if (le32_to_cpu(il->staging.flags) != flags) {
|
||||
/* Cancel any currently running scans... */
|
||||
if (il_scan_cancel_timeout(il, 100))
|
||||
IL_WARN("Could not cancel scan.\n");
|
||||
else {
|
||||
D_INFO("Committing rxon.flags = 0x%04X\n", flags);
|
||||
ctx->staging.flags = cpu_to_le32(flags);
|
||||
il3945_commit_rxon(il, ctx);
|
||||
il->staging.flags = cpu_to_le32(flags);
|
||||
il3945_commit_rxon(il);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&il->mutex);
|
||||
@ -3207,9 +3191,8 @@ il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct il_priv *il = dev_get_drvdata(d);
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
|
||||
return sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags));
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
@ -3217,19 +3200,18 @@ il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct il_priv *il = dev_get_drvdata(d);
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
u32 filter_flags = simple_strtoul(buf, NULL, 0);
|
||||
|
||||
mutex_lock(&il->mutex);
|
||||
if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
|
||||
if (le32_to_cpu(il->staging.filter_flags) != filter_flags) {
|
||||
/* Cancel any currently running scans... */
|
||||
if (il_scan_cancel_timeout(il, 100))
|
||||
IL_WARN("Could not cancel scan.\n");
|
||||
else {
|
||||
D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
|
||||
filter_flags);
|
||||
ctx->staging.filter_flags = cpu_to_le32(filter_flags);
|
||||
il3945_commit_rxon(il, ctx);
|
||||
il->staging.filter_flags = cpu_to_le32(filter_flags);
|
||||
il3945_commit_rxon(il);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&il->mutex);
|
||||
@ -3278,9 +3260,8 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct il_priv *il = dev_get_drvdata(d);
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
struct ieee80211_measurement_params params = {
|
||||
.channel = le16_to_cpu(ctx->active.channel),
|
||||
.channel = le16_to_cpu(il->active.channel),
|
||||
.start_time = cpu_to_le64(il->_3945.last_tsf),
|
||||
.duration = cpu_to_le16(1),
|
||||
};
|
||||
@ -3474,7 +3455,7 @@ static struct attribute_group il3945_attribute_group = {
.attrs = il3945_sysfs_entries,
};

struct ieee80211_ops il3945_hw_ops = {
struct ieee80211_ops il3945_mac_ops = {
.tx = il3945_mac_tx,
.start = il3945_mac_start,
.stop = il3945_mac_stop,
@ -3567,7 +3548,8 @@ il3945_setup_mac(struct il_priv *il)
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;

hw->wiphy->interface_modes = il->ctx.interface_modes;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

hw->wiphy->flags |=
WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
@ -3614,44 +3596,29 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* 1. Allocating HW data
* ********************/

/* mac80211 allocates memory for this device instance, including
* space for this driver's ilate structure */
hw = il_alloc_all(cfg);
if (hw == NULL) {
pr_err("Can not allocate network device\n");
hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il3945_mac_ops);
if (!hw) {
err = -ENOMEM;
goto out;
}
il = hw->priv;
il->hw = hw;
SET_IEEE80211_DEV(hw, &pdev->dev);

il->cmd_queue = IL39_CMD_QUEUE_NUM;

il->ctx.ctxid = 0;

il->ctx.rxon_cmd = C_RXON;
il->ctx.rxon_timing_cmd = C_RXON_TIMING;
il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
il->ctx.qos_cmd = C_QOS_PARAM;
il->ctx.ap_sta_id = IL_AP_ID;
il->ctx.wep_key_cmd = C_WEPKEY;
il->ctx.interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;

/*
* Disabling hardware scan means that mac80211 will perform scans
* "the hard way", rather than using device's scan.
*/
if (il3945_mod_params.disable_hw_scan) {
D_INFO("Disabling hw_scan\n");
il3945_hw_ops.hw_scan = NULL;
il3945_mac_ops.hw_scan = NULL;
}

D_INFO("*** LOAD DRIVER ***\n");
il->cfg = cfg;
il->ops = &il3945_ops;
il->pci_dev = pdev;
il->inta_mask = CSR_INI_SET_MASK;

@ -3773,8 +3740,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_release_irq;
}

il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
&il->ctx);
il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5]);
il3945_setup_deferred_work(il);
il3945_setup_handlers(il);
il_power_initialize(il);

@ -342,7 +342,7 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
int i;

D_INFO("enter\n");
if (sta_id == il->ctx.bcast_sta_id)
if (sta_id == il->hw_params.bcast_id)
goto out;

psta = (struct il3945_sta_priv *)sta->drv_priv;
@ -927,8 +927,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)

rcu_read_lock();

sta =
ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
sta = ieee80211_find_sta(il->vif, il->stations[sta_id].sta.sta.addr);
if (!sta) {
D_RATE("Unable to find station to initialize rate scaling.\n");
rcu_read_unlock();
@ -944,7 +943,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
switch (il->band) {
case IEEE80211_BAND_2GHZ:
/* TODO: this always does G, not a regression */
if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) {
rs_sta->tgg = 1;
rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
} else
|
@ -293,17 +293,17 @@ il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
|
||||
{
|
||||
struct il_tx_queue *txq = &il->txq[txq_id];
|
||||
struct il_queue *q = &txq->q;
|
||||
struct il_tx_info *tx_info;
|
||||
struct sk_buff *skb;
|
||||
|
||||
BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
|
||||
|
||||
for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
|
||||
q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
|
||||
|
||||
tx_info = &txq->txb[txq->q.read_ptr];
|
||||
ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
|
||||
tx_info->skb = NULL;
|
||||
il->cfg->ops->lib->txq_free_tfd(il, txq);
|
||||
skb = txq->skbs[txq->q.read_ptr];
|
||||
ieee80211_tx_status_irqsafe(il->hw, skb);
|
||||
txq->skbs[txq->q.read_ptr] = NULL;
|
||||
il->ops->lib->txq_free_tfd(il, txq);
|
||||
}
|
||||
|
||||
if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
|
||||
@ -336,7 +336,7 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
|
||||
}
|
||||
|
||||
txq->time_stamp = jiffies;
|
||||
info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
|
||||
info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
|
||||
ieee80211_tx_info_clear_status(info);
|
||||
|
||||
/* Fill the MRR chain with some info about on-chip retransmissions */
|
||||
@ -660,15 +660,13 @@ il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
|
||||
PCI_DMA_TODEVICE);
|
||||
|
||||
/* free SKB */
|
||||
if (txq->txb) {
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = txq->txb[txq->q.read_ptr].skb;
|
||||
if (txq->skbs) {
|
||||
struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
|
||||
|
||||
/* can be called from irqs-disabled context */
|
||||
if (skb) {
|
||||
dev_kfree_skb_any(skb);
|
||||
txq->txb[txq->q.read_ptr].skb = NULL;
|
||||
txq->skbs[txq->q.read_ptr] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -960,12 +958,12 @@ il3945_hw_nic_init(struct il_priv *il)
|
||||
struct il_rx_queue *rxq = &il->rxq;
|
||||
|
||||
spin_lock_irqsave(&il->lock, flags);
|
||||
il->cfg->ops->lib->apm_ops.init(il);
|
||||
il->ops->lib->apm_ops.init(il);
|
||||
spin_unlock_irqrestore(&il->lock, flags);
|
||||
|
||||
il3945_set_pwr_vmain(il);
|
||||
|
||||
il->cfg->ops->lib->apm_ops.config(il);
|
||||
il->ops->lib->apm_ops.config(il);
|
||||
|
||||
/* Allocate the RX queue, or reset if it is already allocated */
|
||||
if (!rxq->bd) {
|
||||
@ -1388,7 +1386,7 @@ il3945_send_tx_power(struct il_priv *il)
|
||||
int rate_idx, i;
|
||||
const struct il_channel_info *ch_info = NULL;
|
||||
struct il3945_txpowertable_cmd txpower = {
|
||||
.channel = il->ctx.active.channel,
|
||||
.channel = il->active.channel,
|
||||
};
|
||||
u16 chan;
|
||||
|
||||
@ -1397,7 +1395,7 @@ il3945_send_tx_power(struct il_priv *il)
|
||||
"TX Power requested while scanning!\n"))
|
||||
return -EAGAIN;
|
||||
|
||||
chan = le16_to_cpu(il->ctx.active.channel);
|
||||
chan = le16_to_cpu(il->active.channel);
|
||||
|
||||
txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
|
||||
ch_info = il_get_channel_info(il, il->band, chan);
|
||||
@ -1615,7 +1613,7 @@ il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
|
||||
}
|
||||
|
||||
/* send Txpower command for current channel to ucode */
|
||||
return il->cfg->ops->lib->send_tx_power(il);
|
||||
return il->ops->lib->send_tx_power(il);
|
||||
}
|
||||
|
||||
int
|
||||
@ -1662,7 +1660,7 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
|
||||
}
|
||||
|
||||
static int
|
||||
il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
il3945_send_rxon_assoc(struct il_priv *il)
|
||||
{
|
||||
int rc = 0;
|
||||
struct il_rx_pkt *pkt;
|
||||
@ -1673,8 +1671,8 @@ il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
.flags = CMD_WANT_SKB,
|
||||
.data = &rxon_assoc,
|
||||
};
|
||||
const struct il_rxon_cmd *rxon1 = &ctx->staging;
|
||||
const struct il_rxon_cmd *rxon2 = &ctx->active;
|
||||
const struct il_rxon_cmd *rxon1 = &il->staging;
|
||||
const struct il_rxon_cmd *rxon2 = &il->active;
|
||||
|
||||
if (rxon1->flags == rxon2->flags &&
|
||||
rxon1->filter_flags == rxon2->filter_flags &&
|
||||
@ -1684,10 +1682,10 @@ il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
rxon_assoc.flags = ctx->staging.flags;
|
||||
rxon_assoc.filter_flags = ctx->staging.filter_flags;
|
||||
rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
|
||||
rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
|
||||
rxon_assoc.flags = il->staging.flags;
|
||||
rxon_assoc.filter_flags = il->staging.filter_flags;
|
||||
rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates;
|
||||
rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates;
|
||||
rxon_assoc.reserved = 0;
|
||||
|
||||
rc = il_send_cmd_sync(il, &cmd);
|
||||
@ -1714,11 +1712,11 @@ il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
* a HW tune is required based on the RXON structure changes.
|
||||
*/
|
||||
int
|
||||
il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
il3945_commit_rxon(struct il_priv *il)
|
||||
{
|
||||
/* cast away the const for active_rxon in this function */
|
||||
struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
|
||||
struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
|
||||
struct il3945_rxon_cmd *active_rxon = (void *)&il->active;
|
||||
struct il3945_rxon_cmd *staging_rxon = (void *)&il->staging;
|
||||
int rc = 0;
|
||||
bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
|
||||
|
||||
@ -1735,7 +1733,7 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
|
||||
staging_rxon->flags |= il3945_get_antenna_flags(il);
|
||||
|
||||
rc = il_check_rxon_cmd(il, ctx);
|
||||
rc = il_check_rxon_cmd(il);
|
||||
if (rc) {
|
||||
IL_ERR("Invalid RXON configuration. Not committing.\n");
|
||||
return -EINVAL;
|
||||
@ -1744,8 +1742,8 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
/* If we don't need to send a full RXON, we can use
|
||||
* il3945_rxon_assoc_cmd which is used to reconfigure filter
|
||||
* and other flags for the current radio configuration. */
|
||||
if (!il_full_rxon_required(il, &il->ctx)) {
|
||||
rc = il_send_rxon_assoc(il, &il->ctx);
|
||||
if (!il_full_rxon_required(il)) {
|
||||
rc = il_send_rxon_assoc(il);
|
||||
if (rc) {
|
||||
IL_ERR("Error setting RXON_ASSOC "
|
||||
"configuration (%d).\n", rc);
|
||||
@ -1776,7 +1774,7 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
active_rxon->reserved4 = 0;
|
||||
active_rxon->reserved5 = 0;
|
||||
rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
|
||||
&il->ctx.active);
|
||||
&il->active);
|
||||
|
||||
/* If the mask clearing failed then we set
|
||||
* active_rxon back to what it was previously */
|
||||
@ -1786,8 +1784,8 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
"configuration (%d).\n", rc);
|
||||
return rc;
|
||||
}
|
||||
il_clear_ucode_stations(il, &il->ctx);
|
||||
il_restore_stations(il, &il->ctx);
|
||||
il_clear_ucode_stations(il);
|
||||
il_restore_stations(il);
|
||||
}
|
||||
|
||||
D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
|
||||
@ -1801,7 +1799,7 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
staging_rxon->reserved4 = 0;
|
||||
staging_rxon->reserved5 = 0;
|
||||
|
||||
il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);
|
||||
il_set_rxon_hwcrypto(il, !il3945_mod_params.sw_crypto);
|
||||
|
||||
/* Apply the new configuration */
|
||||
rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
|
||||
@ -1814,8 +1812,8 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
|
||||
|
||||
if (!new_assoc) {
|
||||
il_clear_ucode_stations(il, &il->ctx);
|
||||
il_restore_stations(il, &il->ctx);
|
||||
il_clear_ucode_stations(il);
|
||||
il_restore_stations(il);
|
||||
}
|
||||
|
||||
/* If we issue a new RXON command which required a tune then we must
|
||||
@ -2258,7 +2256,6 @@ il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
|
||||
static int
|
||||
il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
|
||||
{
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
int ret;
|
||||
u8 sta_id;
|
||||
unsigned long flags;
|
||||
@ -2266,7 +2263,7 @@ il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
|
||||
if (sta_id_r)
|
||||
*sta_id_r = IL_INVALID_STATION;
|
||||
|
||||
ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
|
||||
ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
|
||||
if (ret) {
|
||||
IL_ERR("Unable to add station %pM\n", addr);
|
||||
return ret;
|
||||
@ -2396,15 +2393,16 @@ il3945_hw_set_hw_params(struct il_priv *il)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
il->hw_params.bcast_id = IL3945_BROADCAST_ID;
|
||||
|
||||
/* Assign number of Usable TX queues */
|
||||
il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
|
||||
il->hw_params.max_txq_num = il->cfg->num_of_queues;
|
||||
|
||||
il->hw_params.tfd_size = sizeof(struct il3945_tfd);
|
||||
il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
|
||||
il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
|
||||
il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
|
||||
il->hw_params.max_stations = IL3945_STATION_COUNT;
|
||||
il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;
|
||||
|
||||
il->sta_key_max_num = STA_KEY_MAX_NUM;
|
||||
|
||||
@ -2425,7 +2423,7 @@ il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
|
||||
tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
|
||||
memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
|
||||
|
||||
tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
|
||||
tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
|
||||
tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
|
||||
|
||||
frame_size =
|
||||
@ -2685,23 +2683,12 @@ static struct il_hcmd_utils_ops il3945_hcmd_utils = {
|
||||
.post_scan = il3945_post_scan,
|
||||
};
|
||||
|
||||
static const struct il_ops il3945_ops = {
|
||||
const struct il_ops il3945_ops = {
|
||||
.lib = &il3945_lib,
|
||||
.hcmd = &il3945_hcmd,
|
||||
.utils = &il3945_hcmd_utils,
|
||||
.led = &il3945_led_ops,
|
||||
.legacy = &il3945_legacy_ops,
|
||||
.ieee80211_ops = &il3945_hw_ops,
|
||||
};
|
||||
|
||||
static struct il_base_params il3945_base_params = {
|
||||
.eeprom_size = IL3945_EEPROM_IMG_SIZE,
|
||||
.num_of_queues = IL39_NUM_QUEUES,
|
||||
.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
|
||||
.set_l0s = false,
|
||||
.use_bsm = true,
|
||||
.led_compensation = 64,
|
||||
.wd_timeout = IL_DEF_WD_TIMEOUT,
|
||||
};
|
||||
|
||||
static struct il_cfg il3945_bg_cfg = {
|
||||
@ -2711,10 +2698,16 @@ static struct il_cfg il3945_bg_cfg = {
|
||||
.ucode_api_min = IL3945_UCODE_API_MIN,
|
||||
.sku = IL_SKU_G,
|
||||
.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
|
||||
.ops = &il3945_ops,
|
||||
.mod_params = &il3945_mod_params,
|
||||
.base_params = &il3945_base_params,
|
||||
.led_mode = IL_LED_BLINK,
|
||||
|
||||
.eeprom_size = IL3945_EEPROM_IMG_SIZE,
|
||||
.num_of_queues = IL39_NUM_QUEUES,
|
||||
.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
|
||||
.set_l0s = false,
|
||||
.use_bsm = true,
|
||||
.led_compensation = 64,
|
||||
.wd_timeout = IL_DEF_WD_TIMEOUT
|
||||
};
|
||||
|
||||
static struct il_cfg il3945_abg_cfg = {
|
||||
@ -2724,10 +2717,16 @@ static struct il_cfg il3945_abg_cfg = {
|
||||
.ucode_api_min = IL3945_UCODE_API_MIN,
|
||||
.sku = IL_SKU_A | IL_SKU_G,
|
||||
.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
|
||||
.ops = &il3945_ops,
|
||||
.mod_params = &il3945_mod_params,
|
||||
.base_params = &il3945_base_params,
|
||||
.led_mode = IL_LED_BLINK,
|
||||
|
||||
.eeprom_size = IL3945_EEPROM_IMG_SIZE,
|
||||
.num_of_queues = IL39_NUM_QUEUES,
|
||||
.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
|
||||
.set_l0s = false,
|
||||
.use_bsm = true,
|
||||
.led_compensation = 64,
|
||||
.wd_timeout = IL_DEF_WD_TIMEOUT
|
||||
};
|
||||
|
||||
DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
|
||||
|
@ -36,6 +36,8 @@ extern const struct pci_device_id il3945_hw_card_ids[];
|
||||
|
||||
#include "common.h"
|
||||
|
||||
extern const struct il_ops il3945_ops;
|
||||
|
||||
/* Highest firmware API version supported */
|
||||
#define IL3945_UCODE_API_MAX 2
|
||||
|
||||
@ -249,7 +251,7 @@ extern int il4965_get_temperature(const struct il_priv *il);
|
||||
extern void il3945_post_associate(struct il_priv *il);
|
||||
extern void il3945_config_ap(struct il_priv *il);
|
||||
|
||||
extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
extern int il3945_commit_rxon(struct il_priv *il);
|
||||
|
||||
/**
|
||||
* il3945_hw_find_station - Find station id for a given BSSID
|
||||
@ -261,8 +263,6 @@ extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
*/
|
||||
extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
|
||||
|
||||
extern struct ieee80211_ops il3945_hw_ops;
|
||||
|
||||
extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
|
||||
extern int il3945_init_hw_rate_table(struct il_priv *il);
|
||||
extern void il3945_reg_txpower_periodic(struct il_priv *il);
|
||||
|
@ -627,13 +627,13 @@ il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
|
||||
|
||||
average_sig[0] =
|
||||
data->chain_signal_a /
|
||||
il->cfg->base_params->chain_noise_num_beacons;
|
||||
il->cfg->chain_noise_num_beacons;
|
||||
average_sig[1] =
|
||||
data->chain_signal_b /
|
||||
il->cfg->base_params->chain_noise_num_beacons;
|
||||
il->cfg->chain_noise_num_beacons;
|
||||
average_sig[2] =
|
||||
data->chain_signal_c /
|
||||
il->cfg->base_params->chain_noise_num_beacons;
|
||||
il->cfg->chain_noise_num_beacons;
|
||||
|
||||
if (average_sig[0] >= average_sig[1]) {
|
||||
max_average_sig = average_sig[0];
|
||||
@ -806,8 +806,6 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
|
||||
unsigned long flags;
|
||||
struct stats_rx_non_phy *rx_info;
|
||||
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
if (il->disable_chain_noise_cal)
|
||||
return;
|
||||
|
||||
@ -833,8 +831,8 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
|
||||
return;
|
||||
}
|
||||
|
||||
rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
|
||||
rxon_chnum = le16_to_cpu(ctx->staging.channel);
|
||||
rxon_band24 = !!(il->staging.flags & RXON_FLG_BAND_24G_MSK);
|
||||
rxon_chnum = le16_to_cpu(il->staging.channel);
|
||||
|
||||
stat_band24 =
|
||||
!!(((struct il_notif_stats *)stat_resp)->
|
||||
@ -888,7 +886,7 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
|
||||
/* If this is the "chain_noise_num_beacons", determine:
|
||||
* 1) Disconnected antennas (using signal strengths)
|
||||
* 2) Differential gain (using silence noise) to balance receivers */
|
||||
if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
|
||||
if (data->beacon_count != il->cfg->chain_noise_num_beacons)
|
||||
return;
|
||||
|
||||
/* Analyze signal for disconnected antenna */
|
||||
@ -896,11 +894,11 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
|
||||
|
||||
/* Analyze noise for rx balance */
|
||||
average_noise[0] =
|
||||
data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
|
||||
data->chain_noise_a / il->cfg->chain_noise_num_beacons;
|
||||
average_noise[1] =
|
||||
data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
|
||||
data->chain_noise_b / il->cfg->chain_noise_num_beacons;
|
||||
average_noise[2] =
|
||||
data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
|
||||
data->chain_noise_c / il->cfg->chain_noise_num_beacons;
|
||||
|
||||
for (i = 0; i < NUM_RX_CHAINS; i++) {
|
||||
if (!data->disconn_array[i] &&
|
||||
@ -925,8 +923,8 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
|
||||
/* Some power changes may have been made during the calibration.
|
||||
* Update and commit the RXON
|
||||
*/
|
||||
if (il->cfg->ops->lib->update_chain_flags)
|
||||
il->cfg->ops->lib->update_chain_flags(il);
|
||||
if (il->ops->lib->update_chain_flags)
|
||||
il->ops->lib->update_chain_flags(il);
|
||||
|
||||
data->state = IL_CHAIN_NOISE_DONE;
|
||||
il_power_update_mode(il, false);
|
||||
|
File diff suppressed because it is too large
@ -641,13 +641,10 @@ il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
|
||||
* there are no non-GF stations present in the BSS.
|
||||
*/
|
||||
static bool
|
||||
il4965_rs_use_green(struct ieee80211_sta *sta)
|
||||
il4965_rs_use_green(struct il_priv *il, struct ieee80211_sta *sta)
|
||||
{
|
||||
struct il_station_priv *sta_priv = (void *)sta->drv_priv;
|
||||
struct il_rxon_context *ctx = sta_priv->common.ctx;
|
||||
|
||||
return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
|
||||
!(ctx->ht.non_gf_sta_present);
|
||||
!il->ht.non_gf_sta_present;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -823,8 +820,6 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
|
||||
u32 tx_rate;
|
||||
struct il_scale_tbl_info tbl_type;
|
||||
struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
|
||||
struct il_station_priv *sta_priv = (void *)sta->drv_priv;
|
||||
struct il_rxon_context *ctx = sta_priv->common.ctx;
|
||||
|
||||
D_RATE("get frame ack response, update rate scale win\n");
|
||||
|
||||
@ -892,7 +887,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
|
||||
lq_sta->missed_rate_counter++;
|
||||
if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
|
||||
lq_sta->missed_rate_counter = 0;
|
||||
il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
|
||||
il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
|
||||
}
|
||||
/* Regardless, ignore this status info for outdated rate */
|
||||
return;
|
||||
@ -1184,8 +1179,6 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
|
||||
u16 rate_mask;
|
||||
s32 rate;
|
||||
s8 is_green = lq_sta->is_green;
|
||||
struct il_station_priv *sta_priv = (void *)sta->drv_priv;
|
||||
struct il_rxon_context *ctx = sta_priv->common.ctx;
|
||||
|
||||
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
|
||||
return -1;
|
||||
@ -1206,7 +1199,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
|
||||
tbl->max_search = IL_MAX_SEARCH;
|
||||
rate_mask = lq_sta->active_mimo2_rate;
|
||||
|
||||
if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
|
||||
if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
|
||||
tbl->is_ht40 = 1;
|
||||
else
|
||||
tbl->is_ht40 = 0;
|
||||
@ -1240,8 +1233,6 @@ il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
|
||||
u16 rate_mask;
|
||||
u8 is_green = lq_sta->is_green;
|
||||
s32 rate;
|
||||
struct il_station_priv *sta_priv = (void *)sta->drv_priv;
|
||||
struct il_rxon_context *ctx = sta_priv->common.ctx;
|
||||
|
||||
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
|
||||
return -1;
|
||||
@ -1254,7 +1245,7 @@ il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
|
||||
tbl->max_search = IL_MAX_SEARCH;
|
||||
rate_mask = lq_sta->active_siso_rate;
|
||||
|
||||
if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
|
||||
if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
|
||||
tbl->is_ht40 = 1;
|
||||
else
|
||||
tbl->is_ht40 = 0;
|
||||
@ -1733,8 +1724,7 @@ il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
|
||||
* setup rate table in uCode
|
||||
*/
|
||||
static void
|
||||
il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
struct il_lq_sta *lq_sta,
|
||||
il4965_rs_update_rate_tbl(struct il_priv *il, struct il_lq_sta *lq_sta,
|
||||
struct il_scale_tbl_info *tbl, int idx, u8 is_green)
|
||||
{
|
||||
u32 rate;
|
||||
@ -1742,7 +1732,7 @@ il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
/* Update uCode's rate table. */
|
||||
rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
|
||||
il4965_rs_fill_link_cmd(il, lq_sta, rate);
|
||||
il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
|
||||
il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1778,8 +1768,6 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
|
||||
s32 sr;
|
||||
u8 tid = MAX_TID_COUNT;
|
||||
struct il_tid_data *tid_data;
|
||||
struct il_station_priv *sta_priv = (void *)sta->drv_priv;
|
||||
struct il_rxon_context *ctx = sta_priv->common.ctx;
|
||||
|
||||
D_RATE("rate scale calculate new rate for skb\n");
|
||||
|
||||
@ -1815,7 +1803,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
|
||||
if (is_legacy(tbl->lq_type))
|
||||
lq_sta->is_green = 0;
|
||||
else
|
||||
lq_sta->is_green = il4965_rs_use_green(sta);
|
||||
lq_sta->is_green = il4965_rs_use_green(il, sta);
|
||||
is_green = lq_sta->is_green;
|
||||
|
||||
/* current tx rate */
|
||||
@ -1854,7 +1842,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
|
||||
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
|
||||
/* get "active" rate info */
|
||||
idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
|
||||
il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
|
||||
il4965_rs_update_rate_tbl(il, lq_sta, tbl, idx,
|
||||
is_green);
|
||||
}
|
||||
return;
|
||||
@ -2057,8 +2045,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
|
||||
lq_update:
|
||||
/* Replace uCode's rate table for the destination station. */
|
||||
if (update_lq)
|
||||
il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
|
||||
is_green);
|
||||
il4965_rs_update_rate_tbl(il, lq_sta, tbl, idx, is_green);
|
||||
|
||||
/* Should we stay with this modulation mode,
|
||||
* or search for a new one? */
|
||||
@ -2098,7 +2085,7 @@ lq_update:
|
||||
D_RATE("Switch current mcs: %X idx: %d\n",
|
||||
tbl->current_rate, idx);
|
||||
il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
|
||||
il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
|
||||
il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
|
||||
} else
|
||||
done_search = 1;
|
||||
}
|
||||
@ -2166,17 +2153,15 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
|
||||
int rate_idx;
|
||||
int i;
|
||||
u32 rate;
|
||||
u8 use_green = il4965_rs_use_green(sta);
|
||||
u8 use_green = il4965_rs_use_green(il, sta);
|
||||
u8 active_tbl = 0;
|
||||
u8 valid_tx_ant;
|
||||
struct il_station_priv *sta_priv;
|
||||
struct il_rxon_context *ctx;
|
||||
|
||||
if (!sta || !lq_sta)
|
||||
return;
|
||||
|
||||
sta_priv = (void *)sta->drv_priv;
|
||||
ctx = sta_priv->common.ctx;
|
||||
|
||||
i = lq_sta->last_txrate_idx;
|
||||
|
||||
@ -2208,7 +2193,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
|
||||
il4965_rs_set_expected_tpt_table(lq_sta, tbl);
|
||||
il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
|
||||
il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
|
||||
il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
|
||||
il_send_lq_cmd(il, &lq_sta->lq, CMD_SYNC, true);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -2341,7 +2326,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
|
||||
lq_sta->is_dup = 0;
|
||||
lq_sta->max_rate_idx = -1;
|
||||
lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
|
||||
lq_sta->is_green = il4965_rs_use_green(sta);
|
||||
lq_sta->is_green = il4965_rs_use_green(il, sta);
|
||||
lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
|
||||
lq_sta->band = il->band;
|
||||
/*
|
||||
@ -2579,9 +2564,6 @@ il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
|
||||
char buf[64];
|
||||
size_t buf_size;
|
||||
u32 parsed_rate;
|
||||
struct il_station_priv *sta_priv =
|
||||
container_of(lq_sta, struct il_station_priv, lq_sta);
|
||||
struct il_rxon_context *ctx = sta_priv->common.ctx;
|
||||
|
||||
il = lq_sta->drv;
|
||||
memset(buf, 0, sizeof(buf));
|
||||
@ -2603,7 +2585,7 @@ il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
|
||||
|
||||
if (lq_sta->dbg_fixed_rate) {
|
||||
il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
|
||||
il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
|
||||
il_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
|
||||
}
|
||||
|
||||
return count;
|
||||
|
@ -569,82 +569,6 @@ il4965_chain_noise_reset(struct il_priv *il)
|
||||
}
|
||||
}
|
||||
|
||||
static struct il_sensitivity_ranges il4965_sensitivity = {
|
||||
.min_nrg_cck = 97,
|
||||
.max_nrg_cck = 0, /* not used, set to 0 */
|
||||
|
||||
.auto_corr_min_ofdm = 85,
|
||||
.auto_corr_min_ofdm_mrc = 170,
|
||||
.auto_corr_min_ofdm_x1 = 105,
|
||||
.auto_corr_min_ofdm_mrc_x1 = 220,
|
||||
|
||||
.auto_corr_max_ofdm = 120,
|
||||
.auto_corr_max_ofdm_mrc = 210,
|
||||
.auto_corr_max_ofdm_x1 = 140,
|
||||
.auto_corr_max_ofdm_mrc_x1 = 270,
|
||||
|
||||
.auto_corr_min_cck = 125,
|
||||
.auto_corr_max_cck = 200,
|
||||
.auto_corr_min_cck_mrc = 200,
|
||||
.auto_corr_max_cck_mrc = 400,
|
||||
|
||||
.nrg_th_cck = 100,
|
||||
.nrg_th_ofdm = 100,
|
||||
|
||||
.barker_corr_th_min = 190,
|
||||
.barker_corr_th_min_mrc = 390,
|
||||
.nrg_th_cca = 62,
|
||||
};
|
||||
|
||||
static void
|
||||
il4965_set_ct_threshold(struct il_priv *il)
|
||||
{
|
||||
/* want Kelvin */
|
||||
il->hw_params.ct_kill_threshold =
|
||||
CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
|
||||
}
|
||||
|
||||
/**
|
||||
* il4965_hw_set_hw_params
|
||||
*
|
||||
* Called when initializing driver
|
||||
*/
|
||||
static int
|
||||
il4965_hw_set_hw_params(struct il_priv *il)
|
||||
{
|
||||
if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
|
||||
il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
|
||||
il->cfg->base_params->num_of_queues =
|
||||
il->cfg->mod_params->num_of_queues;
|
||||
|
||||
il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
|
||||
il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
|
||||
il->hw_params.scd_bc_tbls_size =
|
||||
il->cfg->base_params->num_of_queues *
|
||||
sizeof(struct il4965_scd_bc_tbl);
|
||||
il->hw_params.tfd_size = sizeof(struct il_tfd);
|
||||
il->hw_params.max_stations = IL4965_STATION_COUNT;
|
||||
il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
|
||||
il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
|
||||
il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
|
||||
il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
|
||||
il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
|
||||
|
||||
il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
|
||||
|
||||
il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
|
||||
il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
|
||||
il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
|
||||
il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
|
||||
|
||||
il4965_set_ct_threshold(il);
|
||||
|
||||
il->hw_params.sens = &il4965_sensitivity;
|
||||
il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static s32
|
||||
il4965_math_div_round(s32 num, s32 denom, s32 * res)
|
||||
{
|
||||
@ -1342,7 +1266,6 @@ il4965_send_tx_power(struct il_priv *il)
|
||||
u8 band = 0;
|
||||
bool is_ht40 = false;
|
||||
u8 ctrl_chan_high = 0;
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
if (WARN_ONCE
|
||||
(test_bit(S_SCAN_HW, &il->status),
|
||||
@ -1351,16 +1274,16 @@ il4965_send_tx_power(struct il_priv *il)
|
||||
|
||||
band = il->band == IEEE80211_BAND_2GHZ;
|
||||
|
||||
is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
|
||||
is_ht40 = iw4965_is_ht40_channel(il->active.flags);
|
||||
|
||||
if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
|
||||
if (is_ht40 && (il->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
|
||||
ctrl_chan_high = 1;
|
||||
|
||||
cmd.band = band;
|
||||
cmd.channel = ctx->active.channel;
|
||||
cmd.channel = il->active.channel;
|
||||
|
||||
ret =
|
||||
il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
|
||||
il4965_fill_txpower_tbl(il, band, le16_to_cpu(il->active.channel),
|
||||
is_ht40, ctrl_chan_high, &cmd.tx_power);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -1372,12 +1295,12 @@ out:
|
||||
}
|
||||
|
||||
static int
|
||||
il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
il4965_send_rxon_assoc(struct il_priv *il)
|
||||
{
|
||||
int ret = 0;
|
||||
struct il4965_rxon_assoc_cmd rxon_assoc;
|
||||
const struct il_rxon_cmd *rxon1 = &ctx->staging;
|
||||
const struct il_rxon_cmd *rxon2 = &ctx->active;
|
||||
const struct il_rxon_cmd *rxon1 = &il->staging;
|
||||
const struct il_rxon_cmd *rxon2 = &il->active;
|
||||
|
||||
if (rxon1->flags == rxon2->flags &&
|
||||
rxon1->filter_flags == rxon2->filter_flags &&
|
||||
@ -1392,16 +1315,16 @@ il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
rxon_assoc.flags = ctx->staging.flags;
|
||||
rxon_assoc.filter_flags = ctx->staging.filter_flags;
|
||||
rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
|
||||
rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
|
||||
rxon_assoc.flags = il->staging.flags;
|
||||
rxon_assoc.filter_flags = il->staging.filter_flags;
|
||||
rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates;
|
||||
rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates;
|
||||
rxon_assoc.reserved = 0;
|
||||
rxon_assoc.ofdm_ht_single_stream_basic_rates =
|
||||
ctx->staging.ofdm_ht_single_stream_basic_rates;
|
||||
il->staging.ofdm_ht_single_stream_basic_rates;
|
||||
rxon_assoc.ofdm_ht_dual_stream_basic_rates =
|
||||
ctx->staging.ofdm_ht_dual_stream_basic_rates;
|
||||
rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
|
||||
il->staging.ofdm_ht_dual_stream_basic_rates;
|
||||
rxon_assoc.rx_chain_select_flags = il->staging.rx_chain;
|
||||
|
||||
ret =
|
||||
il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
|
||||
@ -1411,23 +1334,20 @@ il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
}
|
||||
|
||||
static int
|
||||
il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
il4965_commit_rxon(struct il_priv *il)
|
||||
{
|
||||
/* cast away the const for active_rxon in this function */
|
||||
struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
|
||||
struct il_rxon_cmd *active_rxon = (void *)&il->active;
|
||||
int ret;
|
||||
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
|
||||
bool new_assoc = !!(il->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
|
||||
|
||||
if (!il_is_alive(il))
|
||||
return -EBUSY;
|
||||
|
||||
if (!ctx->is_active)
|
||||
return 0;
|
||||
|
||||
/* always get timestamp with Rx frame */
|
||||
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
|
||||
il->staging.flags |= RXON_FLG_TSF2HOST_MSK;
|
||||
|
||||
ret = il_check_rxon_cmd(il, ctx);
|
||||
ret = il_check_rxon_cmd(il);
|
||||
if (ret) {
|
||||
IL_ERR("Invalid RXON configuration. Not committing.\n");
|
||||
return -EINVAL;
|
||||
@ -1438,7 +1358,7 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
* abort any previous channel switch if still in process
|
||||
*/
|
||||
if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
|
||||
il->switch_channel != ctx->staging.channel) {
|
||||
il->switch_channel != il->staging.channel) {
|
||||
D_11H("abort channel switch on %d\n",
|
||||
le16_to_cpu(il->switch_channel));
|
||||
il_chswitch_done(il, false);
|
||||
@ -1447,15 +1367,15 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
/* If we don't need to send a full RXON, we can use
|
||||
* il_rxon_assoc_cmd which is used to reconfigure filter
|
||||
* and other flags for the current radio configuration. */
|
||||
if (!il_full_rxon_required(il, ctx)) {
|
||||
ret = il_send_rxon_assoc(il, ctx);
|
||||
if (!il_full_rxon_required(il)) {
|
||||
ret = il_send_rxon_assoc(il);
|
||||
if (ret) {
|
||||
IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
|
||||
il_print_rx_config_cmd(il, ctx);
|
||||
memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
|
||||
il_print_rx_config_cmd(il);
|
||||
/*
|
||||
* We do not commit tx power settings while channel changing,
|
||||
* do it now if tx power changed.
|
||||
@ -1468,12 +1388,12 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
* an RXON_ASSOC and the new config wants the associated mask enabled,
|
||||
* we must clear the associated from the active configuration
|
||||
* before we apply the new config */
|
||||
if (il_is_associated_ctx(ctx) && new_assoc) {
|
||||
if (il_is_associated(il) && new_assoc) {
|
||||
D_INFO("Toggling associated bit on current RXON\n");
|
||||
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
|
||||
ret =
|
||||
il_send_cmd_pdu(il, ctx->rxon_cmd,
|
||||
il_send_cmd_pdu(il, C_RXON,
|
||||
sizeof(struct il_rxon_cmd), active_rxon);
|
||||
|
||||
/* If the mask clearing failed then we set
|
||||
@ -1483,9 +1403,9 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
il_clear_ucode_stations(il, ctx);
|
||||
il_restore_stations(il, ctx);
|
||||
ret = il4965_restore_default_wep_keys(il, ctx);
|
||||
il_clear_ucode_stations(il);
|
||||
il_restore_stations(il);
|
||||
ret = il4965_restore_default_wep_keys(il);
|
||||
if (ret) {
|
||||
IL_ERR("Failed to restore WEP keys (%d)\n", ret);
|
||||
return ret;
|
||||
@ -1494,9 +1414,9 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
|
||||
D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
|
||||
"* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
|
||||
le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);
|
||||
le16_to_cpu(il->staging.channel), il->staging.bssid_addr);
|
||||
|
||||
il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);
|
||||
il_set_rxon_hwcrypto(il, !il->cfg->mod_params->sw_crypto);
|
||||
|
||||
/* Apply the new configuration
|
||||
* RXON unassoc clears the station table in uCode so restoration of
|
||||
@ -1504,17 +1424,17 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
*/
|
||||
if (!new_assoc) {
|
||||
ret =
|
||||
il_send_cmd_pdu(il, ctx->rxon_cmd,
|
||||
sizeof(struct il_rxon_cmd), &ctx->staging);
|
||||
il_send_cmd_pdu(il, C_RXON,
|
||||
sizeof(struct il_rxon_cmd), &il->staging);
|
||||
if (ret) {
|
||||
IL_ERR("Error setting new RXON (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
D_INFO("Return from !new_assoc RXON.\n");
|
||||
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
|
||||
il_clear_ucode_stations(il, ctx);
|
||||
il_restore_stations(il, ctx);
|
||||
ret = il4965_restore_default_wep_keys(il, ctx);
|
||||
memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
|
||||
il_clear_ucode_stations(il);
|
||||
il_restore_stations(il);
|
||||
ret = il4965_restore_default_wep_keys(il);
|
||||
if (ret) {
|
||||
IL_ERR("Failed to restore WEP keys (%d)\n", ret);
|
||||
return ret;
|
||||
@ -1526,15 +1446,15 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
* RXON assoc doesn't clear the station table in uCode,
|
||||
*/
|
||||
ret =
|
||||
il_send_cmd_pdu(il, ctx->rxon_cmd,
|
||||
sizeof(struct il_rxon_cmd), &ctx->staging);
|
||||
il_send_cmd_pdu(il, C_RXON,
|
||||
sizeof(struct il_rxon_cmd), &il->staging);
|
||||
if (ret) {
|
||||
IL_ERR("Error setting new RXON (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
|
||||
memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
|
||||
}
|
||||
il_print_rx_config_cmd(il, ctx);
|
||||
il_print_rx_config_cmd(il);
|
||||
|
||||
il4965_init_sensitivity(il);
|
||||
|
||||
@ -1553,7 +1473,6 @@ static int
|
||||
il4965_hw_channel_switch(struct il_priv *il,
|
||||
struct ieee80211_channel_switch *ch_switch)
|
||||
{
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
int rc;
|
||||
u8 band = 0;
|
||||
bool is_ht40 = false;
|
||||
@ -1564,21 +1483,24 @@ il4965_hw_channel_switch(struct il_priv *il,
|
||||
u16 ch;
|
||||
u32 tsf_low;
|
||||
u8 switch_count;
|
||||
u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
|
||||
struct ieee80211_vif *vif = ctx->vif;
|
||||
band = il->band == IEEE80211_BAND_2GHZ;
|
||||
u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval);
|
||||
struct ieee80211_vif *vif = il->vif;
|
||||
band = (il->band == IEEE80211_BAND_2GHZ);
|
||||
|
||||
is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
|
||||
if (WARN_ON_ONCE(vif == NULL))
|
||||
return -EIO;
|
||||
|
||||
if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
|
||||
is_ht40 = iw4965_is_ht40_channel(il->staging.flags);
|
||||
|
||||
if (is_ht40 && (il->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
|
||||
ctrl_chan_high = 1;
|
||||
|
||||
cmd.band = band;
|
||||
cmd.expect_beacon = 0;
|
||||
ch = ch_switch->channel->hw_value;
|
||||
cmd.channel = cpu_to_le16(ch);
|
||||
cmd.rxon_flags = ctx->staging.flags;
|
||||
cmd.rxon_filter_flags = ctx->staging.filter_flags;
|
||||
cmd.rxon_flags = il->staging.flags;
|
||||
cmd.rxon_filter_flags = il->staging.filter_flags;
|
||||
switch_count = ch_switch->count;
|
||||
tsf_low = ch_switch->timestamp & 0x0ffffffff;
|
||||
/*
|
||||
@ -1611,7 +1533,7 @@ il4965_hw_channel_switch(struct il_priv *il,
|
||||
cmd.expect_beacon = il_is_channel_radar(ch_info);
|
||||
else {
|
||||
IL_ERR("invalid channel switch from %u to %u\n",
|
||||
ctx->active.channel, ch);
|
||||
il->active.channel, ch);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
@ -1876,7 +1798,7 @@ il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
|
||||
D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
|
||||
agg->frame_count, agg->start_idx, idx);
|
||||
|
||||
info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
|
||||
info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
|
||||
info->status.rates[0].count = tx_resp->failure_frame + 1;
|
||||
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
|
||||
info->flags |= il4965_tx_status_to_mac80211(status);
|
||||
@ -1891,6 +1813,7 @@ il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
|
||||
/* Two or more frames were attempted; expect block-ack */
|
||||
u64 bitmap = 0;
|
||||
int start = agg->start_idx;
|
||||
struct sk_buff *skb;
|
||||
|
||||
/* Construct bit-map of pending frames within Tx win */
|
||||
for (i = 0; i < agg->frame_count; i++) {
|
||||
@ -1908,12 +1831,10 @@ il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
|
||||
D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
|
||||
agg->frame_count, txq_id, idx);
|
||||
|
||||
hdr = il_tx_queue_get_hdr(il, txq_id, idx);
|
||||
if (!hdr) {
|
||||
IL_ERR("BUG_ON idx doesn't point to valid skb"
|
||||
" idx=%d, txq_id=%d\n", idx, txq_id);
|
||||
skb = il->txq[txq_id].skbs[idx];
|
||||
if (WARN_ON_ONCE(skb == NULL))
|
||||
return -1;
|
||||
}
|
||||
hdr = (struct ieee80211_hdr *) skb->data;
|
||||
|
||||
sc = le16_to_cpu(hdr->seq_ctrl);
|
||||
if (idx != (SEQ_TO_SN(sc) & 0xff)) {
|
||||
@ -1969,7 +1890,7 @@ il4965_find_station(struct il_priv *il, const u8 * addr)
|
||||
start = IL_STA_ID;
|
||||
|
||||
if (is_broadcast_ether_addr(addr))
|
||||
return il->ctx.bcast_sta_id;
|
||||
return il->hw_params.bcast_id;
|
||||
|
||||
spin_lock_irqsave(&il->sta_lock, flags);
|
||||
for (i = start; i < il->hw_params.max_stations; i++)
|
||||
@ -2021,6 +1942,7 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
|
||||
int txq_id = SEQ_TO_QUEUE(sequence);
|
||||
int idx = SEQ_TO_IDX(sequence);
|
||||
struct il_tx_queue *txq = &il->txq[txq_id];
|
||||
struct sk_buff *skb;
|
||||
struct ieee80211_hdr *hdr;
|
||||
struct ieee80211_tx_info *info;
|
||||
struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
|
||||
@ -2039,10 +1961,12 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
|
||||
}
|
||||
|
||||
txq->time_stamp = jiffies;
|
||||
info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
|
||||
|
||||
skb = txq->skbs[txq->q.read_ptr];
|
||||
info = IEEE80211_SKB_CB(skb);
|
||||
memset(&info->status, 0, sizeof(info->status));
|
||||
|
||||
hdr = il_tx_queue_get_hdr(il, txq_id, idx);
|
||||
hdr = (struct ieee80211_hdr *) skb->data;
|
||||
if (ieee80211_is_data_qos(hdr->frame_control)) {
|
||||
qc = ieee80211_get_qos_ctl(hdr);
|
||||
tid = qc[0] & 0xf;
|
||||
@ -2133,21 +2057,18 @@ static struct il_hcmd_ops il4965_hcmd = {
|
||||
static void
|
||||
il4965_post_scan(struct il_priv *il)
|
||||
{
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
/*
|
||||
* Since setting the RXON may have been deferred while
|
||||
* performing the scan, fire one off if needed
|
||||
*/
|
||||
if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
|
||||
il_commit_rxon(il, ctx);
|
||||
if (memcmp(&il->staging, &il->active, sizeof(il->staging)))
|
||||
il_commit_rxon(il);
|
||||
}
|
||||
|
||||
static void
|
||||
il4965_post_associate(struct il_priv *il)
|
||||
{
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
struct ieee80211_vif *vif = ctx->vif;
|
||||
struct ieee80211_vif *vif = il->vif;
|
||||
struct ieee80211_conf *conf = NULL;
|
||||
int ret = 0;
|
||||
|
||||
@ -2161,41 +2082,41 @@ il4965_post_associate(struct il_priv *il)
|
||||
|
||||
conf = &il->hw->conf;
|
||||
|
||||
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
il_commit_rxon(il, ctx);
|
||||
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
il_commit_rxon(il);
|
||||
|
||||
ret = il_send_rxon_timing(il, ctx);
|
||||
ret = il_send_rxon_timing(il);
|
||||
if (ret)
|
||||
IL_WARN("RXON timing - " "Attempting to continue.\n");
|
||||
|
||||
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
|
||||
il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
|
||||
|
||||
il_set_rxon_ht(il, &il->current_ht_config);
|
||||
|
||||
if (il->cfg->ops->hcmd->set_rxon_chain)
|
||||
il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
|
||||
if (il->ops->hcmd->set_rxon_chain)
|
||||
il->ops->hcmd->set_rxon_chain(il);
|
||||
|
||||
ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
|
||||
il->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
|
||||
|
||||
D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
|
||||
vif->bss_conf.beacon_int);
|
||||
|
||||
if (vif->bss_conf.use_short_preamble)
|
||||
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
else
|
||||
ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
|
||||
if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
|
||||
if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
|
||||
if (vif->bss_conf.use_short_slot)
|
||||
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
|
||||
il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
|
||||
else
|
||||
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
|
||||
il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
|
||||
}
|
||||
|
||||
il_commit_rxon(il, ctx);
|
||||
il_commit_rxon(il);
|
||||
|
||||
D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
|
||||
ctx->active.bssid_addr);
|
||||
il->active.bssid_addr);
|
||||
|
||||
switch (vif->type) {
|
||||
case NL80211_IFTYPE_STATION:
|
||||
@ -2223,8 +2144,7 @@ il4965_post_associate(struct il_priv *il)
|
||||
static void
|
||||
il4965_config_ap(struct il_priv *il)
|
||||
{
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
struct ieee80211_vif *vif = ctx->vif;
|
||||
struct ieee80211_vif *vif = il->vif;
|
||||
int ret = 0;
|
||||
|
||||
lockdep_assert_held(&il->mutex);
|
||||
@ -2233,14 +2153,14 @@ il4965_config_ap(struct il_priv *il)
|
||||
return;
|
||||
|
||||
/* The following should be done only at AP bring up */
|
||||
if (!il_is_associated_ctx(ctx)) {
|
||||
if (!il_is_associated(il)) {
|
||||
|
||||
/* RXON - unassoc (to set timing command) */
|
||||
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
il_commit_rxon(il, ctx);
|
||||
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
il_commit_rxon(il);
|
||||
|
||||
/* RXON Timing */
|
||||
ret = il_send_rxon_timing(il, ctx);
|
||||
ret = il_send_rxon_timing(il);
|
||||
if (ret)
|
||||
IL_WARN("RXON timing failed - "
|
||||
"Attempting to continue.\n");
|
||||
@ -2248,27 +2168,27 @@ il4965_config_ap(struct il_priv *il)
|
||||
/* AP has all antennas */
|
||||
il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
|
||||
il_set_rxon_ht(il, &il->current_ht_config);
|
||||
if (il->cfg->ops->hcmd->set_rxon_chain)
|
||||
il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
|
||||
if (il->ops->hcmd->set_rxon_chain)
|
||||
il->ops->hcmd->set_rxon_chain(il);
|
||||
|
||||
ctx->staging.assoc_id = 0;
|
||||
il->staging.assoc_id = 0;
|
||||
|
||||
if (vif->bss_conf.use_short_preamble)
|
||||
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
else
|
||||
ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
|
||||
|
||||
if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
|
||||
if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
|
||||
if (vif->bss_conf.use_short_slot)
|
||||
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
|
||||
il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
|
||||
else
|
||||
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
|
||||
il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
|
||||
}
|
||||
/* need to send beacon cmd before committing assoc RXON! */
|
||||
il4965_send_beacon_cmd(il);
|
||||
/* restore RXON assoc */
|
||||
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
|
||||
il_commit_rxon(il, ctx);
|
||||
il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
|
||||
il_commit_rxon(il);
|
||||
}
|
||||
il4965_send_beacon_cmd(il);
|
||||
}
|
||||
@ -2281,7 +2201,6 @@ static struct il_hcmd_utils_ops il4965_hcmd_utils = {
|
||||
};
|
||||
|
||||
static struct il_lib_ops il4965_lib = {
|
||||
.set_hw_params = il4965_hw_set_hw_params,
|
||||
.txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
|
||||
.txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
|
||||
.txq_free_tfd = il4965_hw_txq_free_tfd,
|
||||
@ -2330,38 +2249,32 @@ static const struct il_legacy_ops il4965_legacy_ops = {
|
||||
.update_bcast_stations = il4965_update_bcast_stations,
|
||||
};
|
||||
|
||||
struct ieee80211_ops il4965_hw_ops = {
|
||||
.tx = il4965_mac_tx,
|
||||
.start = il4965_mac_start,
|
||||
.stop = il4965_mac_stop,
|
||||
.add_interface = il_mac_add_interface,
|
||||
.remove_interface = il_mac_remove_interface,
|
||||
.change_interface = il_mac_change_interface,
|
||||
.config = il_mac_config,
|
||||
.configure_filter = il4965_configure_filter,
|
||||
.set_key = il4965_mac_set_key,
|
||||
.update_tkip_key = il4965_mac_update_tkip_key,
|
||||
.conf_tx = il_mac_conf_tx,
|
||||
.reset_tsf = il_mac_reset_tsf,
|
||||
.bss_info_changed = il_mac_bss_info_changed,
|
||||
.ampdu_action = il4965_mac_ampdu_action,
|
||||
.hw_scan = il_mac_hw_scan,
|
||||
.sta_add = il4965_mac_sta_add,
|
||||
.sta_remove = il_mac_sta_remove,
|
||||
.channel_switch = il4965_mac_channel_switch,
|
||||
.tx_last_beacon = il_mac_tx_last_beacon,
|
||||
};
|
||||
|
||||
static const struct il_ops il4965_ops = {
|
||||
const struct il_ops il4965_ops = {
|
||||
.lib = &il4965_lib,
|
||||
.hcmd = &il4965_hcmd,
|
||||
.utils = &il4965_hcmd_utils,
|
||||
.led = &il4965_led_ops,
|
||||
.legacy = &il4965_legacy_ops,
|
||||
.ieee80211_ops = &il4965_hw_ops,
|
||||
};
|
||||
|
||||
static struct il_base_params il4965_base_params = {
|
||||
struct il_cfg il4965_cfg = {
|
||||
.name = "Intel(R) Wireless WiFi Link 4965AGN",
|
||||
.fw_name_pre = IL4965_FW_PRE,
|
||||
.ucode_api_max = IL4965_UCODE_API_MAX,
|
||||
.ucode_api_min = IL4965_UCODE_API_MIN,
|
||||
.sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
|
||||
.valid_tx_ant = ANT_AB,
|
||||
.valid_rx_ant = ANT_ABC,
|
||||
.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
|
||||
.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
|
||||
.mod_params = &il4965_mod_params,
|
||||
.led_mode = IL_LED_BLINK,
|
||||
/*
|
||||
* Force use of chains B and C for scan RX on 5 GHz band
|
||||
* because the device has off-channel reception on chain A.
|
||||
*/
|
||||
.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
|
||||
|
||||
.eeprom_size = IL4965_EEPROM_IMG_SIZE,
|
||||
.num_of_queues = IL49_NUM_QUEUES,
|
||||
.num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
|
||||
@ -2377,26 +2290,5 @@ static struct il_base_params il4965_base_params = {
|
||||
.chain_noise_calib_by_driver = true,
|
||||
};
|
||||
|
||||
struct il_cfg il4965_cfg = {
|
||||
.name = "Intel(R) Wireless WiFi Link 4965AGN",
|
||||
.fw_name_pre = IL4965_FW_PRE,
|
||||
.ucode_api_max = IL4965_UCODE_API_MAX,
|
||||
.ucode_api_min = IL4965_UCODE_API_MIN,
|
||||
.sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
|
||||
.valid_tx_ant = ANT_AB,
|
||||
.valid_rx_ant = ANT_ABC,
|
||||
.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
|
||||
.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
|
||||
.ops = &il4965_ops,
|
||||
.mod_params = &il4965_mod_params,
|
||||
.base_params = &il4965_base_params,
|
||||
.led_mode = IL_LED_BLINK,
|
||||
/*
|
||||
* Force use of chains B and C for scan RX on 5 GHz band
|
||||
* because the device has off-channel reception on chain A.
|
||||
*/
|
||||
.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
|
||||
};
|
||||
|
||||
/* Module firmware */
|
||||
MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
|
||||
|
@ -38,17 +38,16 @@ struct il_rxon_context;
|
||||
|
||||
/* configuration for the _4965 devices */
|
||||
extern struct il_cfg il4965_cfg;
|
||||
extern const struct il_ops il4965_ops;
|
||||
|
||||
extern struct il_mod_params il4965_mod_params;
|
||||
|
||||
extern struct ieee80211_ops il4965_hw_ops;
|
||||
|
||||
/* tx queue */
|
||||
void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
|
||||
int freed);
|
||||
|
||||
/* RXON */
|
||||
void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
void il4965_set_rxon_chain(struct il_priv *il);
|
||||
|
||||
/* uCode */
|
||||
int il4965_verify_ucode(struct il_priv *il);
|
||||
@ -134,21 +133,18 @@ il4965_get_tx_fail_reason(u32 status)
|
||||
#endif
|
||||
|
||||
/* station management */
|
||||
int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
const u8 *addr, u8 *sta_id_r);
|
||||
int il4965_alloc_bcast_station(struct il_priv *il);
|
||||
int il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r);
|
||||
int il4965_remove_default_wep_key(struct il_priv *il,
|
||||
struct il_rxon_context *ctx,
|
||||
struct ieee80211_key_conf *key);
|
||||
int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
int il4965_set_default_wep_key(struct il_priv *il,
|
||||
struct ieee80211_key_conf *key);
|
||||
int il4965_restore_default_wep_keys(struct il_priv *il,
|
||||
struct il_rxon_context *ctx);
|
||||
int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
int il4965_restore_default_wep_keys(struct il_priv *il);
|
||||
int il4965_set_dynamic_key(struct il_priv *il,
|
||||
struct ieee80211_key_conf *key, u8 sta_id);
|
||||
int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
int il4965_remove_dynamic_key(struct il_priv *il,
|
||||
struct ieee80211_key_conf *key, u8 sta_id);
|
||||
void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
void il4965_update_tkip_key(struct il_priv *il,
|
||||
struct ieee80211_key_conf *keyconf,
|
||||
struct ieee80211_sta *sta, u32 iv32,
|
||||
u16 *phase1key);
|
||||
|
File diff suppressed because it is too large
@ -143,12 +143,6 @@ struct il_queue {
* space less than this */
};

/* One for each TFD */
struct il_tx_info {
struct sk_buff *skb;
struct il_rxon_context *ctx;
};

/**
* struct il_tx_queue - Tx Queue for DMA
* @q: generic Rx/Tx queue descriptor
@ -156,7 +150,7 @@ struct il_tx_info {
* @cmd: array of command/TX buffer pointers
* @meta: array of meta data for each command/tx buffer
* @dma_addr_cmd: physical address of cmd/tx buffer array
* @txb: array of per-TFD driver data
* @skbs: array of per-TFD socket buffer pointers
* @time_stamp: time (in jiffies) of last read_ptr change
* @need_update: indicates need to update read/write idx
* @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
@ -172,7 +166,7 @@ struct il_tx_queue {
void *tfds;
struct il_device_cmd **cmd;
struct il_cmd_meta *meta;
struct il_tx_info *txb;
struct sk_buff **skbs;
unsigned long time_stamp;
u8 need_update;
u8 sched_retry;
@ -735,13 +729,12 @@ struct il_qos_info {
struct il_station_entry {
struct il_addsta_cmd sta;
struct il_tid_data tid[MAX_TID_COUNT];
u8 used, ctxid;
u8 used;
struct il_hw_key keyinfo;
struct il_link_quality_cmd *lq;
};

struct il_station_priv_common {
struct il_rxon_context *ctx;
u8 sta_id;
};

@ -752,7 +745,6 @@ struct il_station_priv_common {
* space for us to put data into.
*/
struct il_vif_priv {
struct il_rxon_context *ctx;
u8 ibss_bssid_sta_id;
};

@ -816,6 +808,7 @@ struct il_sensitivity_ranges {

/**
* struct il_hw_params
* @bcast_id: f/w broadcast station ID
* @max_txq_num: Max # Tx queues supported
* @dma_chnl_num: Number of Tx DMA/FIFO channels
* @scd_bc_tbls_size: size of scheduler byte count tables
@ -836,6 +829,7 @@ struct il_sensitivity_ranges {
* @struct il_sensitivity_ranges: range of sensitivity values
*/
struct il_hw_params {
u8 bcast_id;
u8 max_txq_num;
u8 dma_chnl_num;
u16 scd_bc_tbls_size;
|
||||
@ -1152,55 +1146,6 @@ struct il_force_reset {

struct il_rxon_context {
struct ieee80211_vif *vif;

const u8 *ac_to_fifo;
const u8 *ac_to_queue;
u8 mcast_queue;

/*
* We could use the vif to indicate active, but we
* also need it to be active during disabling when
* we already removed the vif for type setting.
*/
bool always_active, is_active;

bool ht_need_multiple_chains;

int ctxid;

u32 interface_modes, exclusive_interface_modes;
u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;

/*
* We declare this const so it can only be
* changed via explicit cast within the
* routines that actually update the physical
* hardware.
*/
const struct il_rxon_cmd active;
struct il_rxon_cmd staging;

struct il_rxon_time_cmd timing;

struct il_qos_info qos_data;

u8 bcast_sta_id, ap_sta_id;

u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
u8 qos_cmd;
u8 wep_key_cmd;

struct il_wep_key wep_keys[WEP_KEYS_MAX];
u8 key_mapping_keys;

__le32 station_flags;

struct {
bool non_gf_sta_present;
u8 protection;
bool enabled, is_40mhz;
u8 extension_chan_offset;
} ht;
};
|
||||
|
||||
struct il_power_mgr {
|
||||
@ -1217,6 +1162,7 @@ struct il_priv {
|
||||
struct ieee80211_channel *ieee_channels;
|
||||
struct ieee80211_rate *ieee_rates;
|
||||
struct il_cfg *cfg;
|
||||
const struct il_ops *ops;
|
||||
|
||||
/* temporary frame storage list */
|
||||
struct list_head free_frames;
|
||||
@ -1304,7 +1250,28 @@ struct il_priv {
|
||||
u8 ucode_write_complete; /* the image write is complete */
|
||||
char firmware_name[25];
|
||||
|
||||
struct il_rxon_context ctx;
|
||||
struct ieee80211_vif *vif;
|
||||
|
||||
struct il_qos_info qos_data;
|
||||
|
||||
struct {
|
||||
bool enabled;
|
||||
bool is_40mhz;
|
||||
bool non_gf_sta_present;
|
||||
u8 protection;
|
||||
u8 extension_chan_offset;
|
||||
} ht;
|
||||
|
||||
/*
|
||||
* We declare this const so it can only be
|
||||
* changed via explicit cast within the
|
||||
* routines that actually update the physical
|
||||
* hardware.
|
||||
*/
|
||||
const struct il_rxon_cmd active;
|
||||
struct il_rxon_cmd staging;
|
||||
|
||||
struct il_rxon_time_cmd timing;
|
||||
|
||||
__le16 switch_channel;
|
||||
|
||||
@ -1427,6 +1394,9 @@ struct il_priv {
|
||||
u8 phy_calib_chain_noise_reset_cmd;
|
||||
u8 phy_calib_chain_noise_gain_cmd;
|
||||
|
||||
u8 key_mapping_keys;
|
||||
struct il_wep_key wep_keys[WEP_KEYS_MAX];
|
||||
|
||||
struct il_notif_stats stats;
|
||||
#ifdef CONFIG_IWLEGACY_DEBUGFS
|
||||
struct il_notif_stats accum_stats;
|
||||
@ -1449,7 +1419,7 @@ struct il_priv {
|
||||
struct work_struct rx_replenish;
|
||||
struct work_struct abort_scan;
|
||||
|
||||
struct il_rxon_context *beacon_ctx;
|
||||
bool beacon_enabled;
|
||||
struct sk_buff *beacon_skb;
|
||||
|
||||
struct work_struct tx_flush;
|
||||
@ -1507,30 +1477,10 @@ il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
|
||||
clear_bit(txq_id, &il->txq_ctx_active_msk);
|
||||
}
|
||||
|
||||
static inline struct ieee80211_hdr *
|
||||
il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
|
||||
{
|
||||
if (il->txq[txq_id].txb[idx].skb)
|
||||
return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb->
|
||||
data;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct il_rxon_context *
|
||||
il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
|
||||
{
|
||||
struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
|
||||
|
||||
return vif_priv->ctx;
|
||||
}
|
||||
|
||||
#define for_each_context(il, _ctx) \
|
||||
for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
|
||||
|
||||
static inline int
|
||||
il_is_associated(struct il_priv *il)
|
||||
{
|
||||
return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
|
||||
return (il->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
@ -1539,12 +1489,6 @@ il_is_any_associated(struct il_priv *il)
|
||||
return il_is_associated(il);
|
||||
}
|
||||
|
||||
static inline int
|
||||
il_is_associated_ctx(struct il_rxon_context *ctx)
|
||||
{
|
||||
return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
il_is_channel_valid(const struct il_channel_info *ch_info)
|
||||
{
|
||||
@ -1614,10 +1558,9 @@ il_free_pages(struct il_priv *il, unsigned long page)
|
||||
#define IL_RX_BUF_SIZE_8K (8 * 1024)
|
||||
|
||||
struct il_hcmd_ops {
|
||||
int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
|
||||
int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
|
||||
void (*set_rxon_chain) (struct il_priv *il,
|
||||
struct il_rxon_context *ctx);
|
||||
int (*rxon_assoc) (struct il_priv *il);
|
||||
int (*commit_rxon) (struct il_priv *il);
|
||||
void (*set_rxon_chain) (struct il_priv *il);
|
||||
};
|
||||
|
||||
struct il_hcmd_utils_ops {
|
||||
@ -1649,8 +1592,6 @@ struct il_temp_ops {
|
||||
};
|
||||
|
||||
struct il_lib_ops {
|
||||
/* set hw dependent parameters */
|
||||
int (*set_hw_params) (struct il_priv *il);
|
||||
/* Handling TX */
|
||||
void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
|
||||
struct il_tx_queue *txq,
|
||||
@ -1712,7 +1653,6 @@ struct il_ops {
|
||||
const struct il_led_ops *led;
|
||||
const struct il_nic_ops *nic;
|
||||
const struct il_legacy_ops *legacy;
|
||||
const struct ieee80211_ops *ieee80211_ops;
|
||||
};
|
||||
|
||||
struct il_mod_params {
|
||||
@ -1739,21 +1679,6 @@ struct il_mod_params {
|
||||
* chain noise calibration operation
|
||||
*/
|
||||
struct il_base_params {
|
||||
int eeprom_size;
|
||||
int num_of_queues; /* def: HW dependent */
|
||||
int num_of_ampdu_queues; /* def: HW dependent */
|
||||
/* for il_apm_init() */
|
||||
u32 pll_cfg_val;
|
||||
bool set_l0s;
|
||||
bool use_bsm;
|
||||
|
||||
u16 led_compensation;
|
||||
int chain_noise_num_beacons;
|
||||
unsigned int wd_timeout;
|
||||
bool temperature_kelvin;
|
||||
const bool ucode_tracing;
|
||||
const bool sensitivity_calib_by_driver;
|
||||
const bool chain_noise_calib_by_driver;
|
||||
};
|
||||
|
||||
#define IL_LED_SOLID 11
|
||||
@ -1821,7 +1746,6 @@ struct il_cfg {
|
||||
unsigned int sku;
|
||||
u16 eeprom_ver;
|
||||
u16 eeprom_calib_ver;
|
||||
const struct il_ops *ops;
|
||||
/* module based parameters which can be set from modprobe cmd */
|
||||
const struct il_mod_params *mod_params;
|
||||
/* params not likely to change within a device family */
|
||||
@ -1829,31 +1753,43 @@ struct il_cfg {
|
||||
/* params likely to change within a device family */
|
||||
u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
|
||||
enum il_led_mode led_mode;
|
||||
|
||||
int eeprom_size;
|
||||
int num_of_queues; /* def: HW dependent */
|
||||
int num_of_ampdu_queues; /* def: HW dependent */
|
||||
/* for il_apm_init() */
|
||||
u32 pll_cfg_val;
|
||||
bool set_l0s;
|
||||
bool use_bsm;
|
||||
|
||||
u16 led_compensation;
|
||||
int chain_noise_num_beacons;
|
||||
unsigned int wd_timeout;
|
||||
bool temperature_kelvin;
|
||||
const bool ucode_tracing;
|
||||
const bool sensitivity_calib_by_driver;
|
||||
const bool chain_noise_calib_by_driver;
|
||||
};
|
||||
|
||||
/***************************
|
||||
* L i b *
|
||||
***************************/
|
||||
|
||||
struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
|
||||
int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
u16 queue, const struct ieee80211_tx_queue_params *params);
|
||||
int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
|
||||
|
||||
void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
int hw_decrypt);
|
||||
int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
|
||||
struct il_rxon_context *ctx);
|
||||
void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
enum ieee80211_band band, struct ieee80211_vif *vif);
|
||||
void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt);
|
||||
int il_check_rxon_cmd(struct il_priv *il);
|
||||
int il_full_rxon_required(struct il_priv *il);
|
||||
int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
|
||||
void il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
|
||||
struct ieee80211_vif *vif);
|
||||
u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
|
||||
void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
|
||||
bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
bool il_is_ht40_tx_allowed(struct il_priv *il,
|
||||
struct ieee80211_sta_ht_cap *ht_cap);
|
||||
void il_connection_init_rx_config(struct il_priv *il,
|
||||
struct il_rxon_context *ctx);
|
||||
void il_connection_init_rx_config(struct il_priv *il);
|
||||
void il_set_rate(struct il_priv *il);
|
||||
int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
|
||||
u32 decrypt_res, struct ieee80211_rx_status *stats);
|
||||
@ -1956,7 +1892,7 @@ int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
|
||||
* Rate
|
||||
******************************************************************************/
|
||||
|
||||
u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
u8 il_get_lowest_plcp(struct il_priv *il);
|
||||
|
||||
/*******************************************************************************
|
||||
* Scanning
|
||||
@ -2043,10 +1979,10 @@ extern const struct dev_pm_ops il_pm_ops;
|
||||
******************************************************/
|
||||
void il4965_dump_nic_error_log(struct il_priv *il);
|
||||
#ifdef CONFIG_IWLEGACY_DEBUG
|
||||
void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
void il_print_rx_config_cmd(struct il_priv *il);
|
||||
#else
|
||||
static inline void
|
||||
il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
il_print_rx_config_cmd(struct il_priv *il)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
@ -2135,17 +2071,18 @@ extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
|
||||
void il_apm_stop(struct il_priv *il);
|
||||
int il_apm_init(struct il_priv *il);
|
||||
|
||||
int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
int il_send_rxon_timing(struct il_priv *il);
|
||||
|
||||
static inline int
|
||||
il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
il_send_rxon_assoc(struct il_priv *il)
|
||||
{
|
||||
return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
|
||||
return il->ops->hcmd->rxon_assoc(il);
|
||||
}
|
||||
|
||||
static inline int
|
||||
il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
|
||||
il_commit_rxon(struct il_priv *il)
|
||||
{
|
||||
return il->cfg->ops->hcmd->commit_rxon(il, ctx);
|
||||
return il->ops->hcmd->commit_rxon(il);
|
||||
}
|
||||
|
||||
static inline const struct ieee80211_supported_band *
|
||||
@ -2303,23 +2240,22 @@ il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
|
||||
(this is for the IBSS BSSID stations) */
|
||||
#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */
|
||||
|
||||
void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
|
||||
void il_restore_stations(struct il_priv *il);
|
||||
void il_clear_ucode_stations(struct il_priv *il);
|
||||
void il_dealloc_bcast_stations(struct il_priv *il);
|
||||
int il_get_free_ucode_key_idx(struct il_priv *il);
|
||||
int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
|
||||
int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
const u8 *addr, bool is_ap,
|
||||
int il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
|
||||
struct ieee80211_sta *sta, u8 *sta_id_r);
|
||||
int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
|
||||
int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
|
||||
u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
|
||||
u8 il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
|
||||
struct ieee80211_sta *sta);
|
||||
|
||||
int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
|
||||
struct il_link_quality_cmd *lq, u8 flags, bool init);
|
||||
int il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
|
||||
u8 flags, bool init);
|
||||
|
||||
/**
|
||||
* il_clear_driver_stations - clear knowledge of all stations from driver
|
||||
@ -2334,24 +2270,11 @@ static inline void
|
||||
il_clear_driver_stations(struct il_priv *il)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
|
||||
spin_lock_irqsave(&il->sta_lock, flags);
|
||||
memset(il->stations, 0, sizeof(il->stations));
|
||||
il->num_stations = 0;
|
||||
|
||||
il->ucode_key_table = 0;
|
||||
|
||||
/*
|
||||
* Remove all key information that is not stored as part
|
||||
* of station information since mac80211 may not have had
|
||||
* a chance to remove all the keys. When device is
|
||||
* reconfigured by mac80211 after an error all keys will
|
||||
* be reconfigured.
|
||||
*/
|
||||
memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
|
||||
ctx->key_mapping_keys = 0;
|
||||
|
||||
spin_unlock_irqrestore(&il->sta_lock, flags);
|
||||
}
|
||||
|
||||
@ -2376,13 +2299,12 @@ il_sta_id(struct ieee80211_sta *sta)
|
||||
* inline wraps that pattern.
|
||||
*/
|
||||
static inline int
|
||||
il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
|
||||
struct ieee80211_sta *sta)
|
||||
il_sta_id_or_broadcast(struct il_priv *il, struct ieee80211_sta *sta)
|
||||
{
|
||||
int sta_id;
|
||||
|
||||
if (!sta)
|
||||
return context->bcast_sta_id;
|
||||
return il->hw_params.bcast_id;
|
||||
|
||||
sta_id = il_sta_id(sta);
|
||||
|
||||
|
@ -361,7 +361,7 @@ il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
|
||||
const u8 *ptr;
|
||||
char *buf;
|
||||
u16 eeprom_ver;
|
||||
size_t eeprom_len = il->cfg->base_params->eeprom_size;
|
||||
size_t eeprom_len = il->cfg->eeprom_size;
|
||||
buf_size = 4 * eeprom_len + 256;
|
||||
|
||||
if (eeprom_len % 16) {
|
||||
@ -644,12 +644,10 @@ il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
|
||||
loff_t *ppos)
|
||||
{
|
||||
struct il_priv *il = file->private_data;
|
||||
struct il_rxon_context *ctx = &il->ctx;
|
||||
int pos = 0, i;
|
||||
char buf[256];
|
||||
const size_t bufsz = sizeof(buf);
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
|
||||
for (i = 0; i < AC_NUM; i++) {
|
||||
pos +=
|
||||
scnprintf(buf + pos, bufsz - pos,
|
||||
@ -657,10 +655,10 @@ il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
|
||||
pos +=
|
||||
scnprintf(buf + pos, bufsz - pos,
|
||||
"AC[%d]\t%u\t%u\t%u\t%u\n", i,
|
||||
ctx->qos_data.def_qos_parm.ac[i].cw_min,
|
||||
ctx->qos_data.def_qos_parm.ac[i].cw_max,
|
||||
ctx->qos_data.def_qos_parm.ac[i].aifsn,
|
||||
ctx->qos_data.def_qos_parm.ac[i].edca_txop);
|
||||
il->qos_data.def_qos_parm.ac[i].cw_min,
|
||||
il->qos_data.def_qos_parm.ac[i].cw_max,
|
||||
il->qos_data.def_qos_parm.ac[i].aifsn,
|
||||
il->qos_data.def_qos_parm.ac[i].edca_txop);
|
||||
}
|
||||
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
@ -729,7 +727,7 @@ il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
|
||||
char *buf;
|
||||
int bufsz =
|
||||
((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
|
||||
(il->cfg->base_params->num_of_queues * 32 * 8) + 400;
|
||||
(il->cfg->num_of_queues * 32 * 8) + 400;
|
||||
const u8 *ptr;
|
||||
ssize_t ret;
|
||||
|
||||
@ -835,7 +833,7 @@ il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
|
||||
int cnt;
|
||||
int ret;
|
||||
const size_t bufsz =
|
||||
sizeof(char) * 64 * il->cfg->base_params->num_of_queues;
|
||||
sizeof(char) * 64 * il->cfg->num_of_queues;
|
||||
|
||||
if (!il->txq) {
|
||||
IL_ERR("txq not ready\n");
|
||||
@ -903,8 +901,7 @@ il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct il_priv *il = file->private_data;
|
||||
return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
|
||||
count, ppos);
|
||||
return il->ops->lib->debugfs_ops.rx_stats_read(file, user_buf, count, ppos);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
@ -912,8 +909,7 @@ il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct il_priv *il = file->private_data;
|
||||
return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
|
||||
count, ppos);
|
||||
return il->ops->lib->debugfs_ops.tx_stats_read(file, user_buf, count, ppos);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
@ -921,8 +917,7 @@ il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct il_priv *il = file->private_data;
|
||||
return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
|
||||
count, ppos);
|
||||
return il->ops->lib->debugfs_ops.general_stats_read(file, user_buf, count, ppos);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
@ -1153,7 +1148,7 @@ il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
|
||||
int len = 0;
|
||||
char buf[20];
|
||||
|
||||
len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
|
||||
len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.flags));
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
||||
}
|
||||
|
||||
@ -1167,7 +1162,7 @@ il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
|
||||
char buf[20];
|
||||
|
||||
len =
|
||||
sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
|
||||
sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags));
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
||||
}
|
||||
|
||||
@ -1180,8 +1175,8 @@ il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
|
||||
int pos = 0;
|
||||
ssize_t ret = -EFAULT;
|
||||
|
||||
if (il->cfg->ops->lib->dump_fh) {
|
||||
ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
|
||||
if (il->ops->lib->dump_fh) {
|
||||
ret = pos = il->ops->lib->dump_fh(il, &buf, true);
|
||||
if (buf) {
|
||||
ret =
|
||||
simple_read_from_buffer(user_buf, count, ppos, buf,
|
||||
@ -1298,7 +1293,7 @@ il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
|
||||
if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
|
||||
timeout = IL_DEF_WD_TIMEOUT;
|
||||
|
||||
il->cfg->base_params->wd_timeout = timeout;
|
||||
il->cfg->wd_timeout = timeout;
|
||||
il_setup_watchdog(il);
|
||||
return count;
|
||||
}
|
||||
@ -1372,17 +1367,17 @@ il_dbgfs_register(struct il_priv *il, const char *name)
|
||||
DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
|
||||
|
||||
if (il->cfg->base_params->sensitivity_calib_by_driver)
|
||||
if (il->cfg->sensitivity_calib_by_driver)
|
||||
DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
|
||||
if (il->cfg->base_params->chain_noise_calib_by_driver)
|
||||
if (il->cfg->chain_noise_calib_by_driver)
|
||||
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
|
||||
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
|
||||
DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
|
||||
if (il->cfg->base_params->sensitivity_calib_by_driver)
|
||||
if (il->cfg->sensitivity_calib_by_driver)
|
||||
DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
|
||||
&il->disable_sens_cal);
|
||||
if (il->cfg->base_params->chain_noise_calib_by_driver)
|
||||
if (il->cfg->chain_noise_calib_by_driver)
|
||||
DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
|
||||
&il->disable_chain_noise_cal);
|
||||
DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
|
||||
|
@ -84,13 +84,13 @@ static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
|
||||
static void iwl1000_nic_config(struct iwl_priv *priv)
|
||||
{
|
||||
/* set CSR_HW_CONFIG_REG for uCode use */
|
||||
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
|
||||
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
|
||||
|
||||
/* Setting digital SVR for 1000 card to 1.32V */
|
||||
/* locking is acquired in iwl_set_bits_mask_prph() function */
|
||||
iwl_set_bits_mask_prph(bus(priv), APMG_DIGITAL_SVR_REG,
|
||||
iwl_set_bits_mask_prph(trans(priv), APMG_DIGITAL_SVR_REG,
|
||||
APMG_SVR_DIGITAL_VOLTAGE_1_32,
|
||||
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
|
||||
}
|
||||
@ -128,8 +128,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
iwlagn_mod_params.num_of_queues;
|
||||
|
||||
hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
|
||||
|
||||
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
|
||||
hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
|
||||
|
||||
|
@ -87,7 +87,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
|
||||
iwl_rf_config(priv);
|
||||
|
||||
if (cfg(priv)->iq_invert)
|
||||
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
|
||||
iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
|
||||
}
|
||||
|
||||
@ -124,8 +124,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
iwlagn_mod_params.num_of_queues;
|
||||
|
||||
hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
|
||||
|
||||
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
|
||||
hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
|
||||
|
||||
|
@ -73,7 +73,7 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
|
||||
* (PCIe power is lost before PERST# is asserted),
|
||||
* causing ME FW to lose ownership and not being able to obtain it back.
|
||||
*/
|
||||
iwl_set_bits_mask_prph(bus(priv), APMG_PS_CTRL_REG,
|
||||
iwl_set_bits_mask_prph(trans(priv), APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
|
||||
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
|
||||
|
||||
@ -170,8 +170,6 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
iwlagn_mod_params.num_of_queues;
|
||||
|
||||
hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
|
||||
|
||||
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
|
||||
hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
|
||||
|
||||
@ -199,8 +197,6 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
|
||||
iwlagn_mod_params.num_of_queues;
|
||||
|
||||
hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
|
||||
|
||||
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
|
||||
hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
|
||||
|
||||
|
@ -82,7 +82,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
|
||||
{
|
||||
/* Indicate calibration version to uCode. */
|
||||
if (iwl_eeprom_calib_version(priv->shrd) >= 6)
|
||||
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
|
||||
iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
|
||||
}
|
||||
|
||||
@ -90,9 +90,9 @@ static void iwl6150_additional_nic_config(struct iwl_priv *priv)
|
||||
{
|
||||
/* Indicate calibration version to uCode. */
|
||||
if (iwl_eeprom_calib_version(priv->shrd) >= 6)
|
||||
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
|
||||
iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
|
||||
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
|
||||
iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_6050_1x2);
|
||||
}
|
||||
|
||||
@ -104,7 +104,7 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
|
||||
/* no locking required for register write */
|
||||
if (cfg(priv)->pa_type == IWL_PA_INTERNAL) {
|
||||
/* 2x2 IPA phy type */
|
||||
iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
|
||||
iwl_write32(trans(priv), CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
|
||||
}
|
||||
/* do additional nic configuration if needed */
|
||||
@ -145,8 +145,6 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
iwlagn_mod_params.num_of_queues;
|
||||
|
||||
hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
|
||||
|
||||
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
|
||||
hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
|
||||
|
||||
|
@ -628,16 +628,16 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
|
||||
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
|
||||
CT_CARD_DISABLED)) {
|
||||
|
||||
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
|
||||
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
|
||||
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
|
||||
|
||||
iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
|
||||
iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C,
|
||||
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
|
||||
|
||||
if (!(flags & RXON_CARD_DISABLED)) {
|
||||
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
|
||||
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
|
||||
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
|
||||
iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
|
||||
iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C,
|
||||
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
|
||||
}
|
||||
if (flags & CT_CARD_DISABLED)
|
||||
|
@ -178,19 +178,19 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
|
||||
|
||||
if (tt->state == IWL_TI_CT_KILL) {
|
||||
if (priv->thermal_throttle.ct_kill_toggle) {
|
||||
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
|
||||
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
|
||||
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
|
||||
priv->thermal_throttle.ct_kill_toggle = false;
|
||||
} else {
|
||||
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
|
||||
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
|
||||
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
|
||||
priv->thermal_throttle.ct_kill_toggle = true;
|
||||
}
|
||||
iwl_read32(bus(priv), CSR_UCODE_DRV_GP1);
|
||||
spin_lock_irqsave(&bus(priv)->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(bus(priv)))
|
||||
iwl_release_nic_access(bus(priv));
|
||||
spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
|
||||
iwl_read32(trans(priv), CSR_UCODE_DRV_GP1);
|
||||
spin_lock_irqsave(&trans(priv)->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(trans(priv)))
|
||||
iwl_release_nic_access(trans(priv));
|
||||
spin_unlock_irqrestore(&trans(priv)->reg_lock, flags);
|
||||
|
||||
/* Reschedule the ct_kill timer to occur in
|
||||
* CT_KILL_EXIT_DURATION seconds to ensure we get a
|
||||
|
@ -34,7 +34,6 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/if_arp.h>
|
||||
|
||||
@ -42,6 +41,7 @@
|
||||
|
||||
#include <asm/div64.h>
|
||||
|
||||
#include "iwl-ucode.h"
|
||||
#include "iwl-eeprom.h"
|
||||
#include "iwl-wifi.h"
|
||||
#include "iwl-dev.h"
|
||||
@ -328,14 +328,14 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
|
||||
ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
|
||||
|
||||
/* Make sure device is powered up for SRAM reads */
|
||||
spin_lock_irqsave(&bus(priv)->reg_lock, reg_flags);
|
||||
if (iwl_grab_nic_access(bus(priv))) {
|
||||
spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
|
||||
spin_lock_irqsave(&trans(priv)->reg_lock, reg_flags);
|
||||
if (iwl_grab_nic_access(trans(priv))) {
|
||||
spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Set starting address; reads will auto-increment */
|
||||
iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, ptr);
|
||||
iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, ptr);
|
||||
rmb();
|
||||
|
||||
/*
|
||||
@ -352,19 +352,19 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
|
||||
* place event id # at far right for easier visual parsing.
|
||||
*/
|
||||
for (i = 0; i < num_events; i++) {
|
||||
ev = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
|
||||
time = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
|
||||
ev = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
|
||||
time = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
|
||||
if (mode == 0) {
|
||||
trace_iwlwifi_dev_ucode_cont_event(priv, 0, time, ev);
|
||||
} else {
|
||||
data = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
|
||||
data = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
|
||||
trace_iwlwifi_dev_ucode_cont_event(priv, time,
|
||||
data, ev);
|
||||
}
|
||||
}
|
||||
/* Allow device to power down */
|
||||
iwl_release_nic_access(bus(priv));
|
||||
spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
|
||||
iwl_release_nic_access(trans(priv));
|
||||
spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags);
|
||||
}
|
||||
|
||||
static void iwl_continuous_event_trace(struct iwl_priv *priv)
|
||||
@ -383,7 +383,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
|
||||
|
||||
base = priv->shrd->device_pointers.log_event_table;
|
||||
if (iwlagn_hw_valid_rtc_data_addr(base)) {
|
||||
iwl_read_targ_mem_words(bus(priv), base, &read, sizeof(read));
|
||||
iwl_read_targ_mem_words(trans(priv), base, &read, sizeof(read));
|
||||
|
||||
capacity = read.capacity;
|
||||
mode = read.mode;
|
||||
@ -490,7 +490,7 @@ static void iwl_bg_tx_flush(struct work_struct *work)
|
||||
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
|
||||
}
|
||||
|
||||
static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
|
||||
void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
|
||||
{
|
||||
int i;
|
||||
|
||||
@ -513,6 +513,7 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
|
||||
priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
|
||||
BIT(NL80211_IFTYPE_ADHOC);
|
||||
priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
|
||||
@ -547,609 +548,6 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
|
||||
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
|
||||
}
|
||||
|
||||
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
|
||||
|
||||
#define UCODE_EXPERIMENTAL_INDEX 100
|
||||
#define UCODE_EXPERIMENTAL_TAG "exp"
|
||||
|
||||
static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
|
||||
{
|
||||
const char *name_pre = cfg(priv)->fw_name_pre;
|
||||
char tag[8];
|
||||
|
||||
if (first) {
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
|
||||
priv->fw_index = UCODE_EXPERIMENTAL_INDEX;
|
||||
strcpy(tag, UCODE_EXPERIMENTAL_TAG);
|
||||
} else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
|
||||
#endif
|
||||
priv->fw_index = cfg(priv)->ucode_api_max;
|
||||
sprintf(tag, "%d", priv->fw_index);
|
||||
} else {
|
||||
priv->fw_index--;
|
||||
sprintf(tag, "%d", priv->fw_index);
|
||||
}
|
||||
|
||||
if (priv->fw_index < cfg(priv)->ucode_api_min) {
|
||||
IWL_ERR(priv, "no suitable firmware found!\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
|
||||
|
||||
IWL_DEBUG_INFO(priv, "attempting to load firmware %s'%s'\n",
|
||||
(priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
|
||||
? "EXPERIMENTAL " : "",
|
||||
priv->firmware_name);
|
||||
|
||||
return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
|
||||
bus(priv)->dev,
|
||||
GFP_KERNEL, priv, iwl_ucode_callback);
|
||||
}
|
||||
|
||||
struct iwlagn_firmware_pieces {
|
||||
const void *inst, *data, *init, *init_data, *wowlan_inst, *wowlan_data;
|
||||
size_t inst_size, data_size, init_size, init_data_size,
|
||||
wowlan_inst_size, wowlan_data_size;
|
||||
|
||||
u32 build;
|
||||
|
||||
u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
|
||||
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
|
||||
};
|
||||
|
||||
static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
|
||||
const struct firmware *ucode_raw,
|
||||
struct iwlagn_firmware_pieces *pieces)
|
||||
{
|
||||
struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
|
||||
u32 api_ver, hdr_size;
|
||||
const u8 *src;
|
||||
|
||||
priv->ucode_ver = le32_to_cpu(ucode->ver);
|
||||
api_ver = IWL_UCODE_API(priv->ucode_ver);
|
||||
|
||||
switch (api_ver) {
|
||||
default:
|
||||
hdr_size = 28;
|
||||
if (ucode_raw->size < hdr_size) {
|
||||
IWL_ERR(priv, "File size too small!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pieces->build = le32_to_cpu(ucode->u.v2.build);
|
||||
pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
|
||||
pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
|
||||
pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
|
||||
pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
|
||||
src = ucode->u.v2.data;
|
||||
break;
|
||||
case 0:
|
||||
case 1:
|
||||
case 2:
|
||||
hdr_size = 24;
|
||||
if (ucode_raw->size < hdr_size) {
|
||||
IWL_ERR(priv, "File size too small!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pieces->build = 0;
|
||||
pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
|
||||
pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
|
||||
pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
|
||||
pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
|
||||
src = ucode->u.v1.data;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Verify size of file vs. image size info in file's header */
|
||||
if (ucode_raw->size != hdr_size + pieces->inst_size +
|
||||
pieces->data_size + pieces->init_size +
|
||||
pieces->init_data_size) {
|
||||
|
||||
IWL_ERR(priv,
|
||||
"uCode file size %d does not match expected size\n",
|
||||
(int)ucode_raw->size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pieces->inst = src;
|
||||
src += pieces->inst_size;
|
||||
pieces->data = src;
|
||||
src += pieces->data_size;
|
||||
pieces->init = src;
|
||||
src += pieces->init_size;
|
||||
pieces->init_data = src;
|
||||
src += pieces->init_data_size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iwlagn_load_firmware(struct iwl_priv *priv,
|
||||
const struct firmware *ucode_raw,
|
||||
struct iwlagn_firmware_pieces *pieces,
|
||||
struct iwlagn_ucode_capabilities *capa)
|
||||
{
|
||||
struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
|
||||
struct iwl_ucode_tlv *tlv;
|
||||
size_t len = ucode_raw->size;
|
||||
const u8 *data;
|
||||
int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
|
||||
int tmp;
|
||||
u64 alternatives;
|
||||
u32 tlv_len;
|
||||
enum iwl_ucode_tlv_type tlv_type;
|
||||
const u8 *tlv_data;
|
||||
|
||||
if (len < sizeof(*ucode)) {
|
||||
IWL_ERR(priv, "uCode has invalid length: %zd\n", len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
|
||||
IWL_ERR(priv, "invalid uCode magic: 0X%x\n",
|
||||
le32_to_cpu(ucode->magic));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check which alternatives are present, and "downgrade"
|
||||
* when the chosen alternative is not present, warning
|
||||
* the user when that happens. Some files may not have
|
||||
* any alternatives, so don't warn in that case.
|
||||
*/
|
||||
alternatives = le64_to_cpu(ucode->alternatives);
|
||||
tmp = wanted_alternative;
|
||||
if (wanted_alternative > 63)
|
||||
wanted_alternative = 63;
|
||||
while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
|
||||
wanted_alternative--;
|
||||
if (wanted_alternative && wanted_alternative != tmp)
|
||||
IWL_WARN(priv,
|
||||
"uCode alternative %d not available, choosing %d\n",
|
||||
tmp, wanted_alternative);
|
||||
|
||||
priv->ucode_ver = le32_to_cpu(ucode->ver);
|
||||
pieces->build = le32_to_cpu(ucode->build);
|
||||
data = ucode->data;
|
||||
|
||||
len -= sizeof(*ucode);
|
||||
|
||||
while (len >= sizeof(*tlv)) {
|
||||
u16 tlv_alt;
|
||||
|
||||
len -= sizeof(*tlv);
|
||||
tlv = (void *)data;
|
||||
|
||||
tlv_len = le32_to_cpu(tlv->length);
|
||||
tlv_type = le16_to_cpu(tlv->type);
|
||||
tlv_alt = le16_to_cpu(tlv->alternative);
|
||||
tlv_data = tlv->data;
|
||||
|
||||
if (len < tlv_len) {
|
||||
IWL_ERR(priv, "invalid TLV len: %zd/%u\n",
|
||||
len, tlv_len);
|
||||
return -EINVAL;
|
||||
}
|
||||
len -= ALIGN(tlv_len, 4);
|
||||
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
|
||||
|
||||
/*
|
||||
* Alternative 0 is always valid.
|
||||
*
|
||||
* Skip alternative TLVs that are not selected.
|
||||
*/
|
||||
if (tlv_alt != 0 && tlv_alt != wanted_alternative)
|
||||
continue;
|
||||
|
||||
switch (tlv_type) {
|
||||
case IWL_UCODE_TLV_INST:
|
||||
pieces->inst = tlv_data;
|
||||
pieces->inst_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_DATA:
|
||||
pieces->data = tlv_data;
|
||||
pieces->data_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_INIT:
|
||||
pieces->init = tlv_data;
|
||||
pieces->init_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_INIT_DATA:
|
||||
pieces->init_data = tlv_data;
|
||||
pieces->init_data_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_BOOT:
|
||||
IWL_ERR(priv, "Found unexpected BOOT ucode\n");
|
||||
break;
|
||||
case IWL_UCODE_TLV_PROBE_MAX_LEN:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
capa->max_probe_length =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_PAN:
|
||||
if (tlv_len)
|
||||
goto invalid_tlv_len;
|
||||
capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
|
||||
break;
|
||||
case IWL_UCODE_TLV_FLAGS:
|
||||
/* must be at least one u32 */
|
||||
if (tlv_len < sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
/* and a proper number of u32s */
|
||||
if (tlv_len % sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
/*
|
||||
* This driver only reads the first u32 as
|
||||
* right now no more features are defined,
|
||||
* if that changes then either the driver
|
||||
* will not work with the new firmware, or
|
||||
* it'll not take advantage of new features.
|
||||
*/
|
||||
capa->flags = le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->init_evtlog_ptr =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->init_evtlog_size =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->init_errlog_ptr =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->inst_evtlog_ptr =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->inst_evtlog_size =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->inst_errlog_ptr =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
|
||||
if (tlv_len)
|
||||
goto invalid_tlv_len;
|
||||
priv->enhance_sensitivity_table = true;
|
||||
break;
|
||||
case IWL_UCODE_TLV_WOWLAN_INST:
|
||||
pieces->wowlan_inst = tlv_data;
|
||||
pieces->wowlan_inst_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_WOWLAN_DATA:
|
||||
pieces->wowlan_data = tlv_data;
|
||||
pieces->wowlan_data_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
capa->standard_phy_calibration_size =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
default:
|
||||
IWL_DEBUG_INFO(priv, "unknown TLV: %d\n", tlv_type);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (len) {
|
||||
IWL_ERR(priv, "invalid TLV after parsing: %zd\n", len);
|
||||
iwl_print_hex_dump(priv, IWL_DL_FW, (u8 *)data, len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
invalid_tlv_len:
|
||||
IWL_ERR(priv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
|
||||
iwl_print_hex_dump(priv, IWL_DL_FW, tlv_data, tlv_len);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* iwl_ucode_callback - callback when firmware was loaded
|
||||
*
|
||||
* If loaded successfully, copies the firmware into buffers
|
||||
* for the card to fetch (via DMA).
|
||||
*/
|
||||
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
|
||||
{
|
||||
struct iwl_priv *priv = context;
|
||||
struct iwl_ucode_header *ucode;
|
||||
int err;
|
||||
struct iwlagn_firmware_pieces pieces;
|
||||
const unsigned int api_max = cfg(priv)->ucode_api_max;
|
||||
unsigned int api_ok = cfg(priv)->ucode_api_ok;
|
||||
const unsigned int api_min = cfg(priv)->ucode_api_min;
|
||||
u32 api_ver;
|
||||
char buildstr[25];
|
||||
u32 build;
|
||||
struct iwlagn_ucode_capabilities ucode_capa = {
|
||||
.max_probe_length = 200,
|
||||
.standard_phy_calibration_size =
|
||||
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE,
|
||||
};
|
||||
|
||||
if (!api_ok)
|
||||
api_ok = api_max;
|
||||
|
||||
memset(&pieces, 0, sizeof(pieces));
|
||||
|
||||
if (!ucode_raw) {
|
||||
if (priv->fw_index <= api_ok)
|
||||
IWL_ERR(priv,
|
||||
"request for firmware file '%s' failed.\n",
|
||||
priv->firmware_name);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
|
||||
priv->firmware_name, ucode_raw->size);
|
||||
|
||||
/* Make sure that we got at least the API version number */
|
||||
if (ucode_raw->size < 4) {
|
||||
IWL_ERR(priv, "File size way too small!\n");
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
/* Data from ucode file: header followed by uCode images */
|
||||
ucode = (struct iwl_ucode_header *)ucode_raw->data;
|
||||
|
||||
if (ucode->ver)
|
||||
err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
|
||||
else
|
||||
err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
|
||||
&ucode_capa);
|
||||
|
||||
if (err)
|
||||
goto try_again;
|
||||
|
||||
api_ver = IWL_UCODE_API(priv->ucode_ver);
|
||||
build = pieces.build;
|
||||
|
||||
/*
|
||||
* api_ver should match the api version forming part of the
|
||||
* firmware filename ... but we don't check for that and only rely
|
||||
* on the API version read from firmware header from here on forward
|
||||
*/
|
||||
/* no api version check required for experimental uCode */
|
||||
if (priv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
|
||||
if (api_ver < api_min || api_ver > api_max) {
|
||||
IWL_ERR(priv,
|
||||
"Driver unable to support your firmware API. "
|
||||
"Driver supports v%u, firmware is v%u.\n",
|
||||
api_max, api_ver);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
if (api_ver < api_ok) {
|
||||
if (api_ok != api_max)
|
||||
IWL_ERR(priv, "Firmware has old API version, "
|
||||
"expected v%u through v%u, got v%u.\n",
|
||||
api_ok, api_max, api_ver);
|
||||
else
|
||||
IWL_ERR(priv, "Firmware has old API version, "
|
||||
"expected v%u, got v%u.\n",
|
||||
api_max, api_ver);
|
||||
IWL_ERR(priv, "New firmware can be obtained from "
|
||||
"http://www.intellinuxwireless.org/.\n");
|
||||
}
|
||||
}
|
||||
|
||||
if (build)
|
||||
sprintf(buildstr, " build %u%s", build,
|
||||
(priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
|
||||
? " (EXP)" : "");
|
||||
else
|
||||
buildstr[0] = '\0';
|
||||
|
||||
IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
|
||||
IWL_UCODE_MAJOR(priv->ucode_ver),
|
||||
IWL_UCODE_MINOR(priv->ucode_ver),
|
||||
IWL_UCODE_API(priv->ucode_ver),
|
||||
IWL_UCODE_SERIAL(priv->ucode_ver),
|
||||
buildstr);
|
||||
|
||||
snprintf(priv->hw->wiphy->fw_version,
|
||||
sizeof(priv->hw->wiphy->fw_version),
|
||||
"%u.%u.%u.%u%s",
|
||||
IWL_UCODE_MAJOR(priv->ucode_ver),
|
||||
IWL_UCODE_MINOR(priv->ucode_ver),
|
||||
IWL_UCODE_API(priv->ucode_ver),
|
||||
IWL_UCODE_SERIAL(priv->ucode_ver),
|
||||
buildstr);
|
||||
|
||||
/*
|
||||
* For any of the failures below (before allocating pci memory)
|
||||
* we will try to load a version with a smaller API -- maybe the
|
||||
* user just got a corrupted version of the latest API.
|
||||
*/
|
||||
|
||||
IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
|
||||
priv->ucode_ver);
|
||||
IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
|
||||
pieces.inst_size);
|
||||
IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
|
||||
pieces.data_size);
|
||||
IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
|
||||
pieces.init_size);
|
||||
IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
|
||||
pieces.init_data_size);
|
||||
|
||||
/* Verify that uCode images will fit in card's SRAM */
|
||||
if (pieces.inst_size > hw_params(priv).max_inst_size) {
|
||||
IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
|
||||
pieces.inst_size);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
if (pieces.data_size > hw_params(priv).max_data_size) {
|
||||
IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
|
||||
pieces.data_size);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
if (pieces.init_size > hw_params(priv).max_inst_size) {
|
||||
IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
|
||||
pieces.init_size);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
if (pieces.init_data_size > hw_params(priv).max_data_size) {
|
||||
IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
|
||||
pieces.init_data_size);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
/* Allocate ucode buffers for card's bus-master loading ... */
|
||||
|
||||
/* Runtime instructions and 2 copies of data:
|
||||
* 1) unmodified from disk
|
||||
* 2) backup cache for save/restore during power-downs */
|
||||
if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.code,
|
||||
pieces.inst, pieces.inst_size))
|
||||
goto err_pci_alloc;
|
||||
if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.data,
|
||||
pieces.data, pieces.data_size))
|
||||
goto err_pci_alloc;
|
||||
|
||||
/* Initialization instructions and data */
|
||||
if (pieces.init_size && pieces.init_data_size) {
|
||||
if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.code,
|
||||
pieces.init, pieces.init_size))
|
||||
goto err_pci_alloc;
|
||||
if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.data,
|
||||
pieces.init_data, pieces.init_data_size))
|
||||
goto err_pci_alloc;
|
||||
}
|
||||
|
||||
/* WoWLAN instructions and data */
|
||||
if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
|
||||
if (iwl_alloc_fw_desc(bus(priv),
|
||||
&trans(priv)->ucode_wowlan.code,
|
||||
pieces.wowlan_inst,
|
||||
pieces.wowlan_inst_size))
|
||||
goto err_pci_alloc;
|
||||
if (iwl_alloc_fw_desc(bus(priv),
|
||||
&trans(priv)->ucode_wowlan.data,
|
||||
pieces.wowlan_data,
|
||||
pieces.wowlan_data_size))
|
||||
goto err_pci_alloc;
|
||||
}
|
||||
|
||||
/* Now that we can no longer fail, copy information */
|
||||
|
||||
/*
|
||||
* The (size - 16) / 12 formula is based on the information recorded
|
||||
* for each event, which is of mode 1 (including timestamp) for all
|
||||
* new microcodes that include this information.
|
||||
*/
|
||||
priv->init_evtlog_ptr = pieces.init_evtlog_ptr;
|
||||
if (pieces.init_evtlog_size)
|
||||
priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
|
||||
else
|
||||
priv->init_evtlog_size =
|
||||
cfg(priv)->base_params->max_event_log_size;
|
||||
priv->init_errlog_ptr = pieces.init_errlog_ptr;
|
||||
priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
|
||||
if (pieces.inst_evtlog_size)
|
||||
priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
|
||||
else
|
||||
priv->inst_evtlog_size =
|
||||
cfg(priv)->base_params->max_event_log_size;
|
||||
priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
|
||||
#ifndef CONFIG_IWLWIFI_P2P
|
||||
ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
|
||||
#endif
|
||||
|
||||
priv->new_scan_threshold_behaviour =
|
||||
!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
|
||||
|
||||
if (!(cfg(priv)->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
|
||||
ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
|
||||
|
||||
/*
|
||||
* if not PAN, then don't support P2P -- might be a uCode
|
||||
* packaging bug or due to the eeprom check above
|
||||
*/
|
||||
if (!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN))
|
||||
ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
|
||||
|
||||
if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) {
|
||||
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
|
||||
priv->shrd->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
|
||||
} else {
|
||||
priv->sta_key_max_num = STA_KEY_MAX_NUM;
|
||||
priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
|
||||
}
|
||||
/*
|
||||
* figure out the offset of chain noise reset and gain commands
|
||||
* base on the size of standard phy calibration commands table size
|
||||
*/
|
||||
if (ucode_capa.standard_phy_calibration_size >
|
||||
IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
|
||||
ucode_capa.standard_phy_calibration_size =
|
||||
IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
|
||||
|
||||
priv->phy_calib_chain_noise_reset_cmd =
|
||||
ucode_capa.standard_phy_calibration_size;
|
||||
priv->phy_calib_chain_noise_gain_cmd =
|
||||
ucode_capa.standard_phy_calibration_size + 1;
|
||||
|
||||
/* initialize all valid contexts */
|
||||
iwl_init_context(priv, ucode_capa.flags);
|
||||
|
||||
/**************************************************
|
||||
* This is still part of probe() in a sense...
|
||||
*
|
||||
* 9. Setup and register with mac80211 and debugfs
|
||||
**************************************************/
|
||||
err = iwlagn_mac_setup_register(priv, &ucode_capa);
|
||||
if (err)
|
||||
goto out_unbind;
|
||||
|
||||
err = iwl_dbgfs_register(priv, DRV_NAME);
|
||||
if (err)
|
||||
IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
|
||||
|
||||
/* We have our copies now, allow OS release its copies */
|
||||
release_firmware(ucode_raw);
|
||||
complete(&priv->firmware_loading_complete);
|
||||
return;
|
||||
|
||||
try_again:
|
||||
/* try next, if any */
|
||||
if (iwl_request_firmware(priv, false))
|
||||
goto out_unbind;
|
||||
release_firmware(ucode_raw);
|
||||
return;
|
||||
|
||||
err_pci_alloc:
|
||||
IWL_ERR(priv, "failed to allocate pci memory\n");
|
||||
iwl_dealloc_ucode(trans(priv));
|
||||
out_unbind:
|
||||
complete(&priv->firmware_loading_complete);
|
||||
device_release_driver(bus(priv)->dev);
|
||||
release_firmware(ucode_raw);
|
||||
}
|
||||
|
||||
static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_ct_kill_config cmd;
|
||||
@ -1158,7 +556,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
|
||||
int ret = 0;
|
||||
|
||||
spin_lock_irqsave(&priv->shrd->lock, flags);
|
||||
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
|
||||
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
|
||||
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
priv->thermal_throttle.ct_kill_toggle = false;
|
||||
@ -1243,9 +641,6 @@ int iwl_alive_start(struct iwl_priv *priv)
|
||||
int ret = 0;
|
||||
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
|
||||
|
||||
/*TODO: this should go to the transport layer */
|
||||
iwl_reset_ict(trans(priv));
|
||||
|
||||
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
|
||||
|
||||
/* After the ALIVE response, we can send host commands to the uCode */
|
||||
@ -1692,13 +1087,6 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
||||
static u32 iwl_hw_detect(struct iwl_priv *priv)
|
||||
{
|
||||
return iwl_read32(bus(priv), CSR_HW_REV);
|
||||
}
|
||||
|
||||
/* Size of one Rx buffer in host DRAM */
|
||||
#define IWL_RX_BUF_SIZE_4K (4 * 1024)
|
||||
#define IWL_RX_BUF_SIZE_8K (8 * 1024)
|
||||
@ -1730,32 +1118,32 @@ static int iwl_set_hw_params(struct iwl_priv *priv)
|
||||
|
||||
static void iwl_debug_config(struct iwl_priv *priv)
|
||||
{
|
||||
dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
|
||||
dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
"enabled\n");
|
||||
#else
|
||||
"disabled\n");
|
||||
#endif
|
||||
dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
|
||||
dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
"enabled\n");
|
||||
#else
|
||||
"disabled\n");
|
||||
#endif
|
||||
dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
|
||||
dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
|
||||
"enabled\n");
|
||||
#else
|
||||
"disabled\n");
|
||||
#endif
|
||||
|
||||
dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
|
||||
dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
|
||||
"enabled\n");
|
||||
#else
|
||||
"disabled\n");
|
||||
#endif
|
||||
dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P "
|
||||
dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_P2P "
|
||||
#ifdef CONFIG_IWLWIFI_P2P
|
||||
"enabled\n");
|
||||
#else
|
||||
@ -1770,7 +1158,6 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
|
||||
struct iwl_priv *priv;
|
||||
struct ieee80211_hw *hw;
|
||||
u16 num_mac;
|
||||
u32 hw_rev;
|
||||
|
||||
/************************
|
||||
* 1. Allocating HW data
|
||||
@ -1783,22 +1170,14 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
|
||||
}
|
||||
|
||||
priv = hw->priv;
|
||||
priv->shrd = &priv->_shrd;
|
||||
bus->shrd = priv->shrd;
|
||||
priv->shrd->bus = bus;
|
||||
priv->shrd = bus->shrd;
|
||||
priv->shrd->priv = priv;
|
||||
|
||||
priv->shrd->trans = trans_ops->alloc(priv->shrd);
|
||||
if (priv->shrd->trans == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto out_free_traffic_mem;
|
||||
}
|
||||
|
||||
/* At this point both hw and priv are allocated. */
|
||||
|
||||
SET_IEEE80211_DEV(hw, bus(priv)->dev);
|
||||
SET_IEEE80211_DEV(hw, trans(priv)->dev);
|
||||
|
||||
/* what debugging capabilities we have */
|
||||
/* show what debugging capabilities we have */
|
||||
iwl_debug_config(priv);
|
||||
|
||||
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
|
||||
@ -1821,41 +1200,29 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
|
||||
/* these spin locks will be used in apm_ops.init and EEPROM access
|
||||
* we should init now
|
||||
*/
|
||||
spin_lock_init(&bus(priv)->reg_lock);
|
||||
spin_lock_init(&trans(priv)->reg_lock);
|
||||
spin_lock_init(&priv->shrd->lock);
|
||||
|
||||
/*
|
||||
* stop and reset the on-board processor just in case it is in a
|
||||
* strange state ... like being left stranded by a primary kernel
|
||||
* and this is now the kdump kernel trying to start up
|
||||
*/
|
||||
iwl_write32(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
|
||||
|
||||
/***********************
|
||||
* 3. Read REV register
|
||||
***********************/
|
||||
hw_rev = iwl_hw_detect(priv);
|
||||
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
|
||||
cfg(priv)->name, hw_rev);
|
||||
cfg(priv)->name, trans(priv)->hw_rev);
|
||||
|
||||
err = iwl_trans_request_irq(trans(priv));
|
||||
err = iwl_trans_start_hw(trans(priv));
|
||||
if (err)
|
||||
goto out_free_trans;
|
||||
|
||||
if (iwl_trans_prepare_card_hw(trans(priv))) {
|
||||
err = -EIO;
|
||||
IWL_WARN(priv, "Failed, HW not ready\n");
|
||||
goto out_free_trans;
|
||||
}
|
||||
goto out_free_traffic_mem;
|
||||
|
||||
/*****************
|
||||
* 4. Read EEPROM
|
||||
*****************/
|
||||
/* Read the EEPROM */
|
||||
err = iwl_eeprom_init(priv, hw_rev);
|
||||
err = iwl_eeprom_init(priv, trans(priv)->hw_rev);
|
||||
/* Reset chip to save power until we load uCode during "up". */
|
||||
iwl_trans_stop_hw(trans(priv));
|
||||
if (err) {
|
||||
IWL_ERR(priv, "Unable to init EEPROM\n");
|
||||
goto out_free_trans;
|
||||
goto out_free_traffic_mem;
|
||||
}
|
||||
err = iwl_eeprom_check_version(priv);
|
||||
if (err)
|
||||
@ -1903,22 +1270,6 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
|
||||
iwl_setup_rx_handlers(priv);
|
||||
iwl_testmode_init(priv);
|
||||
|
||||
/*********************************************
|
||||
* 8. Enable interrupts
|
||||
*********************************************/
|
||||
|
||||
iwl_enable_rfkill_int(priv);
|
||||
|
||||
/* If platform's RF_KILL switch is NOT set to KILL */
|
||||
if (iwl_read32(bus(priv),
|
||||
CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
|
||||
clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
|
||||
else
|
||||
set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
|
||||
|
||||
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
|
||||
test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
|
||||
|
||||
iwl_power_initialize(priv);
|
||||
iwl_tt_initialize(priv);
|
||||
|
||||
@ -1936,8 +1287,6 @@ out_destroy_workqueue:
|
||||
iwl_uninit_drv(priv);
|
||||
out_free_eeprom:
|
||||
iwl_eeprom_free(priv->shrd);
|
||||
out_free_trans:
|
||||
iwl_trans_free(trans(priv));
|
||||
out_free_traffic_mem:
|
||||
iwl_free_traffic_mem(priv);
|
||||
ieee80211_free_hw(priv->hw);
|
||||
@ -1981,8 +1330,6 @@ void __devexit iwl_remove(struct iwl_priv * priv)
|
||||
priv->shrd->workqueue = NULL;
|
||||
iwl_free_traffic_mem(priv);
|
||||
|
||||
iwl_trans_free(trans(priv));
|
||||
|
||||
iwl_uninit_drv(priv);
|
||||
|
||||
dev_kfree_skb(priv->beacon_skb);
|
||||
|
@ -73,8 +73,6 @@ struct iwlagn_ucode_capabilities {
|
||||
|
||||
extern struct ieee80211_ops iwlagn_hw_ops;
|
||||
|
||||
int iwl_reset_ict(struct iwl_trans *trans);
|
||||
|
||||
static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
|
||||
{
|
||||
hdr->op_code = cmd;
|
||||
@ -109,6 +107,7 @@ void iwlagn_config_ht40(struct ieee80211_conf *conf,
|
||||
int iwlagn_rx_calib_result(struct iwl_priv *priv,
|
||||
struct iwl_rx_mem_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd);
|
||||
void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags);
|
||||
|
||||
/* lib */
|
||||
int iwlagn_send_tx_power(struct iwl_priv *priv);
|
||||
|
@ -118,88 +118,24 @@
struct iwl_shared;
struct iwl_bus;

/**
 * struct iwl_bus_ops - bus specific operations
 * @get_pm_support: must return true if the bus can go to sleep
 * @apm_config: will be called during the config of the APM
 * @get_hw_id_string: prints the hw_id in the provided buffer
 * @get_hw_id: get hw_id in u32
 * @write8: write a byte to register at offset ofs
 * @write32: write a dword to register at offset ofs
 * @read32: read a dword from register at offset ofs
 */
struct iwl_bus_ops {
	bool (*get_pm_support)(struct iwl_bus *bus);
	void (*apm_config)(struct iwl_bus *bus);
	void (*get_hw_id_string)(struct iwl_bus *bus, char buf[], int buf_len);
	u32 (*get_hw_id)(struct iwl_bus *bus);
	void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
	void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_bus *bus, u32 ofs);
};

/**
 * struct iwl_bus - bus common data
 *
 * This data is common to all bus layer implementations.
 *
 * @dev - pointer to struct device * that represents the device
 * @ops - pointer to iwl_bus_ops
 * @shrd - pointer to iwl_shared which holds shared data from the upper layer
 *	NB: for the time being this needs to be set by the upper layer since
 *	it allocates the shared data
 * @irq - the irq number for the device
 * @reg_lock - protect hw register access
 */
struct iwl_bus {
	struct device *dev;
	const struct iwl_bus_ops *ops;
	struct iwl_shared *shrd;

	unsigned int irq;
	spinlock_t reg_lock;

	/* pointer to bus specific struct */
	/* Ensure that this pointer will always be aligned to the size of a pointer */
	char bus_specific[0] __attribute__((__aligned__(sizeof(void *))));
};

static inline bool bus_get_pm_support(struct iwl_bus *bus)
{
	return bus->ops->get_pm_support(bus);
}

static inline void bus_apm_config(struct iwl_bus *bus)
{
	bus->ops->apm_config(bus);
}

static inline void bus_get_hw_id_string(struct iwl_bus *bus, char buf[],
					int buf_len)
{
	bus->ops->get_hw_id_string(bus, buf, buf_len);
}

static inline u32 bus_get_hw_id(struct iwl_bus *bus)
{
	return bus->ops->get_hw_id(bus);
}

static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
	bus->ops->write8(bus, ofs, val);
}

static inline void bus_write32(struct iwl_bus *bus, u32 ofs, u32 val)
{
	bus->ops->write32(bus, ofs, val);
}

static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
{
	return bus->ops->read32(bus, ofs);
}
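The bus_*() wrappers above are a plain ops-table indirection: callers go through the wrapper, the wrapper dispatches through whatever iwl_bus_ops the backend installed, and the concrete PCI callbacks that fill such a table appear further down in this diff as bus_ops_pci. The following is a minimal standalone sketch of the same pattern; every name in it (demo_bus, demo_bus_ops, demo_ops) is made up for illustration and is not part of the driver.

#include <stdint.h>
#include <stdio.h>

struct demo_bus;

/* Stand-in for iwl_bus_ops: the set of operations a backend must provide. */
struct demo_bus_ops {
	void (*write32)(struct demo_bus *bus, uint32_t ofs, uint32_t val);
	uint32_t (*read32)(struct demo_bus *bus, uint32_t ofs);
};

struct demo_bus {
	const struct demo_bus_ops *ops;
	uint32_t regs[64];	/* fake register file for the demo backend */
};

/* Demo backend, analogous in spirit to the PCI callbacks later in this diff. */
static void demo_write32(struct demo_bus *bus, uint32_t ofs, uint32_t val)
{
	bus->regs[ofs / 4] = val;
}

static uint32_t demo_read32(struct demo_bus *bus, uint32_t ofs)
{
	return bus->regs[ofs / 4];
}

static const struct demo_bus_ops demo_ops = {
	.write32 = demo_write32,
	.read32	 = demo_read32,
};

/* Thin wrappers, like bus_write32()/bus_read32() above: callers never see
 * which backend is wired in. */
static inline void demo_bus_write32(struct demo_bus *bus, uint32_t ofs, uint32_t val)
{
	bus->ops->write32(bus, ofs, val);
}

static inline uint32_t demo_bus_read32(struct demo_bus *bus, uint32_t ofs)
{
	return bus->ops->read32(bus, ofs);
}

int main(void)
{
	struct demo_bus bus = { .ops = &demo_ops };

	demo_bus_write32(&bus, 8, 0xdeadbeef);
	printf("reg[8] = 0x%08x\n", (unsigned)demo_bus_read32(&bus, 8));
	return 0;
}

Swapping in a different backend only means pointing .ops at another table; none of the call sites change, which is what lets the same core code sit on top of different transports.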

/*****************************************************
 * Bus layer registration functions
 ******************************************************/

@ -203,10 +203,9 @@ int iwl_init_geos(struct iwl_priv *priv)
|
||||
|
||||
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
|
||||
cfg(priv)->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
|
||||
char buf[32];
|
||||
bus_get_hw_id_string(bus(priv), buf, sizeof(buf));
|
||||
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
|
||||
"Please send your %s to maintainer.\n", buf);
|
||||
"Please send your %s to maintainer.\n",
|
||||
trans(priv)->hw_id_str);
|
||||
cfg(priv)->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
|
||||
}
|
||||
|
||||
@ -883,129 +882,6 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
|
||||
}
|
||||
}
|
||||
|
||||
static int iwl_apm_stop_master(struct iwl_priv *priv)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
/* stop device's busmaster DMA activity */
|
||||
iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
|
||||
|
||||
ret = iwl_poll_bit(bus(priv), CSR_RESET,
|
||||
CSR_RESET_REG_FLAG_MASTER_DISABLED,
|
||||
CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
|
||||
if (ret)
|
||||
IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
|
||||
|
||||
IWL_DEBUG_INFO(priv, "stop master\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void iwl_apm_stop(struct iwl_priv *priv)
|
||||
{
|
||||
IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
|
||||
|
||||
clear_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
|
||||
|
||||
/* Stop device's DMA activity */
|
||||
iwl_apm_stop_master(priv);
|
||||
|
||||
/* Reset the entire device */
|
||||
iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
|
||||
|
||||
udelay(10);
|
||||
|
||||
/*
|
||||
* Clear "initialization complete" bit to move adapter from
|
||||
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
|
||||
*/
|
||||
iwl_clear_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Start up NIC's basic functionality after it has been reset
|
||||
* (e.g. after platform boot, or shutdown via iwl_apm_stop())
|
||||
* NOTE: This does not load uCode nor start the embedded processor
|
||||
*/
|
||||
int iwl_apm_init(struct iwl_priv *priv)
|
||||
{
|
||||
int ret = 0;
|
||||
IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
|
||||
|
||||
/*
|
||||
* Use "set_bit" below rather than "write", to preserve any hardware
|
||||
* bits already set by default after reset.
|
||||
*/
|
||||
|
||||
/* Disable L0S exit timer (platform NMI Work/Around) */
|
||||
iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
|
||||
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
|
||||
|
||||
/*
|
||||
* Disable L0s without affecting L1;
|
||||
* don't wait for ICH L0s (ICH bug W/A)
|
||||
*/
|
||||
iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
|
||||
CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
|
||||
|
||||
/* Set FH wait threshold to maximum (HW error during stress W/A) */
|
||||
iwl_set_bit(bus(priv), CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
|
||||
|
||||
/*
|
||||
* Enable HAP INTA (interrupt from management bus) to
|
||||
* wake device's PCI Express link L1a -> L0s
|
||||
*/
|
||||
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
|
||||
|
||||
bus_apm_config(bus(priv));
|
||||
|
||||
/* Configure analog phase-lock-loop before activating to D0A */
|
||||
if (cfg(priv)->base_params->pll_cfg_val)
|
||||
iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
|
||||
cfg(priv)->base_params->pll_cfg_val);
|
||||
|
||||
/*
|
||||
* Set "initialization complete" bit to move adapter from
|
||||
* D0U* --> D0A* (powered-up active) state.
|
||||
*/
|
||||
iwl_set_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
|
||||
/*
|
||||
* Wait for clock stabilization; once stabilized, access to
|
||||
* device-internal resources is supported, e.g. iwl_write_prph()
|
||||
* and accesses to uCode SRAM.
|
||||
*/
|
||||
ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
|
||||
if (ret < 0) {
|
||||
IWL_DEBUG_INFO(priv, "Failed to init the card\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable DMA clock and wait for it to stabilize.
|
||||
*
|
||||
* Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
|
||||
* do not disable clocks. This preserves any hardware bits already
|
||||
* set by default in "CLK_CTRL_REG" after reset.
|
||||
*/
|
||||
iwl_write_prph(bus(priv), APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
|
||||
udelay(20);
|
||||
|
||||
/* Disable L1-Active */
|
||||
iwl_set_bits_prph(bus(priv), APMG_PCIDEV_STT_REG,
|
||||
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
|
||||
|
||||
set_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
|
||||
{
|
||||
int ret;
|
||||
|
@ -297,12 +297,6 @@ static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
|
||||
cfg(priv)->bt_params->advanced_bt_coexist;
|
||||
}
|
||||
|
||||
static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
|
||||
{
|
||||
IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
|
||||
iwl_write32(bus(priv), CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
|
||||
}
|
||||
|
||||
extern bool bt_siso_mode;
|
||||
|
||||
#endif /* __iwl_core_h__ */
|
||||
|
@ -35,10 +35,10 @@
struct iwl_priv;

/* No matter what m is (priv, bus, trans), this will work */
#define IWL_ERR(m, f, a...) dev_err(bus(m)->dev, f, ## a)
#define IWL_WARN(m, f, a...) dev_warn(bus(m)->dev, f, ## a)
#define IWL_INFO(m, f, a...) dev_info(bus(m)->dev, f, ## a)
#define IWL_CRIT(m, f, a...) dev_crit(bus(m)->dev, f, ## a)
#define IWL_ERR(m, f, a...) dev_err(trans(m)->dev, f, ## a)
#define IWL_WARN(m, f, a...) dev_warn(trans(m)->dev, f, ## a)
#define IWL_INFO(m, f, a...) dev_info(trans(m)->dev, f, ## a)
#define IWL_CRIT(m, f, a...) dev_crit(trans(m)->dev, f, ## a)
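The comment notes that m may be priv, bus or trans and the same macro still compiles. One common way to get that in C, and the pattern this sketch assumes rather than the driver's actual bus()/trans() helpers, is to give every wrapper structure a member of the same name pointing at one shared object, so a single expression resolves for all of them. All names below (demo_shared, demo_priv, shrd_of, DEMO_ERR) are hypothetical.

#include <stdio.h>

struct demo_shared {
	const char *name;	/* stands in for the struct device pointer */
};

/* Three different wrappers, all carrying the same member name. */
struct demo_priv  { struct demo_shared *shrd; };
struct demo_bus   { struct demo_shared *shrd; };
struct demo_trans { struct demo_shared *shrd; };

/* Resolves for any of the three because they all expose 'shrd'. */
#define shrd_of(m)	((m)->shrd)

/* Same shape as the IWL_* macros above; ## __VA_ARGS__ is the GNU extension
 * the kernel macros also rely on. */
#define DEMO_ERR(m, fmt, ...) \
	fprintf(stderr, "%s: " fmt, shrd_of(m)->name, ## __VA_ARGS__)

int main(void)
{
	struct demo_shared shared = { .name = "demo0" };
	struct demo_priv priv = { .shrd = &shared };
	struct demo_trans trans = { .shrd = &shared };

	DEMO_ERR(&priv, "reported via priv\n");
	DEMO_ERR(&trans, "reported via trans\n");
	return 0;
}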

#define iwl_print_hex_error(m, p, len) \
do { \
@ -50,7 +50,7 @@ do { \
#define IWL_DEBUG(m, level, fmt, ...) \
do { \
	if (iwl_get_debug_level((m)->shrd) & (level)) \
		dev_err(bus(m)->dev, "%c %s " fmt, \
		dev_err(trans(m)->dev, "%c %s " fmt, \
			in_interrupt() ? 'I' : 'U', __func__, \
			##__VA_ARGS__); \
} while (0)
@ -59,7 +59,7 @@ do { \
do { \
	if (iwl_get_debug_level((m)->shrd) & (level) && \
	    net_ratelimit()) \
		dev_err(bus(m)->dev, "%c %s " fmt, \
		dev_err(trans(m)->dev, "%c %s " fmt, \
			in_interrupt() ? 'I' : 'U', __func__, \
			##__VA_ARGS__); \
} while (0)
||||
@ -74,12 +74,12 @@ do { \
|
||||
#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \
|
||||
do { \
|
||||
if (!iwl_is_rfkill(p->shrd)) \
|
||||
dev_err(bus(p)->dev, "%s%c %s " fmt, \
|
||||
dev_err(trans(p)->dev, "%s%c %s " fmt, \
|
||||
"", \
|
||||
in_interrupt() ? 'I' : 'U', __func__, \
|
||||
##__VA_ARGS__); \
|
||||
else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
|
||||
dev_err(bus(p)->dev, "%s%c %s " fmt, \
|
||||
dev_err(trans(p)->dev, "%s%c %s " fmt, \
|
||||
"(RFKILL) ", \
|
||||
in_interrupt() ? 'I' : 'U', __func__, \
|
||||
##__VA_ARGS__); \
|
||||
|
@ -263,7 +263,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
|
||||
sram = priv->dbgfs_sram_offset & ~0x3;
|
||||
|
||||
/* read the first u32 from sram */
|
||||
val = iwl_read_targ_mem(bus(priv), sram);
|
||||
val = iwl_read_targ_mem(trans(priv), sram);
|
||||
|
||||
for (; len; len--) {
|
||||
/* put the address at the start of every line */
|
||||
@ -282,7 +282,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
|
||||
if (++offset == 4) {
|
||||
sram += 4;
|
||||
offset = 0;
|
||||
val = iwl_read_targ_mem(bus(priv), sram);
|
||||
val = iwl_read_targ_mem(trans(priv), sram);
|
||||
}
|
||||
|
||||
/* put in extra spaces and split lines for human readability */
|
||||
@ -2055,7 +2055,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
|
||||
const size_t bufsz = sizeof(buf);
|
||||
u32 pwrsave_status;
|
||||
|
||||
pwrsave_status = iwl_read32(bus(priv), CSR_GP_CNTRL) &
|
||||
pwrsave_status = iwl_read32(trans(priv), CSR_GP_CNTRL) &
|
||||
CSR_GP_REG_POWER_SAVE_STATUS_MSK;
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
|
||||
|
@ -292,114 +292,6 @@ struct iwl_vif_priv {
	u8 ibss_bssid_sta_id;
};

/* v1/v2 uCode file layout */
struct iwl_ucode_header {
	__le32 ver;	/* major/minor/API/serial */
	union {
		struct {
			__le32 inst_size;	/* bytes of runtime code */
			__le32 data_size;	/* bytes of runtime data */
			__le32 init_size;	/* bytes of init code */
			__le32 init_data_size;	/* bytes of init data */
			__le32 boot_size;	/* bytes of bootstrap code */
			u8 data[0];		/* in same order as sizes */
		} v1;
		struct {
			__le32 build;		/* build number */
			__le32 inst_size;	/* bytes of runtime code */
			__le32 data_size;	/* bytes of runtime data */
			__le32 init_size;	/* bytes of init code */
			__le32 init_data_size;	/* bytes of init data */
			__le32 boot_size;	/* bytes of bootstrap code */
			u8 data[0];		/* in same order as sizes */
		} v2;
	} u;
};
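A loader that supports both layouts has to decide which header it is looking at before touching any other field. The comment inside iwl_tlv_ucode_header further below notes that a TLV image starts with four zero bytes, which is an impossible major/minor/API/serial combination for the v1/v2 header above. The sketch below shows that dispatch in standalone C; the function and enum names are hypothetical, and the little-endian words are assembled by hand so the check does not depend on the host.

#include <stdint.h>
#include <stddef.h>

#define DEMO_TLV_MAGIC	0x0a4c5749	/* same value as IWL_TLV_UCODE_MAGIC below */

enum demo_fw_format { DEMO_FW_V1_V2, DEMO_FW_TLV, DEMO_FW_BAD };

/* 'data' is the raw firmware file as loaded from disk. */
static enum demo_fw_format demo_fw_format(const uint8_t *data, size_t len)
{
	uint32_t first, magic;

	if (len < 8)
		return DEMO_FW_BAD;

	first = data[0] | data[1] << 8 | data[2] << 16 | (uint32_t)data[3] << 24;
	magic = data[4] | data[5] << 8 | data[6] << 16 | (uint32_t)data[7] << 24;

	if (first != 0)
		return DEMO_FW_V1_V2;	/* non-zero version word: v1/v2 layout */

	return magic == DEMO_TLV_MAGIC ? DEMO_FW_TLV : DEMO_FW_BAD;
}

int main(void)
{
	/* "IWL\n" magic after four zero bytes marks a TLV image. */
	static const uint8_t tlv_start[8] = { 0, 0, 0, 0, 0x49, 0x57, 0x4c, 0x0a };

	return demo_fw_format(tlv_start, sizeof(tlv_start)) == DEMO_FW_TLV ? 0 : 1;
}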
|
||||
|
||||
/*
 * new TLV uCode file layout
 *
 * The new TLV file format contains TLVs, each of which specifies
 * some piece of data. To facilitate "groups", for example
 * different instruction images with different capabilities,
 * bundled with the same init image, an alternative mechanism
 * is provided:
 * When the alternative field is 0, that means that the item
 * is always valid. When it is non-zero, then it is only
 * valid in conjunction with items of the same alternative,
 * in which case the driver (user) selects one alternative
 * to use.
 */

enum iwl_ucode_tlv_type {
	IWL_UCODE_TLV_INVALID		= 0,	/* unused */
	IWL_UCODE_TLV_INST		= 1,
	IWL_UCODE_TLV_DATA		= 2,
	IWL_UCODE_TLV_INIT		= 3,
	IWL_UCODE_TLV_INIT_DATA		= 4,
	IWL_UCODE_TLV_BOOT		= 5,
	IWL_UCODE_TLV_PROBE_MAX_LEN	= 6,	/* a u32 value */
	IWL_UCODE_TLV_PAN		= 7,
	IWL_UCODE_TLV_RUNT_EVTLOG_PTR	= 8,
	IWL_UCODE_TLV_RUNT_EVTLOG_SIZE	= 9,
	IWL_UCODE_TLV_RUNT_ERRLOG_PTR	= 10,
	IWL_UCODE_TLV_INIT_EVTLOG_PTR	= 11,
	IWL_UCODE_TLV_INIT_EVTLOG_SIZE	= 12,
	IWL_UCODE_TLV_INIT_ERRLOG_PTR	= 13,
	IWL_UCODE_TLV_ENHANCE_SENS_TBL	= 14,
	IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
	IWL_UCODE_TLV_WOWLAN_INST	= 16,
	IWL_UCODE_TLV_WOWLAN_DATA	= 17,
	IWL_UCODE_TLV_FLAGS		= 18,
};

/**
 * enum iwl_ucode_tlv_flag - ucode API flags
 * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
 *	was a separate TLV but moved here to save space.
 * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
 *	treats good CRC threshold as a boolean
 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
 */
enum iwl_ucode_tlv_flag {
	IWL_UCODE_TLV_FLAGS_PAN		= BIT(0),
	IWL_UCODE_TLV_FLAGS_NEWSCAN	= BIT(1),
	IWL_UCODE_TLV_FLAGS_MFP		= BIT(2),
	IWL_UCODE_TLV_FLAGS_P2P		= BIT(3),
};

struct iwl_ucode_tlv {
	__le16 type;		/* see above */
	__le16 alternative;	/* see comment */
	__le32 length;		/* not including type/length fields */
	u8 data[0];
} __packed;

#define IWL_TLV_UCODE_MAGIC	0x0a4c5749

struct iwl_tlv_ucode_header {
	/*
	 * The TLV style ucode header is distinguished from
	 * the v1/v2 style header by first four bytes being
	 * zero, as that is an invalid combination of
	 * major/minor/API/serial versions.
	 */
	__le32 zero;
	__le32 magic;
	u8 human_readable[64];
	__le32 ver;		/* major/minor/API/serial */
	__le32 build;
	__le64 alternatives;	/* bitmask of valid alternatives */
	/*
	 * The data contained herein has a TLV layout,
	 * see above for the TLV header and types.
	 * Note that each TLV is padded to a length
	 * that is a multiple of 4 for alignment.
	 */
	u8 data[0];
};
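Given the header above, walking the TLV area reduces to stepping over header-plus-payload records, rounding each payload up to a multiple of 4 as the comment requires, and skipping entries whose non-zero alternative does not match the one selected. The sketch below is a minimal userspace illustration of that walk, not the driver's parser: it re-declares a simplified host-endian version of the TLV record under a hypothetical name (fw_tlv) and assumes a little-endian host.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the TLV record above (sketch only). */
struct fw_tlv {
	uint16_t type;
	uint16_t alternative;
	uint32_t length;	/* payload bytes, not counting this header */
	uint8_t  data[];
};

/* Walk the TLV area that follows the file header; 'len' is its size in bytes. */
static int walk_tlvs(const uint8_t *tlv_area, size_t len, uint16_t wanted_alt)
{
	size_t off = 0;

	while (off + sizeof(struct fw_tlv) <= len) {
		const struct fw_tlv *tlv = (const void *)(tlv_area + off);
		size_t payload = tlv->length;

		if (off + sizeof(*tlv) + payload > len)
			return -1;	/* truncated image */

		/* alternative 0 is always valid; otherwise it must match */
		if (tlv->alternative == 0 || tlv->alternative == wanted_alt)
			printf("TLV type %u, %zu payload bytes\n",
			       (unsigned)tlv->type, payload);

		/* each TLV is padded out to a multiple of 4 bytes */
		off += sizeof(*tlv) + ((payload + 3) & ~(size_t)3);
	}
	return 0;
}

int main(void)
{
	/* One fake little-endian TLV: type 1, alternative 0, 5 payload bytes
	 * ("hello"), padded to 8. */
	uint8_t image[16] = { 1, 0, 0, 0, 5, 0, 0, 0, 'h', 'e', 'l', 'l', 'o' };

	return walk_tlvs(image, sizeof(image), 0);
}

A real loader would of course first check the zero and magic words of the header above before walking the area.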
|
||||
|
||||
struct iwl_sensitivity_ranges {
|
||||
u16 min_nrg_cck;
|
||||
u16 max_nrg_cck;
|
||||
@ -821,7 +713,6 @@ struct iwl_wipan_noa_data {
|
||||
struct iwl_priv {
|
||||
|
||||
/*data shared among all the driver's layers */
|
||||
struct iwl_shared _shrd;
|
||||
struct iwl_shared *shrd;
|
||||
|
||||
/* ieee device used by generic ieee processing code */
|
||||
|
@ -156,16 +156,16 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
|
||||
|
||||
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
|
||||
/* Request semaphore */
|
||||
iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG,
|
||||
iwl_set_bit(trans(bus), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
|
||||
|
||||
/* See if we got it */
|
||||
ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG,
|
||||
ret = iwl_poll_bit(trans(bus), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
|
||||
EEPROM_SEM_TIMEOUT);
|
||||
if (ret >= 0) {
|
||||
IWL_DEBUG_EEPROM(bus,
|
||||
IWL_DEBUG_EEPROM(trans(bus),
|
||||
"Acquired semaphore after %d tries.\n",
|
||||
count+1);
|
||||
return ret;
|
||||
@ -177,14 +177,15 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
|
||||
|
||||
static void iwl_eeprom_release_semaphore(struct iwl_bus *bus)
|
||||
{
|
||||
iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG,
|
||||
iwl_clear_bit(trans(bus), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
|
||||
|
||||
}
|
||||
|
||||
static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
|
||||
{
|
||||
u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
|
||||
u32 gp = iwl_read32(trans, CSR_EEPROM_GP) &
|
||||
CSR_EEPROM_GP_VALID_MSK;
|
||||
int ret = 0;
|
||||
|
||||
IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp);
|
||||
@ -305,13 +306,13 @@ void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
|
||||
|
||||
static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode)
|
||||
{
|
||||
iwl_read32(bus, CSR_OTP_GP_REG);
|
||||
iwl_read32(trans(bus), CSR_OTP_GP_REG);
|
||||
|
||||
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
|
||||
iwl_clear_bit(bus, CSR_OTP_GP_REG,
|
||||
iwl_clear_bit(trans(bus), CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
|
||||
else
|
||||
iwl_set_bit(bus, CSR_OTP_GP_REG,
|
||||
iwl_set_bit(trans(bus), CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
|
||||
}
|
||||
|
||||
@ -332,7 +333,7 @@ static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
|
||||
nvm_type = NVM_DEVICE_TYPE_EEPROM;
|
||||
break;
|
||||
default:
|
||||
otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
|
||||
otpgp = iwl_read32(trans(bus), CSR_OTP_GP_REG);
|
||||
if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
|
||||
nvm_type = NVM_DEVICE_TYPE_OTP;
|
||||
else
|
||||
@ -347,22 +348,22 @@ static int iwl_init_otp_access(struct iwl_bus *bus)
|
||||
int ret;
|
||||
|
||||
/* Enable 40MHz radio clock */
|
||||
iwl_write32(bus, CSR_GP_CNTRL,
|
||||
iwl_read32(bus, CSR_GP_CNTRL) |
|
||||
iwl_write32(trans(bus), CSR_GP_CNTRL,
|
||||
iwl_read32(trans(bus), CSR_GP_CNTRL) |
|
||||
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
|
||||
/* wait for clock to be ready */
|
||||
ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
|
||||
ret = iwl_poll_bit(trans(bus), CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
|
||||
25000);
|
||||
if (ret < 0)
|
||||
IWL_ERR(bus, "Time out access OTP\n");
|
||||
else {
|
||||
iwl_set_bits_prph(bus, APMG_PS_CTRL_REG,
|
||||
iwl_set_bits_prph(trans(bus), APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_RESET_REQ);
|
||||
udelay(5);
|
||||
iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG,
|
||||
iwl_clear_bits_prph(trans(bus), APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_RESET_REQ);
|
||||
|
||||
/*
|
||||
@ -370,7 +371,7 @@ static int iwl_init_otp_access(struct iwl_bus *bus)
|
||||
* this is only applicable for HW with OTP shadow RAM
|
||||
*/
|
||||
if (cfg(bus)->base_params->shadow_ram_support)
|
||||
iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG,
|
||||
iwl_set_bit(trans(bus), CSR_DBG_LINK_PWR_MGMT_REG,
|
||||
CSR_RESET_LINK_PWR_MGMT_DISABLED);
|
||||
}
|
||||
return ret;
|
||||
@ -382,9 +383,9 @@ static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
|
||||
u32 r;
|
||||
u32 otpgp;
|
||||
|
||||
iwl_write32(bus, CSR_EEPROM_REG,
|
||||
iwl_write32(trans(bus), CSR_EEPROM_REG,
|
||||
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
|
||||
ret = iwl_poll_bit(bus, CSR_EEPROM_REG,
|
||||
ret = iwl_poll_bit(trans(bus), CSR_EEPROM_REG,
|
||||
CSR_EEPROM_REG_READ_VALID_MSK,
|
||||
CSR_EEPROM_REG_READ_VALID_MSK,
|
||||
IWL_EEPROM_ACCESS_TIMEOUT);
|
||||
@ -392,13 +393,13 @@ static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
|
||||
IWL_ERR(bus, "Time out reading OTP[%d]\n", addr);
|
||||
return ret;
|
||||
}
|
||||
r = iwl_read32(bus, CSR_EEPROM_REG);
|
||||
r = iwl_read32(trans(bus), CSR_EEPROM_REG);
|
||||
/* check for ECC errors: */
|
||||
otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
|
||||
otpgp = iwl_read32(trans(bus), CSR_OTP_GP_REG);
|
||||
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
|
||||
/* stop in this case */
|
||||
/* set the uncorrectable OTP ECC bit for acknowledgement */
|
||||
iwl_set_bit(bus, CSR_OTP_GP_REG,
|
||||
iwl_set_bit(trans(bus), CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
|
||||
IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n");
|
||||
return -EINVAL;
|
||||
@ -406,7 +407,7 @@ static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
|
||||
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
|
||||
/* continue in this case */
|
||||
/* set the correctable OTP ECC bit for acknowledgement */
|
||||
iwl_set_bit(bus, CSR_OTP_GP_REG,
|
||||
iwl_set_bit(trans(bus), CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
|
||||
IWL_ERR(bus, "Correctable OTP ECC error, continue read\n");
|
||||
}
|
||||
@ -656,7 +657,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
|
||||
{
|
||||
struct iwl_shared *shrd = priv->shrd;
|
||||
__le16 *e;
|
||||
u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
|
||||
u32 gp = iwl_read32(trans(priv), CSR_EEPROM_GP);
|
||||
int sz;
|
||||
int ret;
|
||||
u16 addr;
|
||||
@ -676,8 +677,6 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
|
||||
}
|
||||
e = (__le16 *)shrd->eeprom;
|
||||
|
||||
iwl_apm_init(priv);
|
||||
|
||||
ret = iwl_eeprom_verify_signature(trans(priv));
|
||||
if (ret < 0) {
|
||||
IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
|
||||
@ -701,11 +700,11 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
|
||||
ret = -ENOENT;
|
||||
goto done;
|
||||
}
|
||||
iwl_write32(bus(priv), CSR_EEPROM_GP,
|
||||
iwl_read32(bus(priv), CSR_EEPROM_GP) &
|
||||
iwl_write32(trans(priv), CSR_EEPROM_GP,
|
||||
iwl_read32(trans(priv), CSR_EEPROM_GP) &
|
||||
~CSR_EEPROM_GP_IF_OWNER_MSK);
|
||||
|
||||
iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
|
||||
iwl_set_bit(trans(priv), CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
|
||||
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
|
||||
/* traversing the linked list if no shadow ram supported */
|
||||
@ -730,10 +729,10 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
|
||||
for (addr = 0; addr < sz; addr += sizeof(u16)) {
|
||||
u32 r;
|
||||
|
||||
iwl_write32(bus(priv), CSR_EEPROM_REG,
|
||||
iwl_write32(trans(priv), CSR_EEPROM_REG,
|
||||
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
|
||||
|
||||
ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
|
||||
ret = iwl_poll_bit(trans(priv), CSR_EEPROM_REG,
|
||||
CSR_EEPROM_REG_READ_VALID_MSK,
|
||||
CSR_EEPROM_REG_READ_VALID_MSK,
|
||||
IWL_EEPROM_ACCESS_TIMEOUT);
|
||||
@ -741,7 +740,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
|
||||
IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
|
||||
goto done;
|
||||
}
|
||||
r = iwl_read32(bus(priv), CSR_EEPROM_REG);
|
||||
r = iwl_read32(trans(priv), CSR_EEPROM_REG);
|
||||
e[addr / 2] = cpu_to_le16(r >> 16);
|
||||
}
|
||||
}
|
||||
@ -758,8 +757,6 @@ done:
|
||||
err:
|
||||
if (ret)
|
||||
iwl_eeprom_free(priv->shrd);
|
||||
/* Reset chip to save power until we load uCode during "up". */
|
||||
iwl_apm_stop(priv);
|
||||
alloc_err:
|
||||
return ret;
|
||||
}
|
||||
@ -1072,7 +1069,7 @@ void iwl_rf_config(struct iwl_priv *priv)
|
||||
|
||||
/* write radio config values to register */
|
||||
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
|
||||
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
|
||||
EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
|
||||
EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
|
||||
EEPROM_RF_CFG_DASH_MSK(radio_cfg));
|
||||
@ -1084,7 +1081,7 @@ void iwl_rf_config(struct iwl_priv *priv)
|
||||
WARN_ON(1);
|
||||
|
||||
/* set CSR_HW_CONFIG_REG for uCode use */
|
||||
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
|
||||
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
|
||||
}
|
||||
|
@ -34,41 +34,41 @@
|
||||
|
||||
#define IWL_POLL_INTERVAL 10 /* microseconds */
|
||||
|
||||
static inline void __iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
static inline void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
|
||||
{
|
||||
iwl_write32(bus, reg, iwl_read32(bus, reg) | mask);
|
||||
iwl_write32(trans, reg, iwl_read32(trans, reg) | mask);
|
||||
}
|
||||
|
||||
static inline void __iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
static inline void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
|
||||
{
|
||||
iwl_write32(bus, reg, iwl_read32(bus, reg) & ~mask);
|
||||
iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask);
|
||||
}
|
||||
|
||||
void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
__iwl_set_bit(bus, reg, mask);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
__iwl_set_bit(trans, reg, mask);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
}
|
||||
|
||||
void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
__iwl_clear_bit(bus, reg, mask);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
__iwl_clear_bit(trans, reg, mask);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
}
|
||||
|
||||
int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
|
||||
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
|
||||
u32 bits, u32 mask, int timeout)
|
||||
{
|
||||
int t = 0;
|
||||
|
||||
do {
|
||||
if ((iwl_read32(bus, addr) & mask) == (bits & mask))
|
||||
if ((iwl_read32(trans, addr) & mask) == (bits & mask))
|
||||
return t;
|
||||
udelay(IWL_POLL_INTERVAL);
|
||||
t += IWL_POLL_INTERVAL;
|
||||
@ -77,14 +77,15 @@ int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
int iwl_grab_nic_access_silent(struct iwl_bus *bus)
|
||||
int iwl_grab_nic_access_silent(struct iwl_trans *trans)
|
||||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&bus->reg_lock);
|
||||
lockdep_assert_held(&trans->reg_lock);
|
||||
|
||||
/* this bit wakes up the NIC */
|
||||
__iwl_set_bit(bus, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
__iwl_set_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
|
||||
/*
|
||||
* These bits say the device is running, and should keep running for
|
||||
@ -105,70 +106,70 @@ int iwl_grab_nic_access_silent(struct iwl_bus *bus)
|
||||
* 5000 series and later (including 1000 series) have non-volatile SRAM,
|
||||
* and do not save/restore SRAM when power cycling.
|
||||
*/
|
||||
ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
|
||||
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
|
||||
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
|
||||
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
|
||||
if (ret < 0) {
|
||||
iwl_write32(bus, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
|
||||
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int iwl_grab_nic_access(struct iwl_bus *bus)
|
||||
int iwl_grab_nic_access(struct iwl_trans *trans)
|
||||
{
|
||||
int ret = iwl_grab_nic_access_silent(bus);
|
||||
int ret = iwl_grab_nic_access_silent(trans);
|
||||
if (ret) {
|
||||
u32 val = iwl_read32(bus, CSR_GP_CNTRL);
|
||||
IWL_ERR(bus,
|
||||
u32 val = iwl_read32(trans, CSR_GP_CNTRL);
|
||||
IWL_ERR(trans,
|
||||
"MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void iwl_release_nic_access(struct iwl_bus *bus)
|
||||
void iwl_release_nic_access(struct iwl_trans *trans)
|
||||
{
|
||||
lockdep_assert_held(&bus->reg_lock);
|
||||
__iwl_clear_bit(bus, CSR_GP_CNTRL,
|
||||
lockdep_assert_held(&trans->reg_lock);
|
||||
__iwl_clear_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
}
|
||||
|
||||
u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
|
||||
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
|
||||
{
|
||||
u32 value;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
value = iwl_read32(bus, reg);
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
iwl_grab_nic_access(trans);
|
||||
value = iwl_read32(trans, reg);
|
||||
iwl_release_nic_access(trans);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value)
|
||||
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(bus)) {
|
||||
iwl_write32(bus, reg, value);
|
||||
iwl_release_nic_access(bus);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(trans)) {
|
||||
iwl_write32(trans, reg, value);
|
||||
iwl_release_nic_access(trans);
|
||||
}
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
}
|
||||
|
||||
int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
|
||||
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
|
||||
int timeout)
|
||||
{
|
||||
int t = 0;
|
||||
|
||||
do {
|
||||
if ((iwl_read_direct32(bus, addr) & mask) == mask)
|
||||
if ((iwl_read_direct32(trans, addr) & mask) == mask)
|
||||
return t;
|
||||
udelay(IWL_POLL_INTERVAL);
|
||||
t += IWL_POLL_INTERVAL;
|
||||
@ -177,135 +178,135 @@ int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static inline u32 __iwl_read_prph(struct iwl_bus *bus, u32 reg)
|
||||
static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg)
|
||||
{
|
||||
iwl_write32(bus, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
|
||||
iwl_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
|
||||
rmb();
|
||||
return iwl_read32(bus, HBUS_TARG_PRPH_RDAT);
|
||||
return iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
|
||||
}
|
||||
|
||||
static inline void __iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
|
||||
static inline void __iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
|
||||
{
|
||||
iwl_write32(bus, HBUS_TARG_PRPH_WADDR,
|
||||
iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
|
||||
((addr & 0x0000FFFF) | (3 << 24)));
|
||||
wmb();
|
||||
iwl_write32(bus, HBUS_TARG_PRPH_WDAT, val);
|
||||
iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
|
||||
}
|
||||
|
||||
u32 iwl_read_prph(struct iwl_bus *bus, u32 reg)
|
||||
u32 iwl_read_prph(struct iwl_trans *trans, u32 reg)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
val = __iwl_read_prph(bus, reg);
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
iwl_grab_nic_access(trans);
|
||||
val = __iwl_read_prph(trans, reg);
|
||||
iwl_release_nic_access(trans);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
return val;
|
||||
}
|
||||
|
||||
void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
|
||||
void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(bus)) {
|
||||
__iwl_write_prph(bus, addr, val);
|
||||
iwl_release_nic_access(bus);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(trans)) {
|
||||
__iwl_write_prph(trans, addr, val);
|
||||
iwl_release_nic_access(trans);
|
||||
}
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
}
|
||||
|
||||
void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
__iwl_write_prph(bus, reg, __iwl_read_prph(bus, reg) | mask);
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
iwl_grab_nic_access(trans);
|
||||
__iwl_write_prph(trans, reg, __iwl_read_prph(trans, reg) | mask);
|
||||
iwl_release_nic_access(trans);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
}
|
||||
|
||||
void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
|
||||
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
|
||||
u32 bits, u32 mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
__iwl_write_prph(bus, reg,
|
||||
(__iwl_read_prph(bus, reg) & mask) | bits);
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
iwl_grab_nic_access(trans);
|
||||
__iwl_write_prph(trans, reg,
|
||||
(__iwl_read_prph(trans, reg) & mask) | bits);
|
||||
iwl_release_nic_access(trans);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
}
|
||||
|
||||
void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
val = __iwl_read_prph(bus, reg);
|
||||
__iwl_write_prph(bus, reg, (val & ~mask));
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
iwl_grab_nic_access(trans);
|
||||
val = __iwl_read_prph(trans, reg);
|
||||
__iwl_write_prph(trans, reg, (val & ~mask));
|
||||
iwl_release_nic_access(trans);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
}
|
||||
|
||||
void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
|
||||
void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
|
||||
void *buf, int words)
|
||||
{
|
||||
unsigned long flags;
|
||||
int offs;
|
||||
u32 *vals = buf;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
iwl_grab_nic_access(trans);
|
||||
|
||||
iwl_write32(bus, HBUS_TARG_MEM_RADDR, addr);
|
||||
iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
|
||||
rmb();
|
||||
|
||||
for (offs = 0; offs < words; offs++)
|
||||
vals[offs] = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
|
||||
vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
||||
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
iwl_release_nic_access(trans);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
}
|
||||
|
||||
u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr)
|
||||
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
|
||||
{
|
||||
u32 value;
|
||||
|
||||
_iwl_read_targ_mem_words(bus, addr, &value, 1);
|
||||
_iwl_read_targ_mem_words(trans, addr, &value, 1);
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
|
||||
int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
|
||||
void *buf, int words)
|
||||
{
|
||||
unsigned long flags;
|
||||
int offs, result = 0;
|
||||
u32 *vals = buf;
|
||||
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(bus)) {
|
||||
iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr);
|
||||
spin_lock_irqsave(&trans->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(trans)) {
|
||||
iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
|
||||
wmb();
|
||||
|
||||
for (offs = 0; offs < words; offs++)
|
||||
iwl_write32(bus, HBUS_TARG_MEM_WDAT, vals[offs]);
|
||||
iwl_release_nic_access(bus);
|
||||
iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
|
||||
iwl_release_nic_access(trans);
|
||||
} else
|
||||
result = -EBUSY;
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, flags);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
|
||||
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
|
||||
{
|
||||
return _iwl_write_targ_mem_words(bus, addr, &val, 1);
|
||||
return _iwl_write_targ_mem_words(trans, addr, &val, 1);
|
||||
}
|
||||
|
@ -31,63 +31,63 @@
|
||||
|
||||
#include "iwl-devtrace.h"
|
||||
#include "iwl-shared.h"
|
||||
#include "iwl-bus.h"
|
||||
#include "iwl-trans.h"
|
||||
|
||||
static inline void iwl_write8(struct iwl_bus *bus, u32 ofs, u8 val)
|
||||
static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
|
||||
{
|
||||
trace_iwlwifi_dev_iowrite8(priv(bus), ofs, val);
|
||||
bus_write8(bus, ofs, val);
|
||||
trace_iwlwifi_dev_iowrite8(priv(trans), ofs, val);
|
||||
iwl_trans_write8(trans, ofs, val);
|
||||
}
|
||||
|
||||
static inline void iwl_write32(struct iwl_bus *bus, u32 ofs, u32 val)
|
||||
static inline void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
|
||||
{
|
||||
trace_iwlwifi_dev_iowrite32(priv(bus), ofs, val);
|
||||
bus_write32(bus, ofs, val);
|
||||
trace_iwlwifi_dev_iowrite32(priv(trans), ofs, val);
|
||||
iwl_trans_write32(trans, ofs, val);
|
||||
}
|
||||
|
||||
static inline u32 iwl_read32(struct iwl_bus *bus, u32 ofs)
|
||||
static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
|
||||
{
|
||||
u32 val = bus_read32(bus, ofs);
|
||||
trace_iwlwifi_dev_ioread32(priv(bus), ofs, val);
|
||||
u32 val = iwl_trans_read32(trans, ofs);
|
||||
trace_iwlwifi_dev_ioread32(priv(trans), ofs, val);
|
||||
return val;
|
||||
}
|
||||
|
||||
void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask);
|
||||
void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask);
|
||||
void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
|
||||
void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
|
||||
|
||||
int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
|
||||
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
|
||||
u32 bits, u32 mask, int timeout);
|
||||
int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
|
||||
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
|
||||
int timeout);
|
||||
|
||||
int iwl_grab_nic_access_silent(struct iwl_bus *bus);
|
||||
int iwl_grab_nic_access(struct iwl_bus *bus);
|
||||
void iwl_release_nic_access(struct iwl_bus *bus);
|
||||
int iwl_grab_nic_access_silent(struct iwl_trans *trans);
|
||||
int iwl_grab_nic_access(struct iwl_trans *trans);
|
||||
void iwl_release_nic_access(struct iwl_trans *trans);
|
||||
|
||||
u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg);
|
||||
void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value);
|
||||
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
|
||||
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
|
||||
|
||||
|
||||
u32 iwl_read_prph(struct iwl_bus *bus, u32 reg);
|
||||
void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val);
|
||||
void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
|
||||
void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
|
||||
u32 iwl_read_prph(struct iwl_trans *trans, u32 reg);
|
||||
void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
|
||||
void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
|
||||
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
|
||||
u32 bits, u32 mask);
|
||||
void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
|
||||
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
|
||||
|
||||
void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
			      void *buf, int words);

#define iwl_read_targ_mem_words(bus, addr, buf, bufsize) \
#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \
	do { \
		BUILD_BUG_ON((bufsize) % sizeof(u32)); \
		_iwl_read_targ_mem_words(bus, addr, buf, \
		_iwl_read_targ_mem_words(trans, addr, buf, \
					 (bufsize) / sizeof(u32));\
	} while (0)
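The BUILD_BUG_ON() in the macro above rejects, at compile time, any buffer whose size is not a whole multiple of sizeof(u32) before the word-based helper ever runs. The standalone C11 sketch below reproduces that guard with _Static_assert (the kernel's BUILD_BUG_ON plays the same role); the helper is a stub and every name in it is hypothetical.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Stub word-based helper; the real driver reads device memory here. */
static void demo_read_words(uint32_t addr, void *buf, int words)
{
	(void)addr;
	memset(buf, 0, (size_t)words * sizeof(uint32_t));
}

/* Compile-time guard: a buffer whose size is not a whole number of 32-bit
 * words fails to build instead of silently losing its tail. */
#define demo_read_mem(addr, buf, bufsize)				  \
	do {								  \
		_Static_assert((bufsize) % sizeof(uint32_t) == 0,	  \
			       "buffer must be a whole number of words"); \
		demo_read_words(addr, buf,				  \
				(int)((bufsize) / sizeof(uint32_t)));	  \
	} while (0)

int main(void)
{
	uint32_t log[16];

	demo_read_mem(0x800000, log, sizeof(log));	/* ok: 64 bytes */
	/* uint8_t odd[7]; demo_read_mem(0, odd, sizeof(odd)); -- would not compile */
	return 0;
}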
|
||||
|
||||
int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
|
||||
int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
|
||||
void *buf, int words);
|
||||
|
||||
u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
|
||||
int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
|
||||
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
|
||||
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
|
||||
#endif
|
||||
|
@ -71,7 +71,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
|
||||
/* Set led register off */
|
||||
void iwlagn_led_enable(struct iwl_priv *priv)
|
||||
{
|
||||
iwl_write32(bus(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
|
||||
iwl_write32(trans(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -107,9 +107,10 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
|
||||
};
|
||||
u32 reg;
|
||||
|
||||
reg = iwl_read32(bus(priv), CSR_LED_REG);
|
||||
reg = iwl_read32(trans(priv), CSR_LED_REG);
|
||||
if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
|
||||
iwl_write32(bus(priv), CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
|
||||
iwl_write32(trans(priv), CSR_LED_REG,
|
||||
reg & CSR_LED_BSM_CTRL_MSK);
|
||||
|
||||
return iwl_trans_send_cmd(trans(priv), &cmd);
|
||||
}
|
||||
@ -206,7 +207,7 @@ void iwl_leds_init(struct iwl_priv *priv)
|
||||
break;
|
||||
}
|
||||
|
||||
ret = led_classdev_register(bus(priv)->dev, &priv->led);
|
||||
ret = led_classdev_register(trans(priv)->dev, &priv->led);
|
||||
if (ret) {
|
||||
kfree(priv->led.name);
|
||||
return;
|
||||
|
@ -35,7 +35,6 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/if_arp.h>
|
||||
|
||||
@ -43,6 +42,7 @@
|
||||
|
||||
#include <asm/div64.h>
|
||||
|
||||
#include "iwl-ucode.h"
|
||||
#include "iwl-eeprom.h"
|
||||
#include "iwl-wifi.h"
|
||||
#include "iwl-dev.h"
|
||||
@ -196,7 +196,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
|
||||
WIPHY_FLAG_IBSS_RSN;
|
||||
|
||||
if (trans(priv)->ucode_wowlan.code.len &&
|
||||
device_can_wakeup(bus(priv)->dev)) {
|
||||
device_can_wakeup(trans(priv)->dev)) {
|
||||
hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
|
||||
WIPHY_WOWLAN_DISCONNECT |
|
||||
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
|
||||
@ -234,7 +234,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
|
||||
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
|
||||
&priv->bands[IEEE80211_BAND_5GHZ];
|
||||
|
||||
hw->wiphy->hw_version = bus_get_hw_id(bus(priv));
|
||||
hw->wiphy->hw_version = trans(priv)->hw_id;
|
||||
|
||||
iwl_leds_init(priv);
|
||||
|
||||
@ -346,9 +346,10 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
|
||||
flush_workqueue(priv->shrd->workqueue);
|
||||
|
||||
/* User space software may expect getting rfkill changes
|
||||
* even if interface is down */
|
||||
iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
|
||||
iwl_enable_rfkill_int(priv);
|
||||
* even if interface is down, trans->down will leave the RF
|
||||
* kill interrupt enabled
|
||||
*/
|
||||
iwl_trans_stop_hw(trans(priv));
|
||||
|
||||
IWL_DEBUG_MAC80211(priv, "leave\n");
|
||||
}
|
||||
@ -405,10 +406,10 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
device_set_wakeup_enable(bus(priv)->dev, true);
|
||||
device_set_wakeup_enable(trans(priv)->dev, true);
|
||||
|
||||
/* Now let the ucode operate on its own */
|
||||
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
|
||||
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
|
||||
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
|
||||
|
||||
goto out;
|
||||
@ -436,19 +437,19 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
|
||||
IWL_DEBUG_MAC80211(priv, "enter\n");
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
|
||||
iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
|
||||
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
|
||||
|
||||
base = priv->shrd->device_pointers.error_event_table;
|
||||
if (iwlagn_hw_valid_rtc_data_addr(base)) {
|
||||
spin_lock_irqsave(&bus(priv)->reg_lock, flags);
|
||||
ret = iwl_grab_nic_access_silent(bus(priv));
|
||||
spin_lock_irqsave(&trans(priv)->reg_lock, flags);
|
||||
ret = iwl_grab_nic_access_silent(trans(priv));
|
||||
if (ret == 0) {
|
||||
iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
|
||||
status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
|
||||
iwl_release_nic_access(bus(priv));
|
||||
iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, base);
|
||||
status = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
|
||||
iwl_release_nic_access(trans(priv));
|
||||
}
|
||||
spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
|
||||
spin_unlock_irqrestore(&trans(priv)->reg_lock, flags);
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
if (ret == 0) {
|
||||
@ -460,7 +461,8 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
|
||||
|
||||
if (priv->wowlan_sram)
|
||||
_iwl_read_targ_mem_words(
|
||||
bus(priv), 0x800000, priv->wowlan_sram,
|
||||
trans(priv), 0x800000,
|
||||
priv->wowlan_sram,
|
||||
trans->ucode_wowlan.data.len / 4);
|
||||
}
|
||||
#endif
|
||||
@ -471,7 +473,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
|
||||
|
||||
priv->shrd->wowlan = false;
|
||||
|
||||
device_set_wakeup_enable(bus(priv)->dev, false);
|
||||
device_set_wakeup_enable(trans(priv)->dev, false);
|
||||
|
||||
iwlagn_prepare_restart(priv);
|
||||
|
||||
|
@ -71,112 +71,6 @@
|
||||
#include "iwl-csr.h"
|
||||
#include "iwl-cfg.h"
|
||||
|
||||
/* PCI registers */
|
||||
#define PCI_CFG_RETRY_TIMEOUT 0x041
|
||||
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
|
||||
#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
|
||||
|
||||
struct iwl_pci_bus {
|
||||
/* basic pci-network driver stuff */
|
||||
struct pci_dev *pci_dev;
|
||||
|
||||
/* pci hardware address support */
|
||||
void __iomem *hw_base;
|
||||
};
|
||||
|
||||
#define IWL_BUS_GET_PCI_BUS(_iwl_bus) \
|
||||
((struct iwl_pci_bus *) ((_iwl_bus)->bus_specific))
|
||||
|
||||
#define IWL_BUS_GET_PCI_DEV(_iwl_bus) \
|
||||
((IWL_BUS_GET_PCI_BUS(_iwl_bus))->pci_dev)
|
||||
|
||||
static u16 iwl_pciexp_link_ctrl(struct iwl_bus *bus)
|
||||
{
|
||||
int pos;
|
||||
u16 pci_lnk_ctl;
|
||||
|
||||
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
|
||||
|
||||
pos = pci_pcie_cap(pci_dev);
|
||||
pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
|
||||
return pci_lnk_ctl;
|
||||
}
|
||||
|
||||
static bool iwl_pci_is_pm_supported(struct iwl_bus *bus)
|
||||
{
|
||||
u16 lctl = iwl_pciexp_link_ctrl(bus);
|
||||
|
||||
return !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
|
||||
}
|
||||
|
||||
static void iwl_pci_apm_config(struct iwl_bus *bus)
|
||||
{
|
||||
/*
|
||||
* HW bug W/A for instability in PCIe bus L0S->L1 transition.
|
||||
* Check if BIOS (or OS) enabled L1-ASPM on this device.
|
||||
* If so (likely), disable L0S, so device moves directly L0->L1;
|
||||
* costs negligible amount of power savings.
|
||||
* If not (unlikely), enable L0S, so there is at least some
|
||||
* power savings, even without L1.
|
||||
*/
|
||||
u16 lctl = iwl_pciexp_link_ctrl(bus);
|
||||
|
||||
if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
PCI_CFG_LINK_CTRL_VAL_L1_EN) {
/* L1-ASPM enabled; disable(!) L0S */
iwl_set_bit(bus, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n");
} else {
/* L1-ASPM disabled; enable(!) L0S */
iwl_clear_bit(bus, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n");
}
}

static void iwl_pci_get_hw_id_string(struct iwl_bus *bus, char buf[],
int buf_len)
{
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);

snprintf(buf, buf_len, "PCI ID: 0x%04X:0x%04X", pci_dev->device,
pci_dev->subsystem_device);
}

static u32 iwl_pci_get_hw_id(struct iwl_bus *bus)
{
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);

return (pci_dev->device << 16) + pci_dev->subsystem_device;
}

static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
}

static void iwl_pci_write32(struct iwl_bus *bus, u32 ofs, u32 val)
{
iowrite32(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
}

static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
{
u32 val = ioread32(IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
return val;
}

static const struct iwl_bus_ops bus_ops_pci = {
.get_pm_support = iwl_pci_is_pm_supported,
.apm_config = iwl_pci_apm_config,
.get_hw_id_string = iwl_pci_get_hw_id_string,
.get_hw_id = iwl_pci_get_hw_id,
.write8 = iwl_pci_write8,
.write32 = iwl_pci_write32,
.read32 = iwl_pci_read32,
};

#define IWL_PCI_DEVICE(dev, subdev, cfg) \
.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
@ -362,112 +256,61 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041

static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
struct iwl_bus *bus;
struct iwl_pci_bus *pci_bus;
u16 pci_cmd;
int err;

bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL);
bus = kzalloc(sizeof(*bus), GFP_KERNEL);
if (!bus) {
dev_printk(KERN_ERR, &pdev->dev,
"Couldn't allocate iwl_pci_bus");
err = -ENOMEM;
goto out_no_pci;
return -ENOMEM;
}

pci_bus = IWL_BUS_GET_PCI_BUS(bus);
pci_bus->pci_dev = pdev;
bus->shrd = kzalloc(sizeof(*bus->shrd), GFP_KERNEL);
if (!bus->shrd) {
dev_printk(KERN_ERR, &pdev->dev,
"Couldn't allocate iwl_shared");
err = -ENOMEM;
goto out_free_bus;
}

bus->shrd->bus = bus;

pci_set_drvdata(pdev, bus);

/* W/A - seems to solve weird behavior. We need to remove this if we
* don't want to stay in L1 all the time. This wastes a lot of power */
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
PCIE_LINK_STATE_CLKPM);

if (pci_enable_device(pdev)) {
err = -ENODEV;
goto out_no_pci;
#ifdef CONFIG_IWLWIFI_IDI
trans(bus) = iwl_trans_idi_alloc(bus->shrd, pdev, ent);
if (trans(bus) == NULL) {
err = -ENOMEM;
goto out_free_bus;
}

pci_set_master(pdev);

err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
err = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
/* both attempts failed: */
if (err) {
dev_printk(KERN_ERR, bus->dev,
"No suitable DMA available.\n");
goto out_pci_disable_device;
}
err = iwl_probe(bus, &trans_ops_idi, cfg);
#else
trans(bus) = iwl_trans_pcie_alloc(bus->shrd, pdev, ent);
if (trans(bus) == NULL) {
err = -ENOMEM;
goto out_free_bus;
}

err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed");
goto out_pci_disable_device;
}

pci_bus->hw_base = pci_iomap(pdev, 0, 0);
if (!pci_bus->hw_base) {
dev_printk(KERN_ERR, bus->dev, "pci_iomap failed");
err = -ENODEV;
goto out_pci_release_regions;
}

dev_printk(KERN_INFO, &pdev->dev,
"pci_resource_len = 0x%08llx\n",
(unsigned long long) pci_resource_len(pdev, 0));
dev_printk(KERN_INFO, &pdev->dev,
"pci_resource_base = %p\n", pci_bus->hw_base);

dev_printk(KERN_INFO, &pdev->dev,
"HW Revision ID = 0x%X\n", pdev->revision);

/* We disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

err = pci_enable_msi(pdev);
if (err)
dev_printk(KERN_ERR, &pdev->dev,
"pci_enable_msi failed(0X%x)", err);

/* TODO: Move this away, not needed if not MSI */
/* enable rfkill interrupt: hw bug w/a */
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
}

bus->dev = &pdev->dev;
bus->irq = pdev->irq;
bus->ops = &bus_ops_pci;

err = iwl_probe(bus, &trans_ops_pcie, cfg);
#endif
if (err)
goto out_disable_msi;
goto out_free_trans;

return 0;

out_disable_msi:
pci_disable_msi(pdev);
pci_iounmap(pdev, pci_bus->hw_base);
out_pci_release_regions:
out_free_trans:
iwl_trans_free(trans(bus));
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
out_free_bus:
kfree(bus->shrd);
kfree(bus);
return err;
}
@ -475,18 +318,14 @@ out_no_pci:
static void __devexit iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_bus *bus = pci_get_drvdata(pdev);
struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus);
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
struct iwl_shared *shrd = bus->shrd;

iwl_remove(shrd->priv);
iwl_trans_free(shrd->trans);

pci_disable_msi(pci_dev);
pci_iounmap(pci_dev, pci_bus->hw_base);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
pci_set_drvdata(pci_dev, NULL);
pci_set_drvdata(pdev, NULL);

kfree(bus->shrd);
kfree(bus);
}

@ -436,7 +436,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
priv->power_data.bus_pm = bus_get_pm_support(bus(priv));
priv->power_data.bus_pm = trans(priv)->pm_support;

priv->power_data.debug_sleep_level_override = -1;

@ -543,8 +543,6 @@ int iwlagn_hw_valid_rtc_data_addr(u32 addr);
void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state);
void iwl_nic_config(struct iwl_priv *priv);
void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb);
void iwl_apm_stop(struct iwl_priv *priv);
int iwl_apm_init(struct iwl_priv *priv);
void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand);
const char *get_cmd_string(u8 cmd);
bool iwl_check_for_ct_kill(struct iwl_priv *priv);

@ -79,6 +79,7 @@
#include "iwl-testmode.h"
#include "iwl-trans.h"
#include "iwl-bus.h"
#include "iwl-fh.h"

/* The TLVs used in the gnl message policy between the kernel module and
* user space application. iwl_testmode_gnl_msg_policy is to be carried
@ -208,7 +209,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
if (priv->testmode_trace.trace_enabled) {
if (priv->testmode_trace.cpu_addr &&
priv->testmode_trace.dma_addr)
dma_free_coherent(bus(priv)->dev,
dma_free_coherent(trans(priv)->dev,
priv->testmode_trace.total_size,
priv->testmode_trace.cpu_addr,
priv->testmode_trace.dma_addr);
@ -288,7 +289,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
{
struct iwl_priv *priv = hw->priv;
u32 ofs, val32;
u32 ofs, val32, cmd;
u8 val8;
struct sk_buff *skb;
int status = 0;
@ -300,9 +301,22 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);

switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
/* Allow access only to FH/CSR/HBUS in direct mode.
Since we don't have the upper bounds for the CSR and HBUS segments,
we will use only the upper bound of FH for sanity check. */
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
(ofs >= FH_MEM_UPPER_BOUND)) {
IWL_DEBUG_INFO(priv, "offset out of segment (0x0 - 0x%x)\n",
FH_MEM_UPPER_BOUND);
return -EINVAL;
}

switch (cmd) {
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
val32 = iwl_read_direct32(bus(priv), ofs);
val32 = iwl_read_direct32(trans(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);

skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@ -324,7 +338,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
} else {
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
iwl_write_direct32(bus(priv), ofs, val32);
iwl_write_direct32(trans(priv), ofs, val32);
}
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
@ -334,11 +348,11 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
} else {
val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
iwl_write8(bus(priv), ofs, val8);
iwl_write8(trans(priv), ofs, val8);
}
break;
case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
val32 = iwl_read_prph(bus(priv), ofs);
val32 = iwl_read_prph(trans(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);

skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@ -360,7 +374,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
} else {
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
iwl_write_prph(bus(priv), ofs, val32);
iwl_write_prph(trans(priv), ofs, val32);
}
break;
default:
@ -536,7 +550,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
break;

case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
devid = bus_get_hw_id(bus(priv));
devid = trans(priv)->hw_id;
IWL_INFO(priv, "hw version: 0x%x\n", devid);

skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@ -615,7 +629,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
struct iwl_priv *priv = hw->priv;
struct sk_buff *skb;
int status = 0;
struct device *dev = bus(priv)->dev;
struct device *dev = trans(priv)->dev;

switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
@ -814,7 +828,7 @@ static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_ERR(priv, "Error allocating memory\n");
return -ENOMEM;
}
_iwl_read_targ_mem_words(bus(priv), ofs,
_iwl_read_targ_mem_words(trans(priv), ofs,
priv->testmode_sram.buff_addr,
priv->testmode_sram.buff_size / 4);
priv->testmode_sram.num_chunks =

@ -201,6 +201,7 @@ struct iwl_tx_queue {
* @rxq: all the RX queue data
* @rx_replenish: work that will be called when buffers need to be allocated
* @trans: pointer to the generic transport area
* @irq_requested: true when the irq has been requested
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
* @kw: keep warm address
@ -211,6 +212,8 @@ struct iwl_tx_queue {
* @txq_ctx_active_msk: what queue is active
* queue_stopped: tracks what queue is stopped
* queue_stop_count: tracks what SW queue is stopped
* @pci_dev: basic pci-network driver stuff
* @hw_base: pci hardware address support
*/
struct iwl_trans_pcie {
struct iwl_rx_queue rxq;
@ -223,6 +226,7 @@ struct iwl_trans_pcie {
int ict_index;
u32 inta;
bool use_ict;
bool irq_requested;
struct tasklet_struct irq_tasklet;
struct isr_statistics isr_stats;

@ -241,6 +245,10 @@ struct iwl_trans_pcie {
#define IWL_MAX_HW_QUEUES 32
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
atomic_t queue_stop_count[4];

/* PCI bus related data */
struct pci_dev *pci_dev;
void __iomem *hw_base;
};

#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@ -258,7 +266,7 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
/*****************************************************
* ICT
******************************************************/
int iwl_reset_ict(struct iwl_trans *trans);
void iwl_reset_ict(struct iwl_trans *trans);
void iwl_disable_ict(struct iwl_trans *trans);
int iwl_alloc_isr_ict(struct iwl_trans *trans);
void iwl_free_isr_ict(struct iwl_trans *trans);
@ -311,12 +319,12 @@ static inline void iwl_disable_interrupts(struct iwl_trans *trans)
clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);

/* disable interrupts from uCode/NIC to host */
iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);

/* acknowledge/clear/reset any interrupts still pending
* from uCode or flow handler (Rx/Tx DMA) */
iwl_write32(bus(trans), CSR_INT, 0xffffffff);
iwl_write32(bus(trans), CSR_FH_INT_STATUS, 0xffffffff);
iwl_write32(trans, CSR_INT, 0xffffffff);
iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

@ -327,7 +335,7 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)

IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
iwl_write32(bus(trans), CSR_INT_MASK, trans_pcie->inta_mask);
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

/*

@ -35,6 +35,10 @@
|
||||
#include "iwl-io.h"
|
||||
#include "iwl-trans-pcie-int.h"
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_IDI
|
||||
#include "iwl-amfh.h"
|
||||
#endif
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
* RX path functions
|
||||
@ -140,30 +144,30 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
|
||||
/* shadow register enabled */
|
||||
/* Device expects a multiple of 8 */
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
iwl_write32(bus(trans), FH_RSCSR_CHNL0_WPTR, q->write_actual);
|
||||
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
|
||||
} else {
|
||||
/* If power-saving is in use, make sure device is awake */
|
||||
if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
|
||||
reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
|
||||
reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
|
||||
|
||||
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
|
||||
IWL_DEBUG_INFO(trans,
|
||||
"Rx queue requesting wakeup,"
|
||||
" GP1 = 0x%x\n", reg);
|
||||
iwl_set_bit(bus(trans), CSR_GP_CNTRL,
|
||||
iwl_set_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
|
||||
iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
|
||||
q->write_actual);
|
||||
|
||||
/* Else device is assumed to be awake */
|
||||
} else {
|
||||
/* Device expects a multiple of 8 */
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
|
||||
iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
|
||||
q->write_actual);
|
||||
}
|
||||
}
|
||||
@ -308,7 +312,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
|
||||
BUG_ON(rxb->page);
|
||||
rxb->page = page;
|
||||
/* Get physical address of the RB */
|
||||
rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
|
||||
rxb->page_dma = dma_map_page(trans->dev, page, 0,
|
||||
PAGE_SIZE << hw_params(trans).rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
/* dma address must be no more than 36 bits */
|
||||
@ -414,7 +418,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
|
||||
|
||||
rxq->queue[i] = NULL;
|
||||
|
||||
dma_unmap_page(bus(trans)->dev, rxb->page_dma,
|
||||
dma_unmap_page(trans->dev, rxb->page_dma,
|
||||
PAGE_SIZE << hw_params(trans).rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
pkt = rxb_addr(rxb);
|
||||
@ -485,7 +489,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
|
||||
* rx_free list for reuse later. */
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
if (rxb->page != NULL) {
|
||||
rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
|
||||
rxb->page_dma = dma_map_page(trans->dev, rxb->page,
|
||||
0, PAGE_SIZE <<
|
||||
hw_params(trans).rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
@ -612,7 +616,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
|
||||
return;
|
||||
}
|
||||
|
||||
iwl_read_targ_mem_words(bus(priv), base, &table, sizeof(table));
|
||||
iwl_read_targ_mem_words(trans(priv), base, &table, sizeof(table));
|
||||
|
||||
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
|
||||
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
|
||||
@ -673,9 +677,9 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
|
||||
struct iwl_priv *priv = priv(trans);
|
||||
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
|
||||
if (cfg(priv)->internal_wimax_coex &&
|
||||
(!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) &
|
||||
(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
|
||||
APMS_CLK_VAL_MRB_FUNC_MODE) ||
|
||||
(iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) &
|
||||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
|
||||
APMG_PS_CTRL_VAL_RESET_REQ))) {
|
||||
/*
|
||||
* Keep the restart process from trying to send host
|
||||
@ -741,18 +745,18 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
|
||||
ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
|
||||
|
||||
/* Make sure device is powered up for SRAM reads */
|
||||
spin_lock_irqsave(&bus(trans)->reg_lock, reg_flags);
|
||||
iwl_grab_nic_access(bus(trans));
|
||||
spin_lock_irqsave(&trans->reg_lock, reg_flags);
|
||||
iwl_grab_nic_access(trans);
|
||||
|
||||
/* Set starting address; reads will auto-increment */
|
||||
iwl_write32(bus(trans), HBUS_TARG_MEM_RADDR, ptr);
|
||||
iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
|
||||
rmb();
|
||||
|
||||
/* "time" is actually "data" for mode 0 (no timestamp).
|
||||
* place event id # at far right for easier visual parsing. */
|
||||
for (i = 0; i < num_events; i++) {
|
||||
ev = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
|
||||
time = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
|
||||
ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
||||
time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
||||
if (mode == 0) {
|
||||
/* data, ev */
|
||||
if (bufsz) {
|
||||
@ -766,7 +770,7 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
|
||||
time, ev);
|
||||
}
|
||||
} else {
|
||||
data = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
|
||||
data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
||||
if (bufsz) {
|
||||
pos += scnprintf(*buf + pos, bufsz - pos,
|
||||
"EVT_LOGT:%010u:0x%08x:%04u\n",
|
||||
@ -781,8 +785,8 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
|
||||
}
|
||||
|
||||
/* Allow device to power down */
|
||||
iwl_release_nic_access(bus(trans));
|
||||
spin_unlock_irqrestore(&bus(trans)->reg_lock, reg_flags);
|
||||
iwl_release_nic_access(trans);
|
||||
spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
|
||||
return pos;
|
||||
}
|
||||
|
||||
@ -859,10 +863,10 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
|
||||
}
|
||||
|
||||
/* event log header */
|
||||
capacity = iwl_read_targ_mem(bus(trans), base);
|
||||
mode = iwl_read_targ_mem(bus(trans), base + (1 * sizeof(u32)));
|
||||
num_wraps = iwl_read_targ_mem(bus(trans), base + (2 * sizeof(u32)));
|
||||
next_entry = iwl_read_targ_mem(bus(trans), base + (3 * sizeof(u32)));
|
||||
capacity = iwl_read_targ_mem(trans, base);
|
||||
mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
|
||||
num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
|
||||
next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
|
||||
|
||||
if (capacity > logsize) {
|
||||
IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
|
||||
@ -958,7 +962,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
|
||||
* hardware bugs here by ACKing all the possible interrupts so that
|
||||
* interrupt coalescing can still be achieved.
|
||||
*/
|
||||
iwl_write32(bus(trans), CSR_INT,
|
||||
iwl_write32(trans, CSR_INT,
|
||||
trans_pcie->inta | ~trans_pcie->inta_mask);
|
||||
|
||||
inta = trans_pcie->inta;
|
||||
@ -966,7 +970,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
if (iwl_get_debug_level(trans->shrd) & IWL_DL_ISR) {
|
||||
/* just for debug */
|
||||
inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
|
||||
inta_mask = iwl_read32(trans, CSR_INT_MASK);
|
||||
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
|
||||
inta, inta_mask);
|
||||
}
|
||||
@ -1014,7 +1018,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
|
||||
/* HW RF KILL switch toggled */
|
||||
if (inta & CSR_INT_BIT_RF_KILL) {
|
||||
int hw_rf_kill = 0;
|
||||
if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
|
||||
if (!(iwl_read32(trans, CSR_GP_CNTRL) &
|
||||
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
|
||||
hw_rf_kill = 1;
|
||||
|
||||
@ -1078,12 +1082,12 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
|
||||
IWL_DEBUG_ISR(trans, "Rx interrupt\n");
|
||||
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
|
||||
handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
|
||||
iwl_write32(bus(trans), CSR_FH_INT_STATUS,
|
||||
iwl_write32(trans, CSR_FH_INT_STATUS,
|
||||
CSR_FH_INT_RX_MASK);
|
||||
}
|
||||
if (inta & CSR_INT_BIT_RX_PERIODIC) {
|
||||
handled |= CSR_INT_BIT_RX_PERIODIC;
|
||||
iwl_write32(bus(trans),
|
||||
iwl_write32(trans,
|
||||
CSR_INT, CSR_INT_BIT_RX_PERIODIC);
|
||||
}
|
||||
/* Sending RX interrupt require many steps to be done in the
|
||||
@ -1098,10 +1102,13 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
|
||||
*/
|
||||
|
||||
/* Disable periodic interrupt; we use it as just a one-shot. */
|
||||
iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
|
||||
iwl_write8(trans, CSR_INT_PERIODIC_REG,
|
||||
CSR_INT_PERIODIC_DIS);
|
||||
#ifdef CONFIG_IWLWIFI_IDI
|
||||
iwl_amfh_rx_handler();
|
||||
#else
|
||||
iwl_rx_handle(trans);
|
||||
|
||||
#endif
|
||||
/*
|
||||
* Enable periodic interrupt in 8 msec only if we received
|
||||
* real RX interrupt (instead of just periodic int), to catch
|
||||
@ -1110,7 +1117,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
|
||||
* to extend the periodic interrupt; one-shot is enough.
|
||||
*/
|
||||
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
|
||||
iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
|
||||
iwl_write8(trans, CSR_INT_PERIODIC_REG,
|
||||
CSR_INT_PERIODIC_ENA);
|
||||
|
||||
isr_stats->rx++;
|
||||
@ -1118,7 +1125,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
|
||||
|
||||
/* This "Tx" DMA channel is used only for loading uCode */
|
||||
if (inta & CSR_INT_BIT_FH_TX) {
|
||||
iwl_write32(bus(trans), CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
|
||||
iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
|
||||
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
|
||||
isr_stats->tx++;
|
||||
handled |= CSR_INT_BIT_FH_TX;
|
||||
@ -1142,8 +1149,10 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
|
||||
if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status))
|
||||
iwl_enable_interrupts(trans);
|
||||
/* Re-enable RF_KILL if it occurred */
|
||||
else if (handled & CSR_INT_BIT_RF_KILL)
|
||||
iwl_enable_rfkill_int(priv(trans));
|
||||
else if (handled & CSR_INT_BIT_RF_KILL) {
|
||||
IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
|
||||
iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
|
||||
}
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
@ -1164,7 +1173,7 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (trans_pcie->ict_tbl) {
|
||||
dma_free_coherent(bus(trans)->dev, ICT_SIZE,
|
||||
dma_free_coherent(trans->dev, ICT_SIZE,
|
||||
trans_pcie->ict_tbl,
|
||||
trans_pcie->ict_tbl_dma);
|
||||
trans_pcie->ict_tbl = NULL;
|
||||
@ -1184,7 +1193,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
trans_pcie->ict_tbl =
|
||||
dma_alloc_coherent(bus(trans)->dev, ICT_SIZE,
|
||||
dma_alloc_coherent(trans->dev, ICT_SIZE,
|
||||
&trans_pcie->ict_tbl_dma,
|
||||
GFP_KERNEL);
|
||||
if (!trans_pcie->ict_tbl)
|
||||
@ -1213,7 +1222,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
|
||||
/* Device is going up inform it about using ICT interrupt table,
|
||||
* also we need to tell the driver to start using ICT interrupt.
|
||||
*/
|
||||
int iwl_reset_ict(struct iwl_trans *trans)
|
||||
void iwl_reset_ict(struct iwl_trans *trans)
|
||||
{
|
||||
u32 val;
|
||||
unsigned long flags;
|
||||
@ -1221,7 +1230,7 @@ int iwl_reset_ict(struct iwl_trans *trans)
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (!trans_pcie->ict_tbl)
|
||||
return 0;
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&trans->shrd->lock, flags);
|
||||
iwl_disable_interrupts(trans);
|
||||
@ -1235,14 +1244,12 @@ int iwl_reset_ict(struct iwl_trans *trans)
|
||||
|
||||
IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
|
||||
|
||||
iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
|
||||
iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
|
||||
trans_pcie->use_ict = true;
|
||||
trans_pcie->ict_index = 0;
|
||||
iwl_write32(bus(trans), CSR_INT, trans_pcie->inta_mask);
|
||||
iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
|
||||
iwl_enable_interrupts(trans);
|
||||
spin_unlock_irqrestore(&trans->shrd->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Device is going down disable ict interrupt usage */
|
||||
@ -1280,11 +1287,11 @@ static irqreturn_t iwl_isr(int irq, void *data)
|
||||
* back-to-back ISRs and sporadic interrupts from our NIC.
|
||||
* If we have something to service, the tasklet will re-enable ints.
|
||||
* If we *don't* have something, we'll re-enable before leaving here. */
|
||||
inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
|
||||
iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
|
||||
inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
|
||||
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
/* Discover which interrupts are active/pending */
|
||||
inta = iwl_read32(bus(trans), CSR_INT);
|
||||
inta = iwl_read32(trans, CSR_INT);
|
||||
|
||||
/* Ignore interrupt if there's nothing in NIC to service.
|
||||
* This may be due to IRQ shared with another device,
|
||||
@ -1303,7 +1310,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
|
||||
inta_fh = iwl_read32(bus(trans), CSR_FH_INT_STATUS);
|
||||
inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
|
||||
IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
|
||||
"fh 0x%08x\n", inta, inta_mask, inta_fh);
|
||||
}
|
||||
@ -1369,8 +1376,8 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
|
||||
* If we have something to service, the tasklet will re-enable ints.
|
||||
* If we *don't* have something, we'll re-enable before leaving here.
|
||||
*/
|
||||
inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
|
||||
iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
|
||||
inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
|
||||
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
|
||||
/* Ignore interrupt if there's nothing in NIC to service.
|
||||
|
@ -100,7 +100,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
|
||||
|
||||
if (hw_params(trans).shadow_reg_enable) {
|
||||
/* shadow register enabled */
|
||||
iwl_write32(bus(trans), HBUS_TARG_WRPTR,
|
||||
iwl_write32(trans, HBUS_TARG_WRPTR,
|
||||
txq->q.write_ptr | (txq_id << 8));
|
||||
} else {
|
||||
/* if we're trying to save power */
|
||||
@ -108,18 +108,18 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
|
||||
/* wake up nic if it's powered down ...
|
||||
* uCode will wake up, and interrupt us again, so next
|
||||
* time we'll skip this part. */
|
||||
reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
|
||||
reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
|
||||
|
||||
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
|
||||
IWL_DEBUG_INFO(trans,
|
||||
"Tx queue %d requesting wakeup,"
|
||||
" GP1 = 0x%x\n", txq_id, reg);
|
||||
iwl_set_bit(bus(trans), CSR_GP_CNTRL,
|
||||
iwl_set_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
return;
|
||||
}
|
||||
|
||||
iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
|
||||
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
|
||||
txq->q.write_ptr | (txq_id << 8));
|
||||
|
||||
/*
|
||||
@ -128,7 +128,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
|
||||
* trying to tx (during RFKILL, we're not trying to tx).
|
||||
*/
|
||||
} else
|
||||
iwl_write32(bus(trans), HBUS_TARG_WRPTR,
|
||||
iwl_write32(trans, HBUS_TARG_WRPTR,
|
||||
txq->q.write_ptr | (txq_id << 8));
|
||||
}
|
||||
txq->need_update = 0;
|
||||
@ -190,14 +190,14 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
|
||||
|
||||
/* Unmap tx_cmd */
|
||||
if (num_tbs)
|
||||
dma_unmap_single(bus(trans)->dev,
|
||||
dma_unmap_single(trans->dev,
|
||||
dma_unmap_addr(meta, mapping),
|
||||
dma_unmap_len(meta, len),
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
/* Unmap chunks, if any. */
|
||||
for (i = 1; i < num_tbs; i++)
|
||||
dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
|
||||
dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
|
||||
iwl_tfd_tb_get_len(tfd, i), dma_dir);
|
||||
}
|
||||
|
||||
@ -383,14 +383,14 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
|
||||
tbl_dw_addr = trans_pcie->scd_base_addr +
|
||||
SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
|
||||
|
||||
tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);
|
||||
tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
|
||||
|
||||
if (txq_id & 0x1)
|
||||
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
|
||||
else
|
||||
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
|
||||
|
||||
iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);
|
||||
iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -399,7 +399,7 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
|
||||
{
|
||||
/* Simply stop the queue, but don't change any configuration;
|
||||
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
|
||||
iwl_write_prph(bus(trans),
|
||||
iwl_write_prph(trans,
|
||||
SCD_QUEUE_STATUS_BITS(txq_id),
|
||||
(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
|
||||
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
|
||||
@ -409,9 +409,9 @@ void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
|
||||
int txq_id, u32 index)
|
||||
{
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
|
||||
iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
|
||||
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
|
||||
(index & 0xff) | (txq_id << 8));
|
||||
iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
|
||||
iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
|
||||
}
|
||||
|
||||
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
|
||||
@ -423,7 +423,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
|
||||
int active =
|
||||
test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
|
||||
|
||||
iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
|
||||
iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
|
||||
(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
|
||||
(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
|
||||
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
|
||||
@ -431,9 +431,12 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
|
||||
|
||||
txq->sched_retry = scd_retry;
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
|
||||
active ? "Activate" : "Deactivate",
|
||||
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
|
||||
if (active)
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
|
||||
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
|
||||
else
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
|
||||
scd_retry ? "BA" : "AC/CMD", txq_id);
|
||||
}
|
||||
|
||||
static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
|
||||
@ -498,10 +501,10 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
||||
iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
|
||||
|
||||
/* Set this queue as a chain-building queue */
|
||||
iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));
|
||||
iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));
|
||||
|
||||
/* enable aggregations for the queue */
|
||||
iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));
|
||||
iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));
|
||||
|
||||
/* Place first TFD at index corresponding to start sequence number.
|
||||
* Assumes that ssn_idx is valid (!= 0xFFF) */
|
||||
@ -510,7 +513,7 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
||||
iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
|
||||
|
||||
/* Set up Tx window size and frame limit for this queue */
|
||||
iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
|
||||
iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
|
||||
SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
|
||||
sizeof(u32),
|
||||
((frame_limit <<
|
||||
@ -520,7 +523,7 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
||||
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
|
||||
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
|
||||
|
||||
iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
|
||||
iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
|
||||
|
||||
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
|
||||
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
|
||||
@ -584,7 +587,7 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
|
||||
|
||||
iwlagn_tx_queue_stop_scheduler(trans, txq_id);
|
||||
|
||||
iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
|
||||
iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));
|
||||
|
||||
trans_pcie->agg_txq[sta_id][tid] = 0;
|
||||
trans_pcie->txq[txq_id].q.read_ptr = 0;
|
||||
@ -592,7 +595,7 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
|
||||
/* supposes that ssn_idx is valid (!= 0xFFF) */
|
||||
iwl_trans_set_wr_ptrs(trans, txq_id, 0);
|
||||
|
||||
iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
|
||||
iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
|
||||
iwl_txq_ctx_deactivate(trans_pcie, txq_id);
|
||||
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
|
||||
return 0;
|
||||
@ -725,9 +728,9 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
|
||||
le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
|
||||
q->write_ptr, idx, trans->shrd->cmd_queue);
|
||||
|
||||
phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
|
||||
phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
|
||||
if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
|
||||
idx = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
@ -748,10 +751,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
|
||||
continue;
|
||||
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
|
||||
continue;
|
||||
phys_addr = dma_map_single(bus(trans)->dev,
|
||||
phys_addr = dma_map_single(trans->dev,
|
||||
(void *)cmd->data[i],
|
||||
cmd->len[i], DMA_BIDIRECTIONAL);
|
||||
if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
|
||||
if (dma_mapping_error(trans->dev, phys_addr)) {
|
||||
iwlagn_unmap_tfd(trans, out_meta,
|
||||
&txq->tfds[q->write_ptr],
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
File diff suppressed because it is too large
@ -131,16 +131,26 @@ struct iwl_host_cmd {
|
||||
u8 id;
|
||||
};
|
||||
|
||||
/* one for each uCode image (inst/data, boot/init/runtime) */
|
||||
struct fw_desc {
|
||||
dma_addr_t p_addr; /* hardware address */
|
||||
void *v_addr; /* software address */
|
||||
u32 len; /* size in bytes */
|
||||
};
|
||||
|
||||
struct fw_img {
|
||||
struct fw_desc code; /* firmware code image */
|
||||
struct fw_desc data; /* firmware data image */
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_trans_ops - transport specific operations
|
||||
* @alloc: allocates the meta data (not the queues themselves)
|
||||
* @request_irq: requests IRQ - will be called before the FW load in probe flow
|
||||
* @start_device: allocates and inits all the resources for the transport
|
||||
* layer.
|
||||
* @prepare_card_hw: claim the ownership on the HW. Will be called during
|
||||
* probe.
|
||||
* @tx_start: starts and configures all the Tx fifo - usually done once the fw
|
||||
* is alive.
|
||||
* @start_hw: starts the HW- from that point on, the HW can send interrupts
|
||||
* @stop_hw: stops the HW- from that point on, the HW will be in low power but
|
||||
* will still issue interrupt if the HW RF kill is triggered.
|
||||
* @start_fw: allocates and inits all the resources for the transport
|
||||
* layer. Also kick a fw image. This handler may sleep.
|
||||
* @fw_alive: called when the fw sends alive notification
|
||||
* @wake_any_queue: wake all the queues of a specfic context IWL_RXON_CTX_*
|
||||
* @stop_device:stops the whole device (embedded CPU put to reset)
|
||||
* @send_cmd:send a host command
|
||||
@ -150,7 +160,6 @@ struct iwl_host_cmd {
|
||||
* @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
|
||||
* ready and a successful ADDBA response has been received.
|
||||
* @tx_agg_disable: de-configure a Tx queue to send AMPDUs
|
||||
* @kick_nic: remove the RESET from the embedded CPU and let it run
|
||||
* @free: release all the ressource for the transport layer itself such as
|
||||
* irq, tasklet etc...
|
||||
* @stop_queue: stop a specific queue
|
||||
@ -160,15 +169,17 @@ struct iwl_host_cmd {
|
||||
* automatically deleted.
|
||||
* @suspend: stop the device unless WoWLAN is configured
|
||||
* @resume: resume activity of the device
|
||||
* @write8: write a u8 to a register at offset ofs from the BAR
|
||||
* @write32: write a u32 to a register at offset ofs from the BAR
|
||||
* @read32: read a u32 register at offset ofs from the BAR
|
||||
*/
|
||||
struct iwl_trans_ops {
|
||||
|
||||
struct iwl_trans *(*alloc)(struct iwl_shared *shrd);
|
||||
int (*request_irq)(struct iwl_trans *iwl_trans);
|
||||
int (*start_device)(struct iwl_trans *trans);
|
||||
int (*prepare_card_hw)(struct iwl_trans *trans);
|
||||
int (*start_hw)(struct iwl_trans *iwl_trans);
|
||||
void (*stop_hw)(struct iwl_trans *iwl_trans);
|
||||
int (*start_fw)(struct iwl_trans *trans, struct fw_img *fw);
|
||||
void (*fw_alive)(struct iwl_trans *trans);
|
||||
void (*stop_device)(struct iwl_trans *trans);
|
||||
void (*tx_start)(struct iwl_trans *trans);
|
||||
|
||||
void (*wake_any_queue)(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx,
|
||||
@ -191,8 +202,6 @@ struct iwl_trans_ops {
|
||||
enum iwl_rxon_context_id ctx, int sta_id, int tid,
|
||||
int frame_limit, u16 ssn);
|
||||
|
||||
void (*kick_nic)(struct iwl_trans *trans);
|
||||
|
||||
void (*free)(struct iwl_trans *trans);
|
||||
|
||||
void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
|
||||
@ -204,18 +213,9 @@ struct iwl_trans_ops {
|
||||
int (*suspend)(struct iwl_trans *trans);
|
||||
int (*resume)(struct iwl_trans *trans);
|
||||
#endif
|
||||
};
|
||||
|
||||
/* one for each uCode image (inst/data, boot/init/runtime) */
|
||||
struct fw_desc {
|
||||
dma_addr_t p_addr; /* hardware address */
|
||||
void *v_addr; /* software address */
|
||||
u32 len; /* size in bytes */
|
||||
};
|
||||
|
||||
struct fw_img {
|
||||
struct fw_desc code; /* firmware code image */
|
||||
struct fw_desc data; /* firmware data image */
|
||||
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
|
||||
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
|
||||
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
|
||||
};
|
||||
|
||||
/* Opaque calibration results */
|
||||
@ -231,17 +231,31 @@ struct iwl_calib_result {
|
||||
* @ops - pointer to iwl_trans_ops
|
||||
* @shrd - pointer to iwl_shared which holds shared data from the upper layer
|
||||
* @hcmd_lock: protects HCMD
|
||||
* @reg_lock - protect hw register access
|
||||
* @dev - pointer to struct device * that represents the device
|
||||
* @irq - the irq number for the device
|
||||
* @hw_id: a u32 with the ID of the device / subdevice.
|
||||
* Set during transport allocation.
|
||||
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
|
||||
* @ucode_write_complete: indicates that the ucode has been copied.
|
||||
* @ucode_rt: run time ucode image
|
||||
* @ucode_init: init ucode image
|
||||
* @ucode_wowlan: wake on wireless ucode image (optional)
|
||||
* @nvm_device_type: indicates OTP or eeprom
|
||||
* @pm_support: set to true in start_hw if link pm is supported
|
||||
* @calib_results: list head for init calibration results
|
||||
*/
|
||||
struct iwl_trans {
|
||||
const struct iwl_trans_ops *ops;
|
||||
struct iwl_shared *shrd;
|
||||
spinlock_t hcmd_lock;
|
||||
spinlock_t reg_lock;
|
||||
|
||||
struct device *dev;
|
||||
unsigned int irq;
|
||||
u32 hw_rev;
|
||||
u32 hw_id;
|
||||
char hw_id_str[52];
|
||||
|
||||
u8 ucode_write_complete; /* the image write is complete */
|
||||
struct fw_img ucode_rt;
|
||||
@ -250,6 +264,7 @@ struct iwl_trans {
|
||||
|
||||
/* eeprom related variables */
|
||||
int nvm_device_type;
|
||||
bool pm_support;
|
||||
|
||||
/* init calibration results */
|
||||
struct list_head calib_results;
|
||||
@ -259,19 +274,26 @@ struct iwl_trans {
|
||||
char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
|
||||
};
|
||||
|
||||
static inline int iwl_trans_request_irq(struct iwl_trans *trans)
|
||||
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->ops->request_irq(trans);
|
||||
return trans->ops->start_hw(trans);
|
||||
}
|
||||
|
||||
static inline int iwl_trans_start_device(struct iwl_trans *trans)
|
||||
static inline void iwl_trans_stop_hw(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->ops->start_device(trans);
|
||||
trans->ops->stop_hw(trans);
|
||||
}
|
||||
|
||||
static inline int iwl_trans_prepare_card_hw(struct iwl_trans *trans)
|
||||
static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->ops->prepare_card_hw(trans);
|
||||
trans->ops->fw_alive(trans);
|
||||
}
|
||||
|
||||
static inline int iwl_trans_start_fw(struct iwl_trans *trans, struct fw_img *fw)
|
||||
{
|
||||
might_sleep();
|
||||
|
||||
return trans->ops->start_fw(trans, fw);
|
||||
}
|
||||
|
||||
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
|
||||
@ -279,11 +301,6 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
|
||||
trans->ops->stop_device(trans);
|
||||
}
|
||||
|
||||
static inline void iwl_trans_tx_start(struct iwl_trans *trans)
|
||||
{
|
||||
trans->ops->tx_start(trans);
|
||||
}
|
||||
|
||||
static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx,
|
||||
const char *msg)
|
||||
@ -337,11 +354,6 @@ static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
|
||||
trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
|
||||
}
|
||||
|
||||
static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
|
||||
{
|
||||
trans->ops->kick_nic(trans);
|
||||
}
|
||||
|
||||
static inline void iwl_trans_free(struct iwl_trans *trans)
|
||||
{
|
||||
trans->ops->free(trans);
|
||||
@ -380,13 +392,24 @@ static inline int iwl_trans_resume(struct iwl_trans *trans)
|
||||
}
|
||||
#endif
|
||||
|
||||
/*****************************************************
|
||||
* Transport layers implementations
|
||||
******************************************************/
|
||||
extern const struct iwl_trans_ops trans_ops_pcie;
|
||||
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
|
||||
{
|
||||
trans->ops->write8(trans, ofs, val);
|
||||
}
|
||||
|
||||
int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
|
||||
const void *data, size_t len);
|
||||
static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
|
||||
{
|
||||
trans->ops->write32(trans, ofs, val);
|
||||
}
|
||||
|
||||
static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
|
||||
{
|
||||
return trans->ops->read32(trans, ofs);
|
||||
}
|
||||
|
||||
/*****************************************************
|
||||
* Utils functions
|
||||
******************************************************/
|
||||
void iwl_dealloc_ucode(struct iwl_trans *trans);
|
||||
|
||||
int iwl_send_calib_results(struct iwl_trans *trans);
|
||||
@ -394,4 +417,18 @@ int iwl_calib_set(struct iwl_trans *trans,
|
||||
const struct iwl_calib_hdr *cmd, int len);
|
||||
void iwl_calib_free_results(struct iwl_trans *trans);
|
||||
|
||||
/*****************************************************
|
||||
* Transport layers implementations + their allocation function
|
||||
******************************************************/
|
||||
struct pci_dev;
|
||||
struct pci_device_id;
|
||||
extern const struct iwl_trans_ops trans_ops_pcie;
|
||||
struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
|
||||
struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent);
|
||||
|
||||
extern const struct iwl_trans_ops trans_ops_idi;
|
||||
struct iwl_trans *iwl_trans_idi_alloc(struct iwl_shared *shrd,
|
||||
void *pdev_void,
|
||||
const void *ent_void);
|
||||
#endif /* __iwl_trans_h__ */
|
||||
|
@ -32,7 +32,9 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/firmware.h>
|
||||
|
||||
#include "iwl-ucode.h"
|
||||
#include "iwl-wifi.h"
|
||||
#include "iwl-dev.h"
|
||||
#include "iwl-core.h"
|
||||
@ -80,29 +82,29 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
|
||||
static void iwl_free_fw_desc(struct iwl_trans *trans, struct fw_desc *desc)
|
||||
{
|
||||
if (desc->v_addr)
|
||||
dma_free_coherent(bus->dev, desc->len,
|
||||
dma_free_coherent(trans->dev, desc->len,
|
||||
desc->v_addr, desc->p_addr);
|
||||
desc->v_addr = NULL;
|
||||
desc->len = 0;
|
||||
}
|
||||
|
||||
static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
|
||||
static void iwl_free_fw_img(struct iwl_trans *trans, struct fw_img *img)
|
||||
{
|
||||
iwl_free_fw_desc(bus, &img->code);
|
||||
iwl_free_fw_desc(bus, &img->data);
|
||||
iwl_free_fw_desc(trans, &img->code);
|
||||
iwl_free_fw_desc(trans, &img->data);
|
||||
}
|
||||
|
||||
void iwl_dealloc_ucode(struct iwl_trans *trans)
|
||||
{
|
||||
iwl_free_fw_img(bus(trans), &trans->ucode_rt);
|
||||
iwl_free_fw_img(bus(trans), &trans->ucode_init);
|
||||
iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
|
||||
iwl_free_fw_img(trans, &trans->ucode_rt);
|
||||
iwl_free_fw_img(trans, &trans->ucode_init);
|
||||
iwl_free_fw_img(trans, &trans->ucode_wowlan);
|
||||
}
|
||||
|
||||
int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
|
||||
static int iwl_alloc_fw_desc(struct iwl_trans *trans, struct fw_desc *desc,
|
||||
const void *data, size_t len)
|
||||
{
|
||||
if (!len) {
|
||||
@ -110,7 +112,7 @@ int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
desc->v_addr = dma_alloc_coherent(bus->dev, len,
|
||||
desc->v_addr = dma_alloc_coherent(trans->dev, len,
|
||||
&desc->p_addr, GFP_KERNEL);
|
||||
if (!desc->v_addr)
|
||||
return -ENOMEM;
|
||||
@ -120,59 +122,6 @@ int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* ucode
|
||||
*/
|
||||
static int iwl_load_section(struct iwl_trans *trans, const char *name,
|
||||
struct fw_desc *image, u32 dst_addr)
|
||||
{
|
||||
struct iwl_bus *bus = bus(trans);
|
||||
dma_addr_t phy_addr = image->p_addr;
|
||||
u32 byte_cnt = image->len;
|
||||
int ret;
|
||||
|
||||
trans->ucode_write_complete = 0;
|
||||
|
||||
iwl_write_direct32(bus,
|
||||
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
|
||||
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
|
||||
|
||||
iwl_write_direct32(bus,
|
||||
FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
|
||||
|
||||
iwl_write_direct32(bus,
|
||||
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
|
||||
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
|
||||
|
||||
iwl_write_direct32(bus,
|
||||
FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
|
||||
(iwl_get_dma_hi_addr(phy_addr)
|
||||
<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
|
||||
|
||||
iwl_write_direct32(bus,
|
||||
FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
|
||||
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
|
||||
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
|
||||
FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
|
||||
|
||||
iwl_write_direct32(bus,
|
||||
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
|
||||
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
|
||||
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
|
||||
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
|
||||
|
||||
IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
|
||||
ret = wait_event_timeout(trans->shrd->wait_command_queue,
|
||||
trans->ucode_write_complete, 5 * HZ);
|
||||
if (!ret) {
|
||||
IWL_ERR(trans, "Could not load the %s uCode section\n",
|
||||
name);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
|
||||
enum iwl_ucode_type ucode_type)
|
||||
{
|
||||
@ -189,28 +138,6 @@ static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int iwl_load_given_ucode(struct iwl_trans *trans,
|
||||
enum iwl_ucode_type ucode_type)
|
||||
{
|
||||
int ret = 0;
|
||||
struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
|
||||
|
||||
|
||||
if (!image) {
|
||||
IWL_ERR(trans, "Invalid ucode requested (%d)\n",
|
||||
ucode_type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = iwl_load_section(trans, "INST", &image->code,
|
||||
IWLAGN_RTC_INST_LOWER_BOUND);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return iwl_load_section(trans, "DATA", &image->data,
|
||||
IWLAGN_RTC_DATA_LOWER_BOUND);
|
||||
}
|
||||
|
||||
/*
|
||||
* Calibration
|
||||
*/
|
||||
@ -447,7 +374,7 @@ static int iwl_alive_notify(struct iwl_trans *trans)
|
||||
if (!priv->tx_cmd_pool)
|
||||
return -ENOMEM;
|
||||
|
||||
iwl_trans_tx_start(trans);
|
||||
iwl_trans_fw_alive(trans);
|
||||
for_each_context(priv, ctx)
|
||||
ctx->last_tx_rejected = false;
|
||||
|
||||
@ -470,7 +397,7 @@ static int iwl_alive_notify(struct iwl_trans *trans)
|
||||
* using sample data 100 bytes apart. If these sample points are good,
|
||||
* it's a pretty good bet that everything between them is good, too.
|
||||
*/
|
||||
static int iwl_verify_inst_sparse(struct iwl_bus *bus,
|
||||
static int iwl_verify_inst_sparse(struct iwl_trans *trans,
|
||||
struct fw_desc *fw_desc)
|
||||
{
|
||||
__le32 *image = (__le32 *)fw_desc->v_addr;
|
||||
@ -478,15 +405,15 @@ static int iwl_verify_inst_sparse(struct iwl_bus *bus,
|
||||
u32 val;
|
||||
u32 i;
|
||||
|
||||
IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
|
||||
IWL_DEBUG_FW(trans, "ucode inst image size is %u\n", len);
|
||||
|
||||
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
|
||||
/* read data comes through single port, auto-incr addr */
|
||||
/* NOTE: Use the debugless read so we don't flood kernel log
|
||||
* if IWL_DL_IO is set */
|
||||
iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
|
||||
iwl_write_direct32(trans, HBUS_TARG_MEM_RADDR,
|
||||
i + IWLAGN_RTC_INST_LOWER_BOUND);
|
||||
val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
|
||||
val = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
||||
if (val != le32_to_cpu(*image))
|
||||
return -EIO;
|
||||
}
|
||||
@ -494,7 +421,7 @@ static int iwl_verify_inst_sparse(struct iwl_bus *bus,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iwl_print_mismatch_inst(struct iwl_bus *bus,
|
||||
static void iwl_print_mismatch_inst(struct iwl_trans *trans,
|
||||
struct fw_desc *fw_desc)
|
||||
{
|
||||
__le32 *image = (__le32 *)fw_desc->v_addr;
|
||||
@ -503,18 +430,18 @@ static void iwl_print_mismatch_inst(struct iwl_bus *bus,
|
||||
u32 offs;
|
||||
int errors = 0;
|
||||
|
||||
IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
|
||||
IWL_DEBUG_FW(trans, "ucode inst image size is %u\n", len);
|
||||
|
||||
iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
|
||||
iwl_write_direct32(trans, HBUS_TARG_MEM_RADDR,
|
||||
IWLAGN_RTC_INST_LOWER_BOUND);
|
||||
|
||||
for (offs = 0;
|
||||
offs < len && errors < 20;
|
||||
offs += sizeof(u32), image++) {
|
||||
/* read data comes through single port, auto-incr addr */
|
||||
val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
|
||||
val = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
||||
if (val != le32_to_cpu(*image)) {
|
||||
IWL_ERR(bus, "uCode INST section at "
|
||||
IWL_ERR(trans, "uCode INST section at "
|
||||
"offset 0x%x, is 0x%x, s/b 0x%x\n",
|
||||
offs, val, le32_to_cpu(*image));
|
||||
errors++;
|
||||
@ -536,14 +463,14 @@ static int iwl_verify_ucode(struct iwl_trans *trans,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
|
||||
if (!iwl_verify_inst_sparse(trans, &img->code)) {
|
||||
IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
|
||||
|
||||
iwl_print_mismatch_inst(bus(trans), &img->code);
|
||||
iwl_print_mismatch_inst(trans, &img->code);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@ -647,28 +574,27 @@ int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
|
||||
{
|
||||
struct iwl_notification_wait alive_wait;
|
||||
struct iwl_alive_data alive_data;
|
||||
struct fw_img *fw;
|
||||
int ret;
|
||||
enum iwl_ucode_type old_type;
|
||||
|
||||
ret = iwl_trans_start_device(trans);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
|
||||
iwl_alive_fn, &alive_data);
|
||||
|
||||
old_type = trans->shrd->ucode_type;
|
||||
trans->shrd->ucode_type = ucode_type;
|
||||
fw = iwl_get_ucode_image(trans, ucode_type);
|
||||
|
||||
ret = iwl_load_given_ucode(trans, ucode_type);
|
||||
if (!fw)
|
||||
return -EINVAL;
|
||||
|
||||
ret = iwl_trans_start_fw(trans, fw);
|
||||
if (ret) {
|
||||
trans->shrd->ucode_type = old_type;
|
||||
iwl_remove_notification(trans->shrd, &alive_wait);
|
||||
return ret;
|
||||
}
|
||||
|
||||
iwl_trans_kick_nic(trans);
|
||||
|
||||
/*
|
||||
* Some things may run in the background now, but we
|
||||
* just wait for the ALIVE notification here.
|
||||
@ -756,3 +682,609 @@ int iwl_run_init_ucode(struct iwl_trans *trans)
|
||||
iwl_trans_stop_device(trans);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
|
||||
|
||||
#define UCODE_EXPERIMENTAL_INDEX 100
|
||||
#define UCODE_EXPERIMENTAL_TAG "exp"
|
||||
|
||||
int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
|
||||
{
|
||||
const char *name_pre = cfg(priv)->fw_name_pre;
|
||||
char tag[8];
|
||||
|
||||
if (first) {
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
|
||||
priv->fw_index = UCODE_EXPERIMENTAL_INDEX;
|
||||
strcpy(tag, UCODE_EXPERIMENTAL_TAG);
|
||||
} else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
|
||||
#endif
|
||||
priv->fw_index = cfg(priv)->ucode_api_max;
|
||||
sprintf(tag, "%d", priv->fw_index);
|
||||
} else {
|
||||
priv->fw_index--;
|
||||
sprintf(tag, "%d", priv->fw_index);
|
||||
}
|
||||
|
||||
if (priv->fw_index < cfg(priv)->ucode_api_min) {
|
||||
IWL_ERR(priv, "no suitable firmware found!\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
|
||||
|
||||
IWL_DEBUG_INFO(priv, "attempting to load firmware %s'%s'\n",
|
||||
(priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
|
||||
? "EXPERIMENTAL " : "",
|
||||
priv->firmware_name);
|
||||
|
||||
return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
|
||||
trans(priv)->dev,
|
||||
GFP_KERNEL, priv, iwl_ucode_callback);
|
||||
}
|
||||
|
||||
struct iwlagn_firmware_pieces {
|
||||
const void *inst, *data, *init, *init_data, *wowlan_inst, *wowlan_data;
|
||||
size_t inst_size, data_size, init_size, init_data_size,
|
||||
wowlan_inst_size, wowlan_data_size;
|
||||
|
||||
u32 build;
|
||||
|
||||
u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
|
||||
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
|
||||
};
|
||||
|
||||
static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
|
||||
const struct firmware *ucode_raw,
|
||||
struct iwlagn_firmware_pieces *pieces)
|
||||
{
|
||||
struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
|
||||
u32 api_ver, hdr_size;
|
||||
const u8 *src;
|
||||
|
||||
priv->ucode_ver = le32_to_cpu(ucode->ver);
|
||||
api_ver = IWL_UCODE_API(priv->ucode_ver);
|
||||
|
||||
switch (api_ver) {
|
||||
default:
|
||||
hdr_size = 28;
|
||||
if (ucode_raw->size < hdr_size) {
|
||||
IWL_ERR(priv, "File size too small!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pieces->build = le32_to_cpu(ucode->u.v2.build);
|
||||
pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
|
||||
pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
|
||||
pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
|
||||
pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
|
||||
src = ucode->u.v2.data;
|
||||
break;
|
||||
case 0:
|
||||
case 1:
|
||||
case 2:
|
||||
hdr_size = 24;
|
||||
if (ucode_raw->size < hdr_size) {
|
||||
IWL_ERR(priv, "File size too small!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pieces->build = 0;
|
||||
pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
|
||||
pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
|
||||
pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
|
||||
pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
|
||||
src = ucode->u.v1.data;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Verify size of file vs. image size info in file's header */
|
||||
if (ucode_raw->size != hdr_size + pieces->inst_size +
|
||||
pieces->data_size + pieces->init_size +
|
||||
pieces->init_data_size) {
|
||||
|
||||
IWL_ERR(priv,
|
||||
"uCode file size %d does not match expected size\n",
|
||||
(int)ucode_raw->size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pieces->inst = src;
|
||||
src += pieces->inst_size;
|
||||
pieces->data = src;
|
||||
src += pieces->data_size;
|
||||
pieces->init = src;
|
||||
src += pieces->init_size;
|
||||
pieces->init_data = src;
|
||||
src += pieces->init_data_size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iwlagn_load_firmware(struct iwl_priv *priv,
|
||||
const struct firmware *ucode_raw,
|
||||
struct iwlagn_firmware_pieces *pieces,
|
||||
struct iwlagn_ucode_capabilities *capa)
|
||||
{
|
||||
struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
|
||||
struct iwl_ucode_tlv *tlv;
|
||||
size_t len = ucode_raw->size;
|
||||
const u8 *data;
|
||||
int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
|
||||
int tmp;
|
||||
u64 alternatives;
|
||||
u32 tlv_len;
|
||||
enum iwl_ucode_tlv_type tlv_type;
|
||||
const u8 *tlv_data;
|
||||
|
||||
if (len < sizeof(*ucode)) {
|
||||
IWL_ERR(priv, "uCode has invalid length: %zd\n", len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
|
||||
IWL_ERR(priv, "invalid uCode magic: 0X%x\n",
|
||||
le32_to_cpu(ucode->magic));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check which alternatives are present, and "downgrade"
|
||||
* when the chosen alternative is not present, warning
|
||||
* the user when that happens. Some files may not have
|
||||
* any alternatives, so don't warn in that case.
|
||||
*/
|
||||
alternatives = le64_to_cpu(ucode->alternatives);
|
||||
tmp = wanted_alternative;
|
||||
if (wanted_alternative > 63)
|
||||
wanted_alternative = 63;
|
||||
while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
|
||||
wanted_alternative--;
|
||||
if (wanted_alternative && wanted_alternative != tmp)
|
||||
IWL_WARN(priv,
|
||||
"uCode alternative %d not available, choosing %d\n",
|
||||
tmp, wanted_alternative);
|
||||
|
||||
priv->ucode_ver = le32_to_cpu(ucode->ver);
|
||||
pieces->build = le32_to_cpu(ucode->build);
|
||||
data = ucode->data;
|
||||
|
||||
len -= sizeof(*ucode);
|
||||
|
||||
while (len >= sizeof(*tlv)) {
|
||||
u16 tlv_alt;
|
||||
|
||||
len -= sizeof(*tlv);
|
||||
tlv = (void *)data;
|
||||
|
||||
tlv_len = le32_to_cpu(tlv->length);
|
||||
tlv_type = le16_to_cpu(tlv->type);
|
||||
tlv_alt = le16_to_cpu(tlv->alternative);
|
||||
tlv_data = tlv->data;
|
||||
|
||||
if (len < tlv_len) {
|
||||
IWL_ERR(priv, "invalid TLV len: %zd/%u\n",
|
||||
len, tlv_len);
|
||||
return -EINVAL;
|
||||
}
|
||||
len -= ALIGN(tlv_len, 4);
|
||||
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
|
||||
|
||||
/*
|
||||
* Alternative 0 is always valid.
|
||||
*
|
||||
* Skip alternative TLVs that are not selected.
|
||||
*/
|
||||
if (tlv_alt != 0 && tlv_alt != wanted_alternative)
|
||||
continue;
|
||||
|
||||
switch (tlv_type) {
|
||||
case IWL_UCODE_TLV_INST:
|
||||
pieces->inst = tlv_data;
|
||||
pieces->inst_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_DATA:
|
||||
pieces->data = tlv_data;
|
||||
pieces->data_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_INIT:
|
||||
pieces->init = tlv_data;
|
||||
pieces->init_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_INIT_DATA:
|
||||
pieces->init_data = tlv_data;
|
||||
pieces->init_data_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_BOOT:
|
||||
IWL_ERR(priv, "Found unexpected BOOT ucode\n");
|
||||
break;
|
||||
case IWL_UCODE_TLV_PROBE_MAX_LEN:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
capa->max_probe_length =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_PAN:
|
||||
if (tlv_len)
|
||||
goto invalid_tlv_len;
|
||||
capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
|
||||
break;
|
||||
case IWL_UCODE_TLV_FLAGS:
|
||||
/* must be at least one u32 */
|
||||
if (tlv_len < sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
/* and a proper number of u32s */
|
||||
if (tlv_len % sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
/*
|
||||
* This driver only reads the first u32 as
|
||||
* right now no more features are defined,
|
||||
* if that changes then either the driver
|
||||
* will not work with the new firmware, or
|
||||
* it'll not take advantage of new features.
|
||||
*/
|
||||
capa->flags = le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->init_evtlog_ptr =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->init_evtlog_size =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->init_errlog_ptr =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->inst_evtlog_ptr =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->inst_evtlog_size =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
pieces->inst_errlog_ptr =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
|
||||
if (tlv_len)
|
||||
goto invalid_tlv_len;
|
||||
priv->enhance_sensitivity_table = true;
|
||||
break;
|
||||
case IWL_UCODE_TLV_WOWLAN_INST:
|
||||
pieces->wowlan_inst = tlv_data;
|
||||
pieces->wowlan_inst_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_WOWLAN_DATA:
|
||||
pieces->wowlan_data = tlv_data;
|
||||
pieces->wowlan_data_size = tlv_len;
|
||||
break;
|
||||
case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
|
||||
if (tlv_len != sizeof(u32))
|
||||
goto invalid_tlv_len;
|
||||
capa->standard_phy_calibration_size =
|
||||
le32_to_cpup((__le32 *)tlv_data);
|
||||
break;
|
||||
default:
|
||||
IWL_DEBUG_INFO(priv, "unknown TLV: %d\n", tlv_type);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (len) {
|
||||
IWL_ERR(priv, "invalid TLV after parsing: %zd\n", len);
|
||||
iwl_print_hex_dump(priv, IWL_DL_FW, (u8 *)data, len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
invalid_tlv_len:
|
||||
IWL_ERR(priv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
|
||||
iwl_print_hex_dump(priv, IWL_DL_FW, tlv_data, tlv_len);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* iwl_ucode_callback - callback when firmware was loaded
|
||||
*
|
||||
* If loaded successfully, copies the firmware into buffers
|
||||
* for the card to fetch (via DMA).
|
||||
*/
|
||||
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
|
||||
{
|
||||
struct iwl_priv *priv = context;
|
||||
struct iwl_ucode_header *ucode;
|
||||
int err;
|
||||
struct iwlagn_firmware_pieces pieces;
|
||||
const unsigned int api_max = cfg(priv)->ucode_api_max;
|
||||
unsigned int api_ok = cfg(priv)->ucode_api_ok;
|
||||
const unsigned int api_min = cfg(priv)->ucode_api_min;
|
||||
u32 api_ver;
|
||||
char buildstr[25];
|
||||
u32 build;
|
||||
struct iwlagn_ucode_capabilities ucode_capa = {
|
||||
.max_probe_length = 200,
|
||||
.standard_phy_calibration_size =
|
||||
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE,
|
||||
};
|
||||
|
||||
if (!api_ok)
|
||||
api_ok = api_max;
|
||||
|
||||
memset(&pieces, 0, sizeof(pieces));
|
||||
|
||||
if (!ucode_raw) {
|
||||
if (priv->fw_index <= api_ok)
|
||||
IWL_ERR(priv,
|
||||
"request for firmware file '%s' failed.\n",
|
||||
priv->firmware_name);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
|
||||
priv->firmware_name, ucode_raw->size);
|
||||
|
||||
/* Make sure that we got at least the API version number */
|
||||
if (ucode_raw->size < 4) {
|
||||
IWL_ERR(priv, "File size way too small!\n");
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
/* Data from ucode file: header followed by uCode images */
|
||||
ucode = (struct iwl_ucode_header *)ucode_raw->data;
|
||||
|
||||
if (ucode->ver)
|
||||
err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
|
||||
else
|
||||
err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
|
||||
&ucode_capa);
|
||||
|
||||
if (err)
|
||||
goto try_again;
|
||||
|
||||
api_ver = IWL_UCODE_API(priv->ucode_ver);
|
||||
build = pieces.build;
|
||||
|
||||
/*
|
||||
* api_ver should match the api version forming part of the
|
||||
* firmware filename ... but we don't check for that and only rely
|
||||
* on the API version read from firmware header from here on forward
|
||||
*/
|
||||
/* no api version check required for experimental uCode */
|
||||
if (priv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
|
||||
if (api_ver < api_min || api_ver > api_max) {
|
||||
IWL_ERR(priv,
|
||||
"Driver unable to support your firmware API. "
|
||||
"Driver supports v%u, firmware is v%u.\n",
|
||||
api_max, api_ver);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
if (api_ver < api_ok) {
|
||||
if (api_ok != api_max)
|
||||
IWL_ERR(priv, "Firmware has old API version, "
|
||||
"expected v%u through v%u, got v%u.\n",
|
||||
api_ok, api_max, api_ver);
|
||||
else
|
||||
IWL_ERR(priv, "Firmware has old API version, "
|
||||
"expected v%u, got v%u.\n",
|
||||
api_max, api_ver);
|
||||
IWL_ERR(priv, "New firmware can be obtained from "
|
||||
"http://www.intellinuxwireless.org/.\n");
|
||||
}
|
||||
}
|
||||
|
||||
if (build)
|
||||
sprintf(buildstr, " build %u%s", build,
|
||||
(priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
|
||||
? " (EXP)" : "");
|
||||
else
|
||||
buildstr[0] = '\0';
|
||||
|
||||
IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
|
||||
IWL_UCODE_MAJOR(priv->ucode_ver),
|
||||
IWL_UCODE_MINOR(priv->ucode_ver),
|
||||
IWL_UCODE_API(priv->ucode_ver),
|
||||
IWL_UCODE_SERIAL(priv->ucode_ver),
|
||||
buildstr);
|
||||
|
||||
snprintf(priv->hw->wiphy->fw_version,
|
||||
sizeof(priv->hw->wiphy->fw_version),
|
||||
"%u.%u.%u.%u%s",
|
||||
IWL_UCODE_MAJOR(priv->ucode_ver),
|
||||
IWL_UCODE_MINOR(priv->ucode_ver),
|
||||
IWL_UCODE_API(priv->ucode_ver),
|
||||
IWL_UCODE_SERIAL(priv->ucode_ver),
|
||||
buildstr);
|
||||
|
||||
/*
|
||||
* For any of the failures below (before allocating pci memory)
|
||||
* we will try to load a version with a smaller API -- maybe the
|
||||
* user just got a corrupted version of the latest API.
|
||||
*/
|
||||
|
||||
IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
|
||||
priv->ucode_ver);
|
||||
IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
|
||||
pieces.inst_size);
|
||||
IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
|
||||
pieces.data_size);
|
||||
IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
|
||||
pieces.init_size);
|
||||
IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
|
||||
pieces.init_data_size);
|
||||
|
||||
/* Verify that uCode images will fit in card's SRAM */
|
||||
if (pieces.inst_size > hw_params(priv).max_inst_size) {
|
||||
IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
|
||||
pieces.inst_size);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
if (pieces.data_size > hw_params(priv).max_data_size) {
|
||||
IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
|
||||
pieces.data_size);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
if (pieces.init_size > hw_params(priv).max_inst_size) {
|
||||
IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
|
||||
pieces.init_size);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
if (pieces.init_data_size > hw_params(priv).max_data_size) {
|
||||
IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
|
||||
pieces.init_data_size);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
/* Allocate ucode buffers for card's bus-master loading ... */
|
||||
|
||||
/* Runtime instructions and 2 copies of data:
|
||||
* 1) unmodified from disk
|
||||
* 2) backup cache for save/restore during power-downs */
|
||||
if (iwl_alloc_fw_desc(trans(priv), &trans(priv)->ucode_rt.code,
|
||||
pieces.inst, pieces.inst_size))
|
||||
goto err_pci_alloc;
|
||||
if (iwl_alloc_fw_desc(trans(priv), &trans(priv)->ucode_rt.data,
|
||||
pieces.data, pieces.data_size))
|
||||
goto err_pci_alloc;
|
||||
|
||||
/* Initialization instructions and data */
|
||||
if (pieces.init_size && pieces.init_data_size) {
|
||||
if (iwl_alloc_fw_desc(trans(priv),
|
||||
&trans(priv)->ucode_init.code,
|
||||
pieces.init, pieces.init_size))
|
||||
goto err_pci_alloc;
|
||||
if (iwl_alloc_fw_desc(trans(priv),
|
||||
&trans(priv)->ucode_init.data,
|
||||
pieces.init_data, pieces.init_data_size))
|
||||
goto err_pci_alloc;
|
||||
}
|
||||
|
||||
/* WoWLAN instructions and data */
|
||||
if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
|
||||
if (iwl_alloc_fw_desc(trans(priv),
|
||||
&trans(priv)->ucode_wowlan.code,
|
||||
pieces.wowlan_inst,
|
||||
pieces.wowlan_inst_size))
|
||||
goto err_pci_alloc;
|
||||
if (iwl_alloc_fw_desc(trans(priv),
|
||||
&trans(priv)->ucode_wowlan.data,
|
||||
pieces.wowlan_data,
|
||||
pieces.wowlan_data_size))
|
||||
goto err_pci_alloc;
|
||||
}
|
||||
|
||||
/* Now that we can no longer fail, copy information */
|
||||
|
||||
/*
|
||||
* The (size - 16) / 12 formula is based on the information recorded
|
||||
* for each event, which is of mode 1 (including timestamp) for all
|
||||
* new microcodes that include this information.
|
||||
*/
|
||||
priv->init_evtlog_ptr = pieces.init_evtlog_ptr;
|
||||
if (pieces.init_evtlog_size)
|
||||
priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
|
||||
else
|
||||
priv->init_evtlog_size =
|
||||
cfg(priv)->base_params->max_event_log_size;
|
||||
priv->init_errlog_ptr = pieces.init_errlog_ptr;
|
||||
priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
|
||||
if (pieces.inst_evtlog_size)
|
||||
priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
|
||||
else
|
||||
priv->inst_evtlog_size =
|
||||
cfg(priv)->base_params->max_event_log_size;
|
||||
priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
|
||||
#ifndef CONFIG_IWLWIFI_P2P
|
||||
ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
|
||||
#endif
|
||||
|
||||
priv->new_scan_threshold_behaviour =
|
||||
!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
|
||||
|
||||
if (!(cfg(priv)->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
|
||||
ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
|
||||
|
||||
/*
|
||||
* if not PAN, then don't support P2P -- might be a uCode
|
||||
* packaging bug or due to the eeprom check above
|
||||
*/
|
||||
if (!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN))
|
||||
ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
|
||||
|
||||
if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) {
|
||||
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
|
||||
priv->shrd->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
|
||||
} else {
|
||||
priv->sta_key_max_num = STA_KEY_MAX_NUM;
|
||||
priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
|
||||
}
|
||||
/*
|
||||
* figure out the offset of chain noise reset and gain commands
|
||||
* base on the size of standard phy calibration commands table size
|
||||
*/
|
||||
if (ucode_capa.standard_phy_calibration_size >
|
||||
IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
|
||||
ucode_capa.standard_phy_calibration_size =
|
||||
IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
|
||||
|
||||
priv->phy_calib_chain_noise_reset_cmd =
|
||||
ucode_capa.standard_phy_calibration_size;
|
||||
priv->phy_calib_chain_noise_gain_cmd =
|
||||
ucode_capa.standard_phy_calibration_size + 1;
|
||||
|
||||
/* initialize all valid contexts */
|
||||
iwl_init_context(priv, ucode_capa.flags);
|
||||
|
||||
/**************************************************
|
||||
* This is still part of probe() in a sense...
|
||||
*
|
||||
* 9. Setup and register with mac80211 and debugfs
|
||||
**************************************************/
|
||||
err = iwlagn_mac_setup_register(priv, &ucode_capa);
|
||||
if (err)
|
||||
goto out_unbind;
|
||||
|
||||
err = iwl_dbgfs_register(priv, DRV_NAME);
|
||||
if (err)
|
||||
IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
|
||||
|
||||
/* We have our copies now, allow OS release its copies */
|
||||
release_firmware(ucode_raw);
|
||||
complete(&priv->firmware_loading_complete);
|
||||
return;
|
||||
|
||||
try_again:
|
||||
/* try next, if any */
|
||||
if (iwl_request_firmware(priv, false))
|
||||
goto out_unbind;
|
||||
release_firmware(ucode_raw);
|
||||
return;
|
||||
|
||||
err_pci_alloc:
|
||||
IWL_ERR(priv, "failed to allocate pci memory\n");
|
||||
iwl_dealloc_ucode(trans(priv));
|
||||
out_unbind:
|
||||
complete(&priv->firmware_loading_complete);
|
||||
device_release_driver(trans(priv)->dev);
|
||||
release_firmware(ucode_raw);
|
||||
}
|
drivers/net/wireless/iwlwifi/iwl-ucode.h (new file)
@ -0,0 +1,178 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called LICENSE.GPL.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*****************************************************************************/

#ifndef __iwl_ucode_h__
#define __iwl_ucode_h__

/* v1/v2 uCode file layout */
struct iwl_ucode_header {
	__le32 ver;	/* major/minor/API/serial */
	union {
		struct {
			__le32 inst_size;	/* bytes of runtime code */
			__le32 data_size;	/* bytes of runtime data */
			__le32 init_size;	/* bytes of init code */
			__le32 init_data_size;	/* bytes of init data */
			__le32 boot_size;	/* bytes of bootstrap code */
			u8 data[0];		/* in same order as sizes */
		} v1;
		struct {
			__le32 build;		/* build number */
			__le32 inst_size;	/* bytes of runtime code */
			__le32 data_size;	/* bytes of runtime data */
			__le32 init_size;	/* bytes of init code */
			__le32 init_data_size;	/* bytes of init data */
			__le32 boot_size;	/* bytes of bootstrap code */
			u8 data[0];		/* in same order as sizes */
		} v2;
	} u;
};

/*
 * new TLV uCode file layout
 *
 * The new TLV file format contains TLVs, that each specify
 * some piece of data. To facilitate "groups", for example
 * different instruction image with different capabilities,
 * bundled with the same init image, an alternative mechanism
 * is provided:
 * When the alternative field is 0, that means that the item
 * is always valid. When it is non-zero, then it is only
 * valid in conjunction with items of the same alternative,
 * in which case the driver (user) selects one alternative
 * to use.
 */

enum iwl_ucode_tlv_type {
	IWL_UCODE_TLV_INVALID = 0, /* unused */
	IWL_UCODE_TLV_INST = 1,
	IWL_UCODE_TLV_DATA = 2,
	IWL_UCODE_TLV_INIT = 3,
	IWL_UCODE_TLV_INIT_DATA = 4,
	IWL_UCODE_TLV_BOOT = 5,
	IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
	IWL_UCODE_TLV_PAN = 7,
	IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
	IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
	IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
	IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
	IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
	IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
	IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
	IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
	IWL_UCODE_TLV_WOWLAN_INST = 16,
	IWL_UCODE_TLV_WOWLAN_DATA = 17,
	IWL_UCODE_TLV_FLAGS = 18,
};

/**
 * enum iwl_ucode_tlv_flag - ucode API flags
 * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
 *	was a separate TLV but moved here to save space.
 * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
 *	treats good CRC threshold as a boolean
 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
 */
enum iwl_ucode_tlv_flag {
	IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
	IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
	IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
	IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
};

struct iwl_ucode_tlv {
	__le16 type;		/* see above */
	__le16 alternative;	/* see comment */
	__le32 length;		/* not including type/length fields */
	u8 data[0];
};

#define IWL_TLV_UCODE_MAGIC 0x0a4c5749

struct iwl_tlv_ucode_header {
	/*
	 * The TLV style ucode header is distinguished from
	 * the v1/v2 style header by first four bytes being
	 * zero, as such is an invalid combination of
	 * major/minor/API/serial versions.
	 */
	__le32 zero;
	__le32 magic;
	u8 human_readable[64];
	__le32 ver;		/* major/minor/API/serial */
	__le32 build;
	__le64 alternatives;	/* bitmask of valid alternatives */
	/*
	 * The data contained herein has a TLV layout,
	 * see above for the TLV header and types.
	 * Note that each TLV is padded to a length
	 * that is a multiple of 4 for alignment.
	 */
	u8 data[0];
};

struct iwl_priv;

int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first);

#endif /* __iwl_ucode_h__ */
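The comments above describe the complete on-disk layout of the TLV-style .ucode file. The fragment below is a userspace sketch only (little-endian host assumed, plain stdint types standing in for __le32/__le64, not part of the patch) showing how the TLV stream after the fixed header can be walked, mirroring the length check, 4-byte padding, and alternative filtering done in iwlagn_load_firmware():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TLV_MAGIC 0x0a4c5749u	/* IWL_TLV_UCODE_MAGIC from the header above */

/* Fixed-size front of the TLV-style file, mirroring struct iwl_tlv_ucode_header. */
struct tlv_file_hdr {
	uint32_t zero;			/* must be 0 to distinguish from v1/v2 */
	uint32_t magic;
	uint8_t  human_readable[64];
	uint32_t ver;
	uint32_t build;
	uint64_t alternatives;		/* bitmask of valid alternatives */
	/* TLV records follow */
};

struct tlv_rec {
	uint16_t type;
	uint16_t alternative;
	uint32_t length;		/* payload bytes, not counting this header */
	/* payload follows, padded to a multiple of 4 */
};

#define ALIGN4(x) (((x) + 3u) & ~3u)

/* Walk the TLV area; 'wanted_alt' mimics the driver's selected alternative. */
int walk_tlvs(const uint8_t *buf, size_t len, uint16_t wanted_alt)
{
	const struct tlv_file_hdr *hdr = (const void *)buf;
	size_t pos;

	if (len < sizeof(*hdr) || hdr->zero != 0 || hdr->magic != TLV_MAGIC)
		return -1;

	for (pos = sizeof(*hdr); pos + sizeof(struct tlv_rec) <= len; ) {
		struct tlv_rec rec;

		memcpy(&rec, buf + pos, sizeof(rec));
		pos += sizeof(rec);
		if (rec.length > len - pos)
			return -1;	/* truncated payload */

		/* Alternative 0 is always valid; others only when selected. */
		if (rec.alternative == 0 || rec.alternative == wanted_alt)
			printf("TLV type %u, alt %u, %u bytes\n",
			       (unsigned)rec.type, (unsigned)rec.alternative,
			       (unsigned)rec.length);

		pos += ALIGN4(rec.length);
	}
	return 0;
}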
|
@ -144,7 +144,7 @@ TRACE_EVENT(iwm_tx_packets,

	TP_printk(
		IWM_PR_FMT " Tx %spacket: eot %d, seq 0x%x, sta_color 0x%x, "
		"ra_tid 0x%x, credit_group 0x%x, embeded_packets %d, %d bytes",
		"ra_tid 0x%x, credit_group 0x%x, embedded_packets %d, %d bytes",
		IWM_PR_ARG, !__entry->eot ? "concatenated " : "",
		__entry->eot, __entry->seq, __entry->color, __entry->ra_tid,
		__entry->credit_group, __entry->npkt, __entry->bytes
|
@ -376,7 +376,12 @@ mwifiex_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
|
||||
struct ieee80211_channel *chan,
|
||||
enum nl80211_channel_type channel_type)
|
||||
{
|
||||
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
|
||||
struct mwifiex_private *priv;
|
||||
|
||||
if (dev)
|
||||
priv = mwifiex_netdev_get_priv(dev);
|
||||
else
|
||||
priv = mwifiex_cfg80211_get_priv(wiphy);
|
||||
|
||||
if (priv->media_connected) {
|
||||
wiphy_err(wiphy, "This setting is valid only when station "
|
||||
@ -534,6 +539,11 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
|
||||
ret = -EFAULT;
|
||||
}
|
||||
|
||||
/* Get DTIM period information from firmware */
|
||||
mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
|
||||
HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
|
||||
&priv->dtim_period);
|
||||
|
||||
/*
|
||||
* Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
|
||||
* MCS index values for us are 0 to 7.
|
||||
@ -568,8 +578,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
|
||||
WLAN_CAPABILITY_SHORT_SLOT_TIME)
|
||||
sinfo->bss_param.flags |=
|
||||
BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
|
||||
sinfo->bss_param.dtim_period =
|
||||
priv->curr_bss_params.bss_descriptor.dtim_period;
|
||||
sinfo->bss_param.dtim_period = priv->dtim_period;
|
||||
sinfo->bss_param.beacon_interval =
|
||||
priv->curr_bss_params.bss_descriptor.beacon_period;
|
||||
}
|
||||
@ -858,7 +867,12 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
|
||||
ret = mwifiex_set_rf_channel(priv, channel,
|
||||
priv->adapter->channel_type);
|
||||
|
||||
ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); /* Disable keys */
|
||||
/* As this is new association, clear locally stored
|
||||
* keys and security related flags */
|
||||
priv->sec_info.wpa_enabled = false;
|
||||
priv->sec_info.wpa2_enabled = false;
|
||||
priv->wep_key_curr_index = 0;
|
||||
ret = mwifiex_set_encode(priv, NULL, 0, 0, 1);
|
||||
|
||||
if (mode == NL80211_IFTYPE_ADHOC) {
|
||||
/* "privacy" is set only for ad-hoc mode */
|
||||
@ -903,6 +917,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
|
||||
dev_dbg(priv->adapter->dev,
|
||||
"info: setting wep encryption"
|
||||
" with key len %d\n", sme->key_len);
|
||||
priv->wep_key_curr_index = sme->key_idx;
|
||||
ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
|
||||
sme->key_idx, 0);
|
||||
}
|
||||
|
@ -280,6 +280,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
|
||||
adapter->adhoc_awake_period = 0;
|
||||
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
|
||||
adapter->arp_filter_size = 0;
|
||||
adapter->channel_type = NL80211_CHAN_HT20;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -249,7 +249,6 @@ struct mwifiex_bssdescriptor {
|
||||
u32 channel;
|
||||
u32 freq;
|
||||
u16 beacon_period;
|
||||
u8 dtim_period;
|
||||
u8 erp_flags;
|
||||
u32 bss_mode;
|
||||
u8 supported_rates[MWIFIEX_SUPPORTED_RATES];
|
||||
@ -392,6 +391,7 @@ struct mwifiex_private {
|
||||
u8 prev_bssid[ETH_ALEN];
|
||||
struct mwifiex_current_bss_params curr_bss_params;
|
||||
u16 beacon_period;
|
||||
u8 dtim_period;
|
||||
u16 listen_interval;
|
||||
u16 atim_window;
|
||||
u8 adhoc_channel;
|
||||
|
@ -1086,7 +1086,6 @@ mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
|
||||
struct ieee_types_vendor_specific *vendor_ie;
|
||||
const u8 wpa_oui[4] = { 0x00, 0x50, 0xf2, 0x01 };
|
||||
const u8 wmm_oui[4] = { 0x00, 0x50, 0xf2, 0x02 };
|
||||
struct ieee80211_tim_ie *tim_ie;
|
||||
|
||||
found_data_rate_ie = false;
|
||||
rate_size = 0;
|
||||
@ -1259,11 +1258,6 @@ mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
|
||||
sizeof(struct ieee_types_header) -
|
||||
bss_entry->beacon_buf);
|
||||
break;
|
||||
case WLAN_EID_TIM:
|
||||
tim_ie = (void *) (current_ptr +
|
||||
sizeof(struct ieee_types_header));
|
||||
bss_entry->dtim_period = tim_ie->dtim_period;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -103,7 +103,7 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
|
||||
static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
|
||||
struct host_cmd_ds_command *cmd,
|
||||
u16 cmd_action, u32 cmd_oid,
|
||||
u32 *ul_temp)
|
||||
u16 *ul_temp)
|
||||
{
|
||||
struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib;
|
||||
|
||||
@ -112,62 +112,18 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
|
||||
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib)
|
||||
- 1 + S_DS_GEN);
|
||||
|
||||
snmp_mib->oid = cpu_to_le16((u16)cmd_oid);
|
||||
if (cmd_action == HostCmd_ACT_GEN_GET) {
|
||||
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_GET);
|
||||
snmp_mib->buf_size = cpu_to_le16(MAX_SNMP_BUF_SIZE);
|
||||
cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
|
||||
+ MAX_SNMP_BUF_SIZE);
|
||||
le16_add_cpu(&cmd->size, MAX_SNMP_BUF_SIZE);
|
||||
} else if (cmd_action == HostCmd_ACT_GEN_SET) {
|
||||
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
|
||||
snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
|
||||
*((__le16 *) (snmp_mib->value)) = cpu_to_le16(*ul_temp);
|
||||
le16_add_cpu(&cmd->size, sizeof(u16));
|
||||
}
|
||||
|
||||
switch (cmd_oid) {
|
||||
case FRAG_THRESH_I:
|
||||
snmp_mib->oid = cpu_to_le16((u16) FRAG_THRESH_I);
|
||||
if (cmd_action == HostCmd_ACT_GEN_SET) {
|
||||
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
|
||||
snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
|
||||
*((__le16 *) (snmp_mib->value)) =
|
||||
cpu_to_le16((u16) *ul_temp);
|
||||
cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
|
||||
+ sizeof(u16));
|
||||
}
|
||||
break;
|
||||
case RTS_THRESH_I:
|
||||
snmp_mib->oid = cpu_to_le16((u16) RTS_THRESH_I);
|
||||
if (cmd_action == HostCmd_ACT_GEN_SET) {
|
||||
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
|
||||
snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
|
||||
*(__le16 *) (snmp_mib->value) =
|
||||
cpu_to_le16((u16) *ul_temp);
|
||||
cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
|
||||
+ sizeof(u16));
|
||||
}
|
||||
break;
|
||||
|
||||
case SHORT_RETRY_LIM_I:
|
||||
snmp_mib->oid = cpu_to_le16((u16) SHORT_RETRY_LIM_I);
|
||||
if (cmd_action == HostCmd_ACT_GEN_SET) {
|
||||
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
|
||||
snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
|
||||
*((__le16 *) (snmp_mib->value)) =
|
||||
cpu_to_le16((u16) *ul_temp);
|
||||
cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
|
||||
+ sizeof(u16));
|
||||
}
|
||||
break;
|
||||
case DOT11D_I:
|
||||
snmp_mib->oid = cpu_to_le16((u16) DOT11D_I);
|
||||
if (cmd_action == HostCmd_ACT_GEN_SET) {
|
||||
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
|
||||
snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
|
||||
*((__le16 *) (snmp_mib->value)) =
|
||||
cpu_to_le16((u16) *ul_temp);
|
||||
cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
|
||||
+ sizeof(u16));
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
dev_dbg(priv->adapter->dev,
|
||||
"cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x,"
|
||||
" Value=0x%x\n",
|
||||
|
@ -210,6 +210,9 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
|
||||
dev_dbg(priv->adapter->dev,
|
||||
"info: SNMP_RESP: TxRetryCount=%u\n", ul_temp);
|
||||
break;
|
||||
case DTIM_PERIOD_I:
|
||||
dev_dbg(priv->adapter->dev,
|
||||
"info: SNMP_RESP: DTIM period=%u\n", ul_temp);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -402,6 +402,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
|
||||
#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
|
||||
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
|
||||
#define MWL8K_CMD_GET_WATCHDOG_BITMAP 0x0205
|
||||
#define MWL8K_CMD_DEL_MAC_ADDR 0x0206 /* per-vif */
|
||||
#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
|
||||
#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
|
||||
#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */
|
||||
@ -3429,10 +3430,7 @@ static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* CMD_SET_MAC_ADDR.
|
||||
*/
|
||||
struct mwl8k_cmd_set_mac_addr {
|
||||
struct mwl8k_cmd_update_mac_addr {
|
||||
struct mwl8k_cmd_pkt header;
|
||||
union {
|
||||
struct {
|
||||
@ -3448,12 +3446,12 @@ struct mwl8k_cmd_set_mac_addr {
|
||||
#define MWL8K_MAC_TYPE_PRIMARY_AP 2
|
||||
#define MWL8K_MAC_TYPE_SECONDARY_AP 3
|
||||
|
||||
static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif, u8 *mac)
|
||||
static int mwl8k_cmd_update_mac_addr(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif, u8 *mac, bool set)
|
||||
{
|
||||
struct mwl8k_priv *priv = hw->priv;
|
||||
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
|
||||
struct mwl8k_cmd_set_mac_addr *cmd;
|
||||
struct mwl8k_cmd_update_mac_addr *cmd;
|
||||
int mac_type;
|
||||
int rc;
|
||||
|
||||
@ -3474,7 +3472,11 @@ static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
|
||||
if (cmd == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
|
||||
if (set)
|
||||
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
|
||||
else
|
||||
cmd->header.code = cpu_to_le16(MWL8K_CMD_DEL_MAC_ADDR);
|
||||
|
||||
cmd->header.length = cpu_to_le16(sizeof(*cmd));
|
||||
if (priv->ap_fw) {
|
||||
cmd->mbss.mac_type = cpu_to_le16(mac_type);
|
||||
@ -3489,6 +3491,24 @@ static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* MWL8K_CMD_SET_MAC_ADDR.
|
||||
*/
|
||||
static inline int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif, u8 *mac)
|
||||
{
|
||||
return mwl8k_cmd_update_mac_addr(hw, vif, mac, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* MWL8K_CMD_DEL_MAC_ADDR.
|
||||
*/
|
||||
static inline int mwl8k_cmd_del_mac_addr(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif, u8 *mac)
|
||||
{
|
||||
return mwl8k_cmd_update_mac_addr(hw, vif, mac, false);
|
||||
}
|
||||
|
||||
/*
|
||||
* CMD_SET_RATEADAPT_MODE.
|
||||
*/
|
||||
@ -4541,7 +4561,7 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
|
||||
if (priv->ap_fw)
|
||||
mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
|
||||
|
||||
mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
|
||||
mwl8k_cmd_del_mac_addr(hw, vif, vif->addr);
|
||||
|
||||
mwl8k_remove_vif(priv, mwl8k_vif);
|
||||
}
|
||||
|
@ -1795,6 +1795,14 @@ struct mac_iveiv_entry {
|
||||
*/
|
||||
#define RFCSR2_RESCAL_EN FIELD8(0x80)
|
||||
|
||||
/*
|
||||
* RFCSR 3:
|
||||
*/
|
||||
#define RFCSR3_K FIELD8(0x0f)
|
||||
/* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */
|
||||
#define RFCSR3_PA1_BIAS_CCK FIELD8(0x70);
|
||||
#define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80);
|
||||
|
||||
/*
|
||||
* FRCSR 5:
|
||||
*/
|
||||
@ -1811,10 +1819,12 @@ struct mac_iveiv_entry {
|
||||
* RFCSR 7:
|
||||
*/
|
||||
#define RFCSR7_RF_TUNING FIELD8(0x01)
|
||||
#define RFCSR7_R02 FIELD8(0x07)
|
||||
#define RFCSR7_R3 FIELD8(0x08)
|
||||
#define RFCSR7_R45 FIELD8(0x30)
|
||||
#define RFCSR7_R67 FIELD8(0xc0)
|
||||
#define RFCSR7_BIT1 FIELD8(0x02)
|
||||
#define RFCSR7_BIT2 FIELD8(0x04)
|
||||
#define RFCSR7_BIT3 FIELD8(0x08)
|
||||
#define RFCSR7_BIT4 FIELD8(0x10)
|
||||
#define RFCSR7_BIT5 FIELD8(0x20)
|
||||
#define RFCSR7_BITS67 FIELD8(0xc0)
|
||||
|
||||
/*
|
||||
* RFCSR 11:
|
||||
@ -1838,6 +1848,11 @@ struct mac_iveiv_entry {
|
||||
*/
|
||||
#define RFCSR15_TX_LO2_EN FIELD8(0x08)
|
||||
|
||||
/*
|
||||
* RFCSR 16:
|
||||
*/
|
||||
#define RFCSR16_TXMIXER_GAIN FIELD8(0x07)
|
||||
|
||||
/*
|
||||
* RFCSR 17:
|
||||
*/
|
||||
@ -1866,6 +1881,13 @@ struct mac_iveiv_entry {
|
||||
*/
|
||||
#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
|
||||
|
||||
/*
|
||||
* RFCSR 24:
|
||||
*/
|
||||
#define RFCSR24_TX_AGC_FC FIELD8(0x1f)
|
||||
#define RFCSR24_TX_H20M FIELD8(0x20)
|
||||
#define RFCSR24_TX_CALIB FIELD8(0x7f)
|
||||
|
||||
/*
|
||||
* RFCSR 27:
|
||||
*/
|
||||
@ -1887,6 +1909,7 @@ struct mac_iveiv_entry {
|
||||
*/
|
||||
#define RFCSR31_RX_AGC_FC FIELD8(0x1f)
|
||||
#define RFCSR31_RX_H20M FIELD8(0x20)
|
||||
#define RFCSR31_RX_CALIB FIELD8(0x7f)
|
||||
|
||||
/*
|
||||
* RFCSR 38:
|
||||
@ -2092,6 +2115,12 @@ struct mac_iveiv_entry {
|
||||
#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
|
||||
#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
|
||||
|
||||
/*
|
||||
* EEPROM TXMIXER GAIN A offset (note overlaps with EEPROM RSSI A2).
|
||||
*/
|
||||
#define EEPROM_TXMIXER_GAIN_A 0x0026
|
||||
#define EEPROM_TXMIXER_GAIN_A_VAL FIELD16(0x0007)
|
||||
|
||||
/*
|
||||
* EEPROM EIRP Maximum TX power values(unit: dbm)
|
||||
*/
|
||||
@ -2422,4 +2451,16 @@ struct mac_iveiv_entry {
|
||||
*/
|
||||
#define EIRP_MAX_TX_POWER_LIMIT 0x50
|
||||
|
||||
/*
|
||||
* RT2800 driver data structure
|
||||
*/
|
||||
struct rt2800_drv_data {
|
||||
u8 calibration_bw20;
|
||||
u8 calibration_bw40;
|
||||
u8 bbp25;
|
||||
u8 bbp26;
|
||||
u8 txmixer_gain_24g;
|
||||
u8 txmixer_gain_5g;
|
||||
};
|
||||
|
||||
#endif /* RT2800_H */
|
||||
|
@ -1645,10 +1645,14 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
|
||||
struct rf_channel *rf,
|
||||
struct channel_info *info)
|
||||
{
|
||||
u8 rfcsr;
|
||||
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
|
||||
u8 rfcsr, calib_tx, calib_rx;
|
||||
|
||||
rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);
|
||||
rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR3_K, rf->rf3);
|
||||
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
|
||||
@ -1662,16 +1666,82 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
|
||||
rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2);
|
||||
rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
|
||||
if (rt2x00_rt(rt2x00dev, RT3390)) {
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
|
||||
rt2x00dev->default_ant.rx_chain_num == 1);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
|
||||
rt2x00dev->default_ant.tx_chain_num == 1);
|
||||
} else {
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
|
||||
|
||||
switch (rt2x00dev->default_ant.tx_chain_num) {
|
||||
case 1:
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
|
||||
/* fall through */
|
||||
case 2:
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
|
||||
break;
|
||||
}
|
||||
|
||||
switch (rt2x00dev->default_ant.rx_chain_num) {
|
||||
case 1:
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
|
||||
/* fall through */
|
||||
case 2:
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
|
||||
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
|
||||
msleep(1);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
|
||||
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
|
||||
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
|
||||
|
||||
rt2800_rfcsr_write(rt2x00dev, 24,
|
||||
rt2x00dev->calibration[conf_is_ht40(conf)]);
|
||||
if (rt2x00_rt(rt2x00dev, RT3390)) {
|
||||
calib_tx = conf_is_ht40(conf) ? 0x68 : 0x4f;
|
||||
calib_rx = conf_is_ht40(conf) ? 0x6f : 0x4f;
|
||||
} else {
|
||||
if (conf_is_ht40(conf)) {
|
||||
calib_tx = drv_data->calibration_bw40;
|
||||
calib_rx = drv_data->calibration_bw40;
|
||||
} else {
|
||||
calib_tx = drv_data->calibration_bw20;
|
||||
calib_rx = drv_data->calibration_bw20;
|
||||
}
|
||||
}
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 24, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR24_TX_CALIB, calib_tx);
|
||||
rt2800_rfcsr_write(rt2x00dev, 24, rfcsr);
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR31_RX_CALIB, calib_rx);
|
||||
rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
|
||||
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
|
||||
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
|
||||
msleep(1);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
|
||||
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
|
||||
}
|
||||
|
||||
static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
|
||||
@ -1679,12 +1749,13 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
|
||||
struct rf_channel *rf,
|
||||
struct channel_info *info)
|
||||
{
|
||||
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
|
||||
u8 rfcsr;
|
||||
u32 reg;
|
||||
|
||||
if (rf->channel <= 14) {
|
||||
rt2800_bbp_write(rt2x00dev, 25, 0x15);
|
||||
rt2800_bbp_write(rt2x00dev, 26, 0x85);
|
||||
rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25);
|
||||
rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26);
|
||||
} else {
|
||||
rt2800_bbp_write(rt2x00dev, 25, 0x09);
|
||||
rt2800_bbp_write(rt2x00dev, 26, 0xff);
|
||||
@ -1712,8 +1783,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
|
||||
if (rf->channel <= 14) {
|
||||
rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 3);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
|
||||
(info->default_power1 & 0x3) |
|
||||
((info->default_power1 & 0xC) << 1));
|
||||
info->default_power1);
|
||||
} else {
|
||||
rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 7);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
|
||||
@ -1726,8 +1796,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
|
||||
if (rf->channel <= 14) {
|
||||
rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 3);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
|
||||
(info->default_power2 & 0x3) |
|
||||
((info->default_power2 & 0xC) << 1));
|
||||
info->default_power2);
|
||||
} else {
|
||||
rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 7);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
|
||||
@ -1737,11 +1806,12 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
|
||||
rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
|
||||
if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
|
||||
if (rf->channel <= 14) {
|
||||
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
|
||||
@ -1772,10 +1842,13 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
|
||||
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
|
||||
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
|
||||
|
||||
rt2800_rfcsr_write(rt2x00dev, 24,
|
||||
rt2x00dev->calibration[conf_is_ht40(conf)]);
|
||||
rt2800_rfcsr_write(rt2x00dev, 31,
|
||||
rt2x00dev->calibration[conf_is_ht40(conf)]);
|
||||
if (conf_is_ht40(conf)) {
|
||||
rt2800_rfcsr_write(rt2x00dev, 24, drv_data->calibration_bw40);
|
||||
rt2800_rfcsr_write(rt2x00dev, 31, drv_data->calibration_bw40);
|
||||
} else {
|
||||
rt2800_rfcsr_write(rt2x00dev, 24, drv_data->calibration_bw20);
|
||||
rt2800_rfcsr_write(rt2x00dev, 31, drv_data->calibration_bw20);
|
||||
}
|
||||
|
||||
if (rf->channel <= 14) {
|
||||
rt2800_rfcsr_write(rt2x00dev, 7, 0xd8);
|
||||
@ -1783,7 +1856,10 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
|
||||
rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
|
||||
rt2800_rfcsr_write(rt2x00dev, 11, 0xb9);
|
||||
rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
|
||||
rt2800_rfcsr_write(rt2x00dev, 16, 0x4c);
|
||||
rfcsr = 0x4c;
|
||||
rt2x00_set_field8(&rfcsr, RFCSR16_TXMIXER_GAIN,
|
||||
drv_data->txmixer_gain_24g);
|
||||
rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
|
||||
rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
|
||||
rt2800_rfcsr_write(rt2x00dev, 19, 0x93);
|
||||
rt2800_rfcsr_write(rt2x00dev, 20, 0xb3);
|
||||
@ -1792,12 +1868,20 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
|
||||
rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
|
||||
rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
|
||||
} else {
|
||||
rt2800_rfcsr_write(rt2x00dev, 7, 0x14);
|
||||
rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR7_BIT2, 1);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR7_BIT3, 0);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR7_BIT4, 1);
|
||||
rt2x00_set_field8(&rfcsr, RFCSR7_BITS67, 0);
|
||||
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
|
||||
rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
|
||||
rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
|
||||
rt2800_rfcsr_write(rt2x00dev, 11, 0x00);
|
||||
rt2800_rfcsr_write(rt2x00dev, 15, 0x43);
|
||||
rt2800_rfcsr_write(rt2x00dev, 16, 0x7a);
|
||||
rfcsr = 0x7a;
|
||||
rt2x00_set_field8(&rfcsr, RFCSR16_TXMIXER_GAIN,
|
||||
drv_data->txmixer_gain_5g);
|
||||
rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
|
||||
rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
|
||||
if (rf->channel <= 64) {
|
||||
rt2800_rfcsr_write(rt2x00dev, 19, 0xb7);
|
||||
@ -3246,6 +3330,7 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
|
||||
|
||||
static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
|
||||
{
|
||||
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
|
||||
u8 rfcsr;
|
||||
u8 bbp;
|
||||
u32 reg;
|
||||
@ -3534,20 +3619,26 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
|
||||
* Set RX Filter calibration for 20MHz and 40MHz
|
||||
*/
|
||||
if (rt2x00_rt(rt2x00dev, RT3070)) {
|
||||
rt2x00dev->calibration[0] =
|
||||
drv_data->calibration_bw20 =
|
||||
rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
|
||||
rt2x00dev->calibration[1] =
|
||||
drv_data->calibration_bw40 =
|
||||
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
|
||||
} else if (rt2x00_rt(rt2x00dev, RT3071) ||
|
||||
rt2x00_rt(rt2x00dev, RT3090) ||
|
||||
rt2x00_rt(rt2x00dev, RT3390) ||
|
||||
rt2x00_rt(rt2x00dev, RT3572)) {
|
||||
rt2x00dev->calibration[0] =
|
||||
drv_data->calibration_bw20 =
|
||||
rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x13);
|
||||
rt2x00dev->calibration[1] =
|
||||
drv_data->calibration_bw40 =
|
||||
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
|
||||
}
|
||||
|
||||
/*
|
||||
* Save BBP 25 & 26 values for later use in channel switching
|
||||
*/
|
||||
rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
|
||||
rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
|
||||
|
||||
if (!rt2x00_rt(rt2x00dev, RT5390)) {
|
||||
/*
|
||||
* Set back to initial state
|
||||
@ -3587,11 +3678,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
|
||||
&rt2x00dev->cap_flags))
|
||||
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
|
||||
}
|
||||
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
|
||||
if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
|
||||
rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
|
||||
rt2x00_get_field16(eeprom,
|
||||
EEPROM_TXMIXER_GAIN_BG_VAL));
|
||||
rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
|
||||
drv_data->txmixer_gain_24g);
|
||||
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
|
||||
}
|
||||
|
||||
@ -3799,6 +3887,7 @@ EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
|
||||
|
||||
int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
|
||||
{
|
||||
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
|
||||
u16 word;
|
||||
u8 *mac;
|
||||
u8 default_lna_gain;
|
||||
@ -3882,6 +3971,14 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
|
||||
rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
|
||||
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
|
||||
|
||||
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word);
|
||||
if ((word & 0x00ff) != 0x00ff) {
|
||||
drv_data->txmixer_gain_24g =
|
||||
rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
|
||||
} else {
|
||||
drv_data->txmixer_gain_24g = 0;
|
||||
}
|
||||
|
||||
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
|
||||
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
|
||||
rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
|
||||
@ -3891,6 +3988,14 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
|
||||
default_lna_gain);
|
||||
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
|
||||
|
||||
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
|
||||
if ((word & 0x00ff) != 0x00ff) {
|
||||
drv_data->txmixer_gain_5g =
|
||||
rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
|
||||
} else {
|
||||
drv_data->txmixer_gain_5g = 0;
|
||||
}
|
||||
|
||||
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
|
||||
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
|
||||
rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
|
||||
|
@ -1093,6 +1093,7 @@ static const struct data_queue_desc rt2800pci_queue_bcn = {
|
||||
|
||||
static const struct rt2x00_ops rt2800pci_ops = {
|
||||
.name = KBUILD_MODNAME,
|
||||
.drv_data_size = sizeof(struct rt2800_drv_data),
|
||||
.max_sta_intf = 1,
|
||||
.max_ap_intf = 8,
|
||||
.eeprom_size = EEPROM_SIZE,
|
||||
|
@ -827,6 +827,7 @@ static const struct data_queue_desc rt2800usb_queue_bcn = {
|
||||
|
||||
static const struct rt2x00_ops rt2800usb_ops = {
|
||||
.name = KBUILD_MODNAME,
|
||||
.drv_data_size = sizeof(struct rt2800_drv_data),
|
||||
.max_sta_intf = 1,
|
||||
.max_ap_intf = 8,
|
||||
.eeprom_size = EEPROM_SIZE,
|
||||
|
@ -647,6 +647,7 @@ struct rt2x00lib_ops {
|
||||
*/
|
||||
struct rt2x00_ops {
|
||||
const char *name;
|
||||
const unsigned int drv_data_size;
|
||||
const unsigned int max_sta_intf;
|
||||
const unsigned int max_ap_intf;
|
||||
const unsigned int eeprom_size;
|
||||
@ -741,6 +742,11 @@ struct rt2x00_dev {
|
||||
*/
|
||||
const struct rt2x00_ops *ops;
|
||||
|
||||
/*
|
||||
* Driver data.
|
||||
*/
|
||||
void *drv_data;
|
||||
|
||||
/*
|
||||
* IEEE80211 control structure.
|
||||
*/
|
||||
@ -886,17 +892,10 @@ struct rt2x00_dev {
|
||||
u8 rssi_offset;
|
||||
|
||||
/*
|
||||
* Frequency offset (for rt61pci & rt73usb).
|
||||
* Frequency offset.
|
||||
*/
|
||||
u8 freq_offset;
|
||||
|
||||
/*
|
||||
* Calibration information (for rt2800usb & rt2800pci).
|
||||
* [0] -> BW20
|
||||
* [1] -> BW40
|
||||
*/
|
||||
u8 calibration[2];
|
||||
|
||||
/*
|
||||
* Association id.
|
||||
*/
|
||||
|
@ -1121,6 +1121,18 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
|
||||
{
|
||||
int retval = -ENOMEM;
|
||||
|
||||
/*
|
||||
* Allocate the driver data memory, if necessary.
|
||||
*/
|
||||
if (rt2x00dev->ops->drv_data_size > 0) {
|
||||
rt2x00dev->drv_data = kzalloc(rt2x00dev->ops->drv_data_size,
|
||||
GFP_KERNEL);
|
||||
if (!rt2x00dev->drv_data) {
|
||||
retval = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock_init(&rt2x00dev->irqmask_lock);
|
||||
mutex_init(&rt2x00dev->csr_mutex);
|
||||
|
||||
@ -1261,6 +1273,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
|
||||
* Free queue structures.
|
||||
*/
|
||||
rt2x00queue_free(rt2x00dev);
|
||||
|
||||
/*
|
||||
* Free the driver data.
|
||||
*/
|
||||
if (rt2x00dev->drv_data)
|
||||
kfree(rt2x00dev->drv_data);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
|
||||
|
||||
|
@ -296,12 +296,10 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
		 * because that will cause nullfunc send by mac80211
		 * fail, and cause pkt loss, we have tested that 5mA
		 * is worked very well */
		if (!rtlpriv->psc.multi_buffered) {
		if (!rtlpriv->psc.multi_buffered)
			queue_delayed_work(rtlpriv->works.rtl_wq,
					   &rtlpriv->works.ps_work,
					   MSECS(5));
		pr_info("In section\n");
		}
	} else {
		rtl_swlps_rf_awake(hw);
		rtlpriv->psc.sw_ps_enabled = false;
|
@ -198,7 +198,7 @@ static bool _rtl_pci_platform_switch_device_pci_aspm(
}

/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
|
@ -207,8 +207,6 @@ static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
		udelay(100);

	return true;
}

/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
|
||||
@ -1150,10 +1148,12 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
|
||||
ring->idx = (ring->idx + 1) % ring->entries;
|
||||
}
|
||||
|
||||
pci_free_consistent(rtlpci->pdev,
|
||||
sizeof(*ring->desc) * ring->entries,
|
||||
ring->desc, ring->dma);
|
||||
ring->desc = NULL;
|
||||
if (ring->desc) {
|
||||
pci_free_consistent(rtlpci->pdev,
|
||||
sizeof(*ring->desc) * ring->entries,
|
||||
ring->desc, ring->dma);
|
||||
ring->desc = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
|
||||
@ -1177,12 +1177,14 @@ static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
			kfree_skb(skb);
		}

		pci_free_consistent(rtlpci->pdev,
		if (rtlpci->rx_ring[rx_queue_idx].desc) {
			pci_free_consistent(rtlpci->pdev,
				    sizeof(*rtlpci->rx_ring[rx_queue_idx].
				    desc) * rtlpci->rxringcount,
				    rtlpci->rx_ring[rx_queue_idx].desc,
				    rtlpci->rx_ring[rx_queue_idx].dma);
		rtlpci->rx_ring[rx_queue_idx].desc = NULL;
			rtlpci->rx_ring[rx_queue_idx].desc = NULL;
		}
	}
}

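Both ring-free hunks above make the teardown path tolerant of a ring that was never allocated: pci_free_consistent() is only called when the descriptor pointer is non-NULL, and the pointer is cleared afterwards so a repeated call does nothing. A condensed sketch of the pattern, with hypothetical types and names:

#include <linux/pci.h>

/* example_* types and names are hypothetical. */
struct example_desc {
	__le32 dword[8];
};

struct example_ring {
	struct example_desc *desc;	/* coherent DMA buffer, or NULL */
	dma_addr_t dma;
	unsigned int entries;
};

static void example_free_ring(struct pci_dev *pdev, struct example_ring *ring)
{
	if (!ring->desc)
		return;			/* never allocated, e.g. early probe failure */

	pci_free_consistent(pdev, sizeof(*ring->desc) * ring->entries,
			    ring->desc, ring->dma);
	ring->desc = NULL;		/* a second call becomes a no-op */
}
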
@ -1760,8 +1762,8 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			RT_ASSERT(false,
				  "Unable to obtain 32bit DMA for consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
			err = -ENOMEM;
			goto fail1;
		}
	}

@ -1803,7 +1805,7 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		RT_ASSERT(false, "Can't obtain PCI resources\n");
		goto fail2;
		goto fail1;
	}

	pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
@ -1816,6 +1818,7 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
			rtlpriv->cfg->bar_id, pmem_len);
	if (rtlpriv->io.pci_mem_start == 0) {
		RT_ASSERT(false, "Can't map PCI mem\n");
		err = -ENOMEM;
		goto fail2;
	}

@ -1832,8 +1835,10 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
	pci_write_config_byte(pdev, 0x04, 0x07);

	/* find adapter */
	if (!_rtl_pci_find_adapter(pdev, hw))
	if (!_rtl_pci_find_adapter(pdev, hw)) {
		err = -ENODEV;
		goto fail3;
	}

	/* Init IO handler */
	_rtl_pci_io_handler_init(&pdev->dev, hw);
@ -1843,6 +1848,7 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,

	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
		err = -ENODEV;
		goto fail3;
	}

@ -1887,7 +1893,6 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
	return 0;

fail3:
	pci_set_drvdata(pdev, NULL);
	rtl_deinit_core(hw);
	_rtl_pci_io_handler_release(hw);

@ -1899,10 +1904,12 @@ fail2:
	complete(&rtlpriv->firmware_loading_complete);

fail1:

	if (hw)
		ieee80211_free_hw(hw);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);

	return -ENOMEM;
	return err;

}
EXPORT_SYMBOL(rtl_pci_probe);

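The rtl_pci_probe() hunks above convert the early-exit paths to a single err variable and cascading fail1/fail2/fail3 labels, so each label undoes only what was set up before it and the real error code is propagated instead of a blanket failure value. A stripped-down sketch of that structure (function and helper names are hypothetical, not the rtlwifi ones):

#include <linux/pci.h>

/* Hypothetical placeholder for a hardware-detection step. */
static bool example_hw_present(struct pci_dev *pdev)
{
	return pci_resource_len(pdev, 0) != 0;
}

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto fail1;		/* only the device was enabled so far */

	if (!example_hw_present(pdev)) {
		err = -ENODEV;		/* report the real reason */
		goto fail2;
	}

	return 0;

fail2:
	pci_release_regions(pdev);
fail1:
	pci_disable_device(pdev);
	return err;			/* propagate the specific error code */
}
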
@ -294,7 +294,7 @@ static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
		de_digtable.min_undecorated_pwdb_for_dm =
			rtlpriv->dm.UNDEC_SM_PWDB;
		RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
			 "AP Ext Port or disconnet PWDB = 0x%x\n",
			 "AP Ext Port or disconnect PWDB = 0x%x\n",
			 de_digtable.min_undecorated_pwdb_for_dm);
	}

@ -13,6 +13,9 @@
#include <linux/ssb/ssb_driver_chipcommon.h>
#include <linux/delay.h>
#include <linux/export.h>
#ifdef CONFIG_BCM47XX
#include <asm/mach-bcm47xx/nvram.h>
#endif

#include "ssb_private.h"

@ -92,10 +95,6 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
	u32 pmuctl, tmp, pllctl;
	unsigned int i;

	if ((bus->chip_id == 0x5354) && !crystalfreq) {
		/* The 5354 crystal freq is 25MHz */
		crystalfreq = 25000;
	}
	if (crystalfreq)
		e = pmu0_plltab_find_entry(crystalfreq);
	if (!e)
@ -321,7 +320,11 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
	u32 crystalfreq = 0; /* in kHz. 0 = keep default freq. */

	if (bus->bustype == SSB_BUSTYPE_SSB) {
		/* TODO: The user may override the crystal frequency. */
#ifdef CONFIG_BCM47XX
		char buf[20];
		if (nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0)
			crystalfreq = simple_strtoul(buf, NULL, 0);
#endif
	}

	switch (bus->chip_id) {
@ -330,7 +333,11 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
		ssb_pmu1_pllinit_r0(cc, crystalfreq);
		break;
	case 0x4328:
		ssb_pmu0_pllinit_r0(cc, crystalfreq);
		break;
	case 0x5354:
		if (crystalfreq == 0)
			crystalfreq = 25000;
		ssb_pmu0_pllinit_r0(cc, crystalfreq);
		break;
	case 0x4322:
@ -607,3 +614,34 @@ void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on)

EXPORT_SYMBOL(ssb_pmu_set_ldo_voltage);
EXPORT_SYMBOL(ssb_pmu_set_ldo_paref);

u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc)
{
	struct ssb_bus *bus = cc->dev->bus;

	switch (bus->chip_id) {
	case 0x5354:
		/* 5354 chip uses a non programmable PLL of frequency 240MHz */
		return 240000000;
	default:
		ssb_printk(KERN_ERR PFX
			   "ERROR: PMU cpu clock unknown for device %04X\n",
			   bus->chip_id);
		return 0;
	}
}

u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
{
	struct ssb_bus *bus = cc->dev->bus;

	switch (bus->chip_id) {
	case 0x5354:
		return 120000000;
	default:
		ssb_printk(KERN_ERR PFX
			   "ERROR: PMU controlclock unknown for device %04X\n",
			   bus->chip_id);
		return 0;
	}
}

@ -208,6 +208,9 @@ u32 ssb_cpu_clock(struct ssb_mipscore *mcore)
	struct ssb_bus *bus = mcore->dev->bus;
	u32 pll_type, n, m, rate = 0;

	if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)
		return ssb_pmu_get_cpu_clock(&bus->chipco);

	if (bus->extif.dev) {
		ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m);
	} else if (bus->chipco.dev) {

@ -1094,6 +1094,9 @@ u32 ssb_clockspeed(struct ssb_bus *bus)
	u32 plltype;
	u32 clkctl_n, clkctl_m;

	if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)
		return ssb_pmu_get_controlclock(&bus->chipco);

	if (ssb_extif_available(&bus->extif))
		ssb_extif_get_clockcontrol(&bus->extif, &plltype,
					   &clkctl_n, &clkctl_m);

@ -318,6 +318,9 @@ int ssb_bus_scan(struct ssb_bus *bus,
			bus->chip_package = 0;
		}
	}
	ssb_printk(KERN_INFO PFX "Found chip with id 0x%04X, rev 0x%02X and "
		   "package 0x%02X\n", bus->chip_id, bus->chip_rev,
		   bus->chip_package);
	if (!bus->nr_devices)
		bus->nr_devices = chipid_to_nrcores(bus->chip_id);
	if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {

@ -207,4 +207,8 @@ static inline void b43_pci_ssb_bridge_exit(void)
}
#endif /* CONFIG_SSB_B43_PCI_BRIDGE */

/* driver_chipcommon_pmu.c */
extern u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc);
extern u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc);

#endif /* LINUX_SSB_PRIVATE_H_ */

@ -136,6 +136,7 @@ struct bcma_device {
	bool dev_registered;

	u8 core_index;
	u8 core_unit;

	u32 addr;
	u32 wrap;
@ -195,6 +196,7 @@ struct bcma_bus {
	struct list_head cores;
	u8 nr_cores;
	u8 init_done:1;
	u8 num;

	struct bcma_drv_cc drv_cc;
	struct bcma_drv_pci drv_pci;
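The bcma.h hunk above adds core_unit next to core_index: core_index numbers every core found on the bus, while core_unit presumably distinguishes multiple instances of the same core ID. A hedged sketch of how a scan routine could derive such a unit number is shown below; it is illustrative only and not the actual drivers/bcma/scan.c code.

#include <linux/bcma/bcma.h>

/* Illustrative only: derive a unit number by counting already-scanned cores
 * with the same core ID. */
static u8 example_core_unit(struct bcma_bus *bus, u16 coreid)
{
	struct bcma_device *core;
	u8 unit = 0;

	list_for_each_entry(core, &bus->cores, list)
		if (core->id.id == coreid)
			unit++;

	return unit;
}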