bcma: pci: use fixed windows when possible

Some cores are mapped in a fixed way, so their registers can be accessed
at any time without switching the sliding window.

Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Rafał Miłecki 2011-12-05 19:13:39 +01:00 committed by John W. Linville
parent 3df6eaea76
commit 439678f8b0


@@ -21,48 +21,58 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
 	pr_debug("Switched to core: 0x%X\n", core->id.id);
 }
 
+/* Provides access to the requested core. Returns base offset that has to be
+ * used. It makes use of fixed windows when possible. */
+static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
+{
+	switch (core->id.id) {
+	case BCMA_CORE_CHIPCOMMON:
+		return 3 * BCMA_CORE_SIZE;
+	case BCMA_CORE_PCIE:
+		return 2 * BCMA_CORE_SIZE;
+	}
+
+	if (core->bus->mapped_core != core)
+		bcma_host_pci_switch_core(core);
+	return 0;
+}
+
 static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	return ioread8(core->bus->mmio + offset);
 }
 
 static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	return ioread16(core->bus->mmio + offset);
 }
 
 static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	return ioread32(core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
 				 u8 value)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	iowrite8(value, core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
 				  u16 value)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	iowrite16(value, core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
 				  u32 value)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	iowrite32(value, core->bus->mmio + offset);
 }
 
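For context, a standalone sketch (not part of the patch) of the BAR0 layout the change relies on: ChipCommon and the PCIe core each get a fixed 4 KiB window inside BAR0, so their registers can be reached without reprogramming the sliding window used for whichever core is currently mapped. The constants below mirror the values in the kernel's bcma headers, and the helper name core_base_offset() is hypothetical; it only illustrates the lookup done by bcma_host_pci_provide_access_to_core().

	/* Hypothetical, self-contained illustration of the fixed-window lookup;
	 * constants mirror include/linux/bcma/bcma.h and bcma_regs.h. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BCMA_CORE_SIZE		0x1000	/* each BAR0 window is 4 KiB */
	#define BCMA_CORE_CHIPCOMMON	0x800
	#define BCMA_CORE_PCIE		0x820

	/* Return the fixed BAR0 offset for cores that have one; for all other
	 * cores report that the sliding window at 0x0000 must be switched. */
	static uint16_t core_base_offset(uint16_t core_id, bool *needs_switch)
	{
		*needs_switch = false;
		switch (core_id) {
		case BCMA_CORE_CHIPCOMMON:
			return 3 * BCMA_CORE_SIZE;	/* fixed window at 0x3000 */
		case BCMA_CORE_PCIE:
			return 2 * BCMA_CORE_SIZE;	/* fixed window at 0x2000 */
		}
		*needs_switch = true;			/* sliding window at 0x0000 */
		return 0;
	}

	int main(void)
	{
		bool sw;
		printf("chipcommon: 0x%x\n", core_base_offset(BCMA_CORE_CHIPCOMMON, &sw));
		printf("pcie:       0x%x\n", core_base_offset(BCMA_CORE_PCIE, &sw));
		return 0;
	}

The win is that accesses to these two frequently used cores no longer bounce the BAR0 window back and forth, which is what the removed bcma_host_pci_switch_core() calls in each accessor were doing.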