Merge branch 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core/iommu changes from Ingo Molnar.

* 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  iommu/dmar: Use pr_format() instead of PREFIX to tidy up pr_*() calls
  iommu/dmar: Reserve mmio space used by the IOMMU, if the BIOS forgets to
  iommu/dmar: Replace printks with appropriate pr_*()
commit ceee0e95b6
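The first patch in this pull switches dmar.c from its hand-rolled PREFIX macro to the kernel's pr_fmt() convention: pr_fmt() is defined before any header that pulls in printk.h, and every pr_*() call in the file then gets a KBUILD_MODNAME-based prefix prepended automatically. A minimal sketch of that convention in a standalone module (the pr_fmt_demo module name and its messages are hypothetical, not part of this commit):

/*
 * Hypothetical demo module illustrating the pr_fmt() pattern adopted below.
 * pr_fmt() must be defined before the printk machinery is included; after
 * that, every pr_*() call is expanded with the prefix already attached.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* has to precede printk.h */

#include <linux/module.h>
#include <linux/printk.h>

static int __init pr_fmt_demo_init(void)
{
	pr_info("loaded\n");			/* logs "pr_fmt_demo: loaded" */
	pr_warn("odd value: %d\n", 42);		/* logs "pr_fmt_demo: odd value: 42" */
	return 0;
}

static void __exit pr_fmt_demo_exit(void)
{
	pr_info("unloaded\n");			/* logs "pr_fmt_demo: unloaded" */
}

module_init(pr_fmt_demo_init);
module_exit(pr_fmt_demo_exit);
MODULE_LICENSE("GPL");

With that in place, a call such as pr_warn("Invalid DMAR haw\n") in the hunks below ends up prefixed the same way the old printk(KERN_WARNING PREFIX ...) calls were, which is what lets the PREFIX define be dropped.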
@@ -26,6 +26,8 @@
  * These routines are used by both DMA-remapping and Interrupt-remapping
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
+
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/iova.h>
@@ -39,8 +41,6 @@
 #include <asm/irq_remapping.h>
 #include <asm/iommu_table.h>
 
-#define PREFIX "DMAR: "
-
 /* No locks are needed as DMA remapping hardware unit
  * list is constructed at boot time and hotplug of
  * these units are not supported by the architecture.
@@ -83,16 +83,12 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
 		 * ignore it
 		 */
 		if (!bus) {
-			printk(KERN_WARNING
-			PREFIX "Device scope bus [%d] not found\n",
-				scope->bus);
+			pr_warn("Device scope bus [%d] not found\n", scope->bus);
 			break;
 		}
 		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
 		if (!pdev) {
-			printk(KERN_WARNING PREFIX
-			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
-				segment, bus->number, path->dev, path->fn);
+			/* warning will be printed below */
 			break;
 		}
 		path ++;
@@ -100,9 +96,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
 		bus = pdev->subordinate;
 	}
 	if (!pdev) {
-		printk(KERN_WARNING PREFIX
-		"Device scope device [%04x:%02x:%02x.%02x] not found\n",
-			segment, scope->bus, path->dev, path->fn);
+		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
+			segment, scope->bus, path->dev, path->fn);
 		*dev = NULL;
 		return 0;
 	}
@@ -110,9 +105,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
 	     pdev->subordinate) || (scope->entry_type == \
 	    ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
 		pci_dev_put(pdev);
-		printk(KERN_WARNING PREFIX
-			"Device scope type does not match for %s\n",
-			pci_name(pdev));
+		pr_warn("Device scope type does not match for %s\n",
+			pci_name(pdev));
 		return -EINVAL;
 	}
 	*dev = pdev;
@@ -134,8 +128,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
 		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
 			(*cnt)++;
 		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
-			printk(KERN_WARNING PREFIX
-				"Unsupported device scope\n");
+			pr_warn("Unsupported device scope\n");
 		}
 		start += scope->length;
 	}
@@ -261,25 +254,23 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
 		drhd = container_of(header, struct acpi_dmar_hardware_unit,
 				    header);
-		printk (KERN_INFO PREFIX
-			"DRHD base: %#016Lx flags: %#x\n",
+		pr_info("DRHD base: %#016Lx flags: %#x\n",
 			(unsigned long long)drhd->address, drhd->flags);
 		break;
 	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
 		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
 				    header);
-		printk (KERN_INFO PREFIX
-			"RMRR base: %#016Lx end: %#016Lx\n",
+		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
 			(unsigned long long)rmrr->base_address,
 			(unsigned long long)rmrr->end_address);
 		break;
 	case ACPI_DMAR_TYPE_ATSR:
 		atsr = container_of(header, struct acpi_dmar_atsr, header);
-		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+		pr_info("ATSR flags: %#x\n", atsr->flags);
 		break;
 	case ACPI_DMAR_HARDWARE_AFFINITY:
 		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
-		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
 			(unsigned long long)rhsa->base_address,
 			rhsa->proximity_domain);
 		break;
@@ -299,7 +290,7 @@ static int __init dmar_table_detect(void)
 				&dmar_tbl_size);
 
 	if (ACPI_SUCCESS(status) && !dmar_tbl) {
-		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+		pr_warn("Unable to map DMAR\n");
 		status = AE_NOT_FOUND;
 	}
 
@@ -333,20 +324,18 @@ parse_dmar_table(void)
 		return -ENODEV;
 
 	if (dmar->width < PAGE_SHIFT - 1) {
-		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
+		pr_warn("Invalid DMAR haw\n");
 		return -EINVAL;
 	}
 
-	printk (KERN_INFO PREFIX "Host address width %d\n",
-		dmar->width + 1);
+	pr_info("Host address width %d\n", dmar->width + 1);
 
 	entry_header = (struct acpi_dmar_header *)(dmar + 1);
 	while (((unsigned long)entry_header) <
 			(((unsigned long)dmar) + dmar_tbl->length)) {
 		/* Avoid looping forever on bad ACPI tables */
 		if (entry_header->length == 0) {
-			printk(KERN_WARNING PREFIX
-				"Invalid 0-length structure\n");
+			pr_warn("Invalid 0-length structure\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -369,8 +358,7 @@ parse_dmar_table(void)
 #endif
 			break;
 		default:
-			printk(KERN_WARNING PREFIX
-				"Unknown DMAR structure type %d\n",
+			pr_warn("Unknown DMAR structure type %d\n",
 				entry_header->type);
 			ret = 0; /* for forward compatibility */
 			break;
@@ -469,12 +457,12 @@ int __init dmar_table_init(void)
 	ret = parse_dmar_table();
 	if (ret) {
 		if (ret != -ENODEV)
-			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
+			pr_info("parse DMAR table failure.\n");
 		return ret;
 	}
 
 	if (list_empty(&dmar_drhd_units)) {
-		printk(KERN_INFO PREFIX "No DMAR devices found\n");
+		pr_info("No DMAR devices found\n");
 		return -ENODEV;
 	}
 
@@ -506,8 +494,7 @@ int __init check_zero_address(void)
 			(((unsigned long)dmar) + dmar_tbl->length)) {
 		/* Avoid looping forever on bad ACPI tables */
 		if (entry_header->length == 0) {
-			printk(KERN_WARNING PREFIX
-				"Invalid 0-length structure\n");
+			pr_warn("Invalid 0-length structure\n");
 			return 0;
 		}
 
@@ -558,8 +545,7 @@ int __init detect_intel_iommu(void)
 
 		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
 		    dmar->flags & 0x1)
-			printk(KERN_INFO
-			       "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
 
 		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
 			iommu_detected = 1;
@@ -579,14 +565,89 @@ int __init detect_intel_iommu(void)
 }
 
 
+static void unmap_iommu(struct intel_iommu *iommu)
+{
+	iounmap(iommu->reg);
+	release_mem_region(iommu->reg_phys, iommu->reg_size);
+}
+
+/**
+ * map_iommu: map the iommu's registers
+ * @iommu: the iommu to map
+ * @phys_addr: the physical address of the base resgister
+ *
+ * Memory map the iommu's registers. Start w/ a single page, and
+ * possibly expand if that turns out to be insufficent.
+ */
+static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
+{
+	int map_size, err=0;
+
+	iommu->reg_phys = phys_addr;
+	iommu->reg_size = VTD_PAGE_SIZE;
+
+	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
+		pr_err("IOMMU: can't reserve memory\n");
+		err = -EBUSY;
+		goto out;
+	}
+
+	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+	if (!iommu->reg) {
+		pr_err("IOMMU: can't map the region\n");
+		err = -ENOMEM;
+		goto release;
+	}
+
+	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+		err = -EINVAL;
+		warn_invalid_dmar(phys_addr, " returns all ones");
+		goto unmap;
+	}
+
+	/* the registers might be more than one page */
+	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+			 cap_max_fault_reg_offset(iommu->cap));
+	map_size = VTD_PAGE_ALIGN(map_size);
+	if (map_size > iommu->reg_size) {
+		iounmap(iommu->reg);
+		release_mem_region(iommu->reg_phys, iommu->reg_size);
+		iommu->reg_size = map_size;
+		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
+					iommu->name)) {
+			pr_err("IOMMU: can't reserve memory\n");
+			err = -EBUSY;
+			goto out;
+		}
+		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+		if (!iommu->reg) {
+			pr_err("IOMMU: can't map the region\n");
+			err = -ENOMEM;
+			goto release;
+		}
+	}
+	err = 0;
+	goto out;
+
+unmap:
+	iounmap(iommu->reg);
+release:
+	release_mem_region(iommu->reg_phys, iommu->reg_size);
+out:
+	return err;
+}
+
 int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
 	struct intel_iommu *iommu;
-	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
 	int agaw = 0;
 	int msagaw = 0;
+	int err;
 
 	if (!drhd->reg_base_addr) {
 		warn_invalid_dmar(0, "");
@@ -600,30 +661,22 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->seq_id = iommu_allocated++;
 	sprintf (iommu->name, "dmar%d", iommu->seq_id);
 
-	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
-	if (!iommu->reg) {
-		printk(KERN_ERR "IOMMU: can't map the region\n");
+	err = map_iommu(iommu, drhd->reg_base_addr);
+	if (err) {
+		pr_err("IOMMU: failed to map %s\n", iommu->name);
 		goto error;
 	}
-	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
-	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
-	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
-		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
-		goto err_unmap;
-	}
 
+	err = -EINVAL;
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
-		printk(KERN_ERR
-			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
+		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
 		goto err_unmap;
 	}
 	msagaw = iommu_calculate_max_sagaw(iommu);
 	if (msagaw < 0) {
-		printk(KERN_ERR
-			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
+		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
 		goto err_unmap;
 	}
@@ -632,19 +685,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
 	iommu->node = -1;
 
-	/* the registers might be more than one page */
-	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
-		cap_max_fault_reg_offset(iommu->cap));
-	map_size = VTD_PAGE_ALIGN(map_size);
-	if (map_size > VTD_PAGE_SIZE) {
-		iounmap(iommu->reg);
-		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
-		if (!iommu->reg) {
-			printk(KERN_ERR "IOMMU: can't map the region\n");
-			goto error;
-		}
-	}
-
 	ver = readl(iommu->reg + DMAR_VER_REG);
 	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
 		iommu->seq_id,
@@ -659,10 +699,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	return 0;
 
  err_unmap:
-	iounmap(iommu->reg);
+	unmap_iommu(iommu);
  error:
 	kfree(iommu);
-	return -1;
+	return err;
 }
 
 void free_iommu(struct intel_iommu *iommu)
@@ -673,7 +713,8 @@ void free_iommu(struct intel_iommu *iommu)
 	free_dmar_iommu(iommu);
 
 	if (iommu->reg)
-		iounmap(iommu->reg);
+		unmap_iommu(iommu);
+
 	kfree(iommu);
 }
 
@@ -710,7 +751,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 	if (fault & DMA_FSTS_IQE) {
 		head = readl(iommu->reg + DMAR_IQH_REG);
 		if ((head >> DMAR_IQ_SHIFT) == index) {
-			printk(KERN_ERR "VT-d detected invalid descriptor: "
+			pr_err("VT-d detected invalid descriptor: "
 				"low=%llx, high=%llx\n",
 				(unsigned long long)qi->desc[index].low,
 				(unsigned long long)qi->desc[index].high);
@@ -1129,15 +1170,14 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
 	reason = dmar_get_fault_reason(fault_reason, &fault_type);
 
 	if (fault_type == INTR_REMAP)
-		printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
+		pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
 			"fault index %llx\n"
 			"INTR-REMAP:[fault reason %02d] %s\n",
 			(source_id >> 8), PCI_SLOT(source_id & 0xFF),
 			PCI_FUNC(source_id & 0xFF), addr >> 48,
 			fault_reason, reason);
 	else
-		printk(KERN_ERR
-			"DMAR:[%s] Request device [%02x:%02x.%d] "
+		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
 			"fault addr %llx \n"
 			"DMAR:[fault reason %02d] %s\n",
 			(type ? "DMA Read" : "DMA Write"),
@@ -1157,8 +1197,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
 	if (fault_status)
-		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
-			fault_status);
+		pr_err("DRHD: handling fault status reg %x\n", fault_status);
 
 	/* TBD: ignore advanced fault log currently */
 	if (!(fault_status & DMA_FSTS_PPF))
@@ -1224,7 +1263,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 
 	irq = create_irq();
 	if (!irq) {
-		printk(KERN_ERR "IOMMU: no free vectors\n");
+		pr_err("IOMMU: no free vectors\n");
 		return -EINVAL;
 	}
 
@@ -1241,7 +1280,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 
 	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
 	if (ret)
-		printk(KERN_ERR "IOMMU: can't request irq\n");
+		pr_err("IOMMU: can't request irq\n");
 	return ret;
 }
 
@@ -1258,8 +1297,7 @@ int __init enable_drhd_fault_handling(void)
 		ret = dmar_set_interrupt(iommu);
 
 		if (ret) {
-			printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
-			       " interrupt, ret %d\n",
+			pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
 			       (unsigned long long)drhd->reg_base_addr, ret);
 			return -1;
 		}
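The map_iommu()/unmap_iommu() helpers added above carry the "reserve mmio space" part of this pull: the register window is claimed with request_mem_region() before it is ioremap()ed, re-reserved at the larger size when the capability registers say one page is not enough, and released again on unmap. A stripped-down sketch of that reserve-then-map pairing on its own (the demo_* function names, the "demo-iommu" resource string, and the parameters are hypothetical, not from this commit):

#include <linux/io.h>
#include <linux/ioport.h>

/*
 * Hypothetical helper mirroring the pattern map_iommu() uses above:
 * claim the physical window first (so it shows up in /proc/iomem and
 * clashes with other drivers are caught), then map it.
 */
static void __iomem *demo_map_regs(phys_addr_t base, resource_size_t size)
{
	void __iomem *regs;

	if (!request_mem_region(base, size, "demo-iommu"))
		return NULL;	/* someone else already owns this range */

	regs = ioremap(base, size);
	if (!regs) {
		release_mem_region(base, size);
		return NULL;
	}
	return regs;
}

/* undo both steps, in reverse order, as unmap_iommu() does above */
static void demo_unmap_regs(void __iomem *regs, phys_addr_t base,
			    resource_size_t size)
{
	iounmap(regs);
	release_mem_region(base, size);
}

The final hunk below is the matching bookkeeping: struct intel_iommu grows reg_phys and reg_size fields so the exact reservation can be dropped again later.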
@@ -308,6 +308,8 @@ enum {
 
 struct intel_iommu {
 	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
+	u64 		reg_phys; /* physical address of hw register set */
+	u64		reg_size; /* size of hw register set */
 	u64		cap;
 	u64		ecap;
 	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */