iommu/ipmmu-vmsa: Write IMCTR twice

Write IMCTR in both the root device and the leaf node.

To allow access to IMCTR, introduce the following function (see the sketch below):
 - ipmmu_ctx_write_all()

While at it, also rename the context access functions:
 - ipmmu_ctx_read() -> ipmmu_ctx_read_root()
 - ipmmu_ctx_write() -> ipmmu_ctx_write_root()

Signed-off-by: Magnus Damm <damm+renesas@opensource.se>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
commit d574893aee (parent 1c894225bf)
Magnus Damm, 2017-10-16 21:30:18 +09:00; committed by Alex Williamson
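For readers skimming past the diff, here is a condensed, self-contained sketch of the dual-write pattern the new helper implements. The structs and reg_write() below are simplified stand-ins for the driver's ipmmu_vmsa_device / ipmmu_vmsa_domain and its iowrite32()-based MMIO access, not the actual kernel definitions; the authoritative version is ipmmu_ctx_write_all() in the first hunk below.

#include <stdint.h>
#include <stdio.h>

#define IM_CTX_SIZE	0x40	/* per-context register stride, as in the driver */
#define IMCTR		0x0000	/* context control register offset */

/* Minimal stand-ins for the driver structures, for illustration only. */
struct ipmmu_vmsa_device {
	const char *name;
	struct ipmmu_vmsa_device *root;	/* points to itself on the root IPMMU */
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;	/* IPMMU the domain is attached to (may be a leaf) */
	unsigned int context_id;
};

/* Placeholder for an iowrite32() into the instance's MMIO region. */
static void reg_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, uint32_t data)
{
	printf("%s: write 0x%08x to offset 0x%x\n",
	       mmu->name, (unsigned int)data, offset);
}

/*
 * The pattern of ipmmu_ctx_write_all(): mirror the context register write
 * to the leaf instance (when the domain is not attached directly to the
 * root) and always write the root instance as well.
 */
static void ctx_write_all(struct ipmmu_vmsa_domain *domain, unsigned int reg, uint32_t data)
{
	if (domain->mmu != domain->mmu->root)
		reg_write(domain->mmu,
			  domain->context_id * IM_CTX_SIZE + reg, data);

	reg_write(domain->mmu->root,
		  domain->context_id * IM_CTX_SIZE + reg, data);
}

int main(void)
{
	struct ipmmu_vmsa_device root = { .name = "root" };
	struct ipmmu_vmsa_device leaf = { .name = "leaf" };
	struct ipmmu_vmsa_domain dom;

	root.root = &root;
	leaf.root = &root;

	dom.mmu = &leaf;	/* domain attached through a leaf IPMMU */
	dom.context_id = 1;

	ctx_write_all(&dom, IMCTR, 0x1);	/* IMCTR lands on both leaf and root */
	return 0;
}

Running the sketch prints one write for the leaf and one for the root, whereas the renamed *_root() helpers in the patch keep touching only the root instance.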

@@ -248,19 +248,31 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
 	iowrite32(data, mmu->base + offset);
 }
 
-static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
+static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
+			       unsigned int reg)
 {
 	return ipmmu_read(domain->mmu->root,
 			  domain->context_id * IM_CTX_SIZE + reg);
 }
 
-static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
-			    u32 data)
+static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
+				 unsigned int reg, u32 data)
 {
 	ipmmu_write(domain->mmu->root,
 		    domain->context_id * IM_CTX_SIZE + reg, data);
 }
 
+static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
+				unsigned int reg, u32 data)
+{
+	if (domain->mmu != domain->mmu->root)
+		ipmmu_write(domain->mmu,
+			    domain->context_id * IM_CTX_SIZE + reg, data);
+
+	ipmmu_write(domain->mmu->root,
+		    domain->context_id * IM_CTX_SIZE + reg, data);
+}
+
 /* -----------------------------------------------------------------------------
  * TLB and microTLB Management
  */
@@ -270,7 +282,7 @@ static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
 {
 	unsigned int count = 0;
 
-	while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
+	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
 		cpu_relax();
 		if (++count == TLB_LOOP_TIMEOUT) {
 			dev_err_ratelimited(domain->mmu->dev,
@@ -285,9 +297,9 @@ static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
 {
 	u32 reg;
 
-	reg = ipmmu_ctx_read(domain, IMCTR);
+	reg = ipmmu_ctx_read_root(domain, IMCTR);
 	reg |= IMCTR_FLUSH;
-	ipmmu_ctx_write(domain, IMCTR, reg);
+	ipmmu_ctx_write_all(domain, IMCTR, reg);
 
 	ipmmu_tlb_sync(domain);
 }
@@ -428,31 +440,32 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 
 	/* TTBR0 */
 	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
-	ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
-	ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
+	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
+	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
 
 	/*
 	 * TTBCR
 	 * We use long descriptors with inner-shareable WBWA tables and allocate
 	 * the whole 32-bit VA space to TTBR0.
 	 */
-	ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
-			IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
-			IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
+	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
+			     IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
+			     IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
 
 	/* MAIR0 */
-	ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
+	ipmmu_ctx_write_root(domain, IMMAIR0,
+			     domain->cfg.arm_lpae_s1_cfg.mair[0]);
 
 	/* IMBUSCR */
-	ipmmu_ctx_write(domain, IMBUSCR,
-			ipmmu_ctx_read(domain, IMBUSCR) &
-			~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
+	ipmmu_ctx_write_root(domain, IMBUSCR,
+			     ipmmu_ctx_read_root(domain, IMBUSCR) &
+			     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
 
 	/*
 	 * IMSTR
 	 * Clear all interrupt flags.
 	 */
-	ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));
+	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
 
 	/*
 	 * IMCTR
@@ -461,7 +474,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	 * software management as we have no use for it. Flush the TLB as
 	 * required when modifying the context registers.
 	 */
-	ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+	ipmmu_ctx_write_all(domain, IMCTR,
+			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
 
 	return 0;
 }
@@ -474,7 +488,7 @@ static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
 	 *
 	 * TODO: Is TLB flush really needed ?
 	 */
-	ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
+	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
 	ipmmu_tlb_sync(domain);
 
 	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
 }
@@ -490,11 +504,11 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 	u32 status;
 	u32 iova;
 
-	status = ipmmu_ctx_read(domain, IMSTR);
+	status = ipmmu_ctx_read_root(domain, IMSTR);
 	if (!(status & err_mask))
 		return IRQ_NONE;
 
-	iova = ipmmu_ctx_read(domain, IMEAR);
+	iova = ipmmu_ctx_read_root(domain, IMEAR);
 
 	/*
 	 * Clear the error status flags. Unlike traditional interrupt flag
@@ -502,7 +516,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 	 * seems to require 0. The error address register must be read before,
 	 * otherwise its value will be 0.
 	 */
-	ipmmu_ctx_write(domain, IMSTR, 0);
+	ipmmu_ctx_write_root(domain, IMSTR, 0);
 
 	/* Log fatal errors. */
 	if (status & IMSTR_MHIT)