iommu/amd: Consolidate amd_iommu_domain_flush_complete() call
Call amd_iommu_domain_flush_complete() from domain_flush_pages(). That
way we can remove the explicit calls of amd_iommu_domain_flush_complete()
from various places.

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20231122090215.6191-8-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 8d004ac1c6
parent bbf85fe10f
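For orientation, here is a minimal sketch of how domain_flush_pages() reads after this patch. It is reconstructed from the hunks below, not copied from the tree; the np_cache chunking loop is elided and the surrounding code is assumed.

static void domain_flush_pages(struct protection_domain *domain,
			       u64 address, size_t size)
{
	if (likely(!amd_iommu_np_cache)) {
		__domain_flush_pages(domain, address, size);

		/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
		amd_iommu_domain_flush_complete(domain);
		return;
	}

	/* ... np_cache path: flush in aligned power-of-two chunks (elided) ... */

	/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
	amd_iommu_domain_flush_complete(domain);
}

Callers therefore no longer need to call amd_iommu_domain_flush_complete() themselves after flushing pages, which is what the removals in the diff below implement.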
@@ -425,7 +425,6 @@ out:
 		 * increase_address_space().
 		 */
 		amd_iommu_domain_flush_tlb_pde(dom);
-		amd_iommu_domain_flush_complete(dom);
 		spin_unlock_irqrestore(&dom->lock, flags);
 	}
 
@@ -1489,6 +1489,10 @@ static void domain_flush_pages(struct protection_domain *domain,
 {
 	if (likely(!amd_iommu_np_cache)) {
 		__domain_flush_pages(domain, address, size);
+
+		/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+		amd_iommu_domain_flush_complete(domain);
+
 		return;
 	}
 
@@ -1525,6 +1529,9 @@ static void domain_flush_pages(struct protection_domain *domain,
 		address += flush_size;
 		size -= flush_size;
 	}
+
+	/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+	amd_iommu_domain_flush_complete(domain);
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
@@ -1558,7 +1565,6 @@ static void domain_flush_np_cache(struct protection_domain *domain,
 
 		spin_lock_irqsave(&domain->lock, flags);
 		domain_flush_pages(domain, iova, size);
-		amd_iommu_domain_flush_complete(domain);
 		spin_unlock_irqrestore(&domain->lock, flags);
 	}
 }
@@ -1836,12 +1842,9 @@ static void do_detach(struct iommu_dev_data *dev_data)
 	/* Flush the DTE entry */
 	device_flush_dte(dev_data);
 
-	/* Flush IOTLB */
+	/* Flush IOTLB and wait for the flushes to finish */
 	amd_iommu_domain_flush_tlb_pde(domain);
 
-	/* Wait for the flushes to finish */
-	amd_iommu_domain_flush_complete(domain);
-
 	/* decrease reference counters - needs to happen after the flushes */
 	domain->dev_iommu[iommu->index] -= 1;
 	domain->dev_cnt -= 1;
@@ -2018,7 +2021,6 @@ void amd_iommu_domain_update(struct protection_domain *domain)
 
 	/* Flush domain TLB(s) and wait for completion */
 	amd_iommu_domain_flush_tlb_pde(domain);
-	amd_iommu_domain_flush_complete(domain);
 }
 
 /*****************************************************************************
@@ -2451,10 +2453,9 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
 	}
 
 	/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
-	if (domain_flush) {
+	if (domain_flush)
 		amd_iommu_domain_flush_tlb_pde(pdomain);
-		amd_iommu_domain_flush_complete(pdomain);
-	}
+
 	pdomain->dirty_tracking = enable;
 	spin_unlock_irqrestore(&pdomain->lock, flags);
 
@@ -2558,7 +2559,6 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 
 	spin_lock_irqsave(&dom->lock, flags);
 	amd_iommu_domain_flush_tlb_pde(dom);
-	amd_iommu_domain_flush_complete(dom);
 	spin_unlock_irqrestore(&dom->lock, flags);
 }
 
@@ -2570,7 +2570,6 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
 
 	spin_lock_irqsave(&dom->lock, flags);
 	domain_flush_pages(dom, gather->start, gather->end - gather->start + 1);
-	amd_iommu_domain_flush_complete(dom);
 	spin_unlock_irqrestore(&dom->lock, flags);
 }
 
|