iommu/vt-d: Add helper to flush caches for context change
This helper is used to flush the related caches following a change in
a context table entry that was previously present. The VT-d
specification provides guidance for such invalidations in section
6.5.3.3.

This helper replaces the existing open code in the code paths where a
present context entry is being torn down.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240701112317.94022-2-baolu.lu@linux.intel.com
Link: https://lore.kernel.org/r/20240702130839.108139-7-baolu.lu@linux.intel.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 2b989ab9bc
commit f90584f4be
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1359,21 +1359,6 @@ static void iommu_disable_pci_caps(struct device_domain_info *info)
 	}
 }
 
-static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
-				    u64 addr, unsigned int mask)
-{
-	u16 sid, qdep;
-
-	if (!info || !info->ats_enabled)
-		return;
-
-	sid = info->bus << 8 | info->devfn;
-	qdep = info->ats_qdep;
-	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
-			   qdep, addr, mask);
-	quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
-}
-
 static void intel_flush_iotlb_all(struct iommu_domain *domain)
 {
 	cache_tag_flush_all(to_dmar_domain(domain));
@@ -1959,7 +1944,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
 {
 	struct intel_iommu *iommu = info->iommu;
 	struct context_entry *context;
-	u16 did_old;
 
 	spin_lock(&iommu->lock);
 	context = iommu_context_addr(iommu, bus, devfn, 0);
@@ -1968,24 +1952,10 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
 		return;
 	}
 
-	did_old = context_domain_id(context);
-
 	context_clear_entry(context);
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 	spin_unlock(&iommu->lock);
-	iommu->flush.flush_context(iommu,
-				   did_old,
-				   (((u16)bus) << 8) | devfn,
-				   DMA_CCMD_MASK_NOBIT,
-				   DMA_CCMD_DEVICE_INVL);
-
-	iommu->flush.flush_iotlb(iommu,
-				 did_old,
-				 0,
-				 0,
-				 DMA_TLB_DSI_FLUSH);
-
-	__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
+	intel_context_flush_present(info, context, true);
 }
 
 static int domain_setup_first_level(struct intel_iommu *iommu,
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -1143,6 +1143,10 @@ void cache_tag_flush_all(struct dmar_domain *domain);
 void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
 			      unsigned long end);
 
+void intel_context_flush_present(struct device_domain_info *info,
+				 struct context_entry *context,
+				 bool affect_domains);
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 void intel_svm_check(struct intel_iommu *iommu);
 int intel_svm_enable_prq(struct intel_iommu *iommu);
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -694,25 +694,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
 	context_clear_entry(context);
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 	spin_unlock(&iommu->lock);
-
-	/*
-	 * Cache invalidation for changes to a scalable-mode context table
-	 * entry.
-	 *
-	 * Section 6.5.3.3 of the VT-d spec:
-	 * - Device-selective context-cache invalidation;
-	 * - Domain-selective PASID-cache invalidation to affected domains
-	 *   (can be skipped if all PASID entries were not-present);
-	 * - Domain-selective IOTLB invalidation to affected domains;
-	 * - Global Device-TLB invalidation to affected functions.
-	 *
-	 * The iommu has been parked in the blocking state. All domains have
-	 * been detached from the device or PASID. The PASID and IOTLB caches
-	 * have been invalidated during the domain detach path.
-	 */
-	iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn),
-				   DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
-	devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+	intel_context_flush_present(info, context, false);
 }
 
 static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -874,3 +856,89 @@ int intel_pasid_setup_sm_context(struct device *dev)
 
 	return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
 }
+
+/*
+ * Global Device-TLB invalidation following changes in a context entry which
+ * was present.
+ */
+static void __context_flush_dev_iotlb(struct device_domain_info *info)
+{
+	if (!info->ats_enabled)
+		return;
+
+	qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn),
+			   info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH);
+
+	/*
+	 * There is no guarantee that the device DMA is stopped when it reaches
+	 * here. Therefore, always attempt the extra device TLB invalidation
+	 * quirk. The impact on performance is acceptable since this is not a
+	 * performance-critical path.
+	 */
+	quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH, IOMMU_NO_PASID,
+				  info->ats_qdep);
+}
+
+/*
+ * Cache invalidations after change in a context table entry that was present
+ * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations). If
+ * IOMMU is in scalable mode and all PASID table entries of the device were
+ * non-present, set flush_domains to false. Otherwise, true.
+ */
+void intel_context_flush_present(struct device_domain_info *info,
+				 struct context_entry *context,
+				 bool flush_domains)
+{
+	struct intel_iommu *iommu = info->iommu;
+	u16 did = context_domain_id(context);
+	struct pasid_entry *pte;
+	int i;
+
+	/*
+	 * Device-selective context-cache invalidation. The Domain-ID field
+	 * of the Context-cache Invalidate Descriptor is ignored by hardware
+	 * when operating in scalable mode. Therefore the @did value doesn't
+	 * matter in scalable mode.
+	 */
+	iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn),
+				   DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
+
+	/*
+	 * For legacy mode:
+	 * - Domain-selective IOTLB invalidation
+	 * - Global Device-TLB invalidation to all affected functions
+	 */
+	if (!sm_supported(iommu)) {
+		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+		__context_flush_dev_iotlb(info);
+
+		return;
+	}
+
+	/*
+	 * For scalable mode:
+	 * - Domain-selective PASID-cache invalidation to affected domains
+	 * - Domain-selective IOTLB invalidation to affected domains
+	 * - Global Device-TLB invalidation to affected functions
+	 */
+	if (flush_domains) {
+		/*
+		 * If the IOMMU is running in scalable mode and there might
+		 * be potential PASID translations, the caller should hold
+		 * the lock to ensure that context changes and cache flushes
+		 * are atomic.
+		 */
+		assert_spin_locked(&iommu->lock);
+		for (i = 0; i < info->pasid_table->max_pasid; i++) {
+			pte = intel_pasid_get_entry(info->dev, i);
+			if (!pte || !pasid_pte_is_present(pte))
+				continue;
+
+			did = pasid_get_domain_id(pte);
+			qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
+			iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+		}
+	}
+
+	__context_flush_dev_iotlb(info);
+}
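For readers following the invalidation ordering outside the kernel tree, below is a minimal standalone C model of the control flow that intel_context_flush_present() encodes. It is a sketch, not kernel code: struct model_iommu, the flush_* stubs, and the domain-ID values are invented for illustration; only the ordering (context-cache flush first, then the legacy versus scalable-mode steps from VT-d spec section 6.5.3.3, then the global device-TLB flush) mirrors the diff above.

/*
 * Standalone model (not kernel code) of the invalidation ordering in
 * intel_context_flush_present(). All types and flush calls are stubs.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_iommu {
	bool scalable_mode;	/* stands in for sm_supported() */
};

/* Stub flushes: each just reports which invalidation would be issued. */
static void flush_context(void)
{
	puts("device-selective context-cache invalidation");
}

static void flush_iotlb(int did)
{
	printf("domain-selective IOTLB invalidation, DID %d\n", did);
}

static void flush_pasid_cache(int did)
{
	printf("domain-selective PASID-cache invalidation, DID %d\n", did);
}

static void flush_dev_iotlb(void)
{
	puts("global device-TLB invalidation");
}

/*
 * Context cache first, then the mode-specific steps. pasid_dids models
 * the domain IDs found in the device's present PASID-table entries.
 */
static void context_flush_present(const struct model_iommu *iommu, int did,
				  const int *pasid_dids, int nr_present,
				  bool flush_domains)
{
	flush_context();

	if (!iommu->scalable_mode) {
		/* Legacy mode: DSI IOTLB flush for the old DID, then device TLB. */
		flush_iotlb(did);
		flush_dev_iotlb();
		return;
	}

	/* Scalable mode: walk the present PASID entries only when requested. */
	if (flush_domains) {
		for (int i = 0; i < nr_present; i++) {
			flush_pasid_cache(pasid_dids[i]);
			flush_iotlb(pasid_dids[i]);
		}
	}

	flush_dev_iotlb();
}

int main(void)
{
	const struct model_iommu legacy = { .scalable_mode = false };
	const struct model_iommu sm = { .scalable_mode = true };
	const int dids[] = { 3, 7 };	/* invented DIDs of present PASID entries */

	context_flush_present(&legacy, 5, NULL, 0, true);
	context_flush_present(&sm, 0, dids, 2, true);	/* PASID entries may be present */
	context_flush_present(&sm, 0, NULL, 0, false);	/* all PASID entries not-present */
	return 0;
}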