IOMMU Fixes for Linux v6.8-rc5

Including:
 
 	- Intel VT-d fixes for nested domain handling:
 	  - Cache invalidation for changes in a parent domain
 	  - Dirty tracking setting for parent and nested domains
 	  - Fix a constant-out-of-range warning
 
 	- ARM SMMU fixes:
 	  - Fix CD allocation from atomic context when using SVA with SMMUv3
 	  - Revert the conversion of SMMUv2 to domain_alloc_paging(), as it
 	    breaks the boot for Qualcomm MSM8996 devices
 
 	- Restore SVA handle sharing in core code as it turned out there are
 	  still drivers relying on it
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAmXaZ/QACgkQK/BELZcB
 GuPTaQ/9GJTAmfIktgPWqAgUOUa6fWpEsmChwVgpMDxw4fyQsJrKZ/YQfX8Vy17S
 TV9EyCFGALRdcE+VX41KtrvG5MWY5cHa4BlJ6/KaD3tilhViTsACN7JkGiWnlEIs
 4t6YefoaMvgdaJ4nqysFlHtlaVKHObTi6toyyCCHIywOcei2YqX3mqTIiPzUyflj
 dqx0HwBG5uz6q00JNbHVQHeMc8rIEvT61oMssQUMNt8KPvdNJl9OrZSRvXimHABU
 Vh0nMKYLAqHp40IvoXScA9Aj/DWTwE2346/Xpd6hnZ/yJvBlm6YQWKpJtjJz3z2Y
 ZnK+cmFPAaC0EE7dlEpN7hcwtEqumw1K+CJ4s8rfnNY5IdcY0DIRAVxCLsh2YH15
 rcupp3iNJMR3JeVUYHKe+mEHcYSyC9SABw01aq9NdEu1LRfXjLRrYFVh3yovpshV
 abXssThTFWQTfTvUs2Vt9zjVIinST3ogki+mxyfgqTuKYj9GY8P3eqYRFrdxGJnI
 mtwu0ByRwVpdNUBUWsOk+3sSdvPBsgd/Fchr8Gmpl5W2cAjPOetYn0th3hksG7tn
 9qyUDwTHDaQsXEONFwS44eP9KlccUUvvLhFHCBghDm5A6i/aDdnz6d1CdfPOdb4y
 DwOQem4AiOmeTBGEZA6Z/ZSKsqZGmpZditBveki0ImauxrUZXMU=
 =aVOB
 -----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-v6.8-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Intel VT-d fixes for nested domain handling:

      - Cache invalidation for changes in a parent domain

      - Dirty tracking setting for parent and nested domains

      - Fix a constant-out-of-range warning

 - ARM SMMU fixes:

      - Fix CD allocation from atomic context when using SVA with SMMUv3

      - Revert the conversion of SMMUv2 to domain_alloc_paging(), as it
        breaks the boot for Qualcomm MSM8996 devices

 - Restore SVA handle sharing in core code as it turned out there are
   still drivers relying on it

* tag 'iommu-fixes-v6.8-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/sva: Restore SVA handle sharing
  iommu/arm-smmu-v3: Do not use GFP_KERNEL under a spinlock
  iommu/vt-d: Fix constant-out-of-range warning
  iommu/vt-d: Set SSADE when attaching to a parent with dirty tracking
  iommu/vt-d: Add missing dirty tracking set for parent domain
  iommu/vt-d: Wrap the dirty tracking loop to be a helper
  iommu/vt-d: Remove domain parameter for intel_pasid_setup_dirty_tracking()
  iommu/vt-d: Add missing device iotlb flush for parent domain
  iommu/vt-d: Update iotlb in nested domain attach
  iommu/vt-d: Add missing iotlb flush for parent domain
  iommu/vt-d: Add __iommu_flush_iotlb_psi()
  iommu/vt-d: Track nested domains in parent
  Revert "iommu/arm-smmu: Convert to domain_alloc_paging()"
commit 91403d50e9
Linus Torvalds, 2024-02-24 15:59:26 -08:00
9 changed files with 225 additions and 93 deletions

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c

@@ -292,10 +292,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
unsigned long flags;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
struct arm_smmu_master *master;
list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
@@ -325,28 +323,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
goto err_free_cd;
}
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
cd);
if (ret) {
list_for_each_entry_from_reverse(
master, &smmu_domain->devices, domain_head)
arm_smmu_write_ctx_desc(
master, mm_get_enqcmd_pasid(mm), NULL);
break;
}
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
if (ret)
goto err_put_notifier;
list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
return smmu_mn;
err_put_notifier:
/* Frees smmu_mn */
mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
arm_smmu_free_shared_cd(cd);
return ERR_PTR(ret);
@@ -363,9 +342,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
list_del(&smmu_mn->list);
arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
NULL);
/*
* If we went through clear(), we've already invalidated, and no
* new TLB entry can have been formed.
@@ -381,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
arm_smmu_free_shared_cd(cd);
}
static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
struct mm_struct *mm)
{
int ret;
struct arm_smmu_bond *bond;
@@ -404,9 +381,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
goto err_free_bond;
}
ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
if (ret)
goto err_put_notifier;
list_add(&bond->list, &master->bonds);
return 0;
err_put_notifier:
arm_smmu_mmu_notifier_put(bond->smmu_mn);
err_free_bond:
kfree(bond);
return ret;
@@ -568,6 +551,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
mutex_lock(&sva_lock);
arm_smmu_write_ctx_desc(master, id, NULL);
list_for_each_entry(t, &master->bonds, list) {
if (t->mm == mm) {
bond = t;
@@ -590,7 +576,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct mm_struct *mm = domain->mm;
mutex_lock(&sva_lock);
ret = __arm_smmu_sva_bind(dev, mm);
ret = __arm_smmu_sva_bind(dev, id, mm);
mutex_unlock(&sva_lock);
return ret;

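The hunks above move the arm_smmu_write_ctx_desc() call out of the devices_lock critical section and into __arm_smmu_sva_bind(), since writing the CD can allocate memory with GFP_KERNEL, which may sleep and therefore must not run in atomic context. The following is a minimal kernel-style sketch of the general rule the fix applies, not the driver code; struct item, items_lock and add_item() are made-up names for illustration.

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head list;
            int payload;
    };

    static DEFINE_SPINLOCK(items_lock);
    static LIST_HEAD(items);

    static int add_item(int payload)
    {
            struct item *it;
            unsigned long flags;

            /* Allocate before taking the lock so GFP_KERNEL is free to sleep. */
            it = kzalloc(sizeof(*it), GFP_KERNEL);
            if (!it)
                    return -ENOMEM;
            it->payload = payload;

            /* Only non-sleeping work happens inside the spinlock. */
            spin_lock_irqsave(&items_lock, flags);
            list_add(&it->list, &items);
            spin_unlock_irqrestore(&items_lock, flags);

            return 0;
    }

Passing GFP_ATOMIC under the lock would also avoid sleeping, but draws from a limited reserve; restructuring so the allocation happens outside the lock, as the hunks above do, is the more robust option.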
drivers/iommu/arm/arm-smmu/arm-smmu.c

@@ -859,10 +859,14 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
arm_smmu_rpm_put(smmu);
}
static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
if (type != IOMMU_DOMAIN_UNMANAGED) {
if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
return NULL;
}
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -875,15 +879,6 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
if (dev) {
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
kfree(smmu_domain);
return NULL;
}
}
return &smmu_domain->domain;
}
@@ -1600,7 +1595,7 @@ static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.domain_alloc_paging = arm_smmu_domain_alloc_paging,
.domain_alloc = arm_smmu_domain_alloc,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,

drivers/iommu/intel/iommu.c

@@ -396,8 +396,6 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
static void domain_update_iotlb(struct dmar_domain *domain);
/* Return the super pagesize bitmap if supported. */
static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
{
@@ -1218,7 +1216,7 @@ domain_lookup_dev_info(struct dmar_domain *domain,
return NULL;
}
static void domain_update_iotlb(struct dmar_domain *domain)
void domain_update_iotlb(struct dmar_domain *domain)
{
struct dev_pasid_info *dev_pasid;
struct device_domain_info *info;
@@ -1368,6 +1366,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
spin_unlock_irqrestore(&domain->lock, flags);
}
static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
unsigned long pfn, unsigned int pages,
int ih)
{
unsigned int aligned_pages = __roundup_pow_of_two(pages);
unsigned long bitmask = aligned_pages - 1;
unsigned int mask = ilog2(aligned_pages);
u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
/*
* PSI masks the low order bits of the base address. If the
* address isn't aligned to the mask, then compute a mask value
* needed to ensure the target range is flushed.
*/
if (unlikely(bitmask & pfn)) {
unsigned long end_pfn = pfn + pages - 1, shared_bits;
/*
* Since end_pfn <= pfn + bitmask, the only way bits
* higher than bitmask can differ in pfn and end_pfn is
* by carrying. This means after masking out bitmask,
* high bits starting with the first set bit in
* shared_bits are all equal in both pfn and end_pfn.
*/
shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
}
/*
* Fallback to domain selective flush if no PSI support or
* the size is too big.
*/
if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH);
else
iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
DMA_TLB_PSI_FLUSH);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages,
@@ -1384,42 +1422,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
if (ih)
ih = 1 << 6;
if (domain->use_first_level) {
if (domain->use_first_level)
domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
} else {
unsigned long bitmask = aligned_pages - 1;
/*
* PSI masks the low order bits of the base address. If the
* address isn't aligned to the mask, then compute a mask value
* needed to ensure the target range is flushed.
*/
if (unlikely(bitmask & pfn)) {
unsigned long end_pfn = pfn + pages - 1, shared_bits;
/*
* Since end_pfn <= pfn + bitmask, the only way bits
* higher than bitmask can differ in pfn and end_pfn is
* by carrying. This means after masking out bitmask,
* high bits starting with the first set bit in
* shared_bits are all equal in both pfn and end_pfn.
*/
shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
}
/*
* Fallback to domain selective flush if no PSI support or
* the size is too big.
*/
if (!cap_pgsel_inv(iommu->cap) ||
mask > cap_max_amask_val(iommu->cap))
iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH);
else
iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
DMA_TLB_PSI_FLUSH);
}
else
__iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
/*
* In caching mode, changes of pages from non-present to present require
@@ -1443,6 +1449,46 @@ static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *
iommu_flush_write_buffer(iommu);
}
/*
* Flush the relevant caches in nested translation if the domain
* also serves as a parent
*/
static void parent_domain_flush(struct dmar_domain *domain,
unsigned long pfn,
unsigned long pages, int ih)
{
struct dmar_domain *s1_domain;
spin_lock(&domain->s1_lock);
list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
struct device_domain_info *device_info;
struct iommu_domain_info *info;
unsigned long flags;
unsigned long i;
xa_for_each(&s1_domain->iommu_array, i, info)
__iommu_flush_iotlb_psi(info->iommu, info->did,
pfn, pages, ih);
if (!s1_domain->has_iotlb_device)
continue;
spin_lock_irqsave(&s1_domain->lock, flags);
list_for_each_entry(device_info, &s1_domain->devices, link)
/*
* Address translation cache in device side caches the
* result of nested translation. There is no easy way
* to identify the exact set of nested translations
* affected by a change in S2. So just flush the entire
* device cache.
*/
__iommu_flush_dev_iotlb(device_info, 0,
MAX_AGAW_PFN_WIDTH);
spin_unlock_irqrestore(&s1_domain->lock, flags);
}
spin_unlock(&domain->s1_lock);
}
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
@@ -1462,6 +1508,9 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
if (!cap_caching_mode(iommu->cap))
iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
}
if (dmar_domain->nested_parent)
parent_domain_flush(dmar_domain, 0, -1, 0);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1985,6 +2034,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
if (domain->nested_parent)
parent_domain_flush(domain, start_pfn,
lvl_pages, 0);
}
pte++;
@@ -3883,6 +3935,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
struct intel_iommu *iommu = info->iommu;
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
/* Must be NESTING domain */
@@ -3908,11 +3961,16 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
if (!domain)
return ERR_PTR(-ENOMEM);
if (nested_parent)
to_dmar_domain(domain)->nested_parent = true;
dmar_domain = to_dmar_domain(domain);
if (nested_parent) {
dmar_domain->nested_parent = true;
INIT_LIST_HEAD(&dmar_domain->s1_domains);
spin_lock_init(&dmar_domain->s1_lock);
}
if (dirty_tracking) {
if (to_dmar_domain(domain)->use_first_level) {
if (dmar_domain->use_first_level) {
iommu_domain_free(domain);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -3924,8 +3982,12 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
WARN_ON(dmar_domain->nested_parent &&
!list_empty(&dmar_domain->s1_domains));
if (domain != &si_domain->domain)
domain_exit(to_dmar_domain(domain));
domain_exit(dmar_domain);
}
int prepare_domain_attach_device(struct iommu_domain *domain,
@@ -4107,6 +4169,9 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
if (dmar_domain->nested_parent)
parent_domain_flush(dmar_domain, start_pfn, nrpages,
list_empty(&gather->freelist));
put_pages_list(&gather->freelist);
}
@@ -4664,21 +4729,70 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
return vtd;
}
/*
* Set dirty tracking for the device list of a domain. The caller must
* hold the domain->lock when calling it.
*/
static int device_set_dirty_tracking(struct list_head *devices, bool enable)
{
struct device_domain_info *info;
int ret = 0;
list_for_each_entry(info, devices, link) {
ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev,
IOMMU_NO_PASID, enable);
if (ret)
break;
}
return ret;
}
static int parent_domain_set_dirty_tracking(struct dmar_domain *domain,
bool enable)
{
struct dmar_domain *s1_domain;
unsigned long flags;
int ret;
spin_lock(&domain->s1_lock);
list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
spin_lock_irqsave(&s1_domain->lock, flags);
ret = device_set_dirty_tracking(&s1_domain->devices, enable);
spin_unlock_irqrestore(&s1_domain->lock, flags);
if (ret)
goto err_unwind;
}
spin_unlock(&domain->s1_lock);
return 0;
err_unwind:
list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
spin_lock_irqsave(&s1_domain->lock, flags);
device_set_dirty_tracking(&s1_domain->devices,
domain->dirty_tracking);
spin_unlock_irqrestore(&s1_domain->lock, flags);
}
spin_unlock(&domain->s1_lock);
return ret;
}
static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct device_domain_info *info;
int ret;
spin_lock(&dmar_domain->lock);
if (dmar_domain->dirty_tracking == enable)
goto out_unlock;
list_for_each_entry(info, &dmar_domain->devices, link) {
ret = intel_pasid_setup_dirty_tracking(info->iommu,
info->domain, info->dev,
IOMMU_NO_PASID, enable);
ret = device_set_dirty_tracking(&dmar_domain->devices, enable);
if (ret)
goto err_unwind;
if (dmar_domain->nested_parent) {
ret = parent_domain_set_dirty_tracking(dmar_domain, enable);
if (ret)
goto err_unwind;
}
@@ -4690,10 +4804,8 @@ out_unlock:
return 0;
err_unwind:
list_for_each_entry(info, &dmar_domain->devices, link)
intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain,
info->dev, IOMMU_NO_PASID,
dmar_domain->dirty_tracking);
device_set_dirty_tracking(&dmar_domain->devices,
dmar_domain->dirty_tracking);
spin_unlock(&dmar_domain->lock);
return ret;
}
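The dirty-tracking hunks above follow a set-then-unwind pattern: device_set_dirty_tracking() applies the setting to one device list, parent_domain_set_dirty_tracking() repeats that for every nested domain, and any failure rolls everything back to the previous value before returning. Below is a minimal stand-alone C sketch of that pattern; entry_apply() and set_all_or_rollback() are hypothetical names, not the driver's helpers.

    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
            bool dirty_tracking;
    };

    /* Stand-in for the per-device programming step, which may fail. */
    static int entry_apply(struct entry *e, bool enable)
    {
            e->dirty_tracking = enable;
            return 0;
    }

    static int set_all_or_rollback(struct entry *entries, size_t n,
                                   bool enable, bool old_value)
    {
            size_t i;
            int ret;

            for (i = 0; i < n; i++) {
                    ret = entry_apply(&entries[i], enable);
                    if (ret)
                            goto unwind;
            }
            return 0;

    unwind:
            /* Re-apply the previous value to everything switched so far. */
            while (i--)
                    entry_apply(&entries[i], old_value);
            return ret;
    }

The driver's unwind simply re-applies the old value to the whole list rather than only the entries already touched, which is equivalent since re-writing an unchanged entry is harmless.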

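For the __iommu_flush_iotlb_psi() helper earlier in this file's diff, the page-selective invalidation mask is the log2 of the page count rounded up to a power of two, widened when the start pfn is not aligned to that size so a single flush still covers the whole range. The small user-space C program below reproduces the arithmetic; roundup_pow_of_two(), ffs_ul() and psi_mask() are simplified stand-ins, not the kernel helpers themselves.

    #include <stdio.h>

    /* Minimal stand-in for the kernel's __roundup_pow_of_two(); assumes n >= 1. */
    static unsigned long roundup_pow_of_two(unsigned long n)
    {
            unsigned long p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    /* Index of the lowest set bit, like the kernel's __ffs() (x must be non-zero). */
    static unsigned int ffs_ul(unsigned long x)
    {
            return (unsigned int)__builtin_ctzl(x);
    }

    static unsigned int psi_mask(unsigned long pfn, unsigned int pages)
    {
            unsigned long aligned_pages = roundup_pow_of_two(pages);
            unsigned long bitmask = aligned_pages - 1;
            unsigned int mask = ffs_ul(aligned_pages); /* ilog2() of a power of two */

            if (bitmask & pfn) {
                    unsigned long end_pfn = pfn + pages - 1;
                    unsigned long shared_bits = ~(pfn ^ end_pfn) & ~bitmask;

                    /* Widen until the flushed block covers both pfn and end_pfn;
                     * 64 stands in for BITS_PER_LONG (i.e. flush everything). */
                    mask = shared_bits ? ffs_ul(shared_bits) : 64;
            }
            return mask;
    }

    int main(void)
    {
            /* Aligned: 16 pages at an aligned pfn -> mask 4 (a 16-page block). */
            printf("mask(0x1000, 16) = %u\n", psi_mask(0x1000, 16));
            /* Unaligned: pfn 0x1003, 4 pages spans 0x1003..0x1006, so the mask
             * widens from 2 to 3 and the flush covers 0x1000..0x1007. */
            printf("mask(0x1003, 4) = %u\n", psi_mask(0x1003, 4));
            return 0;
    }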
drivers/iommu/intel/iommu.h

@@ -627,6 +627,10 @@ struct dmar_domain {
int agaw;
/* maximum mapped address */
u64 max_addr;
/* Protect the s1_domains list */
spinlock_t s1_lock;
/* Track s1_domains nested on this domain */
struct list_head s1_domains;
};
/* Nested user domain */
@@ -637,6 +641,8 @@ struct dmar_domain {
unsigned long s1_pgtbl;
/* page table attributes */
struct iommu_hwpt_vtd_s1 s1_cfg;
/* link to parent domain siblings */
struct list_head s2_link;
};
};
@@ -1060,6 +1066,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
*/
#define QI_OPT_WAIT_DRAIN BIT(0)
void domain_update_iotlb(struct dmar_domain *domain);
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);

drivers/iommu/intel/nested.c

@@ -65,12 +65,20 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
list_add(&info->link, &dmar_domain->devices);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
domain_update_iotlb(dmar_domain);
return 0;
}
static void intel_nested_domain_free(struct iommu_domain *domain)
{
kfree(to_dmar_domain(domain));
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct dmar_domain *s2_domain = dmar_domain->s2_domain;
spin_lock(&s2_domain->s1_lock);
list_del(&dmar_domain->s2_link);
spin_unlock(&s2_domain->s1_lock);
kfree(dmar_domain);
}
static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
@@ -95,7 +103,7 @@ static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
}
static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
unsigned long npages, bool ih)
u64 npages, bool ih)
{
struct iommu_domain_info *info;
unsigned int mask;
@@ -201,5 +209,9 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
spin_lock_init(&domain->lock);
xa_init(&domain->iommu_array);
spin_lock(&s2_domain->s1_lock);
list_add(&domain->s2_link, &s2_domain->s1_domains);
spin_unlock(&s2_domain->s1_lock);
return &domain->domain;
}

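The nested.c hunks above make each stage-1 (nested) domain register itself with its stage-2 parent at allocation time and unregister on free, so that a change to the parent's mappings can walk and invalidate every child. A minimal kernel-style sketch of that bookkeeping pattern follows; struct parent_domain, struct child_domain and the helpers are illustrative names, not the driver's.

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct parent_domain {
            spinlock_t children_lock;       /* protects the children list */
            struct list_head children;      /* nested domains built on this parent */
    };

    struct child_domain {
            struct parent_domain *parent;
            struct list_head link;          /* entry in parent->children */
    };

    static void parent_domain_init(struct parent_domain *p)
    {
            spin_lock_init(&p->children_lock);
            INIT_LIST_HEAD(&p->children);
    }

    static void child_domain_register(struct child_domain *c, struct parent_domain *p)
    {
            c->parent = p;
            spin_lock(&p->children_lock);
            list_add(&c->link, &p->children);
            spin_unlock(&p->children_lock);
    }

    static void child_domain_unregister(struct child_domain *c)
    {
            spin_lock(&c->parent->children_lock);
            list_del(&c->link);
            spin_unlock(&c->parent->children_lock);
    }

    /* Walk the children under the lock; fn() must not sleep. */
    static void parent_domain_for_each_child(struct parent_domain *p,
                                             void (*fn)(struct child_domain *))
    {
            struct child_domain *c;

            spin_lock(&p->children_lock);
            list_for_each_entry(c, &p->children, link)
                    fn(c);                  /* e.g. flush the child's caches */
            spin_unlock(&p->children_lock);
    }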
drivers/iommu/intel/pasid.c

@@ -428,7 +428,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
* Set up dirty tracking on a second only or nested translation type.
*/
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid,
bool enabled)
{
@@ -445,7 +444,7 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
return -ENODEV;
}
did = domain_id_iommu(domain, iommu);
did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
if (pgtt != PASID_ENTRY_PGTT_SL_ONLY &&
pgtt != PASID_ENTRY_PGTT_NESTED) {
@@ -658,6 +657,8 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, s2_domain->agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
if (s2_domain->dirty_tracking)
pasid_set_ssade(pte);
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
pasid_set_present(pte);
spin_unlock(&iommu->lock);

drivers/iommu/intel/pasid.h

@@ -307,7 +307,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid,
bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,

drivers/iommu/iommu-sva.c

@@ -41,6 +41,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
}
iommu_mm->pasid = pasid;
INIT_LIST_HEAD(&iommu_mm->sva_domains);
INIT_LIST_HEAD(&iommu_mm->sva_handles);
/*
* Make sure the write to mm->iommu_mm is not reordered in front of
* initialization to iommu_mm fields. If it does, readers may see a
@@ -82,6 +83,14 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
goto out_unlock;
}
list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
if (handle->dev == dev) {
refcount_inc(&handle->users);
mutex_unlock(&iommu_sva_lock);
return handle;
}
}
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle) {
ret = -ENOMEM;
@@ -108,7 +117,9 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
if (ret)
goto out_free_domain;
domain->users = 1;
refcount_set(&handle->users, 1);
list_add(&domain->next, &mm->iommu_mm->sva_domains);
list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
out:
mutex_unlock(&iommu_sva_lock);
@@ -141,6 +152,12 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
struct device *dev = handle->dev;
mutex_lock(&iommu_sva_lock);
if (!refcount_dec_and_test(&handle->users)) {
mutex_unlock(&iommu_sva_lock);
return;
}
list_del(&handle->handle_item);
iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
if (--domain->users == 0) {
list_del(&domain->next);

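The iommu-sva.c hunks above restore handle sharing: iommu_sva_bind_device() first searches the mm's sva_handles list for an existing handle for the same device and takes a reference, and iommu_sva_unbind_device() only tears the binding down once the last user drops it. A minimal kernel-style sketch of that refcounted lookup-or-create pattern follows; struct shared_handle, handle_get() and handle_put() are illustrative, not the core-code API.

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct shared_handle {
            void *key;                      /* e.g. the device being bound */
            refcount_t users;
            struct list_head item;
    };

    static DEFINE_MUTEX(handles_lock);
    static LIST_HEAD(handles);

    static struct shared_handle *handle_get(void *key)
    {
            struct shared_handle *h;

            mutex_lock(&handles_lock);
            list_for_each_entry(h, &handles, item) {
                    if (h->key == key) {    /* existing binding: share it */
                            refcount_inc(&h->users);
                            goto out;
                    }
            }
            h = kzalloc(sizeof(*h), GFP_KERNEL);
            if (h) {
                    h->key = key;
                    refcount_set(&h->users, 1);
                    list_add(&h->item, &handles);
            }
    out:
            mutex_unlock(&handles_lock);
            return h;
    }

    static void handle_put(struct shared_handle *h)
    {
            mutex_lock(&handles_lock);
            if (refcount_dec_and_test(&h->users)) {
                    list_del(&h->item);
                    kfree(h);
            }
            mutex_unlock(&handles_lock);
    }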
include/linux/iommu.h

@@ -892,11 +892,14 @@ struct iommu_fwspec {
struct iommu_sva {
struct device *dev;
struct iommu_domain *domain;
struct list_head handle_item;
refcount_t users;
};
struct iommu_mm_data {
u32 pasid;
struct list_head sva_domains;
struct list_head sva_handles;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,