iommu/arm-smmu-v3-sva: Remove bond refcount

Always allocate a new arm_smmu_bond in __arm_smmu_sva_bind and remove
the bond refcount since arm_smmu_bond can never be shared across calls
to __arm_smmu_sva_bind.

The iommu framework will not allocate multiple SVA domains for the same
(device/mm) pair, nor will it call set_dev_pasid for a device if a
domain is already attached on the given pasid. There's also a one-to-one
mapping between an MM and its PASID. __arm_smmu_sva_bind is therefore never
called twice for the same (device/mm) pair, and so there's no reason to try
to share a single arm_smmu_bond allocation for a (device/mm) pair across
set_dev_pasid calls.

Signed-off-by: Michael Shavit <mshavit@google.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20230905194849.v1.2.Id3ab7cf665bcead097654937233a645722a4cce3@changeid
Signed-off-by: Will Deacon <will@kernel.org>
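
To make the reasoning above concrete, here is a minimal sketch of the driver
entry point that reaches __arm_smmu_sva_bind, annotated with the guarantees
the commit message relies on. This is an illustrative reconstruction, not the
verbatim kernel source; locking and return-type details may differ slightly
in the tree at this commit.

/* Illustrative sketch only -- not the verbatim upstream function. */
static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
				      struct device *dev, ioasid_t id)
{
	struct mm_struct *mm = domain->mm;	/* the SVA domain carries the mm */
	int ret;

	/*
	 * The core never calls set_dev_pasid twice for the same (dev, pasid),
	 * and an mm maps to exactly one PASID, so __arm_smmu_sva_bind sees a
	 * given (dev, mm) pair at most once: a fresh bond is always needed.
	 */
	mutex_lock(&sva_lock);
	ret = __arm_smmu_sva_bind(dev, mm);
	mutex_unlock(&sva_lock);

	return ret;
}
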
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -28,7 +28,6 @@ struct arm_smmu_bond {
 	struct mm_struct		*mm;
 	struct arm_smmu_mmu_notifier	*smmu_mn;
 	struct list_head		list;
-	refcount_t			refs;
 };
 
 #define sva_to_bond(handle) \
@@ -386,20 +385,11 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 	if (!master || !master->sva_enabled)
 		return -ENODEV;
 
-	/* If bind() was already called for this {dev, mm} pair, reuse it. */
-	list_for_each_entry(bond, &master->bonds, list) {
-		if (bond->mm == mm) {
-			refcount_inc(&bond->refs);
-			return &bond->sva;
-		}
-	}
-
 	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
 	if (!bond)
 		return -ENOMEM;
 
 	bond->mm = mm;
-	refcount_set(&bond->refs, 1);
 
 	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
 	if (IS_ERR(bond->smmu_mn)) {
@@ -578,7 +568,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
 		}
 	}
 
-	if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
+	if (!WARN_ON(!bond)) {
 		list_del(&bond->list);
 		arm_smmu_mmu_notifier_put(bond->smmu_mn);
 		kfree(bond);
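
For context, a sketch of how the teardown path reads with the refcount gone,
assembled from the hunk above and its surrounding context (not a verbatim
copy of the file): finding the bond for the mm is now sufficient to tear it
down unconditionally.

/* Sketch of the post-patch teardown path; details reconstructed, not copied. */
void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t id)
{
	struct mm_struct *mm = domain->mm;
	struct arm_smmu_bond *bond = NULL, *t;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);

	mutex_lock(&sva_lock);
	list_for_each_entry(t, &master->bonds, list) {
		if (t->mm == mm) {
			bond = t;
			break;
		}
	}

	if (!WARN_ON(!bond)) {		/* exactly one bond per (dev, mm) */
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}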