iommu: Add ops->domain_alloc_sva()
Make a new op that receives the device and the mm_struct that the SVA domain should be created for. Unlike domain_alloc_paging() the dev argument is never NULL here. This allows drivers to fully initialize the SVA domain and allocate the mmu_notifier during allocation. It allows the notifier lifetime to follow the lifetime of the iommu_domain. Since we have only one call site, upgrade the new op to return ERR_PTR instead of NULL. Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> [Removed smmu3 related changes - Vasant] Signed-off-by: Vasant Hegde <vasant.hegde@amd.com> Reviewed-by: Tina Zhang <tina.zhang@intel.com> Link: https://lore.kernel.org/r/20240418103400.6229-15-vasant.hegde@amd.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
1af95763e0
commit
80af5a4520
@@ -108,8 +108,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
 	/* Allocate a new domain and set it on device pasid. */
 	domain = iommu_sva_domain_alloc(dev, mm);
-	if (!domain) {
-		ret = -ENOMEM;
+	if (IS_ERR(domain)) {
+		ret = PTR_ERR(domain);
 		goto out_free_handle;
 	}
 
@@ -283,9 +283,15 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
 	const struct iommu_ops *ops = dev_iommu_ops(dev);
 	struct iommu_domain *domain;
 
-	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
-	if (!domain)
-		return NULL;
+	if (ops->domain_alloc_sva) {
+		domain = ops->domain_alloc_sva(dev, mm);
+		if (IS_ERR(domain))
+			return domain;
+	} else {
+		domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
+		if (!domain)
+			return ERR_PTR(-ENOMEM);
+	}
 
 	domain->type = IOMMU_DOMAIN_SVA;
 	mmgrab(mm);
@@ -518,6 +518,7 @@ static inline int __iommu_copy_struct_from_user_array(
  * Upon failure, ERR_PTR must be returned.
  * @domain_alloc_paging: Allocate an iommu_domain that can be used for
  *                       UNMANAGED, DMA, and DMA_FQ domain types.
+ * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
  * @probe_device: Add device to iommu driver handling
  * @release_device: Remove device from iommu driver handling
  * @probe_finalize: Do final setup work after the device is added to an IOMMU
@@ -558,6 +559,8 @@ struct iommu_ops {
 			struct device *dev, u32 flags, struct iommu_domain *parent,
 			const struct iommu_user_data *user_data);
 	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
+	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
+						 struct mm_struct *mm);
 
 	struct iommu_device *(*probe_device)(struct device *dev);
 	void (*release_device)(struct device *dev);
Loading…
x
Reference in New Issue
Block a user