17bad52708
Logically the HWPT should have the coherency set properly for the device
that it is being created for when it is created. This was happening
implicitly only if immediate_attach was set, because
iommufd_hw_pagetable_attach() does it as the first thing.

Do it unconditionally so !immediate_attach works properly.

Link: https://lore.kernel.org/r/9-v8-6659224517ea+532-iommufd_alloc_jgg@nvidia.com
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
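For context, a minimal hypothetical caller sketch of the !immediate_attach case (the function name example_hwpt_alloc and its shape are invented for illustration; only iommufd_hw_pagetable_alloc(), its ioas->mutex locking requirement, and the immediate_attach flag come from the file below). With this change the returned HWPT already has the coherency mode set for idev before any mappings are added to the domain:

/*
 * Hypothetical !immediate_attach caller (illustration only, not part of
 * this patch): the returned HWPT already has the coherency mode decided
 * for idev, so the per-PTE coherency bit is correct for later maps.
 */
static struct iommufd_hw_pagetable *
example_hwpt_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
		   struct iommufd_device *idev)
{
	struct iommufd_hw_pagetable *hwpt;

	mutex_lock(&ioas->mutex);
	hwpt = iommufd_hw_pagetable_alloc(ictx, ioas, idev,
					  false /* !immediate_attach */);
	mutex_unlock(&ioas->mutex);
	return hwpt;
}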
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>

#include "iommufd_private.h"

void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
{
	struct iommufd_hw_pagetable *hwpt =
		container_of(obj, struct iommufd_hw_pagetable, obj);

	if (!list_empty(&hwpt->hwpt_item)) {
		mutex_lock(&hwpt->ioas->mutex);
		list_del(&hwpt->hwpt_item);
		mutex_unlock(&hwpt->ioas->mutex);

		iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain);
	}

	if (hwpt->domain)
		iommu_domain_free(hwpt->domain);

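	/* Pairs with the refcount_inc() in iommufd_hw_pagetable_alloc() */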
	refcount_dec(&hwpt->ioas->obj.users);
}

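/*
 * Upgrade the domain so cache coherency is enforced for DMA even if the
 * device requests no-snoop. Returns 0 if coherency is already enforced
 * or was successfully enabled, -EINVAL if the iommu driver cannot
 * enforce it on this domain.
 */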
int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->enforce_cache_coherency)
		return 0;

	if (hwpt->domain->ops->enforce_cache_coherency)
		hwpt->enforce_cache_coherency =
			hwpt->domain->ops->enforce_cache_coherency(
				hwpt->domain);
	if (!hwpt->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hw_pagetable_alloc() - Get an iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @immediate_attach: True if idev should be attached to the hwpt
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 */
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			   struct iommufd_device *idev, bool immediate_attach)
{
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	hwpt = iommufd_object_alloc(ictx, hwpt, IOMMUFD_OBJ_HW_PAGETABLE);
	if (IS_ERR(hwpt))
		return hwpt;

	INIT_LIST_HEAD(&hwpt->hwpt_item);
	/* Pairs with iommufd_hw_pagetable_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt->ioas = ioas;

	hwpt->domain = iommu_domain_alloc(idev->dev->bus);
	if (!hwpt->domain) {
		rc = -ENOMEM;
		goto out_abort;
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
	 * a new domain.
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hw_pagetable_enforce_cc(hwpt);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that cannot
	 * directly allocate a domain. These drivers do not finish creating the
	 * domain until attach is completed. Thus we must have this call
	 * sequence. Once those drivers are fixed this should be removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&hwpt->ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list);
	return hwpt;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}