iommu: Tidy the control flow in iommu_group_store_type()

Use a normal "goto unwind" instead of trying to be clever with checking
!ret and manually managing the unlock.
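
For illustration only, the error-handling idiom being adopted looks like the minimal
userspace sketch below; the names (store_example(), do_step()) and the pthread mutex
are stand-ins for this sketch, not the kernel code touched by the patch.

/*
 * Minimal sketch of the "goto unwind" idiom: take the lock once, send
 * every failure (and, here, the success result too) through a single
 * label that drops the lock and returns.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a step that can fail while the lock is held. */
static int do_step(int fail)
{
	return fail ? -1 : 0;
}

static int store_example(int fail, int count)
{
	int ret;

	pthread_mutex_lock(&lock);

	ret = do_step(fail);
	if (ret)
		goto out_unlock;	/* every error path funnels here */

	ret = count;			/* success result, same exit path */

out_unlock:
	pthread_mutex_unlock(&lock);	/* single place that drops the lock */
	return ret;
}

int main(void)
{
	printf("success: %d\n", store_example(0, 42));
	printf("error:   %d\n", store_example(1, 42));
	return 0;
}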

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tested-by: Heiko Stuebner <heiko@sntech.de>
Tested-by: Niklas Schnelle <schnelle@linux.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/17-v5-1b99ae392328+44574-iommu_err_unwind_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author:    Jason Gunthorpe <jgg@nvidia.com>  2023-05-11 01:42:15 -03:00
Committer: Joerg Roedel <jroedel@suse.de>
Commit:    5957c19305 (parent e996c12d76)

--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c

@@ -2940,6 +2940,7 @@ out_free:
 static ssize_t iommu_group_store_type(struct iommu_group *group,
 				      const char *buf, size_t count)
 {
+	struct group_device *gdev;
 	int ret, req_type;
 
 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
@@ -2964,20 +2965,23 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
 	if (req_type == IOMMU_DOMAIN_DMA_FQ &&
 	    group->default_domain->type == IOMMU_DOMAIN_DMA) {
 		ret = iommu_dma_init_fq(group->default_domain);
-		if (!ret)
-			group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
+		if (ret)
+			goto out_unlock;
 
-		mutex_unlock(&group->mutex);
-		return ret ?: count;
+		group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
+		ret = count;
+		goto out_unlock;
 	}
 
 	/* Otherwise, ensure that device exists and no driver is bound. */
 	if (list_empty(&group->devices) || group->owner_cnt) {
-		mutex_unlock(&group->mutex);
-		return -EPERM;
+		ret = -EPERM;
+		goto out_unlock;
 	}
 
 	ret = iommu_setup_default_domain(group, req_type);
+	if (ret)
+		goto out_unlock;
 
 	/*
 	 * Release the mutex here because ops->probe_finalize() call-back of
@@ -2988,13 +2992,12 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
 	mutex_unlock(&group->mutex);
 
 	/* Make sure dma_ops is appropriatley set */
-	if (!ret) {
-		struct group_device *gdev;
-
-		for_each_group_device(group, gdev)
-			iommu_group_do_probe_finalize(gdev->dev);
-	}
+	for_each_group_device(group, gdev)
+		iommu_group_do_probe_finalize(gdev->dev);
+	return count;
 
+out_unlock:
+	mutex_unlock(&group->mutex);
 	return ret ?: count;
 }
 