iommu: Make iopf_group_response() return void

The iopf_group_response() should return void, as nothing can do anything
with the failure. This implies that ops->page_response() must also return
void; this is consistent with what the drivers do. The failure paths,
which are all integrity validations of the fault, should be WARN_ON'd,
not return codes.

If the iommu core fails to enqueue the fault, it should respond to the fault
directly by calling ops->page_response() instead of returning an error
number and relying on the iommu drivers to do so. Consolidate the fault
error handling code in the core.

Co-developed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240212012227.119381-16-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author: Lu Baolu <baolu.lu@linux.intel.com>
Date: 2024-02-12 09:22:26 +08:00
Committer: Joerg Roedel
Parent: 1991123271  Commit: b554e396e5
5 changed files with 98 additions and 120 deletions
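
To make the new contract concrete before the per-file diffs: a minimal sketch, not taken from the patch, of the shape a driver's ops->page_response() implementation takes once it returns void. The names hypothetical_page_response(), device_faults_enabled() and hypothetical_send_resume() are placeholders for a driver's own callback, capability check and hardware response path; the real examples are the arm-smmu-v3 and intel/svm hunks below.

/*
 * Minimal sketch of a void page_response callback (not part of this patch).
 * Integrity problems with the fault are WARN_ON'd and the response is
 * dropped; there is no error code left for the core to act on.
 */
static void hypothetical_page_response(struct device *dev,
                                       struct iopf_fault *evt,
                                       struct iommu_page_response *msg)
{
        /* hypothetical capability check, mirrors master->stall_enabled */
        if (WARN_ON(!device_faults_enabled(dev)))
                return;

        /* hypothetical hardware write, mirrors arm_smmu_cmdq_issue_cmd() */
        hypothetical_send_resume(dev, msg->pasid, msg->grpid, msg->code);
}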

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

@@ -920,15 +920,16 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
         return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
 }
 
-static int arm_smmu_page_response(struct device *dev,
-                                  struct iopf_fault *unused,
+static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused,
                                    struct iommu_page_response *resp)
 {
         struct arm_smmu_cmdq_ent cmd = {0};
         struct arm_smmu_master *master = dev_iommu_priv_get(dev);
         int sid = master->streams[0].id;
 
-        if (master->stall_enabled) {
+        if (WARN_ON(!master->stall_enabled))
+                return;
+
         cmd.opcode = CMDQ_OP_RESUME;
         cmd.resume.sid = sid;
         cmd.resume.stag = resp->grpid;
@@ -941,10 +942,7 @@ static int arm_smmu_page_response(struct device *dev,
                 cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
                 break;
         default:
-                return -EINVAL;
-        }
-        } else {
-                return -ENODEV;
+                break;
         }
 
         arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
@@ -954,8 +952,6 @@ static int arm_smmu_page_response(struct device *dev,
          * terminated... at some point in the future. PRI_RESP is fire and
          * forget.
          */
-
-        return 0;
 }
 
 /* Context descriptor manipulation functions */
@@ -1516,16 +1512,6 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
         }
 
         ret = iommu_report_device_fault(master->dev, &fault_evt);
-        if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) {
-                /* Nobody cared, abort the access */
-                struct iommu_page_response resp = {
-                        .pasid = flt->prm.pasid,
-                        .grpid = flt->prm.grpid,
-                        .code = IOMMU_PAGE_RESP_FAILURE,
-                };
-
-                arm_smmu_page_response(master->dev, &fault_evt, &resp);
-        }
 out_unlock:
         mutex_unlock(&smmu->streams_mutex);
         return ret;

drivers/iommu/intel/iommu.h

@@ -1079,7 +1079,7 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
 void intel_svm_check(struct intel_iommu *iommu);
 int intel_svm_enable_prq(struct intel_iommu *iommu);
 int intel_svm_finish_prq(struct intel_iommu *iommu);
-int intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
+void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
                             struct iommu_page_response *msg);
 struct iommu_domain *intel_svm_domain_alloc(void);
 void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);

drivers/iommu/intel/svm.c

@@ -740,8 +740,7 @@ prq_advance:
         return IRQ_RETVAL(handled);
 }
 
-int intel_svm_page_response(struct device *dev,
-                            struct iopf_fault *evt,
+void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
                             struct iommu_page_response *msg)
 {
         struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -751,7 +750,6 @@ int intel_svm_page_response(struct device *dev,
         bool private_present;
         bool pasid_present;
         bool last_page;
-        int ret = 0;
         u16 sid;
 
         prm = &evt->fault.prm;
@@ -760,16 +758,6 @@ int intel_svm_page_response(struct device *dev,
         private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
         last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
 
-        if (!pasid_present) {
-                ret = -EINVAL;
-                goto out;
-        }
-
-        if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
-                ret = -EINVAL;
-                goto out;
-        }
-
         /*
          * Per VT-d spec. v3.0 ch7.7, system software must respond
          * with page group response if private data is present (PDP)
@@ -798,8 +786,6 @@ int intel_svm_page_response(struct device *dev,
                 qi_submit_sync(iommu, &desc, 1, 0);
         }
 
-out:
-        return ret;
 }
 
 static int intel_svm_set_dev_pasid(struct iommu_domain *domain,

drivers/iommu/io-pgfault.c

@@ -39,7 +39,7 @@ static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
         kfree_rcu(fault_param, rcu);
 }
 
-void iopf_free_group(struct iopf_group *group)
+static void __iopf_free_group(struct iopf_group *group)
 {
         struct iopf_fault *iopf, *next;
@@ -50,6 +50,11 @@ void iopf_free_group(struct iopf_group *group)
         /* Pair with iommu_report_device_fault(). */
         iopf_put_dev_fault_param(group->fault_param);
+}
+
+void iopf_free_group(struct iopf_group *group)
+{
+        __iopf_free_group(group);
         kfree(group);
 }
 EXPORT_SYMBOL_GPL(iopf_free_group);
@@ -97,14 +102,49 @@ static int report_partial_fault(struct iommu_fault_param *fault_param,
         return 0;
 }
 
+static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
+                                           struct iopf_fault *evt,
+                                           struct iopf_group *abort_group)
+{
+        struct iopf_fault *iopf, *next;
+        struct iopf_group *group;
+
+        group = kzalloc(sizeof(*group), GFP_KERNEL);
+        if (!group) {
+                /*
+                 * We always need to construct the group as we need it to abort
+                 * the request at the driver if it can't be handled.
+                 */
+                group = abort_group;
+        }
+
+        group->fault_param = iopf_param;
+        group->last_fault.fault = evt->fault;
+        INIT_LIST_HEAD(&group->faults);
+        INIT_LIST_HEAD(&group->pending_node);
+        list_add(&group->last_fault.list, &group->faults);
+
+        /* See if we have partial faults for this group */
+        mutex_lock(&iopf_param->lock);
+        list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
+                if (iopf->fault.prm.grpid == evt->fault.prm.grpid)
+                        /* Insert *before* the last fault */
+                        list_move(&iopf->list, &group->faults);
+        }
+        list_add(&group->pending_node, &iopf_param->faults);
+        mutex_unlock(&iopf_param->lock);
+
+        return group;
+}
+
 /**
  * iommu_report_device_fault() - Report fault event to device driver
  * @dev: the device
  * @evt: fault event data
  *
  * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
- * handler. When this function fails and the fault is recoverable, it is the
- * caller's responsibility to complete the fault.
+ * handler. If this function fails then ops->page_response() was called to
+ * complete evt if required.
  *
  * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
  * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
@@ -143,22 +183,18 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 {
         struct iommu_fault *fault = &evt->fault;
         struct iommu_fault_param *iopf_param;
-        struct iopf_fault *iopf, *next;
-        struct iommu_domain *domain;
+        struct iopf_group abort_group = {};
         struct iopf_group *group;
         int ret;
 
-        if (fault->type != IOMMU_FAULT_PAGE_REQ)
-                return -EOPNOTSUPP;
-
         iopf_param = iopf_get_dev_fault_param(dev);
-        if (!iopf_param)
+        if (WARN_ON(!iopf_param))
                 return -ENODEV;
 
         if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
                 ret = report_partial_fault(iopf_param, fault);
                 iopf_put_dev_fault_param(iopf_param);
+                /* A request that is not the last does not need to be ack'd */
                 return ret;
         }
@@ -170,56 +206,33 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
          * will send a response to the hardware. We need to clean up before
          * leaving, otherwise partial faults will be stuck.
          */
-        domain = get_domain_for_iopf(dev, fault);
-        if (!domain) {
-                ret = -EINVAL;
-                goto cleanup_partial;
-        }
-
-        group = kzalloc(sizeof(*group), GFP_KERNEL);
-        if (!group) {
+        group = iopf_group_alloc(iopf_param, evt, &abort_group);
+        if (group == &abort_group) {
                 ret = -ENOMEM;
-                goto cleanup_partial;
+                goto err_abort;
         }
 
-        group->fault_param = iopf_param;
-        group->last_fault.fault = *fault;
-        INIT_LIST_HEAD(&group->faults);
-        INIT_LIST_HEAD(&group->pending_node);
-        group->domain = domain;
-        list_add(&group->last_fault.list, &group->faults);
-
-        /* See if we have partial faults for this group */
-        mutex_lock(&iopf_param->lock);
-        list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
-                if (iopf->fault.prm.grpid == fault->prm.grpid)
-                        /* Insert *before* the last fault */
-                        list_move(&iopf->list, &group->faults);
+        group->domain = get_domain_for_iopf(dev, fault);
+        if (!group->domain) {
+                ret = -EINVAL;
+                goto err_abort;
         }
-        list_add(&group->pending_node, &iopf_param->faults);
-        mutex_unlock(&iopf_param->lock);
 
-        ret = domain->iopf_handler(group);
-        if (ret) {
-                mutex_lock(&iopf_param->lock);
-                list_del_init(&group->pending_node);
-                mutex_unlock(&iopf_param->lock);
+        /*
+         * On success iopf_handler must call iopf_group_response() and
+         * iopf_free_group()
+         */
+        ret = group->domain->iopf_handler(group);
+        if (ret)
+                goto err_abort;
+
+        return 0;
+
+err_abort:
+        iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
+        if (group == &abort_group)
+                __iopf_free_group(group);
+        else
                 iopf_free_group(group);
-        }
-
-        return ret;
-
-cleanup_partial:
-        mutex_lock(&iopf_param->lock);
-        list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
-                if (iopf->fault.prm.grpid == fault->prm.grpid) {
-                        list_del(&iopf->list);
-                        kfree(iopf);
-                }
-        }
-        mutex_unlock(&iopf_param->lock);
-        iopf_put_dev_fault_param(iopf_param);
 
         return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
@@ -259,10 +272,8 @@ EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
  * iopf_group_response - Respond a group of page faults
  * @group: the group of faults with the same group id
  * @status: the response code
- *
- * Return 0 on success and <0 on error.
  */
-int iopf_group_response(struct iopf_group *group,
+void iopf_group_response(struct iopf_group *group,
                         enum iommu_page_response_code status)
 {
         struct iommu_fault_param *fault_param = group->fault_param;
@@ -274,17 +285,14 @@ int iopf_group_response(struct iopf_group *group,
                 .grpid = iopf->fault.prm.grpid,
                 .code = status,
         };
-        int ret = -EINVAL;
 
         /* Only send response if there is a fault report pending */
         mutex_lock(&fault_param->lock);
         if (!list_empty(&group->pending_node)) {
-                ret = ops->page_response(dev, &group->last_fault, &resp);
+                ops->page_response(dev, &group->last_fault, &resp);
                 list_del_init(&group->pending_node);
         }
         mutex_unlock(&fault_param->lock);
-
-        return ret;
 }
 EXPORT_SYMBOL_GPL(iopf_group_response);
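
The comment added above in iommu_report_device_fault() defines the other side of the contract: a domain's iopf_handler() that accepts a group now owns it and must call iopf_group_response() and iopf_free_group() itself; the core only aborts and frees groups it failed to hand off. A minimal sketch of a conforming handler, where resolve_faults() is a hypothetical placeholder rather than anything added by this patch:

/* Sketch of a domain fault handler honouring the new ownership rules. */
static int example_iopf_handler(struct iopf_group *group)
{
        enum iommu_page_response_code status;

        /* hypothetical: resolve the faulting mappings for this group */
        status = resolve_faults(group);

        /* the handler, not the core, responds to and frees the group */
        iopf_group_response(group, status);
        iopf_free_group(group);

        return 0;
}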

include/linux/iommu.h

@@ -574,8 +574,7 @@ struct iommu_ops {
         int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
         int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
 
-        int (*page_response)(struct device *dev,
-                             struct iopf_fault *evt,
+        void (*page_response)(struct device *dev, struct iopf_fault *evt,
                               struct iommu_page_response *msg);
 
         int (*def_domain_type)(struct device *dev);
@@ -1547,7 +1546,7 @@ void iopf_queue_free(struct iopf_queue *queue);
 int iopf_queue_discard_partial(struct iopf_queue *queue);
 void iopf_free_group(struct iopf_group *group);
 int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
-int iopf_group_response(struct iopf_group *group,
+void iopf_group_response(struct iopf_group *group,
                          enum iommu_page_response_code status);
 #else
 static inline int
@@ -1590,10 +1589,9 @@ iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
         return -ENODEV;
 }
 
-static inline int iopf_group_response(struct iopf_group *group,
+static inline void iopf_group_response(struct iopf_group *group,
                                        enum iommu_page_response_code status)
 {
-        return -ENODEV;
 }
 #endif /* CONFIG_IOMMU_IOPF */
 #endif /* __LINUX_IOMMU_H */