misc: genwqe: should return proper error value.
The function should return -EFAULT when copy_from_user fails. Even though the caller does not distinguish the error codes, we should keep backward compatibility. Signed-off-by: zhong jiang <zhongjiang@huawei.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
committed by
Greg Kroah-Hartman
parent
6dbfdc1a4e
commit
02241995b0
@@ -298,7 +298,7 @@ static int genwqe_sgl_size(int num_pages)
|
|||||||
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
|
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
|
||||||
void __user *user_addr, size_t user_size, int write)
|
void __user *user_addr, size_t user_size, int write)
|
||||||
{
|
{
|
||||||
int rc;
|
int ret = -ENOMEM;
|
||||||
struct pci_dev *pci_dev = cd->pci_dev;
|
struct pci_dev *pci_dev = cd->pci_dev;
|
||||||
|
|
||||||
sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
|
sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
|
||||||
@@ -318,7 +318,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
|
|||||||
if (get_order(sgl->sgl_size) > MAX_ORDER) {
|
if (get_order(sgl->sgl_size) > MAX_ORDER) {
|
||||||
dev_err(&pci_dev->dev,
|
dev_err(&pci_dev->dev,
|
||||||
"[%s] err: too much memory requested!\n", __func__);
|
"[%s] err: too much memory requested!\n", __func__);
|
||||||
return -ENOMEM;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
|
sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
|
||||||
@@ -326,7 +326,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
|
|||||||
if (sgl->sgl == NULL) {
|
if (sgl->sgl == NULL) {
|
||||||
dev_err(&pci_dev->dev,
|
dev_err(&pci_dev->dev,
|
||||||
"[%s] err: no memory available!\n", __func__);
|
"[%s] err: no memory available!\n", __func__);
|
||||||
return -ENOMEM;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Only use buffering on incomplete pages */
|
/* Only use buffering on incomplete pages */
|
||||||
@@ -339,7 +339,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
|
|||||||
/* Sync with user memory */
|
/* Sync with user memory */
|
||||||
if (copy_from_user(sgl->fpage + sgl->fpage_offs,
|
if (copy_from_user(sgl->fpage + sgl->fpage_offs,
|
||||||
user_addr, sgl->fpage_size)) {
|
user_addr, sgl->fpage_size)) {
|
||||||
rc = -EFAULT;
|
ret = -EFAULT;
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -352,7 +352,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
|
|||||||
/* Sync with user memory */
|
/* Sync with user memory */
|
||||||
if (copy_from_user(sgl->lpage, user_addr + user_size -
|
if (copy_from_user(sgl->lpage, user_addr + user_size -
|
||||||
sgl->lpage_size, sgl->lpage_size)) {
|
sgl->lpage_size, sgl->lpage_size)) {
|
||||||
rc = -EFAULT;
|
ret = -EFAULT;
|
||||||
goto err_out2;
|
goto err_out2;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -374,7 +374,8 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
|
|||||||
sgl->sgl = NULL;
|
sgl->sgl = NULL;
|
||||||
sgl->sgl_dma_addr = 0;
|
sgl->sgl_dma_addr = 0;
|
||||||
sgl->sgl_size = 0;
|
sgl->sgl_size = 0;
|
||||||
return -ENOMEM;
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
|
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
|
||||||
|
Reference in New Issue
Block a user