RDMA/i40iw: Address an mmap handler exploit in i40iw
commit 2ed381439e89fa6d1a0839ef45ccd45d99d8e915 upstream.

i40iw_mmap manipulates the vma->vm_pgoff to differentiate a push page mmap vs a doorbell mmap, and uses it to compute the pfn in remap_pfn_range without any validation. This is vulnerable to an mmap exploit as described in: https://lore.kernel.org/r/20201119093523.7588-1-zhudi21@huawei.com

The push feature is disabled in the driver currently and therefore no push mmaps are issued from user-space. The feature does not work as expected in the x722 product.

Remove the push module parameter and all VMA attribute manipulations for this feature in i40iw_mmap. Update i40iw_mmap to only allow DB user mmappings at offset = 0. Check vm_pgoff for zero and verify that the mmap is bound to a single page.

Cc: <stable@kernel.org>
Fixes: d37498417947 ("i40iw: add files for iwarp interface")
Link: https://lore.kernel.org/r/20201125005616.1800-2-shiraz.saleem@intel.com
Reported-by: Di Zhu <zhudi21@huawei.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
07434172c5
commit
4460a7c979
@ -54,10 +54,6 @@
|
||||
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
|
||||
__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
|
||||
|
||||
static int push_mode;
|
||||
module_param(push_mode, int, 0644);
|
||||
MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
|
||||
|
||||
static int debug;
|
||||
module_param(debug, int, 0644);
|
||||
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
|
||||
@ -1588,7 +1584,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
|
||||
if (status)
|
||||
goto exit;
|
||||
iwdev->obj_next = iwdev->obj_mem;
|
||||
iwdev->push_mode = push_mode;
|
||||
|
||||
init_waitqueue_head(&iwdev->vchnl_waitq);
|
||||
init_waitqueue_head(&dev->vf_reqs);
|
||||
|
@ -168,38 +168,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
|
||||
*/
|
||||
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
|
||||
{
|
||||
struct i40iw_ucontext *ucontext;
|
||||
u64 db_addr_offset;
|
||||
u64 push_offset;
|
||||
struct i40iw_ucontext *ucontext = to_ucontext(context);
|
||||
u64 dbaddr;
|
||||
|
||||
ucontext = to_ucontext(context);
|
||||
if (ucontext->iwdev->sc_dev.is_pf) {
|
||||
db_addr_offset = I40IW_DB_ADDR_OFFSET;
|
||||
push_offset = I40IW_PUSH_OFFSET;
|
||||
if (vma->vm_pgoff)
|
||||
vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
|
||||
} else {
|
||||
db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
|
||||
push_offset = I40IW_VF_PUSH_OFFSET;
|
||||
if (vma->vm_pgoff)
|
||||
vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
|
||||
}
|
||||
if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
|
||||
dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
|
||||
|
||||
if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
vma->vm_private_data = ucontext;
|
||||
} else {
|
||||
if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
else
|
||||
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
|
||||
}
|
||||
|
||||
if (io_remap_pfn_range(vma, vma->vm_start,
|
||||
vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
|
||||
PAGE_SIZE, vma->vm_page_prot))
|
||||
if (io_remap_pfn_range(vma, vma->vm_start, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
|
||||
pgprot_noncached(vma->vm_page_prot)))
|
||||
return -EAGAIN;
|
||||
|
||||
return 0;
|
||||
|
Loading…
x
Reference in New Issue
Block a user