Merge branch 'work.get_user_pages_fast' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull get_user_pages_fast() conversion from Al Viro:
 "A bunch of places switched to get_user_pages_fast()"

* 'work.get_user_pages_fast' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  ceph: use get_user_pages_fast()
  pvr2fs: use get_user_pages_fast()
  atomisp: use get_user_pages_fast()
  st: use get_user_pages_fast()
  via_dmablit(): use get_user_pages_fast()
  fsl_hypervisor: switch to get_user_pages_fast()
  rapidio: switch to get_user_pages_fast()
  vchiq_2835_arm: switch to get_user_pages_fast()
commit a0e136e5da
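For reference, a minimal sketch of the conversion pattern applied throughout these hunks, assuming the prototypes in effect at the time of this series; the helper name and its parameters (demo_pin_user_buf, uaddr, want_write) are made up for illustration and are not part of the commit:

/*
 * Sketch only, not part of the commit.  The relevant prototypes here are
 *
 *   long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 *                                struct page **pages, unsigned int gup_flags);
 *   int  get_user_pages_fast(unsigned long start, int nr_pages, int write,
 *                            struct page **pages);
 *
 * so FOLL_WRITE in gup_flags becomes a non-zero "write" argument and the
 * pages array moves to the last position; no mmap_sem handling is needed
 * by the caller.
 */
#include <linux/mm.h>

static int demo_pin_user_buf(unsigned long uaddr, int nr_pages,
			     bool want_write, struct page **pages)
{
	int pinned;

	/* Old style:
	 *	pinned = get_user_pages_unlocked(uaddr, nr_pages, pages,
	 *					 want_write ? FOLL_WRITE : 0);
	 */
	pinned = get_user_pages_fast(uaddr, nr_pages, want_write, pages);
	if (pinned < 0)
		return pinned;			/* fault or bad address */
	if (pinned != nr_pages) {
		/* Partial pin: drop what was pinned and report failure. */
		while (pinned--)
			put_page(pages[pinned]);
		return -EFAULT;
	}
	return 0;
}

As in the hunks below, callers still check for a short pin (return value less than the requested page count) and release any pages that were pinned.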
@@ -238,9 +238,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 	vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
 	if (NULL == vsg->pages)
 		return -ENOMEM;
-	ret = get_user_pages_unlocked((unsigned long)xfer->mem_addr,
-			vsg->num_pages, vsg->pages,
-			(vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0);
+	ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
+			vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+			vsg->pages);
 	if (ret != vsg->num_pages) {
 		if (ret < 0)
 			return ret;
@@ -889,11 +889,9 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 			goto err_req;
 		}
 
-		pinned = get_user_pages_unlocked(
+		pinned = get_user_pages_fast(
 				(unsigned long)xfer->loc_addr & PAGE_MASK,
-				nr_pages,
-				page_list,
-				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0);
+				nr_pages, dir == DMA_FROM_DEVICE, page_list);
 
 		if (pinned != nr_pages) {
 			if (pinned < 0) {
@@ -4920,11 +4920,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
 
	/* Try to fault in all of the necessary pages */
	/* rw==READ means read from drive, write into memory area */
-	res = get_user_pages_unlocked(
-		uaddr,
-		nr_pages,
-		pages,
-		rw == READ ? FOLL_WRITE : 0); /* don't force */
+	res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
 
	/* Errors and no page mapped should return here */
	if (res < nr_pages)
@@ -1020,10 +1020,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 	} else {
 		/*Handle frame buffer allocated in user space*/
 		mutex_unlock(&bo->mutex);
-		down_read(&current->mm->mmap_sem);
-		page_nr = get_user_pages((unsigned long)userptr,
-					 (int)(bo->pgnr), 1, pages, NULL);
-		up_read(&current->mm->mmap_sem);
+		page_nr = get_user_pages_fast((unsigned long)userptr,
+					      (int)(bo->pgnr), 1, pages);
 		mutex_lock(&bo->mutex);
 		bo->mem_type = HMM_BO_MEM_TYPE_USER;
 	}
@@ -90,8 +90,7 @@ static irqreturn_t
 vchiq_doorbell_irq(int irq, void *dev_id);
 
 static struct vchiq_pagelist_info *
-create_pagelist(char __user *buf, size_t count, unsigned short type,
-		struct task_struct *task);
+create_pagelist(char __user *buf, size_t count, unsigned short type);
 
 static void
 free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
@@ -255,8 +254,7 @@ vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
 	pagelistinfo = create_pagelist((char __user *)offset, size,
 				       (dir == VCHIQ_BULK_RECEIVE)
 				       ? PAGELIST_READ
-				       : PAGELIST_WRITE,
-				       current);
+				       : PAGELIST_WRITE);
 
 	if (!pagelistinfo)
 		return VCHIQ_ERROR;
@@ -395,8 +393,7 @@ cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
  */
 
 static struct vchiq_pagelist_info *
-create_pagelist(char __user *buf, size_t count, unsigned short type,
-		struct task_struct *task)
+create_pagelist(char __user *buf, size_t count, unsigned short type)
 {
 	PAGELIST_T *pagelist;
 	struct vchiq_pagelist_info *pagelistinfo;
@@ -476,14 +473,11 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 		}
 		/* do not try and release vmalloc pages */
 	} else {
-		down_read(&task->mm->mmap_sem);
-		actual_pages = get_user_pages(
-					  (unsigned long)buf & PAGE_MASK,
+		actual_pages = get_user_pages_fast(
+					  (unsigned long)buf & PAGE_MASK,
 					  num_pages,
-					  (type == PAGELIST_READ) ? FOLL_WRITE : 0,
-					  pages,
-					  NULL /*vmas */);
-		up_read(&task->mm->mmap_sem);
+					  type == PAGELIST_READ,
+					  pages);
 
 		if (actual_pages != num_pages) {
 			vchiq_log_info(vchiq_arm_log_level,
@@ -686,9 +686,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
 	if (!pages)
 		return -ENOMEM;
 
-	ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
-			FOLL_WRITE);
-
+	ret = get_user_pages_fast((unsigned long)buf, nr_pages, true, pages);
 	if (ret < nr_pages) {
 		nr_pages = ret;
 		ret = -EINVAL;
@@ -243,8 +243,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
 	sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
 
 	/* Get the physical addresses of the source buffer */
-	num_pinned = get_user_pages_unlocked(param.local_vaddr - lb_offset,
-		num_pages, pages, (param.source == -1) ? 0 : FOLL_WRITE);
+	num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset,
+		num_pages, param.source != -1, pages);
 
 	if (num_pinned != num_pages) {
 		/* get_user_pages() failed */
@@ -25,9 +25,9 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
 		return ERR_PTR(-ENOMEM);
 
 	while (got < num_pages) {
-		rc = get_user_pages_unlocked(
+		rc = get_user_pages_fast(
 		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
-		    num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
+		    num_pages - got, write_page, pages + got);
 		if (rc < 0)
 			break;
 		BUG_ON(rc == 0);