process_vm_access: switch to copy_page_to_iter/iov_iter_copy_from_user

... rather than open-coding those.  As a side benefit, we get a much saner
loop calling those; we can just feed entire pages, instead of the "copy
would span the iovec boundary, let's do it in two loop iterations" mess.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
Al Viro 2014-02-05 12:14:11 -05:00
parent 9f78bdfabf
commit 240f3905f5

View File

@@ -51,13 +51,11 @@ static int process_vm_rw_pages(struct task_struct *task,
ssize_t *bytes_copied) ssize_t *bytes_copied)
{ {
int pages_pinned; int pages_pinned;
void *target_kaddr;
int pgs_copied = 0; int pgs_copied = 0;
int j; int j;
int ret; int ret;
ssize_t bytes_to_copy; ssize_t bytes_to_copy;
ssize_t rc = 0; ssize_t rc = 0;
const struct iovec *iov = iter->iov;
*bytes_copied = 0; *bytes_copied = 0;
@@ -75,77 +73,34 @@ static int process_vm_rw_pages(struct task_struct *task,
/* Do the copy for each page */ /* Do the copy for each page */
for (pgs_copied = 0; for (pgs_copied = 0;
(pgs_copied < nr_pages_to_copy) && iter->nr_segs; (pgs_copied < nr_pages_to_copy) && iov_iter_count(iter);
pgs_copied++) { pgs_copied++) {
/* Make sure we have a non zero length iovec */ struct page *page = process_pages[pgs_copied];
while (iter->nr_segs && iov->iov_len == 0) { bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset, len);
iov++;
iter->nr_segs--;
}
if (!iter->nr_segs)
break;
/* if (vm_write) {
* Will copy smallest of: if (bytes_to_copy > iov_iter_count(iter))
* - bytes remaining in page bytes_to_copy = iov_iter_count(iter);
* - bytes remaining in destination iovec ret = iov_iter_copy_from_user(page,
*/ iter, start_offset, bytes_to_copy);
bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset, iov_iter_advance(iter, ret);
len - *bytes_copied); set_page_dirty_lock(page);
bytes_to_copy = min_t(ssize_t, bytes_to_copy,
iov->iov_len
- iter->iov_offset);
target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;
if (vm_write)
ret = copy_from_user(target_kaddr,
iov->iov_base
+ iter->iov_offset,
bytes_to_copy);
else
ret = copy_to_user(iov->iov_base
+ iter->iov_offset,
target_kaddr, bytes_to_copy);
kunmap(process_pages[pgs_copied]);
if (ret) {
*bytes_copied += bytes_to_copy - ret;
pgs_copied++;
rc = -EFAULT;
goto end;
}
*bytes_copied += bytes_to_copy;
iter->iov_offset += bytes_to_copy;
if (iter->iov_offset == iov->iov_len) {
/*
* Need to copy remaining part of page into the
* next iovec if there are any bytes left in page
*/
iter->nr_segs--;
iov++;
iter->iov_offset = 0;
start_offset = (start_offset + bytes_to_copy)
% PAGE_SIZE;
if (start_offset)
pgs_copied--;
} else { } else {
start_offset = 0; ret = copy_page_to_iter(page, start_offset,
bytes_to_copy, iter);
} }
*bytes_copied += ret;
len -= ret;
if (ret < bytes_to_copy && iov_iter_count(iter)) {
rc = -EFAULT;
break;
}
start_offset = 0;
} }
end: end:
if (vm_write) { for (j = 0; j < pages_pinned; j++)
for (j = 0; j < pages_pinned; j++) { put_page(process_pages[j]);
if (j < pgs_copied)
set_page_dirty_lock(process_pages[j]);
put_page(process_pages[j]);
}
} else {
for (j = 0; j < pages_pinned; j++)
put_page(process_pages[j]);
}
iter->iov = iov;
return rc; return rc;
} }
@@ -194,7 +149,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
return 0; return 0;
nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
while ((nr_pages_copied < nr_pages) && iter->nr_segs) { while ((nr_pages_copied < nr_pages) && iov_iter_count(iter)) {
nr_pages_to_copy = min(nr_pages - nr_pages_copied, nr_pages_to_copy = min(nr_pages - nr_pages_copied,
max_pages_per_loop); max_pages_per_loop);
@@ -303,7 +258,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
goto put_task_struct; goto put_task_struct;
} }
for (i = 0; i < riovcnt && iter->nr_segs; i++) { for (i = 0; i < riovcnt && iov_iter_count(iter); i++) {
rc = process_vm_rw_single_vec( rc = process_vm_rw_single_vec(
(unsigned long)rvec[i].iov_base, rvec[i].iov_len, (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
iter, process_pages, mm, task, vm_write, iter, process_pages, mm, task, vm_write,