gup: Turn fault_in_pages_{readable,writeable} into fault_in_{readable,writeable}
Turn fault_in_pages_{readable,writeable} into versions that return the number of bytes not faulted in, similar to copy_to_user, instead of returning a non-zero value when any of the requested pages couldn't be faulted in. This supports the existing users that require all pages to be faulted in as well as new users that are happy if any pages can be faulted in.

Rename the functions to fault_in_{readable,writeable} to make sure this change doesn't silently break things.

Neither of these functions is entirely trivial and it doesn't seem useful to inline them, so move them to mm/gup.c.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
parent 0c8eb2884a
commit bb523b406c
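The key semantic change, in caller terms: the old helpers returned a non-zero error code if any page in the range could not be faulted in, while the new helpers return the number of bytes not faulted in, so zero still means complete success. A minimal sketch of converting an all-or-nothing caller (the function and variable names here are illustrative, not taken from the patch):

        /* Hypothetical all-or-nothing caller, for illustration only. */
        static int prefault_for_read(const char __user *buf, size_t len)
        {
                /*
                 * Old API:  int err = fault_in_pages_readable(buf, len);
                 *           if (err) return err;
                 * New API:  a non-zero return means that many bytes at the
                 * end of the range could not be faulted in.
                 */
                if (fault_in_readable(buf, len))
                        return -EFAULT;
                return 0;
        }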
@@ -669,7 +669,8 @@ static void __init kvm_use_magic_page(void)
 	on_each_cpu(kvm_map_magic_page, &features, 1);
 
 	/* Quick self-test to see if the mapping works */
-	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
+	if (fault_in_readable((const char __user *)KVM_MAGIC_PAGE,
+			      sizeof(u32))) {
 		kvm_patching_worked = false;
 		return;
 	}
@@ -1048,7 +1048,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
 	if (new_ctx == NULL)
 		return 0;
 	if (!access_ok(new_ctx, ctx_size) ||
-	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
+	    fault_in_readable((char __user *)new_ctx, ctx_size))
 		return -EFAULT;
 
 	/*
@@ -1237,7 +1237,7 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
 #endif
 
 	if (!access_ok(ctx, sizeof(*ctx)) ||
-	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
+	    fault_in_readable((char __user *)ctx, sizeof(*ctx)))
 		return -EFAULT;
 
 	/*
@@ -688,7 +688,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
 	if (new_ctx == NULL)
 		return 0;
 	if (!access_ok(new_ctx, ctx_size) ||
-	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
+	    fault_in_readable((char __user *)new_ctx, ctx_size))
 		return -EFAULT;
 
 	/*
@@ -205,7 +205,7 @@ retry:
 	fpregs_unlock();
 
 	if (ret) {
-		if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
+		if (!fault_in_writeable(buf_fx, fpu_user_xstate_size))
			goto retry;
 		return -EFAULT;
 	}
@@ -278,10 +278,9 @@ retry:
 		if (ret != -EFAULT)
 			return -EINVAL;
 
-		ret = fault_in_pages_readable(buf, size);
-		if (!ret)
+		if (!fault_in_readable(buf, size))
 			goto retry;
-		return ret;
+		return -EFAULT;
 	}
 
 	/*
@@ -336,7 +336,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	struct drm_armada_gem_pwrite *args = data;
 	struct armada_gem_object *dobj;
 	char __user *ptr;
-	int ret;
+	int ret = 0;
 
 	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
 		args->handle, args->offset, args->size, args->ptr);
@@ -349,9 +349,8 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (!access_ok(ptr, args->size))
 		return -EFAULT;
 
-	ret = fault_in_pages_readable(ptr, args->size);
-	if (ret)
-		return ret;
+	if (fault_in_readable(ptr, args->size))
+		return -EFAULT;
 
 	dobj = armada_gem_object_lookup(file, args->handle);
 	if (dobj == NULL)
@@ -2261,9 +2261,8 @@ static noinline int search_ioctl(struct inode *inode,
 	key.offset = sk->min_offset;
 
 	while (1) {
-		ret = fault_in_pages_writeable(ubuf + sk_offset,
-					       *buf_size - sk_offset);
-		if (ret)
+		ret = -EFAULT;
+		if (fault_in_writeable(ubuf + sk_offset, *buf_size - sk_offset))
 			break;
 
 		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
@@ -733,61 +733,10 @@ int wait_on_page_private_2_killable(struct page *page);
 extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
 
 /*
- * Fault everything in given userspace address range in.
+ * Fault in userspace address range.
  */
-static inline int fault_in_pages_writeable(char __user *uaddr, size_t size)
-{
-	char __user *end = uaddr + size - 1;
-
-	if (unlikely(size == 0))
-		return 0;
-
-	if (unlikely(uaddr > end))
-		return -EFAULT;
-	/*
-	 * Writing zeroes into userspace here is OK, because we know that if
-	 * the zero gets there, we'll be overwriting it.
-	 */
-	do {
-		if (unlikely(__put_user(0, uaddr) != 0))
-			return -EFAULT;
-		uaddr += PAGE_SIZE;
-	} while (uaddr <= end);
-
-	/* Check whether the range spilled into the next page. */
-	if (((unsigned long)uaddr & PAGE_MASK) ==
-			((unsigned long)end & PAGE_MASK))
-		return __put_user(0, end);
-
-	return 0;
-}
-
-static inline int fault_in_pages_readable(const char __user *uaddr, size_t size)
-{
-	volatile char c;
-	const char __user *end = uaddr + size - 1;
-
-	if (unlikely(size == 0))
-		return 0;
-
-	if (unlikely(uaddr > end))
-		return -EFAULT;
-
-	do {
-		if (unlikely(__get_user(c, uaddr) != 0))
-			return -EFAULT;
-		uaddr += PAGE_SIZE;
-	} while (uaddr <= end);
-
-	/* Check whether the range spilled into the next page. */
-	if (((unsigned long)uaddr & PAGE_MASK) ==
-			((unsigned long)end & PAGE_MASK)) {
-		return __get_user(c, end);
-	}
-
-	(void)c;
-	return 0;
-}
+size_t fault_in_writeable(char __user *uaddr, size_t size);
+size_t fault_in_readable(const char __user *uaddr, size_t size);
 
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp_mask);
@@ -191,7 +191,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		buf = iov->iov_base + skip;
 		copy = min(bytes, iov->iov_len - skip);
 
-		if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
+		if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
 			kaddr = kmap_atomic(page);
 			from = kaddr + offset;
@@ -275,7 +275,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		buf = iov->iov_base + skip;
 		copy = min(bytes, iov->iov_len - skip);
 
-		if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
+		if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
 			kaddr = kmap_atomic(page);
 			to = kaddr + offset;
@@ -446,13 +446,11 @@ int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
 		bytes = i->count;
 	for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
 		size_t len = min(bytes, p->iov_len - skip);
-		int err;
 
 		if (unlikely(!len))
 			continue;
-		err = fault_in_pages_readable(p->iov_base + skip, len);
-		if (unlikely(err))
-			return err;
+		if (fault_in_readable(p->iov_base + skip, len))
+			return -EFAULT;
 		bytes -= len;
 	}
 }
@@ -90,7 +90,7 @@
  *    ->lock_page		(filemap_fault, access_process_vm)
  *
  *  ->i_rwsem			(generic_perform_write)
- *    ->mmap_lock		(fault_in_pages_readable->do_page_fault)
+ *    ->mmap_lock		(fault_in_readable->do_page_fault)
  *
  * bdi->wb.list_lock
  *   sb_lock			(fs/fs-writeback.c)
mm/gup.c (+72)
@@ -1656,6 +1656,78 @@ finish_or_fault:
 }
 #endif /* !CONFIG_MMU */
 
+/**
+ * fault_in_writeable - fault in userspace address range for writing
+ * @uaddr: start of address range
+ * @size: size of address range
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ */
+size_t fault_in_writeable(char __user *uaddr, size_t size)
+{
+	char __user *start = uaddr, *end;
+
+	if (unlikely(size == 0))
+		return 0;
+	if (!PAGE_ALIGNED(uaddr)) {
+		if (unlikely(__put_user(0, uaddr) != 0))
+			return size;
+		uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
+	}
+	end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
+	if (unlikely(end < start))
+		end = NULL;
+	while (uaddr != end) {
+		if (unlikely(__put_user(0, uaddr) != 0))
+			goto out;
+		uaddr += PAGE_SIZE;
+	}
+
+out:
+	if (size > uaddr - start)
+		return size - (uaddr - start);
+	return 0;
+}
+EXPORT_SYMBOL(fault_in_writeable);
+
+/**
+ * fault_in_readable - fault in userspace address range for reading
+ * @uaddr: start of user address range
+ * @size: size of user address range
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ */
+size_t fault_in_readable(const char __user *uaddr, size_t size)
+{
+	const char __user *start = uaddr, *end;
+	volatile char c;
+
+	if (unlikely(size == 0))
+		return 0;
+	if (!PAGE_ALIGNED(uaddr)) {
+		if (unlikely(__get_user(c, uaddr) != 0))
+			return size;
+		uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
+	}
+	end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
+	if (unlikely(end < start))
+		end = NULL;
+	while (uaddr != end) {
+		if (unlikely(__get_user(c, uaddr) != 0))
+			goto out;
+		uaddr += PAGE_SIZE;
+	}
+
+out:
+	(void)c;
+	if (size > uaddr - start)
+		return size - (uaddr - start);
+	return 0;
+}
+EXPORT_SYMBOL(fault_in_readable);
+
 /**
  * get_dump_page() - pin user page in memory while writing it to core dump
  * @addr: user address
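Because both helpers walk the range from its start and stop at the first page that cannot be faulted in, the bytes they report as not faulted in always form a suffix of the range; the prefix is usable. That is what makes the byte-count return useful to the "new users" the commit message mentions, which are happy with partial progress. A hedged sketch of such a caller (names are hypothetical; this pattern is not part of this commit):

        /*
         * Illustrative partial-progress caller: accept whatever prefix of
         * the buffer could be faulted in, and fail only on no progress.
         */
        static ssize_t prefault_prefix(char __user *ubuf, size_t len)
        {
                size_t rem = fault_in_writeable(ubuf, len);

                if (rem == len)
                        return -EFAULT;         /* nothing could be faulted in */
                return len - rem;               /* length of the usable prefix */
        }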