Merge branches 'akpm' and 'akpm-hotfixes' (patches from Andrew)

Merge yet more updates and hotfixes from Andrew Morton:
 "Post-linux-next material, based upon latest upstream to catch the
  now-merged dependencies:

   - 10 patches.

     Subsystems affected by this patch series: mm (vmstat and migration)
     and compat.

  And a bunch of hotfixes, mostly cc:stable:

   - 8 patches.

     Subsystems affected by this patch series: mm (hmm, hugetlb, vmscan,
     pagealloc, pagemap, kmemleak, mempolicy, and memblock)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  arch: remove compat_alloc_user_space
  compat: remove some compat entry points
  mm: simplify compat numa syscalls
  mm: simplify compat_sys_move_pages
  kexec: avoid compat_alloc_user_space
  kexec: move locking into do_kexec_load
  mm: migrate: change to use bool type for 'page_was_mapped'
  mm: migrate: fix the incorrect function name in comments
  mm: migrate: introduce a local variable to get the number of pages
  mm/vmstat: protect per cpu variables with preempt disable on RT

* emailed hotfixes from Andrew Morton <akpm@linux-foundation.org>:
  nds32/setup: remove unused memblock_region variable in setup_memory()
  mm/mempolicy: fix a race between offset_il_node and mpol_rebind_task
  mm/kmemleak: allow __GFP_NOLOCKDEP passed to kmemleak's gfp
  mmap_lock: change trace and locking order
  mm/page_alloc.c: avoid accessing uninitialized pcp page migratetype
  mm,vmscan: fix divide by zero in get_scan_count
  mm/hugetlb: initialize hugetlb_usage in mm_init
  mm/hmm: bypass devmap pte when all pfn requested flags are fulfilled
Author: Linus Torvalds
Date:   2021-09-08 18:52:05 -07:00

47 changed files with 267 additions and 663 deletions

--- a/kernel/compat.c
+++ b/kernel/compat.c

@@ -269,24 +269,3 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(get_compat_sigset);
-
-/*
- * Allocate user-space memory for the duration of a single system call,
- * in order to marshall parameters inside a compat thunk.
- */
-void __user *compat_alloc_user_space(unsigned long len)
-{
-	void __user *ptr;
-
-	/* If len would occupy more than half of the entire compat space... */
-	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
-		return NULL;
-
-	ptr = arch_compat_alloc_user_space(len);
-
-	if (unlikely(!access_ok(ptr, len)))
-		return NULL;
-
-	return ptr;
-}
-EXPORT_SYMBOL_GPL(compat_alloc_user_space);
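
The series-wide replacement for compat_alloc_user_space() is to convert compat
structures in kernel memory instead of staging a native copy in a scratch
region of the user address space. A minimal sketch of that pattern, with
hypothetical struct and function names (only copy_from_user(), compat_ptr()
and the compat types are real kernel API):

    #include <linux/compat.h>
    #include <linux/uaccess.h>

    struct example_args {                   /* hypothetical native layout */
            void __user     *buf;
            unsigned long   len;
    };

    struct compat_example_args {            /* hypothetical 32-bit layout */
            compat_uptr_t   buf;
            compat_ulong_t  len;
    };

    /* Copy the compat struct in, then widen it in kernel memory. */
    static int example_from_compat(struct example_args *out,
                                   const struct compat_example_args __user *uargs)
    {
            struct compat_example_args in;

            if (copy_from_user(&in, uargs, sizeof(in)))
                    return -EFAULT;

            out->buf = compat_ptr(in.buf); /* compat_uptr_t -> void __user * */
            out->len = in.len;
            return 0;
    }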

--- a/kernel/fork.c
+++ b/kernel/fork.c

@@ -1063,6 +1063,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm->pmd_huge_pte = NULL;
 #endif
 	mm_init_uprobes_state(mm);
+	hugetlb_count_init(mm);
 
 	if (current->mm) {
 		mm->flags = current->mm->flags & MMF_INIT_MASK;
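
This one-liner is the whole hugetlb fix: dup_mm() copies the parent's
mm_struct wholesale, so without an explicit reset a forked child inherited the
parent's hugetlb_usage count and /proc/<pid>/status could report bogus
HugetlbPages values. The companion hunk of the same patch adds the helper to
include/linux/hugetlb.h, roughly (with an empty stub when CONFIG_HUGETLB_PAGE
is off):

    static inline void hugetlb_count_init(struct mm_struct *mm)
    {
            atomic_long_set(&mm->hugetlb_usage, 0);
    }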

--- a/kernel/kexec.c
+++ b/kernel/kexec.c

@@ -19,26 +19,9 @@
 #include "kexec_internal.h"
 
-static int copy_user_segment_list(struct kimage *image,
-				  unsigned long nr_segments,
-				  struct kexec_segment __user *segments)
-{
-	int ret;
-	size_t segment_bytes;
-
-	/* Read in the segments */
-	image->nr_segments = nr_segments;
-	segment_bytes = nr_segments * sizeof(*segments);
-	ret = copy_from_user(image->segment, segments, segment_bytes);
-	if (ret)
-		ret = -EFAULT;
-
-	return ret;
-}
-
 static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
 			     unsigned long nr_segments,
-			     struct kexec_segment __user *segments,
+			     struct kexec_segment *segments,
 			     unsigned long flags)
 {
 	int ret;
@@ -58,10 +41,8 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
 		return -ENOMEM;
 
 	image->start = entry;
-
-	ret = copy_user_segment_list(image, nr_segments, segments);
-	if (ret)
-		goto out_free_image;
+	image->nr_segments = nr_segments;
+	memcpy(image->segment, segments, nr_segments * sizeof(*segments));
 
 	if (kexec_on_panic) {
 		/* Enable special crash kernel control page alloc policy. */
@@ -104,12 +85,23 @@ out_free_image:
 }
 
 static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
-			 struct kexec_segment __user *segments, unsigned long flags)
+			 struct kexec_segment *segments, unsigned long flags)
 {
 	struct kimage **dest_image, *image;
 	unsigned long i;
 	int ret;
 
+	/*
+	 * Because we write directly to the reserved memory region when loading
+	 * crash kernels we need a mutex here to prevent multiple crash kernels
+	 * from attempting to load simultaneously, and to prevent a crash kernel
+	 * from loading over the top of a in use crash kernel.
+	 *
+	 * KISS: always take the mutex.
+	 */
+	if (!mutex_trylock(&kexec_mutex))
+		return -EBUSY;
+
 	if (flags & KEXEC_ON_CRASH) {
 		dest_image = &kexec_crash_image;
 		if (kexec_crash_image)
@@ -121,7 +113,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 	if (nr_segments == 0) {
 		/* Uninstall image */
 		kimage_free(xchg(dest_image, NULL));
-		return 0;
+		ret = 0;
+		goto out_unlock;
 	}
 
 	if (flags & KEXEC_ON_CRASH) {
 		/*
@@ -134,7 +127,7 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
 	if (ret)
-		return ret;
+		goto out_unlock;
 
 	if (flags & KEXEC_PRESERVE_CONTEXT)
 		image->preserve_context = 1;
@@ -171,6 +164,8 @@ out:
 		arch_kexec_protect_crashkres();
 
 	kimage_free(image);
+out_unlock:
+	mutex_unlock(&kexec_mutex);
 	return ret;
 }
@@ -236,7 +231,8 @@ static inline int kexec_load_check(unsigned long nr_segments,
 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 		struct kexec_segment __user *, segments, unsigned long, flags)
 {
-	int result;
+	struct kexec_segment *ksegments;
+	unsigned long result;
 
 	result = kexec_load_check(nr_segments, flags);
 	if (result)
@@ -247,20 +243,12 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
 		return -EINVAL;
 
-	/* Because we write directly to the reserved memory
-	 * region when loading crash kernels we need a mutex here to
-	 * prevent multiple crash kernels from attempting to load
-	 * simultaneously, and to prevent a crash kernel from loading
-	 * over the top of a in use crash kernel.
-	 *
-	 * KISS: always take the mutex.
-	 */
-	if (!mutex_trylock(&kexec_mutex))
-		return -EBUSY;
+	ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
+	if (IS_ERR(ksegments))
+		return PTR_ERR(ksegments);
 
-	result = do_kexec_load(entry, nr_segments, segments, flags);
-
-	mutex_unlock(&kexec_mutex);
+	result = do_kexec_load(entry, nr_segments, ksegments, flags);
+	kfree(ksegments);
 
 	return result;
 }
@@ -272,7 +260,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
 		       compat_ulong_t, flags)
 {
 	struct compat_kexec_segment in;
-	struct kexec_segment out, __user *ksegments;
+	struct kexec_segment *ksegments;
 	unsigned long i, result;
 
 	result = kexec_load_check(nr_segments, flags);
@@ -285,37 +273,26 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
 	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
 		return -EINVAL;
 
-	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
+	ksegments = kmalloc_array(nr_segments, sizeof(ksegments[0]),
+				  GFP_KERNEL);
+	if (!ksegments)
+		return -ENOMEM;
+
 	for (i = 0; i < nr_segments; i++) {
 		result = copy_from_user(&in, &segments[i], sizeof(in));
 		if (result)
-			return -EFAULT;
+			goto fail;
 
-		out.buf = compat_ptr(in.buf);
-		out.bufsz = in.bufsz;
-		out.mem = in.mem;
-		out.memsz = in.memsz;
-
-		result = copy_to_user(&ksegments[i], &out, sizeof(out));
-		if (result)
-			return -EFAULT;
+		ksegments[i].buf = compat_ptr(in.buf);
+		ksegments[i].bufsz = in.bufsz;
+		ksegments[i].mem = in.mem;
+		ksegments[i].memsz = in.memsz;
 	}
 
-	/* Because we write directly to the reserved memory
-	 * region when loading crash kernels we need a mutex here to
-	 * prevent multiple crash kernels from attempting to load
-	 * simultaneously, and to prevent a crash kernel from loading
-	 * over the top of a in use crash kernel.
-	 *
-	 * KISS: always take the mutex.
-	 */
-	if (!mutex_trylock(&kexec_mutex))
-		return -EBUSY;
-
 	result = do_kexec_load(entry, nr_segments, ksegments, flags);
 
-	mutex_unlock(&kexec_mutex);
-
+fail:
+	kfree(ksegments);
 	return result;
 }
 #endif
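
Both entry points now hand do_kexec_load() a kernel-space copy of the segment
array: the native syscall via memdup_user(), the compat syscall via
kmalloc_array() plus a widening loop. A self-contained sketch of the
memdup_user() shape (the function itself is hypothetical; memdup_user() really
does combine the allocation, the copy_from_user() and the ERR_PTR()
convention):

    #include <linux/err.h>
    #include <linux/kexec.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static long example_total_memsz(const struct kexec_segment __user *usegs,
                                    unsigned long nr)
    {
            struct kexec_segment *segs;
            unsigned long i;
            long total = 0;

            /* nr is assumed already bounded, as kexec_load_check() does
             * with KEXEC_SEGMENT_MAX, so the size math cannot wrap. */
            segs = memdup_user(usegs, nr * sizeof(segs[0]));
            if (IS_ERR(segs))
                    return PTR_ERR(segs);   /* -EFAULT or -ENOMEM */

            for (i = 0; i < nr; i++)        /* work on the kernel copy */
                    total += segs[i].memsz;

            kfree(segs);
            return total;
    }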

--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c

@@ -292,15 +292,10 @@ COND_SYSCALL(process_madvise);
 COND_SYSCALL(process_mrelease);
 COND_SYSCALL(remap_file_pages);
 COND_SYSCALL(mbind);
-COND_SYSCALL_COMPAT(mbind);
 COND_SYSCALL(get_mempolicy);
-COND_SYSCALL_COMPAT(get_mempolicy);
 COND_SYSCALL(set_mempolicy);
-COND_SYSCALL_COMPAT(set_mempolicy);
 COND_SYSCALL(migrate_pages);
-COND_SYSCALL_COMPAT(migrate_pages);
 COND_SYSCALL(move_pages);
-COND_SYSCALL_COMPAT(move_pages);
 COND_SYSCALL(perf_event_open);
 COND_SYSCALL(accept4);
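
Only the COND_SYSCALL_COMPAT() stubs go away here; the native COND_SYSCALL()
lines must stay because the NUMA syscalls remain optional (mm/mempolicy.c is
built only with CONFIG_NUMA). COND_SYSCALL() keeps the syscall table linkable
by emitting a weak stub that the real implementation overrides when it is
built in; conceptually it expands to something like the following (the exact
definition lives in include/linux/syscalls.h and varies by architecture):

    /* Weak fallback: used only if no strong sys_mbind() is linked in. */
    asmlinkage __weak long sys_mbind(void)
    {
            return sys_ni_syscall();        /* -ENOSYS */
    }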