Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "15 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  tools/vm: fix cross-compile build
  coredump: fix null pointer dereference on coredump
  mm: shmem: disable interrupt when acquiring info->lock in userfaultfd_copy path
  shmem: fix possible deadlocks on shmlock_user_lock
  vmalloc: fix remap_vmalloc_range() bounds checks
  mm/shmem: fix build without THP
  mm/ksm: fix NULL pointer dereference when KSM zero page is enabled
  tools/build: tweak unused value workaround
  checkpatch: fix a typo in the regex for $allocFunctions
  mm, gup: return EINTR when gup is interrupted by fatal signals
  mm/hugetlb: fix a addressing exception caused by huge_pte_offset
  MAINTAINERS: add an entry for kfifo
  mm/userfaultfd: disable userfaultfd-wp on x86_32
  slub: avoid redzone when choosing freepointer location
  sh: fix build error in mm/init.c
commit 18bf34080c
MAINTAINERS
@@ -9417,6 +9417,13 @@ F:	include/linux/keyctl.h
 F:	include/uapi/linux/keyctl.h
 F:	security/keys/
 
+KFIFO
+M:	Stefani Seibold <stefani@seibold.net>
+S:	Maintained
+F:	include/linux/kfifo.h
+F:	lib/kfifo.c
+F:	samples/kfifo/
+
 KGDB / KDB /debug_core
 M:	Jason Wessel <jason.wessel@windriver.com>
 M:	Daniel Thompson <daniel.thompson@linaro.org>
arch/sh/mm/init.c
@@ -412,7 +412,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot)
+	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
 		return -EINVAL;
 
 	/* We only have ZONE_NORMAL, so this is easy.. */
arch/x86/Kconfig
@@ -149,7 +149,7 @@ config X86
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD	if X86_64
-	select HAVE_ARCH_USERFAULTFD_WP		if USERFAULTFD
+	select HAVE_ARCH_USERFAULTFD_WP		if X86_64 && USERFAULTFD
 	select HAVE_ARCH_VMAP_STACK		if X86_64
 	select HAVE_ARCH_WITHIN_STACK_FRAMES
 	select HAVE_ASM_MODVERSIONS
fs/coredump.c
@@ -211,6 +211,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
 				return -ENOMEM;
 			(*argv)[(*argc)++] = 0;
 			++pat_ptr;
+			if (!(*pat_ptr))
+				return -ENOMEM;
 		}
 
 		/* Repeat as long as we have more pattern to process and more output
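
Note on the fix above: a core_pattern ending in '%' let format_corename() step past the terminating NUL and dereference garbage. The guard simply refuses a pattern that ends immediately after a '%' is consumed. The same shape in a standalone parser (a hypothetical sketch, not the kernel code):

    #include <stdio.h>

    /* Walk a '%'-pattern string, guarding against a trailing '%'.
     * Mirrors the fix: after consuming '%', bail out if the string ends. */
    static int parse_pattern(const char *pat)
    {
        while (*pat) {
            if (*pat == '%') {
                ++pat;
                if (!*pat)         /* pattern ended right after '%' */
                    return -1;     /* the kernel returns -ENOMEM here */
                printf("specifier: %%%c\n", *pat);
            }
            ++pat;
        }
        return 0;
    }

    int main(void)
    {
        return parse_pattern("core.%p.%") ? 1 : 0;
    }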
fs/proc/vmcore.c
@@ -266,7 +266,8 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
 		if (start < offset + dump->size) {
 			tsz = min(offset + (u64)dump->size - start, (u64)size);
 			buf = dump->buf + start - offset;
-			if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
+			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
+							tsz)) {
 				ret = -EFAULT;
 				goto out_unlock;
 			}
@@ -624,7 +625,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
 		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
 		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
-						kaddr, tsz))
+						kaddr, 0, tsz))
 			goto fail;
 
 		size -= tsz;
include/linux/vmalloc.h
@@ -137,7 +137,7 @@ extern void vunmap(const void *addr);
 
 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 				       unsigned long uaddr, void *kaddr,
-				       unsigned long size);
+				       unsigned long pgoff, unsigned long size);
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 							unsigned long pgoff);
mm/gup.c | 2
@@ -1088,7 +1088,7 @@ retry:
 		 * potentially allocating memory.
 		 */
 		if (fatal_signal_pending(current)) {
-			ret = -ERESTARTSYS;
+			ret = -EINTR;
 			goto out;
 		}
 		cond_resched();
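
Note on the -EINTR change above: with a fatal signal pending the task is about to die, so there is no syscall restart to arrange, and -ERESTARTSYS here could even leak out as a bogus errno. Userspace only ever sees the ordinary EINTR contract, roughly (a minimal, hypothetical sketch):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void on_sigint(int sig) { (void)sig; /* just interrupt the read */ }

    int main(void)
    {
        struct sigaction sa;
        char buf[64];
        ssize_t n;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_sigint;   /* note: no SA_RESTART */
        sigaction(SIGINT, &sa, NULL);

        n = read(STDIN_FILENO, buf, sizeof(buf)); /* Ctrl-C interrupts this */
        if (n < 0 && errno == EINTR)
            fprintf(stderr, "read interrupted by a signal\n");
        return 0;
    }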
mm/hugetlb.c | 14
@@ -5365,8 +5365,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
+	pud_t *pud, pud_entry;
+	pmd_t *pmd, pmd_entry;
 
 	pgd = pgd_offset(mm, addr);
 	if (!pgd_present(*pgd))
@@ -5376,17 +5376,19 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		return NULL;
 
 	pud = pud_offset(p4d, addr);
-	if (sz != PUD_SIZE && pud_none(*pud))
+	pud_entry = READ_ONCE(*pud);
+	if (sz != PUD_SIZE && pud_none(pud_entry))
 		return NULL;
 	/* hugepage or swap? */
-	if (pud_huge(*pud) || !pud_present(*pud))
+	if (pud_huge(pud_entry) || !pud_present(pud_entry))
 		return (pte_t *)pud;
 
 	pmd = pmd_offset(pud, addr);
-	if (sz != PMD_SIZE && pmd_none(*pmd))
+	pmd_entry = READ_ONCE(*pmd);
+	if (sz != PMD_SIZE && pmd_none(pmd_entry))
 		return NULL;
 	/* hugepage or swap? */
-	if (pmd_huge(*pmd) || !pmd_present(*pmd))
+	if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry))
 		return (pte_t *)pmd;
 
 	return NULL;
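
Note on the READ_ONCE() changes above: huge_pte_offset() walks the page tables locklessly, and without READ_ONCE() the compiler is free to reload *pud between the pud_none() and pud_huge() tests; a concurrent update can then make the two reads disagree, producing the addressing exception from the changelog. The snapshot-once pattern, sketched in userspace (READ_ONCE reimplemented here for illustration):

    #include <stdio.h>

    /* Simplified READ_ONCE(): force a single, non-reloadable read. */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    static unsigned long table_entry; /* stands in for *pud / *pmd */

    static void check(void)
    {
        /* Snapshot once, then test the same value twice: the two
         * branches below can never see different entries. */
        unsigned long entry = READ_ONCE(table_entry);

        if (entry == 0)
            printf("none\n");
        else if (entry & 1)
            printf("huge\n");
        else
            printf("present table\n");
    }

    int main(void)
    {
        table_entry = 2;
        check();
        return 0;
    }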
mm/ksm.c | 12
@@ -2112,8 +2112,16 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 
 		down_read(&mm->mmap_sem);
 		vma = find_mergeable_vma(mm, rmap_item->address);
-		err = try_to_merge_one_page(vma, page,
-					ZERO_PAGE(rmap_item->address));
+		if (vma) {
+			err = try_to_merge_one_page(vma, page,
+					ZERO_PAGE(rmap_item->address));
+		} else {
+			/*
+			 * If the vma is out of date, we do not need to
+			 * continue.
+			 */
+			err = 0;
+		}
 		up_read(&mm->mmap_sem);
 		/*
 		 * In case of failure, the page was not really empty, so we
mm/shmem.c | 13
@@ -952,7 +952,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			VM_BUG_ON_PAGE(PageWriteback(page), page);
 			if (shmem_punch_compound(page, start, end))
 				truncate_inode_page(mapping, page);
-			else {
+			else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 				/* Wipe the page and don't get stuck */
 				clear_highpage(page);
 				flush_dcache_page(page);
@@ -2179,7 +2179,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	int retval = -ENOMEM;
 
-	spin_lock_irq(&info->lock);
+	/*
+	 * What serializes the accesses to info->flags?
+	 * ipc_lock_object() when called from shmctl_do_lock(),
+	 * no serialization needed when called from shm_destroy().
+	 */
 	if (lock && !(info->flags & VM_LOCKED)) {
 		if (!user_shm_lock(inode->i_size, user))
 			goto out_nomem;
@@ -2194,7 +2198,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	retval = 0;
 
 out_nomem:
-	spin_unlock_irq(&info->lock);
 	return retval;
 }
 
@@ -2399,11 +2402,11 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 
 	lru_cache_add_anon(page);
 
-	spin_lock(&info->lock);
+	spin_lock_irq(&info->lock);
 	info->alloced++;
 	inode->i_blocks += BLOCKS_PER_PAGE;
 	shmem_recalc_inode(inode);
-	spin_unlock(&info->lock);
+	spin_unlock_irq(&info->lock);
 
 	inc_mm_counter(dst_mm, mm_counter_file(page));
 	page_add_file_rmap(page, false);
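
Note on the two shmem locking changes above: info->lock is taken with spin_lock_irq() elsewhere, so the userfaultfd_copy path must disable interrupts too; otherwise an interrupt arriving on the same CPU while the lock is held could try to retake it and spin forever. A rough userspace analogy, using signal blocking to stand in for local_irq_disable() (a sketch, not kernel code; build with -pthread):

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static long alloced; /* stands in for info->alloced */

    static void locked_update(void)
    {
        sigset_t block, old;

        /* Block the asynchronous "interrupt" (a signal here) for as
         * long as the lock is held, so a handler can never deadlock
         * trying to take the same lock -- the spin_lock_irq() idea. */
        sigfillset(&block);
        pthread_sigmask(SIG_BLOCK, &block, &old);
        pthread_mutex_lock(&lock);

        alloced++; /* the critical section */

        pthread_mutex_unlock(&lock);
        pthread_sigmask(SIG_SETMASK, &old, NULL);
    }

    int main(void)
    {
        locked_update();
        printf("alloced = %ld\n", alloced);
        return 0;
    }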
mm/slub.c | 12
@@ -3533,6 +3533,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
+	unsigned int freepointer_area;
 	unsigned int order;
 
 	/*
@@ -3541,6 +3542,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * the possible location of the free pointer.
 	 */
 	size = ALIGN(size, sizeof(void *));
+	/*
+	 * This is the area of the object where a freepointer can be
+	 * safely written. If redzoning adds more to the inuse size, we
+	 * can't use that portion for writing the freepointer, so
+	 * s->offset must be limited within this for the general case.
+	 */
+	freepointer_area = size;
 
 #ifdef CONFIG_SLUB_DEBUG
 	/*
@@ -3582,13 +3590,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		 */
 		s->offset = size;
 		size += sizeof(void *);
-	} else if (size > sizeof(void *)) {
+	} else if (freepointer_area > sizeof(void *)) {
 		/*
 		 * Store freelist pointer near middle of object to keep
 		 * it away from the edges of the object to avoid small
 		 * sized over/underflows from neighboring allocations.
 		 */
-		s->offset = ALIGN(size / 2, sizeof(void *));
+		s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
 	}
 
 #ifdef CONFIG_SLUB_DEBUG
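
Note on the slub change above: the freelist-pointer midpoint is now computed from the redzone-free area instead of the grown inuse size, so the pointer can no longer land in debug padding. The arithmetic, with made-up sizes (a sketch; ALIGN reimplemented):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long size = 24;               /* object_size, word aligned */
        unsigned long freepointer_area = size; /* area safe for the pointer */
        unsigned long redzone = 16;            /* hypothetical debug padding */

        size += redzone; /* redzoning grows the stored size... */

        /* ...but the freelist pointer offset must stay inside the
         * original object area, as the fixed code computes: */
        printf("old offset (buggy): %lu\n", ALIGN(size / 2, sizeof(void *)));
        printf("new offset (fixed): %lu\n",
               ALIGN(freepointer_area / 2, sizeof(void *)));
        return 0;
    }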
mm/vmalloc.c | 16
@@ -34,6 +34,7 @@
 #include <linux/llist.h>
 #include <linux/bitops.h>
 #include <linux/rbtree_augmented.h>
+#include <linux/overflow.h>
 
 #include <linux/uaccess.h>
 #include <asm/tlbflush.h>
@@ -3054,6 +3055,7 @@ finished:
  * @vma:		vma to cover
  * @uaddr:		target user address to start at
  * @kaddr:		virtual address of vmalloc kernel memory
+ * @pgoff:		offset from @kaddr to start at
  * @size:		size of map area
  *
  * Returns:	0 for success, -Exxx on failure
@@ -3066,9 +3068,15 @@ finished:
  * Similar to remap_pfn_range() (see mm/memory.c)
  */
 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
-				void *kaddr, unsigned long size)
+				void *kaddr, unsigned long pgoff,
+				unsigned long size)
 {
 	struct vm_struct *area;
+	unsigned long off;
+	unsigned long end_index;
+
+	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
+		return -EINVAL;
 
 	size = PAGE_ALIGN(size);
 
@@ -3082,8 +3090,10 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
 		return -EINVAL;
 
-	if (kaddr + size > area->addr + get_vm_area_size(area))
+	if (check_add_overflow(size, off, &end_index) ||
+	    end_index > get_vm_area_size(area))
 		return -EINVAL;
+	kaddr += off;
 
 	do {
 		struct page *page = vmalloc_to_page(kaddr);
@@ -3122,7 +3132,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 						unsigned long pgoff)
 {
 	return remap_vmalloc_range_partial(vma, vma->vm_start,
-					   addr + (pgoff << PAGE_SHIFT),
+					   addr, pgoff,
 					   vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
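
Note on the new bounds checks above: two overflows are guarded, pgoff << PAGE_SHIFT (check_shl_overflow()) and size + off (check_add_overflow()); previously a huge pgoff from a caller could wrap the kaddr + size comparison and remap memory outside the vmalloc area. The same pattern with the GCC/Clang overflow builtins (a userspace sketch, not the kernel helpers):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Mimic check_shl_overflow()/check_add_overflow(): the builtins
     * return nonzero (true) when the operation wraps. */
    static int remap_bounds_ok(unsigned long pgoff, unsigned long size,
                               unsigned long area_size)
    {
        unsigned long off, end_index;

        if (__builtin_mul_overflow(pgoff, 1UL << PAGE_SHIFT, &off))
            return 0; /* pgoff << PAGE_SHIFT overflowed */
        if (__builtin_add_overflow(size, off, &end_index) ||
            end_index > area_size)
            return 0; /* range escapes the vmalloc area */
        return 1;
    }

    int main(void)
    {
        printf("sane:     %d\n", remap_bounds_ok(1, 4096, 1 << 20));
        printf("overflow: %d\n", remap_bounds_ok(~0UL, 4096, 1 << 20));
        return 0;
    }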
samples/vfio-mdev/mdpy.c
@@ -418,7 +418,7 @@ static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 		return -EINVAL;
 
 	return remap_vmalloc_range_partial(vma, vma->vm_start,
-					   mdev_state->memblk,
+					   mdev_state->memblk, 0,
 					   vma->vm_end - vma->vm_start);
 }
 
scripts/checkpatch.pl
@@ -479,7 +479,7 @@ our $allocFunctions = qr{(?x:
 	(?:kv|k|v)[czm]alloc(?:_node|_array)? |
 	kstrdup(?:_const)? |
 	kmemdup(?:_nul)?) |
-	(?:\w+)?alloc_skb(?:ip_align)? |
+	(?:\w+)?alloc_skb(?:_ip_align)? |
 				# dev_alloc_skb/netdev_alloc_skb, et al
 	dma_alloc_coherent
 )};
tools/build/feature/test-sync-compare-and-swap.c
@@ -7,7 +7,7 @@ int main(int argc, char *argv[])
 {
 	uint64_t old, new = argc;
 
-	argv = argv;
+	(void)argv;
 	do {
 		old = __sync_val_compare_and_swap(&x, 0, 0);
 	} while (!__sync_bool_compare_and_swap(&x, old, new));
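
Note on the tweak above: `argv = argv` was an old way to silence unused-parameter warnings, but newer compilers flag the self-assignment itself (e.g. clang's -Wself-assign). Casting to void is the portable idiom:

    #include <stdio.h>

    int main(int argc, char *argv[])
    {
        (void)argv; /* mark deliberately unused; no warning, no code */

        printf("argc = %d\n", argc);
        return 0;
    }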
tools/vm/Makefile
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for vm tools
 #
+include ../scripts/Makefile.include
+
 TARGETS=page-types slabinfo page_owner_sort
 
 LIB_DIR = ../lib/api