Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "17 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  hexagon: define ioremap_uc
  ocfs2: fix the crash due to call ocfs2_get_dlm_debug once less
  ocfs2: call journal flush to mark journal as empty after journal recovery when mount
  mm/hugetlb: defer freeing of huge pages if in non-task context
  mm/gup: fix memory leak in __gup_benchmark_ioctl
  mm/oom: fix pgtables units mismatch in Killed process message
  fs/posix_acl.c: fix kernel-doc warnings
  hexagon: work around compiler crash
  hexagon: parenthesize registers in asm predicates
  fs/namespace.c: make to_mnt_ns() static
  fs/nsfs.c: include headers for missing declarations
  fs/direct-io.c: include fs/internal.h for missing prototype
  mm: move_pages: return valid node id in status if the page is already on the target node
  memcg: account security cred as well to kmemcg
  kcov: fix struct layout for kcov_remote_arg
  mm/zsmalloc.c: fix the migrated zspage statistics.
  mm/memory_hotplug: shrink zones when offlining memory
commit 36487907f3
@@ -251,11 +251,11 @@ selectively from different subsystems.

 .. code-block:: c

     struct kcov_remote_arg {
-        unsigned        trace_mode;
-        unsigned        area_size;
-        unsigned        num_handles;
-        uint64_t        common_handle;
-        uint64_t        handles[0];
+        __u32           trace_mode;
+        __u32           area_size;
+        __u32           num_handles;
+        __aligned_u64   common_handle;
+        __aligned_u64   handles[0];
     };

 #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
@@ -1070,7 +1070,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
         unsigned long start_pfn = start >> PAGE_SHIFT;
         unsigned long nr_pages = size >> PAGE_SHIFT;
-        struct zone *zone;

         /*
          * FIXME: Cleanup page tables (also in arch_add_memory() in case
@@ -1079,7 +1078,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
          * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
          * unlocked yet.
          */
-        zone = page_zone(pfn_to_page(start_pfn));
-        __remove_pages(zone, start_pfn, nr_pages, altmap);
+        __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
@@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
         "1: %0 = memw_locked(%1);\n" \
         " %0 = "#op "(%0,%2);\n" \
         " memw_locked(%1,P3)=%0;\n" \
-        " if !P3 jump 1b;\n" \
+        " if (!P3) jump 1b;\n" \
         : "=&r" (output) \
         : "r" (&v->counter), "r" (i) \
         : "memory", "p3" \
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
         "1: %0 = memw_locked(%1);\n" \
         " %0 = "#op "(%0,%2);\n" \
         " memw_locked(%1,P3)=%0;\n" \
-        " if !P3 jump 1b;\n" \
+        " if (!P3) jump 1b;\n" \
         : "=&r" (output) \
         : "r" (&v->counter), "r" (i) \
         : "memory", "p3" \
@@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
         "1: %0 = memw_locked(%2);\n" \
         " %1 = "#op "(%0,%3);\n" \
         " memw_locked(%2,P3)=%1;\n" \
-        " if !P3 jump 1b;\n" \
+        " if (!P3) jump 1b;\n" \
         : "=&r" (output), "=&r" (val) \
         : "r" (&v->counter), "r" (i) \
         : "memory", "p3" \
@@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
         " }"
         " memw_locked(%2, p3) = %1;"
         " {"
-        " if !p3 jump 1b;"
+        " if (!p3) jump 1b;"
         " }"
         "2:"
         : "=&r" (__oldval), "=&r" (tmp)
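This and the remaining hexagon hunks below (bitops, cmpxchg, futex, spinlock, vm_entry.S) all make the same assembler-syntax change: newer assemblers require the predicate register in a conditional instruction to be parenthesized, so `if !P3 jump 1b` becomes `if (!P3) jump 1b`; the instruction semantics are untouched. For readers unfamiliar with the pattern, the memw_locked load-locked/store-conditional retry loop corresponds roughly to this C11 sketch — an illustration only, not the kernel's implementation:

    #include <stdatomic.h>

    /* Rough C11 analogue of the hexagon LL/SC loop: load, apply the op,
     * and retry when the conditional store fails ("if (!P3) jump 1b"). */
    static int atomic_add_return_sketch(int i, _Atomic int *v)
    {
        int old = atomic_load_explicit(v, memory_order_relaxed);

        /* on failure 'old' is refreshed with the current value, like
         * re-running the memw_locked load at label 1: */
        while (!atomic_compare_exchange_weak(v, &old, old + i))
            ;
        return old + i;
    }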
@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
         "1: R12 = memw_locked(R10);\n"
         " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
         " memw_locked(R10,P1) = R12;\n"
-        " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+        " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
         : "=&r" (oldval)
         : "r" (addr), "r" (nr)
         : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
         "1: R12 = memw_locked(R10);\n"
         " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
         " memw_locked(R10,P1) = R12;\n"
-        " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+        " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
         : "=&r" (oldval)
         : "r" (addr), "r" (nr)
         : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
         "1: R12 = memw_locked(R10);\n"
         " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
         " memw_locked(R10,P1) = R12;\n"
-        " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+        " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
         : "=&r" (oldval)
         : "r" (addr), "r" (nr)
         : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -223,7 +223,7 @@ static inline int ffs(int x)
         int r;

         asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
-            "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+            "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
             : "=&r" (r)
             : "r" (x)
             : "p0");
@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
         __asm__ __volatile__ (
         "1: %0 = memw_locked(%1);\n"    /* load into retval */
         " memw_locked(%1,P0) = %2;\n"   /* store into memory */
-        " if !P0 jump 1b;\n"
+        " if (!P0) jump 1b;\n"
         : "=&r" (retval)
         : "r" (ptr), "r" (x)
         : "memory", "p0"
@@ -16,7 +16,7 @@
         /* For example: %1 = %4 */ \
         insn \
         "2: memw_locked(%3,p2) = %1;\n" \
-        " if !p2 jump 1b;\n" \
+        " if (!p2) jump 1b;\n" \
         " %1 = #0;\n" \
         "3:\n" \
         ".section .fixup,\"ax\"\n" \
@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
         "1: %1 = memw_locked(%3)\n"
         " {\n"
         "   p2 = cmp.eq(%1,%4)\n"
-        "   if !p2.new jump:NT 3f\n"
+        "   if (!p2.new) jump:NT 3f\n"
         " }\n"
         "2: memw_locked(%3,p2) = %5\n"
-        " if !p2 jump 1b\n"
+        " if (!p2) jump 1b\n"
         "3:\n"
         ".section .fixup,\"ax\"\n"
         "4: %0 = #%6\n"
@@ -173,6 +173,7 @@ static inline void writel(u32 data, volatile void __iomem *addr)

 void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
 #define ioremap_nocache ioremap
+#define ioremap_uc(X, Y) ioremap((X), (Y))


 #define __raw_writel writel
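hexagon's ioremap() already returns an uncached mapping, so aliasing ioremap_uc() to it is the whole fix — presumably to satisfy generic callers such as devm_ioremap_uc() in lib/devres.c. A hypothetical driver-side use, with phys_base as a stand-in name:

    /* sketch only: map device registers with an explicitly-uncached
     * mapping; on hexagon this now behaves exactly like ioremap() */
    static int probe_sketch(unsigned long phys_base)  /* hypothetical */
    {
        void __iomem *regs = ioremap_uc(phys_base, 0x1000);

        if (!regs)
            return -ENOMEM;
        writel(1, regs);        /* uncached MMIO store */
        iounmap(regs);
        return 0;
    }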
@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
         __asm__ __volatile__(
         "1: R6 = memw_locked(%0);\n"
         " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-        " { if !P3 jump 1b; }\n"
+        " { if (!P3) jump 1b; }\n"
         " memw_locked(%0,P3) = R6;\n"
-        " { if !P3 jump 1b; }\n"
+        " { if (!P3) jump 1b; }\n"
         :
         : "r" (&lock->lock)
         : "memory", "r6", "p3"
@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
         "1: R6 = memw_locked(%0);\n"
         " R6 = add(R6,#-1);\n"
         " memw_locked(%0,P3) = R6\n"
-        " if !P3 jump 1b;\n"
+        " if (!P3) jump 1b;\n"
         :
         : "r" (&lock->lock)
         : "memory", "r6", "p3"
@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
         __asm__ __volatile__(
         " R6 = memw_locked(%1);\n"
         " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-        " { if !P3 jump 1f; }\n"
+        " { if (!P3) jump 1f; }\n"
         " memw_locked(%1,P3) = R6;\n"
         " { %0 = P3 }\n"
         "1:\n"
@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
         __asm__ __volatile__(
         "1: R6 = memw_locked(%0)\n"
         " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
-        " { if !P3 jump 1b; }\n"
+        " { if (!P3) jump 1b; }\n"
         " memw_locked(%0,P3) = R6;\n"
-        " { if !P3 jump 1b; }\n"
+        " { if (!P3) jump 1b; }\n"
         :
         : "r" (&lock->lock)
         : "memory", "r6", "p3"
@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
         __asm__ __volatile__(
         " R6 = memw_locked(%1)\n"
         " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
-        " { if !P3 jump 1f; }\n"
+        " { if (!P3) jump 1f; }\n"
         " memw_locked(%1,P3) = R6;\n"
         " %0 = P3;\n"
         "1:\n"
@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
         __asm__ __volatile__(
         "1: R6 = memw_locked(%0);\n"
         " P3 = cmp.eq(R6,#0);\n"
-        " { if !P3 jump 1b; R6 = #1; }\n"
+        " { if (!P3) jump 1b; R6 = #1; }\n"
         " memw_locked(%0,P3) = R6;\n"
-        " { if !P3 jump 1b; }\n"
+        " { if (!P3) jump 1b; }\n"
         :
         : "r" (&lock->lock)
         : "memory", "r6", "p3"
@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
         __asm__ __volatile__(
         " R6 = memw_locked(%1);\n"
         " P3 = cmp.eq(R6,#0);\n"
-        " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+        " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
         " memw_locked(%1,P3) = R6;\n"
         " %0 = P3;\n"
         "1:\n"
@@ -11,8 +11,6 @@
 #include <linux/thread_info.h>
 #include <linux/module.h>

-register unsigned long current_frame_pointer asm("r30");
-
 struct stackframe {
         unsigned long fp;
         unsigned long rets;
@@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace)

         low = (unsigned long)task_stack_page(current);
         high = low + THREAD_SIZE;
-        fp = current_frame_pointer;
+        fp = (unsigned long)__builtin_frame_address(0);

         while (fp >= low && fp <= (high - sizeof(*frame))) {
                 frame = (struct stackframe *)fp;
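The removed file-scope register variable pinned to r30 is what crashed newer compilers; __builtin_frame_address(0) is the portable GCC/Clang way to obtain the current frame pointer at the point of use. A minimal sketch:

    /* sketch: starting frame for a stack walk, without pinning a global
     * variable to a hardware register */
    static unsigned long current_fp(void)
    {
        return (unsigned long)__builtin_frame_address(0);
    }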
@@ -369,7 +369,7 @@ ret_from_fork:
                 R26.L = #LO(do_work_pending);
                 R0 = #VM_INT_DISABLE;
         }
-        if P0 jump check_work_pending
+        if (P0) jump check_work_pending
         {
                 R0 = R25;
                 callr R24
@@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
         unsigned long start_pfn = start >> PAGE_SHIFT;
         unsigned long nr_pages = size >> PAGE_SHIFT;
-        struct zone *zone;

-        zone = page_zone(pfn_to_page(start_pfn));
-        __remove_pages(zone, start_pfn, nr_pages, altmap);
+        __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
@@ -151,10 +151,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
         unsigned long start_pfn = start >> PAGE_SHIFT;
         unsigned long nr_pages = size >> PAGE_SHIFT;
-        struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
         int ret;

-        __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
+        __remove_pages(start_pfn, nr_pages, altmap);

         /* Remove htab bolted mappings for this section of memory */
         start = (unsigned long)__va(start);
@@ -292,10 +292,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
         unsigned long start_pfn = start >> PAGE_SHIFT;
         unsigned long nr_pages = size >> PAGE_SHIFT;
-        struct zone *zone;

-        zone = page_zone(pfn_to_page(start_pfn));
-        __remove_pages(zone, start_pfn, nr_pages, altmap);
+        __remove_pages(start_pfn, nr_pages, altmap);
         vmem_remove_mapping(start, size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
         unsigned long start_pfn = PFN_DOWN(start);
         unsigned long nr_pages = size >> PAGE_SHIFT;
-        struct zone *zone;

-        zone = page_zone(pfn_to_page(start_pfn));
-        __remove_pages(zone, start_pfn, nr_pages, altmap);
+        __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
         unsigned long start_pfn = start >> PAGE_SHIFT;
         unsigned long nr_pages = size >> PAGE_SHIFT;
-        struct zone *zone;

-        zone = page_zone(pfn_to_page(start_pfn));
-        __remove_pages(zone, start_pfn, nr_pages, altmap);
+        __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif

@@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
         unsigned long start_pfn = start >> PAGE_SHIFT;
         unsigned long nr_pages = size >> PAGE_SHIFT;
-        struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
-        struct zone *zone = page_zone(page);

-        __remove_pages(zone, start_pfn, nr_pages, altmap);
+        __remove_pages(start_pfn, nr_pages, altmap);
         kernel_physical_mapping_remove(start, start + size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -39,6 +39,8 @@
 #include <linux/atomic.h>
 #include <linux/prefetch.h>

+#include "internal.h"
+
 /*
  * How many user pages to map in one call to get_user_pages(). This determines
  * the size of a structure in the slab cache
@@ -1728,7 +1728,7 @@ static bool is_mnt_ns_file(struct dentry *dentry)
                dentry->d_fsdata == &mntns_operations;
 }

-struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
+static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
 {
         return container_of(ns, struct mnt_namespace, ns);
 }
@@ -3,6 +3,7 @@
 #include <linux/pseudo_fs.h>
 #include <linux/file.h>
+#include <linux/fs.h>
 #include <linux/proc_fs.h>
 #include <linux/proc_ns.h>
 #include <linux/magic.h>
 #include <linux/ktime.h>
@@ -11,6 +12,8 @@
 #include <linux/nsfs.h>
 #include <linux/uaccess.h>

+#include "internal.h"
+
 static struct vfsmount *nsfs_mnt;

 static long ns_ioctl(struct file *filp, unsigned int ioctl,
@@ -3282,6 +3282,7 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)

         debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
                            &dlm_debug->d_filter_secs);
+        ocfs2_get_dlm_debug(dlm_debug);
 }

 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
@@ -1066,6 +1066,14 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)

         ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);

+        if (replayed) {
+                jbd2_journal_lock_updates(journal->j_journal);
+                status = jbd2_journal_flush(journal->j_journal);
+                jbd2_journal_unlock_updates(journal->j_journal);
+                if (status < 0)
+                        mlog_errno(status);
+        }
+
         status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
         if (status < 0) {
                 mlog_errno(status);
@@ -631,12 +631,15 @@ EXPORT_SYMBOL_GPL(posix_acl_create);

 /**
  * posix_acl_update_mode - update mode in set_acl
+ * @inode: target inode
+ * @mode_p: mode (pointer) for update
+ * @acl: acl pointer
  *
  * Update the file mode when setting an ACL: compute the new file permission
  * bits based on the ACL. In addition, if the ACL is equivalent to the new
- * file mode, set *acl to NULL to indicate that no ACL should be set.
+ * file mode, set *@acl to NULL to indicate that no ACL should be set.
  *
- * As with chmod, clear the setgit bit if the caller is not in the owning group
+ * As with chmod, clear the setgid bit if the caller is not in the owning group
  * or capable of CAP_FSETID (see inode_change_ok).
  *
  * Called from set_acl inode operations.
@@ -122,8 +122,8 @@ static inline bool movable_node_is_enabled(void)

 extern void arch_remove_memory(int nid, u64 start, u64 size,
                                struct vmem_altmap *altmap);
-extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
-                           unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
+                           struct vmem_altmap *altmap);

 /* reasonably generic interface to expand the physical pages */
 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
@@ -342,6 +342,9 @@ extern int add_memory(int nid, u64 start, u64 size);
 extern int add_memory_resource(int nid, struct resource *resource);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
                                    unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void remove_pfn_range_from_zone(struct zone *zone,
+                                       unsigned long start_pfn,
+                                       unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern int sparse_add_section(int nid, unsigned long pfn,
                               unsigned long nr_pages, struct vmem_altmap *altmap);
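The prototype change splits the work: __remove_pages() now only tears down memory sections, while the new remove_pfn_range_from_zone() shrinks zone/node spans and is called from the offlining and error paths instead. All the arch_remove_memory() hunks above collapse to the same shape; a sketch under those assumptions (the function name here is illustrative):

    /* what an arch_remove_memory() looks like after this series: no
     * page_zone() lookup, because the memmap for the range may never
     * have been initialized -- the crash this series avoids */
    void arch_remove_memory_sketch(int nid, u64 start, u64 size,
                                   struct vmem_altmap *altmap)
    {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
    }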
@@ -9,11 +9,11 @@
  * and the comment before kcov_remote_start() for usage details.
  */
 struct kcov_remote_arg {
-        unsigned int    trace_mode;     /* KCOV_TRACE_PC or KCOV_TRACE_CMP */
-        unsigned int    area_size;      /* Length of coverage buffer in words */
-        unsigned int    num_handles;    /* Size of handles array */
-        __u64           common_handle;
-        __u64           handles[0];
+        __u32           trace_mode;     /* KCOV_TRACE_PC or KCOV_TRACE_CMP */
+        __u32           area_size;      /* Length of coverage buffer in words */
+        __u32           num_handles;    /* Size of handles array */
+        __aligned_u64   common_handle;
+        __aligned_u64   handles[0];
 };

 #define KCOV_REMOTE_MAX_HANDLES         0x100
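With __u32 fields and __aligned_u64, struct kcov_remote_arg has identical size and field offsets on 32-bit and 64-bit kernels, so a 32-bit process on a 64-bit kernel needs no compat translation. A userspace sketch; the descriptor setup and handle value are hypothetical:

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kcov.h>
    #include <linux/types.h>

    /* enable remote coverage for one handle; 'fd' is assumed to be an
     * already-opened and KCOV_INIT_TRACE'd kcov descriptor */
    static int kcov_remote_enable_one(int fd, __u64 handle)
    {
        struct kcov_remote_arg *arg;
        int ret;

        arg = calloc(1, sizeof(*arg) + sizeof(__u64));
        if (!arg)
            return -1;
        arg->trace_mode = KCOV_TRACE_PC;
        arg->area_size = 1 << 16;       /* in words */
        arg->num_handles = 1;
        arg->handles[0] = handle;       /* same offset on 32- and 64-bit */
        ret = ioctl(fd, KCOV_REMOTE_ENABLE, arg);
        free(arg);
        return ret;
    }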
@@ -223,7 +223,7 @@ struct cred *cred_alloc_blank(void)
         new->magic = CRED_MAGIC;
 #endif

-        if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
+        if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
                 goto error;

         return new;
@@ -282,7 +282,7 @@ struct cred *prepare_creds(void)
         new->security = NULL;
 #endif

-        if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+        if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
                 goto error;
         validate_creds(new);
         return new;
@@ -715,7 +715,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 #ifdef CONFIG_SECURITY
         new->security = NULL;
 #endif
-        if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+        if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
                 goto error;

         put_cred(old);
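GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT, so the LSM blobs attached to a cred are now charged to the allocating task's memory cgroup; unaccounted cred allocations were a way for a container to consume kernel memory outside its memcg limit. The flag's effect in isolation, with blob_size as a hypothetical stand-in:

    /* any allocation made with __GFP_ACCOUNT is charged to, and
     * reclaimed against, the current task's memcg */
    static void *alloc_cred_blob(size_t blob_size)
    {
        return kmalloc(blob_size, GFP_KERNEL_ACCOUNT);
    }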
@@ -26,6 +26,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
         unsigned long i, nr_pages, addr, next;
         int nr;
         struct page **pages;
+        int ret = 0;

         if (gup->size > ULONG_MAX)
                 return -EINVAL;
@@ -63,7 +64,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
                                     NULL);
                 break;
         default:
-                return -1;
+                kvfree(pages);
+                ret = -EINVAL;
+                goto out;
         }

         if (nr <= 0)
@@ -85,7 +88,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
         gup->put_delta_usec = ktime_us_delta(end_time, start_time);

         kvfree(pages);
-        return 0;
+out:
+        return ret;
 }

 static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd,
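The old default: arm returned -1 directly, leaking the pages array allocated earlier in the function and returning a nonsensical error code; the fix frees the buffer and returns -EINVAL. The general shape the fix enforces, as a standalone sketch with illustrative names:

    /* one allocation, freed on every exit path, including the
     * rejected-command arm of the switch */
    static int ioctl_sketch(unsigned int cmd)
    {
        void **pages = kvcalloc(1024, sizeof(void *), GFP_KERNEL);
        int ret = 0;

        if (!pages)
            return -ENOMEM;

        switch (cmd) {
        case 0:
            /* ... use pages ... */
            break;
        default:
            ret = -EINVAL;  /* previously: return -1, leaking 'pages' */
            break;
        }

        kvfree(pages);
        return ret;
    }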
mm/hugetlb.c
@@ -27,6 +27,7 @@
 #include <linux/swapops.h>
 #include <linux/jhash.h>
 #include <linux/numa.h>
+#include <linux/llist.h>

 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -1136,7 +1137,7 @@ static inline void ClearPageHugeTemporary(struct page *page)
         page[2].mapping = NULL;
 }

-void free_huge_page(struct page *page)
+static void __free_huge_page(struct page *page)
 {
         /*
          * Can't pass hstate in here because it is called from the
@@ -1199,6 +1200,54 @@ void free_huge_page(struct page *page)
         spin_unlock(&hugetlb_lock);
 }

+/*
+ * As free_huge_page() can be called from a non-task context, we have
+ * to defer the actual freeing in a workqueue to prevent potential
+ * hugetlb_lock deadlock.
+ *
+ * free_hpage_workfn() locklessly retrieves the linked list of pages to
+ * be freed and frees them one-by-one. As the page->mapping pointer is
+ * going to be cleared in __free_huge_page() anyway, it is reused as the
+ * llist_node structure of a lockless linked list of huge pages to be freed.
+ */
+static LLIST_HEAD(hpage_freelist);
+
+static void free_hpage_workfn(struct work_struct *work)
+{
+        struct llist_node *node;
+        struct page *page;
+
+        node = llist_del_all(&hpage_freelist);
+
+        while (node) {
+                page = container_of((struct address_space **)node,
+                                    struct page, mapping);
+                node = node->next;
+                __free_huge_page(page);
+        }
+}
+static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
+
+void free_huge_page(struct page *page)
+{
+        /*
+         * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
+         */
+        if (!in_task()) {
+                /*
+                 * Only call schedule_work() if hpage_freelist is previously
+                 * empty. Otherwise, schedule_work() had been called but the
+                 * workfn hasn't retrieved the list yet.
+                 */
+                if (llist_add((struct llist_node *)&page->mapping,
+                              &hpage_freelist))
+                        schedule_work(&free_hpage_work);
+                return;
+        }
+
+        __free_huge_page(page);
+}
+
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
         INIT_LIST_HEAD(&page->lru);
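The key to the hunk above is that llist_add() returns true only when the list was previously empty, so schedule_work() fires exactly once per batch, and page->mapping doubles as the llist_node because it is about to be cleared anyway. The same pattern in generic form, with illustrative names throughout:

    #include <linux/llist.h>
    #include <linux/workqueue.h>

    static LLIST_HEAD(deferred);

    static void drain_fn(struct work_struct *work)
    {
        struct llist_node *node = llist_del_all(&deferred);

        while (node) {
            struct llist_node *next = node->next;
            /* ... free the object embedding 'node' ... */
            node = next;
        }
    }
    static DECLARE_WORK(drain_work, drain_fn);

    static void defer_free(struct llist_node *node)
    {
        /* llist_add() returns true only if the list was empty */
        if (llist_add(node, &deferred))
            schedule_work(&drain_work);
    }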
@@ -483,8 +483,9 @@ static void update_pgdat_span(struct pglist_data *pgdat)
         pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
 }

-static void __remove_zone(struct zone *zone, unsigned long start_pfn,
-                          unsigned long nr_pages)
+void __ref remove_pfn_range_from_zone(struct zone *zone,
+                                      unsigned long start_pfn,
+                                      unsigned long nr_pages)
 {
         struct pglist_data *pgdat = zone->zone_pgdat;
         unsigned long flags;
@@ -499,28 +500,30 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
                 return;
 #endif

+        clear_zone_contiguous(zone);
+
         pgdat_resize_lock(zone->zone_pgdat, &flags);
         shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
         update_pgdat_span(pgdat);
         pgdat_resize_unlock(zone->zone_pgdat, &flags);
+
+        set_zone_contiguous(zone);
 }

-static void __remove_section(struct zone *zone, unsigned long pfn,
-                             unsigned long nr_pages, unsigned long map_offset,
-                             struct vmem_altmap *altmap)
+static void __remove_section(unsigned long pfn, unsigned long nr_pages,
+                             unsigned long map_offset,
+                             struct vmem_altmap *altmap)
 {
         struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));

         if (WARN_ON_ONCE(!valid_section(ms)))
                 return;

-        __remove_zone(zone, pfn, nr_pages);
         sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
 }

 /**
- * __remove_pages() - remove sections of pages from a zone
- * @zone: zone from which pages need to be removed
+ * __remove_pages() - remove sections of pages
  * @pfn: starting pageframe (must be aligned to start of a section)
  * @nr_pages: number of pages to remove (must be multiple of section size)
  * @altmap: alternative device page map or %NULL if default memmap is used
@@ -530,16 +533,14 @@ static void __remove_section(struct zone *zone, unsigned long pfn,
  * sure that pages are marked reserved and zones are adjust properly by
  * calling offline_pages().
  */
-void __remove_pages(struct zone *zone, unsigned long pfn,
-                    unsigned long nr_pages, struct vmem_altmap *altmap)
+void __remove_pages(unsigned long pfn, unsigned long nr_pages,
+                    struct vmem_altmap *altmap)
 {
         unsigned long map_offset = 0;
         unsigned long nr, start_sec, end_sec;

         map_offset = vmem_altmap_offset(altmap);

-        clear_zone_contiguous(zone);
-
         if (check_pfn_span(pfn, nr_pages, "remove"))
                 return;

@@ -551,13 +552,11 @@ void __remove_pages(struct zone *zone, unsigned long pfn,
                 cond_resched();
                 pfns = min(nr_pages, PAGES_PER_SECTION
                                 - (pfn & ~PAGE_SECTION_MASK));
-                __remove_section(zone, pfn, pfns, map_offset, altmap);
+                __remove_section(pfn, pfns, map_offset, altmap);
                 pfn += pfns;
                 nr_pages -= pfns;
                 map_offset = 0;
         }
-
-        set_zone_contiguous(zone);
 }

 int set_online_page_callback(online_page_callback_t callback)
@@ -869,6 +868,7 @@ failed_addition:
                  (unsigned long long) pfn << PAGE_SHIFT,
                  (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
         memory_notify(MEM_CANCEL_ONLINE, &arg);
+        remove_pfn_range_from_zone(zone, pfn, nr_pages);
         mem_hotplug_done();
         return ret;
 }
@@ -1628,6 +1628,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
         writeback_set_ratelimit();

         memory_notify(MEM_OFFLINE, &arg);
+        remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
         mem_hotplug_done();
         return 0;

@@ -120,7 +120,7 @@ void memunmap_pages(struct dev_pagemap *pgmap)

         mem_hotplug_begin();
         if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-                __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
+                __remove_pages(PHYS_PFN(res->start),
                                PHYS_PFN(resource_size(res)), NULL);
         } else {
                 arch_remove_memory(nid, res->start, resource_size(res),
mm/migrate.c
@@ -1512,9 +1512,11 @@ static int do_move_pages_to_node(struct mm_struct *mm,
 /*
  * Resolves the given address to a struct page, isolates it from the LRU and
  * puts it to the given pagelist.
- * Returns -errno if the page cannot be found/isolated or 0 when it has been
- * queued or the page doesn't need to be migrated because it is already on
- * the target node
+ * Returns:
+ *     errno - if the page cannot be found/isolated
+ *     0 - when it doesn't have to be migrated because it is already on the
+ *         target node
+ *     1 - when it has been queued
  */
 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                 int node, struct list_head *pagelist, bool migrate_all)
@@ -1553,7 +1555,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
         if (PageHuge(page)) {
                 if (PageHead(page)) {
                         isolate_huge_page(page, pagelist);
-                        err = 0;
+                        err = 1;
                 }
         } else {
                 struct page *head;
@@ -1563,7 +1565,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                 if (err)
                         goto out_putpage;

-                err = 0;
+                err = 1;
                 list_add_tail(&head->lru, pagelist);
                 mod_node_page_state(page_pgdat(head),
                         NR_ISOLATED_ANON + page_is_file_cache(head),
@@ -1640,8 +1642,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                  */
                 err = add_page_for_migration(mm, addr, current_node,
                                 &pagelist, flags & MPOL_MF_MOVE_ALL);
-                if (!err)
+
+                if (!err) {
+                        /* The page is already on the target node */
+                        err = store_status(status, i, current_node, 1);
+                        if (err)
+                                goto out_flush;
+                        continue;
+                } else if (err > 0) {
+                        /* The page is successfully queued for migration */
                         continue;
+                }

                 err = store_status(status, i, err, 1);
                 if (err)
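The visible effect is in the move_pages(2) status array: pages already on the requested node now report that node id instead of being skipped. A userspace sketch against libnuma's wrapper; addr is assumed to be a valid, touched mapping:

    #include <numaif.h>
    #include <stdio.h>

    /* query-and-move one page; after this fix status[0] holds the node
     * id even when no migration was actually needed */
    static void move_one(void *addr)
    {
        void *pages[1] = { addr };
        int nodes[1] = { 1 };           /* request node 1 */
        int status[1];

        if (move_pages(0 /* self */, 1, pages, nodes, status,
                       MPOL_MF_MOVE) == 0)
            printf("page is on node %d\n", status[0]);
    }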
@@ -890,7 +890,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
                 K(get_mm_counter(mm, MM_FILEPAGES)),
                 K(get_mm_counter(mm, MM_SHMEMPAGES)),
                 from_kuid(&init_user_ns, task_uid(victim)),
-                mm_pgtables_bytes(mm), victim->signal->oom_score_adj);
+                mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
         task_unlock(victim);

         /*
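The Killed-process message labels this field in kB, but mm_pgtables_bytes() returns bytes, so the printed value was inflated 1024x; the >> 10 converts bytes to kB. For example, 4 MiB of page tables is 4194304 bytes and should print as pgtables:4096kB, not pgtables:4194304kB.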
@@ -2069,6 +2069,11 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
                 zs_pool_dec_isolated(pool);
         }

+        if (page_zone(newpage) != page_zone(page)) {
+                dec_zone_page_state(page, NR_ZSPAGES);
+                inc_zone_page_state(newpage, NR_ZSPAGES);
+        }
+
         reset_page(page);
         put_page(page);
         page = newpage;