Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton: "146 patches. Subsystems affected by this patch series: kthread, ia64, scripts, ntfs, squashfs, ocfs2, vfs, and mm (slab-generic, slab, kmemleak, dax, kasan, debug, pagecache, gup, shmem, frontswap, memremap, memcg, selftests, pagemap, dma, vmalloc, memory-failure, hugetlb, userfaultfd, vmscan, mempolicy, oom-kill, hugetlbfs, migration, thp, ksm, page-poison, percpu, rmap, zswap, zram, cleanups, hmm, and damon)" * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (146 commits) mm/damon: hide kernel pointer from tracepoint event mm/damon/vaddr: hide kernel pointer from damon_va_three_regions() failure log mm/damon/vaddr: use pr_debug() for damon_va_three_regions() failure logging mm/damon/dbgfs: remove an unnecessary variable mm/damon: move the implementation of damon_insert_region to damon.h mm/damon: add access checking for hugetlb pages Docs/admin-guide/mm/damon/usage: update for schemes statistics mm/damon/dbgfs: support all DAMOS stats Docs/admin-guide/mm/damon/reclaim: document statistics parameters mm/damon/reclaim: provide reclamation statistics mm/damon/schemes: account how many times quota limit has exceeded mm/damon/schemes: account scheme actions that successfully applied mm/damon: remove a mistakenly added comment for a future feature Docs/admin-guide/mm/damon/usage: update for kdamond_pid and (mk|rm)_contexts Docs/admin-guide/mm/damon/usage: mention tracepoint at the beginning Docs/admin-guide/mm/damon/usage: remove redundant information Docs/admin-guide/mm/damon/usage: update for scheme quotas and watermarks mm/damon: convert macro functions to static inline functions mm/damon: modify damon_rand() macro to static inline function mm/damon: move damon_rand() definition into damon.h ...
This commit is contained in:
@@ -84,7 +84,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
 static bool cgroup_memory_nosocket __ro_after_init;
 
 /* Kernel memory accounting disabled? */
-bool cgroup_memory_nokmem __ro_after_init;
+static bool cgroup_memory_nokmem __ro_after_init;
 
 /* Whether the swap controller is active */
 #ifdef CONFIG_MEMCG_SWAP
@@ -629,11 +629,17 @@ static DEFINE_SPINLOCK(stats_flush_lock);
 static DEFINE_PER_CPU(unsigned int, stats_updates);
 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
 
-static inline void memcg_rstat_updated(struct mem_cgroup *memcg)
+static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 {
+	unsigned int x;
+
 	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
-	if (!(__this_cpu_inc_return(stats_updates) % MEMCG_CHARGE_BATCH))
-		atomic_inc(&stats_flush_threshold);
+
+	x = __this_cpu_add_return(stats_updates, abs(val));
+	if (x > MEMCG_CHARGE_BATCH) {
+		atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
+		__this_cpu_write(stats_updates, 0);
+	}
 }
 
 static void __mem_cgroup_flush_stats(void)
@@ -656,7 +662,7 @@ void mem_cgroup_flush_stats(void)
 
 static void flush_memcg_stats_dwork(struct work_struct *w)
 {
-	mem_cgroup_flush_stats();
+	__mem_cgroup_flush_stats();
 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
 }
 
@@ -672,7 +678,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 		return;
 
 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
-	memcg_rstat_updated(memcg);
+	memcg_rstat_updated(memcg, val);
 }
 
 /* idx can be of type enum memcg_stat_item or node_stat_item. */
@@ -705,7 +711,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 	/* Update lruvec */
 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
 
-	memcg_rstat_updated(memcg);
+	memcg_rstat_updated(memcg, val);
 }
 
 /**
@@ -789,7 +795,7 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 		return;
 
 	__this_cpu_add(memcg->vmstats_percpu->events[idx], count);
-	memcg_rstat_updated(memcg);
+	memcg_rstat_updated(memcg, count);
 }
 
 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
@@ -1369,6 +1375,7 @@ static const struct memory_stat memory_stats[] = {
 	{ "pagetables",			NR_PAGETABLE },
 	{ "percpu",			MEMCG_PERCPU_B },
 	{ "sock",			MEMCG_SOCK },
+	{ "vmalloc",			MEMCG_VMALLOC },
 	{ "shmem",			NR_SHMEM },
 	{ "file_mapped",		NR_FILE_MAPPED },
 	{ "file_dirty",			NR_FILE_DIRTY },
@@ -4850,6 +4857,17 @@ out_kfree:
 	return ret;
 }
 
+#if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
+static int mem_cgroup_slab_show(struct seq_file *m, void *p)
+{
+	/*
+	 * Deprecated.
+	 * Please, take a look at tools/cgroup/slabinfo.py .
+	 */
+	return 0;
+}
+#endif
+
 static struct cftype mem_cgroup_legacy_files[] = {
 	{
 		.name = "usage_in_bytes",
@@ -4950,7 +4968,7 @@ static struct cftype mem_cgroup_legacy_files[] = {
 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
 	{
 		.name = "kmem.slabinfo",
-		.seq_show = memcg_slab_show,
+		.seq_show = mem_cgroup_slab_show,
 	},
 #endif
 	{
@@ -5110,15 +5128,11 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 	struct mem_cgroup *memcg;
-	unsigned int size;
 	int node;
 	int __maybe_unused i;
 	long error = -ENOMEM;
 
-	size = sizeof(struct mem_cgroup);
-	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
-
-	memcg = kzalloc(size, GFP_KERNEL);
+	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
 	if (!memcg)
 		return ERR_PTR(error);
 
@@ -6312,6 +6326,8 @@ static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
 	seq_printf(m, "oom_kill %lu\n",
 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
+	seq_printf(m, "oom_group_kill %lu\n",
+		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
 }
 
 static int memory_events_show(struct seq_file *m, void *v)
Reference in New Issue
Block a user