mm: memcg: put memcg1-specific struct mem_cgroup's members under CONFIG_MEMCG_V1

Put memcg1-specific members of struct mem_cgroup under the CONFIG_MEMCG_V1
config option.  Also group them close to the end of struct mem_cgroup just
before the dynamic per-node part.

Link: https://lkml.kernel.org/r/20240628210317.272856-7-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Roman Gushchin 2024-06-28 21:03:14 +00:00 committed by Andrew Morton
parent 05dfec123d
commit 94b7e5bf09

View File

@@ -188,10 +188,6 @@ struct mem_cgroup {
struct page_counter memsw; /* v1 only */
};
/* Legacy consumer-oriented counters */
struct page_counter kmem; /* v1 only */
struct page_counter tcpmem; /* v1 only */
/* Range enforcement for interrupt charges */
struct work_struct high_work;
@@ -205,8 +201,6 @@ struct mem_cgroup {
bool zswap_writeback;
#endif
unsigned long soft_limit;
/* vmpressure notifications */
struct vmpressure vmpressure;
@@ -215,13 +209,7 @@ struct mem_cgroup {
*/
bool oom_group;
/* protected by memcg_oom_lock */
bool oom_lock;
int under_oom;
int swappiness;
/* OOM-Killer disable */
int oom_kill_disable;
int swappiness;
/* memory.events and memory.events.local */
struct cgroup_file events_file;
@@ -230,6 +218,66 @@ struct mem_cgroup {
/* handle for "memory.swap.events" */
struct cgroup_file swap_events_file;
CACHELINE_PADDING(_pad1_);
/* memory.stat */
struct memcg_vmstats *vmstats;
/* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
/*
* Hint of reclaim pressure for socket memory management. Note
* that this indicator should NOT be used in legacy cgroup mode
* where socket memory is accounted/charged separately.
*/
unsigned long socket_pressure;
#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
/*
* memcg->objcg is wiped out as a part of the objcg reparenting
* process. memcg->orig_objcg preserves a pointer (and a reference)
* to the original objcg until the end of life of memcg.
*/
struct obj_cgroup __rcu *objcg;
struct obj_cgroup *orig_objcg;
/* list of inherited objcgs, protected by objcg_lock */
struct list_head objcg_list;
#endif
struct memcg_vmstats_percpu __percpu *vmstats_percpu;
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split deferred_split_queue;
#endif
#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* per-memcg mm_struct list */
struct lru_gen_mm_list mm_list;
#endif
#ifdef CONFIG_MEMCG_V1
/* Legacy consumer-oriented counters */
struct page_counter kmem; /* v1 only */
struct page_counter tcpmem; /* v1 only */
unsigned long soft_limit;
/* protected by memcg_oom_lock */
bool oom_lock;
int under_oom;
/* OOM-Killer disable */
int oom_kill_disable;
/* protect arrays of thresholds */
struct mutex thresholds_lock;
@@ -248,70 +296,25 @@ struct mem_cgroup {
*/
unsigned long move_charge_at_immigrate;
/* taken only while moving_account > 0 */
spinlock_t move_lock;
unsigned long move_lock_flags;
CACHELINE_PADDING(_pad1_);
/* memory.stat */
struct memcg_vmstats *vmstats;
/* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
/*
* Hint of reclaim pressure for socket memory management. Note
* that this indicator should NOT be used in legacy cgroup mode
* where socket memory is accounted/charged separately.
*/
unsigned long socket_pressure;
spinlock_t move_lock;
unsigned long move_lock_flags;
/* Legacy tcp memory accounting */
bool tcpmem_active;
int tcpmem_pressure;
#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
/*
* memcg->objcg is wiped out as a part of the objcg reparenting
* process. memcg->orig_objcg preserves a pointer (and a reference)
* to the original objcg until the end of life of memcg.
*/
struct obj_cgroup __rcu *objcg;
struct obj_cgroup *orig_objcg;
/* list of inherited objcgs, protected by objcg_lock */
struct list_head objcg_list;
#endif
bool tcpmem_active;
int tcpmem_pressure;
CACHELINE_PADDING(_pad2_);
/*
* set > 0 if pages under this cgroup are moving to other cgroup.
*/
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu __percpu *vmstats_percpu;
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif
atomic_t moving_account;
struct task_struct *move_lock_task;
/* List of events which userspace want to receive */
struct list_head event_list;
spinlock_t event_list_lock;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split deferred_split_queue;
#endif
#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* per-memcg mm_struct list */
struct lru_gen_mm_list mm_list;
#endif
#endif /* CONFIG_MEMCG_V1 */
struct mem_cgroup_per_node *nodeinfo[];
};