mm: compaction: refactor compact_node()
Refactor compact_node() to handle both proactive and synchronous memory
compaction, which cleans up the code a bit.

Link: https://lkml.kernel.org/r/20240208013607.1731817-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 3e40b3f417
parent b9ad003af1
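For orientation before reading the diff, here is a minimal standalone sketch
of the refactor's shape, assuming only what the patch itself shows: a single
compact_node(), parameterized by a proactive flag, replaces the old
compact_node()/proactive_compact_node() pair. The enum, struct, and printf
body below are illustrative stand-ins, not the kernel definitions, and the
int nid parameter simplifies the real pg_data_t *pgdat argument.

#include <stdbool.h>
#include <stdio.h>

enum migrate_mode { MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

struct compact_control {
	int order;
	enum migrate_mode mode;
	bool ignore_skip_hint;
	bool whole_zone;
	bool proactive_compaction;
};

static void compact_node(int nid, bool proactive)
{
	/* The flag selects the only two behavioural differences between the
	 * old pair of functions: migration mode and event accounting. */
	struct compact_control cc = {
		.order = -1,
		.mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.whole_zone = true,
		.proactive_compaction = proactive,
	};

	printf("node %d: mode=%s, count kcompactd events=%s\n", nid,
	       cc.mode == MIGRATE_SYNC_LIGHT ? "SYNC_LIGHT" : "SYNC",
	       cc.proactive_compaction ? "yes" : "no");
}

int main(void)
{
	compact_node(0, false);	/* synchronous path: compact_nodes()/compact_store() */
	compact_node(0, true);	/* proactive path: kcompactd */
	return 0;
}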
@@ -2796,25 +2796,27 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 }
 
 /*
- * Compact all zones within a node till each zone's fragmentation score
- * reaches within proactive compaction thresholds (as determined by the
- * proactiveness tunable).
+ * compact_node() - compact all zones within a node
+ * @pgdat: The node page data
+ * @proactive: Whether the compaction is proactive
  *
- * It is possible that the function returns before reaching score targets
- * due to various back-off conditions, such as, contention on per-node or
- * per-zone locks.
+ * For proactive compaction, compact till each zone's fragmentation score
+ * reaches within proactive compaction thresholds (as determined by the
+ * proactiveness tunable), it is possible that the function returns before
+ * reaching score targets due to various back-off conditions, such as,
+ * contention on per-node or per-zone locks.
  */
-static void proactive_compact_node(pg_data_t *pgdat)
+static void compact_node(pg_data_t *pgdat, bool proactive)
 {
 	int zoneid;
 	struct zone *zone;
 	struct compact_control cc = {
 		.order = -1,
-		.mode = MIGRATE_SYNC_LIGHT,
+		.mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
 		.ignore_skip_hint = true,
 		.whole_zone = true,
 		.gfp_mask = GFP_KERNEL,
-		.proactive_compaction = true,
+		.proactive_compaction = proactive,
 	};
 
 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
@@ -2826,41 +2828,16 @@ static void proactive_compact_node(pg_data_t *pgdat)
 
 		compact_zone(&cc, NULL);
 
-		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
-				     cc.total_migrate_scanned);
-		count_compact_events(KCOMPACTD_FREE_SCANNED,
-				     cc.total_free_scanned);
+		if (proactive) {
+			count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
+					     cc.total_migrate_scanned);
+			count_compact_events(KCOMPACTD_FREE_SCANNED,
+					     cc.total_free_scanned);
+		}
 	}
 }
 
-/* Compact all zones within a node */
-static void compact_node(int nid)
-{
-	pg_data_t *pgdat = NODE_DATA(nid);
-	int zoneid;
-	struct zone *zone;
-	struct compact_control cc = {
-		.order = -1,
-		.mode = MIGRATE_SYNC,
-		.ignore_skip_hint = true,
-		.whole_zone = true,
-		.gfp_mask = GFP_KERNEL,
-	};
-
-
-	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
-
-		zone = &pgdat->node_zones[zoneid];
-		if (!populated_zone(zone))
-			continue;
-
-		cc.zone = zone;
-
-		compact_zone(&cc, NULL);
-	}
-}
-
-/* Compact all nodes in the system */
+/* Compact all zones of all nodes in the system */
 static void compact_nodes(void)
 {
 	int nid;
@@ -2869,7 +2846,7 @@ static void compact_nodes(void)
 	lru_add_drain_all();
 
 	for_each_online_node(nid)
-		compact_node(nid);
+		compact_node(NODE_DATA(nid), false);
 }
 
 static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
@@ -2931,7 +2908,7 @@ static ssize_t compact_store(struct device *dev,
 		/* Flush pending updates to the LRU lists */
 		lru_add_drain_all();
 
-		compact_node(nid);
+		compact_node(NODE_DATA(nid), false);
 	}
 
 	return count;
@@ -3140,7 +3117,7 @@ static int kcompactd(void *p)
 		unsigned int prev_score, score;
 
 		prev_score = fragmentation_score_node(pgdat);
-		proactive_compact_node(pgdat);
+		compact_node(pgdat, true);
 		score = fragmentation_score_node(pgdat);
 		/*
 		 * Defer proactive compaction if the fragmentation
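A brief call-site summary as a reading aid (not part of the patch): after this
change every entry point funnels into the one helper, and both old behaviours
are preserved exactly. The synchronous callers keep MIGRATE_SYNC and skip the
KCOMPACTD_* event accounting, while kcompactd keeps MIGRATE_SYNC_LIGHT and the
per-zone count_compact_events() calls.

/*
 * Caller mapping (sketch, derived from the hunks above):
 *   compact_nodes()            : compact_node(nid)             -> compact_node(NODE_DATA(nid), false)
 *   compact_store() [sysfs]    : compact_node(nid)             -> compact_node(NODE_DATA(nid), false)
 *   kcompactd() proactive pass : proactive_compact_node(pgdat) -> compact_node(pgdat, true)
 */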