thp: set recommended min free kbytes
If transparent hugepage is enabled, initialize min_free_kbytes to an optimal value by default. This moves the hugeadm algorithm into the kernel.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cd7548ab36
commit f000565adb
@@ -85,6 +85,47 @@ struct khugepaged_scan {
 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
+
+static int set_recommended_min_free_kbytes(void)
+{
+	struct zone *zone;
+	int nr_zones = 0;
+	unsigned long recommended_min;
+	extern int min_free_kbytes;
+
+	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
+		      &transparent_hugepage_flags) &&
+	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+		      &transparent_hugepage_flags))
+		return 0;
+
+	for_each_populated_zone(zone)
+		nr_zones++;
+
+	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
+	recommended_min = pageblock_nr_pages * nr_zones * 2;
+
+	/*
+	 * Make sure that on average at least two pageblocks are almost free
+	 * of another type, one for a migratetype to fall back to and a
+	 * second to avoid subsequent fallbacks of other types There are 3
+	 * MIGRATE_TYPES we care about.
+	 */
+	recommended_min += pageblock_nr_pages * nr_zones *
+		MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
+
+	/* don't ever allow to reserve more than 5% of the lowmem */
+	recommended_min = min(recommended_min,
+			      (unsigned long) nr_free_buffer_pages() / 20);
+	recommended_min <<= (PAGE_SHIFT-10);
+
+	if (recommended_min > min_free_kbytes)
+		min_free_kbytes = recommended_min;
+	setup_per_zone_wmarks();
+	return 0;
+}
+late_initcall(set_recommended_min_free_kbytes);
+
 static int start_khugepaged(void)
 {
 	int err = 0;
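To get a feel for the numbers, here is a small userspace sketch of the same arithmetic, using hypothetical values that are not taken from this patch: 4 KiB pages, 2 MiB pageblocks (pageblock_nr_pages = 512), three populated zones, MIGRATE_PCPTYPES = 3, and about 2 GiB of lowmem. Under those assumptions the recommendation comes out to roughly 66 MiB, well below the 5% lowmem cap.

/*
 * Standalone sketch of the recommendation formula with assumed values;
 * every constant below is a hypothetical example, not read from a system.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pageblock_nr_pages = 512;   /* 2 MiB / 4 KiB pages (assumed) */
	unsigned long nr_zones = 3;               /* e.g. DMA, DMA32, Normal (assumed) */
	unsigned long migrate_pcptypes = 3;       /* MIGRATE_PCPTYPES (assumed) */
	unsigned long lowmem_pages = 1UL << 19;   /* ~2 GiB of 4 KiB pages (assumed) */
	unsigned long page_shift = 12;            /* PAGE_SHIFT for 4 KiB pages */
	unsigned long recommended_min;

	/* two free hugepages per zone for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/* plus MIGRATE_PCPTYPES^2 nearly-free pageblocks per zone */
	recommended_min += pageblock_nr_pages * nr_zones *
			   migrate_pcptypes * migrate_pcptypes;

	/* never reserve more than 5% of lowmem */
	if (recommended_min > lowmem_pages / 20)
		recommended_min = lowmem_pages / 20;

	/* convert pages to kilobytes, as the kernel does with PAGE_SHIFT - 10 */
	recommended_min <<= page_shift - 10;

	printf("recommended min_free_kbytes: %lu\n", recommended_min);
	return 0;
}

With these assumptions the reserve works out to 16896 pages, i.e. 67584 kB, and the 5% cap does not kick in.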
@@ -108,6 +149,8 @@ static int start_khugepaged(void)
 		mutex_unlock(&khugepaged_mutex);
 		if (wakeup)
 			wake_up_interruptible(&khugepaged_wait);
+
+		set_recommended_min_free_kbytes();
 	} else
 		/* wakeup to exit */
 		wake_up_interruptible(&khugepaged_wait);
@@ -177,6 +220,13 @@ static ssize_t enabled_store(struct kobject *kobj,
 			ret = err;
 	}
 
+	if (ret > 0 &&
+	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
+		      &transparent_hugepage_flags) ||
+	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+		      &transparent_hugepage_flags)))
+		set_recommended_min_free_kbytes();
+
 	return ret;
 }
 static struct kobj_attribute enabled_attr =
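Since enabled_store() is the sysfs handler behind /sys/kernel/mm/transparent_hugepage/enabled, enabling THP at runtime now triggers the same recalculation. A minimal userspace sketch (an illustration, not part of the patch) for observing the effect by reading the THP mode and the current watermark before and after toggling it as root:

/*
 * Sketch: print the current THP mode and min_free_kbytes so the effect of
 * the new enabled_store() call site can be observed after writing "always"
 * or "madvise" to the THP enabled file.
 */
#include <stdio.h>

static void dump(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	dump("/sys/kernel/mm/transparent_hugepage/enabled");
	dump("/proc/sys/vm/min_free_kbytes");
	return 0;
}

Note that the recalculation only ever raises min_free_kbytes (the kernel assigns it only when the recommendation exceeds the current value); it never lowers an administrator-chosen setting.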
@@ -464,6 +514,8 @@ static int __init hugepage_init(void)
 
 	start_khugepaged();
 
+	set_recommended_min_free_kbytes();
+
 out:
 	return err;
 }