// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
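/*
 * Illustrative output on a hypothetical two-node NUMA machine (zone
 * names and ordering depend on the config):
 *
 *	mminit::zonelist general 0:Normal = 0:Normal 0:DMA32 0:DMA 1:Normal
 *	mminit::zonelist thisnode 0:Normal = 0:Normal 0:DMA32 0:DMA
 */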
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;
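
		/* The "thisnode"/"general" labels below assume at most two zonelists. */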
		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}
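
/*
 * A sketch of how page->flags is packed, from the top of the word down
 * (field widths are config-dependent and any of them may be zero):
 *
 *	| SECTION | NODE | ZONE | LAST_CPUPID | KASAN_TAG | ... | FLAGS |
 *
 * The checks below report each field's width and shift, then verify
 * that the shifts line up and that no field masks overlap.
 */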
void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);

#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/*
	 * Check for bitmask overlaps: if the fields are disjoint, OR-ing
	 * and adding their masks give the same result; an overlapping bit
	 * would carry under addition and make the two values differ.
	 */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
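
/*
 * Usage: pass e.g. "mminit_loglevel=2" on the kernel command line to
 * raise verbosity (MMINIT_WARNING=0, MMINIT_VERIFY=1, MMINIT_TRACE=2).
 */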

#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;
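/*
 * vm_committed_as_batch bounds how far each CPU's share of the
 * vm_committed_as percpu_counter may drift before it is folded into
 * the global count; mm_compute_batch() scales it with memory and CPUs.
 */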

void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr * 2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory / #cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX.
	 */
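	/*
	 * Worked example (illustrative numbers): 16 GiB of RAM in 4 KiB
	 * pages is 4194304 pages; with 8 CPUs, OVERCOMMIT_NEVER gives
	 * 4194304 / 8 / 256 = 2048, other policies 4194304 / 8 / 4 =
	 * 131072, both well above the max_t(s32, nr * 2, 32) floor.
	 */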
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages / nr / 256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages / nr / 4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}
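
/*
 * The batch scales with totalram_pages(), so recompute it whenever
 * memory is hot-added or removed.
 */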
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}
__initcall(mm_compute_batch_init);
#endif
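
/*
 * Create /sys/kernel/mm, the parent kobject that mm subsystems (e.g.
 * ksm, hugepages) hang their sysfs entries off.
 */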
static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);