// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
 *                    Institute of Computing Technology
 * Author:  Xiang Gao, gaoxiang@ict.ac.cn
 *          Huacai Chen, chenhc@lemote.com
 *          Xiaofu Meng, Shuangshuang Zhang
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <linux/irq.h>
#include <asm/bootinfo.h>
#include <asm/mc146818-time.h>
#include <asm/time.h>
#include <asm/wbflush.h>
#include <boot_param.h>
#include <loongson.h>
unsigned char __node_distances [ MAX_NUMNODES ] [ MAX_NUMNODES ] ;
2014-11-13 11:08:06 +00:00
EXPORT_SYMBOL ( __node_distances ) ;
2019-10-20 23:01:35 +08:00
struct pglist_data * __node_data [ MAX_NUMNODES ] ;
2014-06-26 11:41:28 +08:00
EXPORT_SYMBOL ( __node_data ) ;
2019-10-20 23:01:35 +08:00
cpumask_t __node_cpumask [ MAX_NUMNODES ] ;
EXPORT_SYMBOL ( __node_cpumask ) ;
2014-06-26 11:41:28 +08:00
static void cpu_node_probe ( void )
{
int i ;
nodes_clear ( node_possible_map ) ;
nodes_clear ( node_online_map ) ;
for ( i = 0 ; i < loongson_sysconf . nr_nodes ; i + + ) {
node_set_state ( num_online_nodes ( ) , N_POSSIBLE ) ;
node_set_online ( num_online_nodes ( ) ) ;
}
pr_info ( " NUMA: Discovered %d cpus on %d nodes \n " ,
loongson_sysconf . nr_cpus , num_online_nodes ( ) ) ;
}
/*
 * Distance between two nodes: LOCAL_DISTANCE for a node and itself,
 * 40 for distinct nodes in the same physical package, 100 otherwise.
 */
static int __init compute_node_distance(int row, int col)
{
	int pkg_row, pkg_col;

	if (row == col)
		return LOCAL_DISTANCE;

	/* Map each node index to the package it belongs to */
	pkg_row = row * loongson_sysconf.cores_per_node /
		  loongson_sysconf.cores_per_package;
	pkg_col = col * loongson_sysconf.cores_per_node /
		  loongson_sysconf.cores_per_package;

	return (pkg_row == pkg_col) ? 40 : 100;
}
/*
 * Build the full node-distance matrix. Entries for offline node pairs
 * are left at -1 (wraps to 255 in the unsigned char table).
 */
static void __init init_topology_matrix(void)
{
	int row, col;

	/* Mark every entry "unknown" first */
	for (row = 0; row < MAX_NUMNODES; row++)
		for (col = 0; col < MAX_NUMNODES; col++)
			__node_distances[row][col] = -1;

	/* Fill in real distances for each online node pair */
	for_each_online_node(row)
		for_each_online_node(col)
			__node_distances[row][col] =
				compute_node_distance(row, col);
}
/*
 * Bring one node's memory online: allocate its pg_data_t (preferably
 * from node-local memory), record the node's spanned pfn range, and on
 * node 0 reserve the kernel image, the RS780E GPU aperture and the
 * memory below the node's first usable pfn.
 */
static void __init node_mem_init(unsigned int node)
{
	struct pglist_data *nd;
	unsigned long addrspace_base;
	unsigned long start_pfn, end_pfn;
	unsigned long nd_pa;
	int actual_nid;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);

	addrspace_base = nid_to_addrbase(node);
	pr_info("Node%d's addrspace_offset is 0x%lx\n",
		node, addrspace_base);

	get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
	pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
		node, start_pfn, end_pfn);

	/* Allocate the node descriptor, trying node-local memory first */
	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, node);
	nd = __va(nd_pa);
	memset(nd, 0, sizeof(struct pglist_data));

	/* Report when the descriptor ended up on a different node */
	actual_nid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (actual_nid != node)
		pr_info("NODE_DATA(%d) on node %d\n", node, actual_nid);
	__node_data[node] = nd;

	NODE_DATA(node)->node_start_pfn = start_pfn;
	NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;

	if (node == 0) {
		/* pfn bounds of the kernel image */
		unsigned long kernel_start_pfn = PFN_DOWN(__pa_symbol(&_text));
		unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));

		/* used by finalize_initrd() */
		max_low_pfn = end_pfn;

		/* Keep the kernel text/data/bss out of the allocator */
		memblock_reserve(kernel_start_pfn << PAGE_SHIFT,
				 (kernel_end_pfn - kernel_start_pfn) << PAGE_SHIFT);

		/* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
		if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
			memblock_reserve((addrspace_base | 0xfe000000),
					 32 << 20);

		/* Reserve pfn range 0~node[0]->node_start_pfn */
		memblock_reserve(0, PAGE_SIZE * start_pfn);

		/* set nid for reserved memory on node 0 */
		memblock_set_node(0, 1ULL << 44, &memblock.reserved, 0);
	}
}
static __init void prom_meminit ( void )
{
2014-11-04 14:13:26 +08:00
unsigned int node , cpu , active_cpu = 0 ;
2014-06-26 11:41:28 +08:00
cpu_node_probe ( ) ;
init_topology_matrix ( ) ;
for ( node = 0 ; node < loongson_sysconf . nr_nodes ; node + + ) {
if ( node_online ( node ) ) {
szmem ( node ) ;
node_mem_init ( node ) ;
2019-10-20 23:01:35 +08:00
cpumask_clear ( & __node_cpumask [ node ] ) ;
2014-06-26 11:41:28 +08:00
}
}
2018-11-12 22:18:01 +00:00
max_low_pfn = PHYS_PFN ( memblock_end_of_DRAM ( ) ) ;
2014-06-26 11:41:28 +08:00
for ( cpu = 0 ; cpu < loongson_sysconf . nr_cpus ; cpu + + ) {
node = cpu / loongson_sysconf . cores_per_node ;
if ( node > = num_online_nodes ( ) )
node = 0 ;
2014-11-04 14:13:26 +08:00
if ( loongson_sysconf . reserved_cpus_mask & ( 1 < < cpu ) )
continue ;
2019-10-20 23:01:35 +08:00
cpumask_set_cpu ( active_cpu , & __node_cpumask [ node ] ) ;
2014-11-04 14:13:26 +08:00
pr_info ( " NUMA: set cpumask cpu %d on node %d \n " , active_cpu , node ) ;
active_cpu + + ;
2014-06-26 11:41:28 +08:00
}
}
/* Set up page tables and hand the per-zone pfn limits to the core MM. */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

	pagetable_init();

	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}
/* Release boot memory to the buddy allocator and set high_memory. */
void __init mem_init(void)
{
	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
	memblock_free_all();
	setup_zero_pages();	/* This comes from node 0 */
}
/* All PCI devices are attached to logical node 0 on this platform. */
int pcibus_to_node(struct pci_bus *bus)
{
	return 0;
}
EXPORT_SYMBOL(pcibus_to_node);
/* Entry point for NUMA memory init: dump CP0 state, then do the work. */
void __init prom_init_numa_memory(void)
{
	pr_info("CP0_Config3: CP0 16.3 (0x%x)\n", read_c0_config3());
	pr_info("CP0_PageGrain: CP0 5.1 (0x%x)\n", read_c0_pagegrain());
	prom_meminit();
}
2022-03-19 17:40:02 +08:00
pg_data_t * __init arch_alloc_nodedata ( int nid )
{
return memblock_alloc ( sizeof ( pg_data_t ) , SMP_CACHE_BYTES ) ;
}
/* Record the pg_data_t pointer for a (hot-added) node. */
void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	__node_data[nid] = pgdat;
}