// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
*/
#include <linux/sched.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different
*/
int __meminit vmemmap_create_mapping(unsigned long start,
				     unsigned long page_size,
				     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags)));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_remove_mapping(unsigned long start,
			    unsigned long page_size)
{
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
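
/*
 * Allocate page-table memory directly from memblock, for use before
 * the slab allocator is available.
 */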
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr;

	ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
				     __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
		      __func__, size, size, __pa(MAX_DMA_ADDRESS));

	return ptr;
}
/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
*/
int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
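
	/* Sanity check: the user address space must fit within the range covered by the page tables */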
	BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
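
	/*
	 * Once the slab allocator is up, the generic page table allocation
	 * helpers can be used; before that, page tables are carved out of
	 * memblock via early_alloc_pgtable() below.
	 */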
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		pudp = pud_alloc(&init_mm, p4dp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
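		/* Walk each level by hand, allocating any missing table from memblock */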
		p4dp = p4d_offset(pgdp, ea);
		if (p4d_none(*p4dp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			p4d_populate(&init_mm, p4dp, pmdp);
		}
		pudp = pud_offset(p4dp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}
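
	/* Install the PTE for this EA now that all intermediate levels exist */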
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
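
	/* Order the PTE update before any subsequent use of the new mapping */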
	smp_wmb();
	return 0;
}