// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/pgtable.h>

#include <asm/pgalloc.h>
#include <asm/kup.h>
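
/*
 * memstart_addr is the physical address of the start of system RAM and
 * kernstart_addr the physical address the kernel actually runs at;
 * kernstart_virt_addr is its virtual counterpart, KERNELBASE unless the
 * kernel has been relocated.  The ~0 initializer is presumably a
 * "not set yet" sentinel for early boot code.
 */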

phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr __ro_after_init;
EXPORT_SYMBOL_GPL(kernstart_addr);

unsigned long kernstart_virt_addr __ro_after_init = KERNELBASE;
EXPORT_SYMBOL_GPL(kernstart_virt_addr);

static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
static bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);

static int __init parse_nosmep(char *p)
{
	disable_kuep = true;
	pr_warn("Disabling Kernel Userspace Execution Prevention\n");
	return 0;
}
early_param("nosmep", parse_nosmep);

static int __init parse_nosmap(char *p)
{
	disable_kuap = true;
	pr_warn("Disabling Kernel Userspace Access Protection\n");
	return 0;
}
early_param("nosmap", parse_nosmap);
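
/*
 * Neither parameter takes an argument: booting with "nosmep" or
 * "nosmap" on the kernel command line forces the corresponding
 * protection off, even on kernels built with CONFIG_PPC_KUEP /
 * CONFIG_PPC_KUAP.
 */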

void __ref setup_kup(void)
{
	setup_kuep(disable_kuep);
	setup_kuap(disable_kuap);
}
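
/*
 * A kmem_cache constructor receives only the object address (a bare
 * void *), so the table size cannot be passed in at call time.  One
 * constructor is therefore generated per possible index size, and
 * ctor() below maps a shift to the matching function.
 */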

#define CTOR(shift) static void ctor_##shift(void *addr) \
{ \
	memset(addr, 0, sizeof(void *) << (shift)); \
}

CTOR(0); CTOR(1); CTOR(2); CTOR(3); CTOR(4); CTOR(5); CTOR(6); CTOR(7);
CTOR(8); CTOR(9); CTOR(10); CTOR(11); CTOR(12); CTOR(13); CTOR(14); CTOR(15);
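
/*
 * For instance, CTOR(4) expands to
 *
 *	static void ctor_4(void *addr)
 *	{
 *		memset(addr, 0, sizeof(void *) << (4));
 *	}
 *
 * i.e. a constructor that zeroes a table of 2^4 pointers.
 */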

static inline void (*ctor(int shift))(void *)
{
	BUILD_BUG_ON(MAX_PGTABLE_INDEX_SIZE != 15);

	switch (shift) {
	case 0: return ctor_0;
	case 1: return ctor_1;
	case 2: return ctor_2;
	case 3: return ctor_3;
	case 4: return ctor_4;
	case 5: return ctor_5;
	case 6: return ctor_6;
	case 7: return ctor_7;
	case 8: return ctor_8;
	case 9: return ctor_9;
	case 10: return ctor_10;
	case 11: return ctor_11;
	case 12: return ctor_12;
	case 13: return ctor_13;
	case 14: return ctor_14;
	case 15: return ctor_15;
	}
	return NULL;
}
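
/*
 * The NULL fallback is unreachable in practice: pgtable_cache_add()
 * checks shift against MAX_PGTABLE_INDEX_SIZE (via BUG_ON) before it
 * ever calls ctor().
 */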

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1];
EXPORT_SYMBOL_GPL(pgtable_cache);	/* used by kvm_hv module */

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned int shift)
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
	if (!new)
		panic("Could not allocate pgtable cache for order %d", shift);

	kfree(name);
	pgtable_cache[shift] = new;

	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
EXPORT_SYMBOL_GPL(pgtable_cache_add);	/* used by kvm_hv module */
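
/*
 * Repeated index sizes are harmless below: pgtable_cache_add() returns
 * early when a cache of the requested size already exists, so e.g. a
 * PMD_CACHE_INDEX equal to PGD_INDEX_SIZE simply reuses the PGD cache.
 */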

void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE);

	if (PMD_CACHE_INDEX)
		pgtable_cache_add(PMD_CACHE_INDEX);
	/*
	 * In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index except with THP enabled
	 * on book3s 64
	 */
	if (PUD_CACHE_INDEX)
		pgtable_cache_add(PUD_CACHE_INDEX);
}
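
/*
 * Minimal usage sketch (illustrative; the real callers live elsewhere):
 * higher-level allocators fetch tables from these caches through the
 * PGT_CACHE() lookup, roughly
 *
 *	pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
 *				      GFP_KERNEL);
 *	...
 *	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 *
 * kmem_cache_alloc()/kmem_cache_free() are the real slab API; the
 * surrounding code above is an assumption, not copied from a caller.
 */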