// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2019
*/
#include <linux/pgtable.h>
#include <asm/mem_detect.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include "compressed/decompressor.h"
# include "boot.h"

#define PRNG_MODE_TDES	 1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG	 3
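
/* parameter block for the SHA-512 based deterministic RNG (used via cpacf_prno) */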
struct prno_parm {
	u32 res;
	u32 reseed_counter;
	u64 stream_bytes;
	u8  V[112];
	u8  C[112];
};
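
/* parameter block for the TDES based pseudo RNG (used via cpacf_kmc) */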
struct prng_parm {
	u8  parm_block[32];
	u32 reseed_counter;
	u64 byte_counter;
};
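
/*
 * Check which random-number facilities the CPU provides and pick the
 * strongest: the true RNG if available, otherwise the SHA-512 based
 * deterministic RNG, otherwise the TDES based pseudo RNG. Returns 0
 * (and prints a message) if no PRNG facility exists at all.
 */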
static int check_prng(void)
{
	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
		sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
		return 0;
	}
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
		return PRNG_MODE_TRNG;
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
		return PRNG_MODE_SHA512;
	else
		return PRNG_MODE_TDES;
}
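
/*
 * Produce a random value in the range [0, limit) using whichever facility
 * check_prng() selected. On success, store the result in *value and return
 * 0; return -1 if no random-number facility is available.
 */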
static int get_random(unsigned long limit, unsigned long *value)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	seed = get_tod_clock_fast();
	switch (mode) {
	case PRNG_MODE_TRNG:
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
			   (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
			   sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/* add entropy */
		*(unsigned long *) prng.parm_block ^= seed;
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
				  (u8 *) entropy, (u8 *) entropy,
				  sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
			  (u8 *) &random, sizeof(random));
		break;
	default:
		return -1;
	}
	*value = random % limit;
	return 0;
}

/*
 * To randomize the kernel base address we have to consider several facts:
 * 1. physical online memory might not be continuous and might have holes.
 *    mem_detect info contains the list of online memory ranges we should
 *    consider.
 * 2. several memory regions are occupied and we must not overlap or destroy
 *    them. Currently safe_addr tells us the border below which all those
 *    occupied regions lie. We are safe to use anything above safe_addr.
 * 3. an upper limit might apply as well, even if memory above that limit is
 *    online. Currently those limitations are:
 *    3.1. the limit set by the "mem=" kernel command line option
 *    3.2. memory reserved at the end for kasan initialization.
 * 4. the kernel base address must be aligned to THREAD_SIZE (the kernel
 *    stack size), which is required for CONFIG_CHECK_STACK. Currently
 *    THREAD_SIZE is 4 pages (16 pages when the kernel is built with kasan
 *    enabled).
 * Assumptions:
 * 1. the kernel size (including .bss size) and the upper memory limit are
 *    page aligned.
 * 2. mem_detect memory region start is THREAD_SIZE aligned / end is
 *    PAGE_SIZE aligned (in practice the memory configuration granularity
 *    on z/VM and LPAR is 1 MB).
 *
 * To guarantee a uniform distribution of the kernel base address among all
 * suitable addresses we generate the random value just once. For that we
 * need to build a continuous range in which every value would be suitable.
 * We can build this range by simply counting all suitable addresses (let's
 * call them positions) which would be valid as the kernel base address. To
 * count positions we iterate over the online memory ranges. For each range
 * which is big enough for the kernel image we count all suitable addresses
 * at which we can put the kernel image, that is
 *	(end - start - kernel_size) / THREAD_SIZE + 1
 * The two functions count_valid_kernel_positions and position_to_address
 * help to count the positions in a given memory range and to convert a
 * position back to an address.
 */
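/*
 * As a worked example of the formula above (illustrative numbers only,
 * assuming THREAD_SIZE = 16 KB, i.e. 4 pages of 4 KB): for an online range
 * [0x10000000, 0x14000000) (64 MB) and kernel_size = 16 MB there are
 * (64 MB - 16 MB) / 16 KB + 1 = 3073 positions. Position 1 maps back to
 * 0x10000000 and position 3073 to 0x13000000, the last THREAD_SIZE aligned
 * address at which the image still fits below the end of the range.
 */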
static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
						  unsigned long _min,
						  unsigned long _max)
{
	unsigned long start, end, pos = 0;
	int i;

	for_each_mem_detect_block(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		start = max(_min, start);
		end = min(_max, end);
		if (end - start < kernel_size)
			continue;
		pos += (end - start - kernel_size) / THREAD_SIZE + 1;
	}

	return pos;
}
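
/*
 * The inverse of count_valid_kernel_positions: walk the same ranges in the
 * same order and convert the 1-based position pos back to an address.
 */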
static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
					 unsigned long _min, unsigned long _max)
{
	unsigned long start, end;
	int i;

	for_each_mem_detect_block(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		start = max(_min, start);
		end = min(_max, end);
		if (end - start < kernel_size)
			continue;
		if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
			return start + (pos - 1) * THREAD_SIZE;
		pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
	}

	return 0;
}
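
/*
 * Pick a random THREAD_SIZE aligned kernel base address between safe_addr
 * and the effective memory limit. Returns 0 when no randomized base can be
 * chosen.
 */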
unsigned long get_random_base(unsigned long safe_addr)
{
	unsigned long memory_limit = get_mem_detect_end();
	unsigned long base_pos, max_pos, kernel_size;
	unsigned long kasan_needs;

	if (memory_end_set)
		memory_limit = min(memory_limit, memory_end);

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
		if (safe_addr < INITRD_START + INITRD_SIZE)
			safe_addr = INITRD_START + INITRD_SIZE;
	}
	safe_addr = ALIGN(safe_addr, THREAD_SIZE);

	if (IS_ENABLED(CONFIG_KASAN)) {
		/*
		 * Estimate kasan memory requirements, which kasan will
		 * reserve at the very end of available physical memory. To
		 * estimate that, take into account that kasan needs 1/8 of
		 * available physical memory for shadow memory, plus page
		 * tables for the whole memory + shadow memory region
		 * (1 + 1/8). To keep the page table estimate simple, take
		 * double the combined size of the ptes.
		 */
		/* for shadow memory */
		kasan_needs = memory_limit / 8;
		/* for paging structures */
		kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
			       _PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
		memory_limit -= kasan_needs;
	}
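
	/*
	 * A rough illustration of the estimate above, assuming the usual s390
	 * values (PAGE_SIZE = 4 KB, _PAGE_ENTRIES = 256, _PAGE_TABLE_SIZE =
	 * 2 KB, so one page table maps 1 MB): with 4 GB online the shadow
	 * estimate is 512 MB and the page table estimate is
	 * (4.5 GB / 1 MB) * 2 KB * 2 = 18 MB, i.e. roughly 530 MB are carved
	 * off the end of the usable memory.
	 */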

	kernel_size = vmlinux.image_size + vmlinux.bss_size;
	if (safe_addr + kernel_size > memory_limit)
		return 0;

	max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
	if (!max_pos) {
sclp_early_printk ( " KASLR disabled: not enough memory \n " ) ;
return 0 ;
}
	/* we need a value in the range [1, max_pos] inclusive */
	if (get_random(max_pos, &base_pos))
		return 0;

	return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
}