2017-07-17 16:10:03 -05:00
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
# include <linux/linkage.h>
2017-07-17 16:10:05 -05:00
# include <linux/init.h>
2017-07-17 16:10:07 -05:00
# include <linux/mm.h>
2017-07-17 16:10:21 -05:00
# include <linux/dma-mapping.h>
# include <linux/swiotlb.h>
2017-07-17 16:10:03 -05:00
2017-07-17 16:10:10 -05:00
# include <asm/tlbflush.h>
# include <asm/fixmap.h>
2017-07-17 16:10:11 -05:00
# include <asm/setup.h>
# include <asm/bootparam.h>
2017-07-17 16:10:21 -05:00
# include <asm/set_memory.h>
2017-07-17 16:10:10 -05:00
2017-07-17 16:10:03 -05:00
/*
 * The SME encryption mask (0 when SME is inactive).  It is established
 * early in boot, before .bss is cleared, so it must live in .data or the
 * clearing would wipe it out.
 */
unsigned long sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);
2017-07-17 16:10:05 -05:00
2017-07-17 16:10:10 -05:00
/*
 * Intermediate bounce buffer for early in-place encryption.  Only the BSP
 * runs this early, so no locking is needed.
 */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
/*
 * Re-encode a physical range in place, early in boot.
 *
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory.  It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *source, *target;
	size_t chunk;

	if (!sme_me_mask)
		return;

	local_flush_tlb();
	wbinvd();

	/*
	 * Early mapping slots are scarce, so walk the region one
	 * bounce-buffer-sized chunk (at most a page) at a time.
	 */
	for (; size; paddr += chunk, size -= chunk) {
		chunk = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Map the chunk twice: a write-protected view in its
		 * current format (the source) and a writable view in
		 * the desired format (the target).
		 */
		if (enc) {
			source = early_memremap_decrypted_wp(paddr, chunk);
			target = early_memremap_encrypted(paddr, chunk);
		} else {
			source = early_memremap_encrypted_wp(paddr, chunk);
			target = early_memremap_decrypted(paddr, chunk);
		}

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!source || !target);

		/*
		 * Bounce the data through the intermediate buffer (a
		 * cache-line multiple in size) rather than copying
		 * directly, to avoid the corruption documented in the APM.
		 */
		memcpy(sme_early_buffer, source, chunk);
		memcpy(target, sme_early_buffer, chunk);

		early_memunmap(target, chunk);
		early_memunmap(source, chunk);
	}
}
/* Encrypt a physical range in place during early boot. */
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}
/* Decrypt a physical range in place during early boot. */
void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}
2017-07-17 16:10:11 -05:00
/*
 * Install (map == true) or tear down (map == false) early, unencrypted
 * PMD-sized mappings covering [vaddr, vaddr + size).  Always processes at
 * least one PMD.
 */
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long phys = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t clear_flags, entry;

	/* Use early_pmd_flags but with the encryption mask stripped off */
	clear_flags = __sme_clr(early_pmd_flags);

	for (;;) {
		/* A zero entry tears the mapping down */
		entry = 0;
		if (map)
			entry = (phys & PMD_MASK) + clear_flags;

		__early_make_pgtable((unsigned long)vaddr, entry);

		if (size <= PMD_SIZE)
			break;

		size -= PMD_SIZE;
		vaddr += PMD_SIZE;
		phys += PMD_SIZE;
	}

	__native_flush_tlb();
}
/*
 * Remove the early unencrypted mappings of the boot data (and its kernel
 * command line) once they are no longer needed.
 */
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data = (struct boot_params *)real_mode_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	/*
	 * Grab the command line address before the real_mode_data mapping
	 * goes away.
	 */
	cmdline_paddr = boot_data->hdr.cmd_line_ptr |
			((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (cmdline_paddr)
		__sme_early_map_unmap_mem(__va(cmdline_paddr),
					  COMMAND_LINE_SIZE, false);
}
/*
 * Create early unencrypted mappings of the boot data (and the kernel
 * command line it points to) so they can be read while SME is active.
 */
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data = (struct boot_params *)real_mode_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/*
	 * The command line address can only be read once real_mode_data is
	 * mapped.
	 */
	cmdline_paddr = boot_data->hdr.cmd_line_ptr |
			((u64)boot_data->ext_cmd_line_ptr << 32);

	if (cmdline_paddr)
		__sme_early_map_unmap_mem(__va(cmdline_paddr),
					  COMMAND_LINE_SIZE, true);
}
2017-07-17 16:10:07 -05:00
/*
 * Propagate the SME encryption mask into the early page-table flags, the
 * supported PTE mask and the protection map, so subsequent mappings are
 * created encrypted by default.
 */
void __init sme_early_init(void)
{
	unsigned int idx;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Fold the encryption mask into every protection_map entry */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++)
		protection_map[idx] = pgprot_encrypted(protection_map[idx]);
}
2017-07-17 16:10:21 -05:00
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	/* Nothing to do unless SME is active */
	if (sme_me_mask) {
		/* Have SWIOTLB re-map its DMA buffers as decrypted */
		swiotlb_update_mem_attributes();
	}
}
void swiotlb_set_mem_attributes ( void * vaddr , unsigned long size )
{
WARN ( PAGE_ALIGN ( size ) ! = size ,
" size is not page-aligned (%#lx) \n " , size ) ;
/* Make the SWIOTLB buffer area decrypted */
set_memory_decrypted ( ( unsigned long ) vaddr , size > > PAGE_SHIFT ) ;
}
2017-07-17 16:10:05 -05:00
/* Stub: in-place encryption of the kernel image is not implemented yet. */
void __init sme_encrypt_kernel(void)
{
}
/* Stub: SME feature detection/enablement is not implemented yet. */
void __init sme_enable(void)
{
}