- Handle the case where the beginning virtual address of the address
  range whose SEV encryption status needs to change is not page
  aligned, because otherwise callers which round up the number of pages
  to be decrypted would mark a trailing page as decrypted and thus
  cause corruption during live migration.

- Return an error from the #VC handler on AMD SEV-* guests when debug
  register swapping (DebugSwap) is enabled, as a DR7 access should not
  happen then - that register is guest/host switched.
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmTsje4ACgkQEsHwGGHe
VUrKWxAAqiTpQSjJCB32ReioSsLv3kl7vtLO3xtE42VpF0F7pAAPzRsh+bgDjGSM
uqcEgbX1YtPlb8wK6yh5dyNLLvtzxhaAQkUfbfuEN2oqbvIEcJmhWAm/xw1yCsh2
GDphFPtvqgT4KUCkEHj8tC9eQzG+L0bwymPzqXooVDnm4rL0ulEl6ONffhHfJFVg
bmL8UjmJNodFcO6YBfosQIDDfc4ayuwm9f/rGltNFl+jwCi62kMJaVdU1112agsV
LE73DRoRpfHKLslj9o9ubRcvaHKS24y2Amflnj1tas0h8I2uXBRwIgxjQXl5vtXV
pu5/5VHM9X13x8XKpKVkEohXkBzFRigs8yfHq+JlpyWXXB/ymW8Acbqqnvll12r4
JSy+XfBNa6V5Y/NDS/1faJiX6XSi5ZyZHZG70sf52XVoBYhzoms5kxqTJnHHisnY
X50677/tQF3V9WsmKD0aj0Um2ztiq0/TNMI7FT3lzYRDNJb1ln3ZK9f04i8L5jA4
bsrSV5oCVpLkW4eQaAJwxttTB+dRb5MwwkeS7D/eTuJ1pgUmJMIbZp2YbJH7NP2F
6FShQdwHi8KYN7mxUM+WwOk7goaBm5L61w5UtRlt6aDE7LdEbMAeSSdmD3HlEZHR
ntBqcEx4SkAT+Ru0izVXjsoWmtkn8+DY44oUC2X6eZxUSAT4Cm4=
=td9F
-----END PGP SIGNATURE-----

Merge tag 'x86_sev_for_v6.6_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 SEV updates from Borislav Petkov:

 - Handle the case where the beginning virtual address of the address
   range whose SEV encryption status needs to change is not page
   aligned, because otherwise callers which round up the number of
   pages to be decrypted would mark a trailing page as decrypted and
   thus cause corruption during live migration.

 - Return an error from the #VC handler on AMD SEV-* guests when debug
   register swapping (DebugSwap) is enabled, as a DR7 access should not
   happen then - that register is guest/host switched.

* tag 'x86_sev_for_v6.6_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sev: Make enc_dec_hypercall() accept a size instead of npages
  x86/sev: Do not handle #VC for DR7 read/write
commit f31f663fa9
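The first item above can be seen with a minimal user-space sketch, assuming a
4 KiB page size and modeling the range walk with 4 KiB mappings only (the
macros, example addresses and the walk() helper are made up for illustration
and are not kernel code): when the start address is not page aligned, rounding
the byte length up to whole pages makes the walk touch one extra, trailing page.

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Walk [vaddr, vaddr_end) one 4 KiB page at a time and report each page. */
static void walk(const char *tag, unsigned long vaddr, unsigned long vaddr_end)
{
        while (vaddr < vaddr_end) {
                printf("%s: marks page at %#lx\n", tag, vaddr & PAGE_MASK);
                vaddr = (vaddr & PAGE_MASK) + PAGE_SIZE;
        }
}

int main(void)
{
        unsigned long vaddr = 0x100800; /* start is not page aligned */
        unsigned long size  = 0x800;    /* range ends inside the same page */

        /* Old interface: the caller rounds the length up to whole pages. */
        walk("old, rounded-up npages", vaddr, vaddr + PAGE_ALIGN(size));

        /* New interface: the walk stops at the real end of the range. */
        walk("new, exact byte size  ", vaddr, vaddr + size);

        return 0;
}

The old variant also reports the page at 0x101000 even though no byte of
[vaddr, vaddr + size) lives there; under SEV that trailing page would be
reported to the host as decrypted and could then be corrupted during live
migration.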
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -365,7 +365,7 @@ static void enforce_vmpl0(void)
  * by the guest kernel. As and when a new feature is implemented in the
  * guest kernel, a corresponding bit should be added to the mask.
  */
-#define SNP_FEATURES_PRESENT (0)
+#define SNP_FEATURES_PRESENT MSR_AMD64_SNP_DEBUG_SWAP
 
 u64 snp_get_unsupported_features(u64 status)
 {
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -50,8 +50,8 @@ void __init sme_enable(struct boot_params *bp);
 
 int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
 int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
-                                            bool enc);
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr,
+                                            unsigned long size, bool enc);
 
 void __init mem_encrypt_free_decrypted_mem(void);
 
@@ -85,7 +85,7 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
 static inline int __init
 early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
 static inline void __init
-early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}
+early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc) {}
 
 static inline void mem_encrypt_free_decrypted_mem(void) { }
 
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -966,10 +966,8 @@ static void __init kvm_init_platform(void)
                 * Ensure that _bss_decrypted section is marked as decrypted in the
                 * shared pages list.
                 */
-               nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
-                                       PAGE_SIZE);
                early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
-                                               nr_pages, 0);
+                                               __end_bss_decrypted - __start_bss_decrypted, 0);
 
                /*
                 * If not booted using EFI, enable Live migration support.
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -1575,6 +1575,9 @@ static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
        long val, *reg = vc_insn_get_rm(ctxt);
        enum es_result ret;
 
+       if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
+               return ES_VMM_ERROR;
+
        if (!reg)
                return ES_DECODE_FAILED;
 
@@ -1612,6 +1615,9 @@ static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
        struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
        long *reg = vc_insn_get_rm(ctxt);
 
+       if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
+               return ES_VMM_ERROR;
+
        if (!reg)
                return ES_DECODE_FAILED;
 
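To make the intent of the two vc_handle_dr7_*() hunks above concrete, here is
a simplified stand-alone model (illustration only, not the kernel code; the
struct and function names are invented). When DebugSwap is active the hardware
swaps DR7 on guest/host world switches, the guest accesses DR7 directly and no
#VC should ever be raised for it, so one that does arrive is reported as a
hypervisor error instead of being emulated via a software copy.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum es_result { ES_OK, ES_VMM_ERROR };

struct vc_state {
        bool debug_swap_active; /* hypothetical: DebugSwap reported active */
        uint64_t dr7_shadow;    /* software copy used when emulating DR7 */
};

static enum es_result model_dr7_write(struct vc_state *s, uint64_t val)
{
        /*
         * With DebugSwap, DR7 is guest/host switched by hardware; a #VC for
         * a DR7 access should not happen, so treat it as a VMM error.
         */
        if (s->debug_swap_active)
                return ES_VMM_ERROR;

        s->dr7_shadow = val;    /* otherwise emulate via a shadow copy */
        return ES_OK;
}

int main(void)
{
        struct vc_state on  = { .debug_swap_active = true };
        struct vc_state off = { .debug_swap_active = false };

        printf("DebugSwap on : %s\n",
               model_dr7_write(&on, 0x401) == ES_VMM_ERROR ? "ES_VMM_ERROR" : "ES_OK");
        printf("DebugSwap off: %s\n",
               model_dr7_write(&off, 0x401) == ES_VMM_ERROR ? "ES_VMM_ERROR" : "ES_OK");
        return 0;
}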
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -288,11 +288,10 @@ static bool amd_enc_cache_flush_required(void)
        return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
 }
 
-static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
 {
 #ifdef CONFIG_PARAVIRT
-       unsigned long sz = npages << PAGE_SHIFT;
-       unsigned long vaddr_end = vaddr + sz;
+       unsigned long vaddr_end = vaddr + size;
 
        while (vaddr < vaddr_end) {
                int psize, pmask, level;
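The rest of enc_dec_hypercall() in arch/x86/mm/mem_encrypt_amd.c (trimmed from
the hunk above) walks [vaddr, vaddr + size) and advances by whatever page size
backs each mapping, which is why a byte size is the natural parameter. A rough
user-space model of that walk, with an invented backing_page_size() standing in
for the kernel's page-table lookup and printf() standing in for the
guest-to-host notification:

#include <stdio.h>

#define SZ_4K   (1UL << 12)
#define SZ_2M   (1UL << 21)

/* Invented: pretend 2 MiB-aligned addresses are backed by 2 MiB mappings. */
static unsigned long backing_page_size(unsigned long vaddr)
{
        return (vaddr & (SZ_2M - 1)) == 0 ? SZ_2M : SZ_4K;
}

static void enc_dec_walk(unsigned long vaddr, unsigned long size, int enc)
{
        unsigned long vaddr_end = vaddr + size;

        while (vaddr < vaddr_end) {
                unsigned long psize = backing_page_size(vaddr);
                unsigned long pmask = ~(psize - 1);

                /* Notify the whole mapping that contains vaddr. */
                printf("notify %#lx..%#lx enc=%d\n",
                       vaddr & pmask, (vaddr & pmask) + psize, enc);

                /* Step to the start of the next mapping. */
                vaddr = (vaddr & pmask) + psize;
        }
}

int main(void)
{
        /* A range starting in 4 KiB pages and crossing into a 2 MiB mapping. */
        enc_dec_walk(0x3ff800, 0x201000, 0);
        return 0;
}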
@@ -342,7 +341,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
                snp_set_memory_private(vaddr, npages);
 
        if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
-               enc_dec_hypercall(vaddr, npages, enc);
+               enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
 
        return true;
 }
@@ -466,7 +465,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 
        ret = 0;
 
-       early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
+       early_set_mem_enc_dec_hypercall(start, size, enc);
 out:
        __flush_tlb_all();
        return ret;
@@ -482,9 +481,9 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
        return early_set_memory_enc_dec(vaddr, size, true);
 }
 
-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
 {
-       enc_dec_hypercall(vaddr, npages, enc);
+       enc_dec_hypercall(vaddr, size, enc);
 }
 
 void __init sme_early_init(void)