// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode		: 16;
					u32 isfast		: 1;
					u32 reserved1		: 14;
					u32 isnested		: 1;
					u32 countofelements	: 12;
					u32 reserved2		: 4;
					u32 repstartindex	: 12;
					u32 reserved3		: 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed : 12;
					u32 reserved2 : 20;
				};
				u64 asuint64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);
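
/*
 * Layout note: the hypercall overlay must exactly fill the 4 KiB GHCB
 * page: 509 * 8 bytes of hypercalldata + 8 bytes of outputgpa + 8 bytes
 * for the input/output control union + 8 reserved bytes = 4096.
 * hv_ghcb_msr_read() below enforces this with a BUILD_BUG_ON().
 */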

static u16 hv_ghcb_version __ro_after_init;

u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;

	if (!hv_ghcb_pg)
		return -EFAULT;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}

	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;

	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

	VMGEXIT();

	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));

	status = hv_ghcb->hypercall.hypercalloutput.callstatus;

	local_irq_restore(flags);

	return status;
}
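
/*
 * Illustrative usage (a sketch, not a caller in this file): VMBus code
 * posts messages through this path in an SEV-SNP Isolation VM. The
 * hypercall code and "msg" input below are examples only; a real caller
 * passes an HVCALL_* code and a matching input structure small enough
 * to fit in hypercalldata.
 *
 *	status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE, msg,
 *				   NULL, sizeof(*msg));
 *	if (!hv_result_success(status))
 *		pr_err("POST_MESSAGE failed: 0x%llx\n", status);
 */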

static inline u64 rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}

static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				      u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = hv_ghcb_version;
	ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	VMGEXIT();

	/* The low 32 bits of sw_exit_info_1 carry the hypervisor error code. */
	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
		return ES_VMM_ERROR;
	else
		return ES_OK;
}

void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from Hypervisor */
	wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

bool hv_ghcb_negotiate_protocol(void)
{
	u64 ghcb_gpa;
	u64 val;

	/* Save ghcb page gpa. */
	ghcb_gpa = rd_ghcb_msr();

	/* Do the GHCB protocol version negotiation */
	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
				GHCB_PROTOCOL_MAX);

	/* Write ghcb page back after negotiating protocol. */
	wr_ghcb_msr(ghcb_gpa);
	VMGEXIT();

	return true;
}
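
/*
 * Illustrative boot-time ordering (a sketch of how hyperv_init() is
 * expected to use the routines above): negotiate the GHCB protocol once
 * during init, and terminate the guest if no mutually supported version
 * exists.
 *
 *	if (!hv_ghcb_negotiate_protocol())
 *		hv_ghcb_terminate(SEV_TERM_SET_GEN,
 *				  GHCB_SEV_ES_PROT_UNSUPPORTED);
 */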

void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
		pr_warn("Failed to write msr via ghcb %llx.\n", msr);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);

void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	/* Check size of union hv_ghcb here. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
		pr_warn("Failed to read msr via ghcb %llx.\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
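
/*
 * Illustrative usage (hypothetical caller): in an SEV-SNP Isolation VM,
 * synthetic MSR accesses are routed through the GHCB so the hypervisor
 * can service them, instead of using raw rdmsr/wrmsr. For example,
 * unmasking a SynIC interrupt source might look like:
 *
 *	u64 val;
 *
 *	hv_ghcb_msr_read(HV_X64_MSR_SINT0, &val);
 *	val &= ~HV_SYNIC_SINT_MASKED;
 *	hv_ghcb_msr_write(HV_X64_MSR_SINT0, val);
 */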

/*
 * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
 *
 * In Isolation VM, all guest memory is encrypted from host and guest
 * needs to set memory visible to host via hvcall before sharing memory
 * with host.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
				  enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility **input_pcpu, *input;
	u16 pages_processed;
	u64 hv_status;
	unsigned long flags;

	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;

	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
		       HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}

	local_irq_save(flags);
	input_pcpu = (struct hv_gpa_range_for_visibility **)
			this_cpu_ptr(hyperv_pcpu_input_arg);
	input = *input_pcpu;
	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, &pages_processed);
	local_irq_restore(flags);

	if (hv_result_success(hv_status))
		return 0;
	else
		return -EFAULT;
}

/*
 * hv_vtom_set_host_visibility - Set specified memory visible to host.
 *
 * In Isolation VM, all guest memory is encrypted from host and guest
 * needs to set memory visible to host via hvcall before sharing memory
 * with host. This function works as a wrapper of hv_mark_gpa_visibility()
 * with memory base and size.
 */
static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	int ret = 0;
	bool result = true;
	int i, pfn;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array)
		return false;

	for (i = 0, pfn = 0; i < pagecount; i++) {
		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
		pfn++;

		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret) {
				result = false;
				goto err_free_pfn_array;
			}
			pfn = 0;
		}
	}

err_free_pfn_array:
	kfree(pfn_array);
	return result;
}
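
/*
 * Worked example (sizes assumed for illustration): one hypercall input
 * page holds HV_MAX_MODIFY_GPA_REP_COUNT PFNs; with 4 KiB pages and
 * 16 bytes of header fields, that is (4096 / 8) - 2 = 510. A request to
 * flip 520 pages therefore issues two hypercalls: one with 510 PFNs and
 * a second with the remaining 10.
 */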

static bool hv_vtom_tlb_flush_required(bool private)
{
	return true;
}

static bool hv_vtom_cache_flush_required(void)
{
	return false;
}

static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;

	/* Same with a vTPM */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;

	return false;
}

void __init hv_vtom_init(void)
{
	/*
	 * By design, a VM using vTOM doesn't see the SEV setting,
	 * so SEV initialization is bypassed and sev_status isn't set.
	 * Set it here to indicate a vTOM VM.
	 */
	sev_status = MSR_AMD64_SNP_VTOM;
	cc_vendor = CC_VENDOR_AMD;
	cc_set_mask(ms_hyperv.shared_gpa_boundary);
	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;

	/* Set WB as the default cache mode. */
	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}
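
/*
 * Worked example (address width assumed for illustration): with 47 bits
 * of guest physical address space, ms_hyperv.shared_gpa_boundary is
 * 0x400000000000 (bit 46). After hv_vtom_init(), cc_mkdec() sets bit 46
 * in a PTE to mark a page shared (decrypted) and cc_mkenc() clears it
 * to mark the page private (encrypted), while the physical_mask update
 * above strips bit 46 so it is never treated as part of the physical
 * address.
 */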

#endif /* CONFIG_AMD_MEM_ENCRYPT */

enum hv_isolation_type hv_get_isolation_type(void)
{
	if (!(ms_hyperv.priv_high & HV_ISOLATION))
		return HV_ISOLATION_TYPE_NONE;
	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check if the system runs in a Hyper-V
 * Isolation VM.
 */
bool hv_is_isolation_supported(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return false;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
		return false;

	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP
 * based Isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}