// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V nested virtualization code.
 *
 * Copyright (C) 2018, Microsoft, Inc.
 *
 * Author: Lan Tianyu <Tianyu.Lan@microsoft.com>
 */
#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/types.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>

#include <asm/trace/hyperv.h>
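
/*
 * hyperv_flush_guest_mapping - flush mappings for a guest physical address
 * space via the HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall.
 * @as: address space value placed in the hypercall input (address_space field).
 *
 * Returns 0 if the hypercall succeeded, -ENOTSUPP otherwise (no hypercall
 * page, no per-cpu input page, or a failed hypercall status).
 */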
int hyperv_flush_guest_mapping(u64 as)
{
        struct hv_guest_mapping_flush **flush_pcpu;
        struct hv_guest_mapping_flush *flush;
        u64 status;
        unsigned long flags;
        int ret = -ENOTSUPP;

        if (!hv_hypercall_pg)
                goto fault;

        local_irq_save(flags);

        flush_pcpu = (struct hv_guest_mapping_flush **)
                this_cpu_ptr(hyperv_pcpu_input_arg);

        flush = *flush_pcpu;

        if (unlikely(!flush)) {
                local_irq_restore(flags);
                goto fault;
        }

        flush->address_space = as;
        flush->flags = 0;

        status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
                                 flush, NULL);
        local_irq_restore(flags);

        if (!(status & HV_HYPERCALL_RESULT_MASK))
                ret = 0;

fault:
        trace_hyperv_nested_flush_guest_mapping(as, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);
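
/*
 * hyperv_fill_flush_guest_mapping_list - fill the flush input page with a
 * list of GPA ranges for HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST.
 * @flush: flush input page to fill.
 * @start_gfn: first guest frame number of the range.
 * @pages: number of pages to flush.
 *
 * Each gpa_list entry covers at most HV_MAX_FLUSH_PAGES pages. Returns the
 * number of entries written, or -ENOSPC if the range needs more than
 * HV_MAX_FLUSH_REP_COUNT entries.
 */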
int hyperv_fill_flush_guest_mapping_list(
                struct hv_guest_mapping_flush_list *flush,
                u64 start_gfn, u64 pages)
{
        u64 cur = start_gfn;
        u64 additional_pages;
        int gpa_n = 0;

        do {
                /*
                 * If the flush request needs more entries than the maximum
                 * rep count, the caller falls back to flushing the TLB
                 * without a range.
                 */
                if (gpa_n >= HV_MAX_FLUSH_REP_COUNT)
                        return -ENOSPC;

                additional_pages = min_t(u64, pages, HV_MAX_FLUSH_PAGES) - 1;

                flush->gpa_list[gpa_n].page.additional_pages = additional_pages;
                flush->gpa_list[gpa_n].page.largepage = false;
                flush->gpa_list[gpa_n].page.basepfn = cur;

                pages -= additional_pages + 1;
                cur += additional_pages + 1;
                gpa_n++;
        } while (pages > 0);

        return gpa_n;
}
EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list);
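
/*
 * hyperv_flush_guest_mapping_range - flush a list of guest physical address
 * ranges via the HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST rep hypercall.
 * @as: address space value placed in the hypercall input (address_space field).
 * @fill_flush_list_func: callback that fills the per-cpu input page with the
 *	ranges to flush and returns the number of entries, or < 0 on error.
 * @data: opaque argument passed to @fill_flush_list_func.
 *
 * Returns 0 on success; -ENOTSUPP if the hypercall page, the per-cpu input
 * page or the callback is unavailable or the callback fails; otherwise the
 * raw hypercall status.
 */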
int hyperv_flush_guest_mapping_range(u64 as,
                hyperv_fill_flush_list_func fill_flush_list_func, void *data)
{
        struct hv_guest_mapping_flush_list **flush_pcpu;
        struct hv_guest_mapping_flush_list *flush;
        u64 status = 0;
        unsigned long flags;
        int ret = -ENOTSUPP;
        int gpa_n = 0;

        if (!hv_hypercall_pg || !fill_flush_list_func)
                goto fault;

        local_irq_save(flags);

        flush_pcpu = (struct hv_guest_mapping_flush_list **)
                this_cpu_ptr(hyperv_pcpu_input_arg);

        flush = *flush_pcpu;
        if (unlikely(!flush)) {
                local_irq_restore(flags);
                goto fault;
        }

        flush->address_space = as;
        flush->flags = 0;

        gpa_n = fill_flush_list_func(flush, data);
        if (gpa_n < 0) {
                local_irq_restore(flags);
                goto fault;
        }

        status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
                                     gpa_n, 0, flush, NULL);

        local_irq_restore(flags);

        if (!(status & HV_HYPERCALL_RESULT_MASK))
                ret = 0;
        else
                ret = status;
fault:
        trace_hyperv_nested_flush_guest_mapping_range(as, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range);
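
/*
 * Example usage (illustrative sketch only; the callback and range type below
 * are hypothetical and not defined in this file). A caller builds a fill
 * callback around hyperv_fill_flush_guest_mapping_list() and hands it to
 * hyperv_flush_guest_mapping_range():
 *
 *	struct my_flush_range {
 *		u64 start_gfn;
 *		u64 pages;
 *	};
 *
 *	static int fill_one_range(struct hv_guest_mapping_flush_list *flush,
 *				  void *data)
 *	{
 *		struct my_flush_range *range = data;
 *
 *		return hyperv_fill_flush_guest_mapping_list(flush,
 *				range->start_gfn, range->pages);
 *	}
 *
 *	ret = hyperv_flush_guest_mapping_range(as, fill_one_range, &range);
 *
 * If the fill callback returns -ENOSPC (range too large for one hypercall),
 * the caller can fall back to hyperv_flush_guest_mapping(as).
 */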