2017-08-02 19:09:19 +03:00
# define pr_fmt(fmt) "Hyper-V: " fmt
# include <linux/hyperv.h>
# include <linux/log2.h>
# include <linux/slab.h>
# include <linux/types.h>
# include <asm/fpu/api.h>
# include <asm/mshyperv.h>
# include <asm/msr.h>
# include <asm/tlbflush.h>
2018-08-22 18:30:16 +03:00
# include <asm/tlb.h>
2017-08-02 19:09:19 +03:00
2017-08-02 19:09:21 +03:00
# define CREATE_TRACE_POINTS
# include <asm/trace/hyperv.h>
2017-08-02 19:09:19 +03:00
/* Each gva in gva_list encodes up to 4096 pages to flush */
# define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
2018-06-21 16:32:38 +03:00
static u64 hyperv_flush_tlb_others_ex ( const struct cpumask * cpus ,
const struct flush_tlb_info * info ) ;
2017-08-02 19:09:20 +03:00
2017-08-02 19:09:19 +03:00
/*
 * Pack the GVA range [start, end) into gva_list[] entries beginning at
 * 'offset'. Each entry carries a page-aligned GVA in its upper bits and,
 * in its lower 12 bits, the number of additional pages to flush after
 * that page. Returns the number of entries written.
 */
static inline int fill_gva_list(u64 gva_list[], int offset,
				unsigned long start, unsigned long end)
{
	unsigned long addr = start;
	int idx = offset;

	do {
		unsigned long left = (end > addr) ? end - addr : 0;
		u64 entry = addr & PAGE_MASK;

		if (left >= HV_TLB_FLUSH_UNIT) {
			/* Saturated entry: all 12 low bits set, advance one unit. */
			entry |= ~PAGE_MASK;
			addr += HV_TLB_FLUSH_UNIT;
		} else if (left) {
			/* Tail entry: covers whatever remains of the range. */
			entry |= (left - 1) >> PAGE_SHIFT;
			addr = end;
		}

		gva_list[idx++] = entry;
	} while (addr < end);

	return idx - offset;
}
2021-02-21 02:17:07 +03:00
/*
 * Flush TLBs on the CPUs in @cpus via Hyper-V hypercalls instead of IPIs.
 * Falls back to native_flush_tlb_multi() when the hypercall page or the
 * per-cpu hypercall input page is unavailable, when a CPU's VP number
 * cannot be determined, or when the hypercall reports failure.
 *
 * Interrupts are kept disabled from the moment the per-cpu input page is
 * claimed until the hypercall completes, so the page cannot be reused
 * underneath us.
 */
static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
				   const struct flush_tlb_info *info)
{
	int cpu, vcpu, gva_n, max_gvas;
	struct hv_tlb_flush **flush_pcpu;
	struct hv_tlb_flush *flush;
	u64 status;
	unsigned long flags;

	trace_hyperv_mmu_flush_tlb_multi(cpus, info);

	/* No hypercall page means we cannot issue hypercalls at all. */
	if (!hv_hypercall_pg)
		goto do_native;

	local_irq_save(flags);

	flush_pcpu = (struct hv_tlb_flush **)
		     this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

	/* Per-cpu input page not (yet) allocated for this CPU. */
	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto do_native;
	}

	if (info->mm) {
		/*
		 * AddressSpace argument must match the CR3 with PCID bits
		 * stripped out.
		 */
		flush->address_space = virt_to_phys(info->mm->pgd);
		flush->address_space &= CR3_ADDR_MASK;
		flush->flags = 0;
	} else {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	}

	flush->processor_mask = 0;
	if (cpumask_equal(cpus, cpu_present_mask)) {
		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
	} else {
		/*
		 * From the supplied CPU set we need to figure out if we can get
		 * away with cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
		 * hypercalls. This is possible when the highest VP number in
		 * the set is < 64. As VP numbers are usually in ascending order
		 * and match Linux CPU ids, here is an optimization: we check
		 * the VP number for the highest bit in the supplied set first
		 * so we can quickly find out if using *_EX hypercalls is a
		 * must. We will also check all VP numbers when walking the
		 * supplied CPU set to remain correct in all cases.
		 */
		cpu = cpumask_last(cpus);

		if (cpu < nr_cpumask_bits && hv_cpu_number_to_vp_number(cpu) >= 64)
			goto do_ex_hypercall;

		for_each_cpu(cpu, cpus) {
			vcpu = hv_cpu_number_to_vp_number(cpu);
			if (vcpu == VP_INVAL) {
				local_irq_restore(flags);
				goto do_native;
			}

			/* VP >= 64 does not fit in the 64-bit processor_mask. */
			if (vcpu >= 64)
				goto do_ex_hypercall;

			__set_bit(vcpu, (unsigned long *)
				  &flush->processor_mask);
		}

		/* nothing to flush if 'processor_mask' ends up being empty */
		if (!flush->processor_mask) {
			local_irq_restore(flags);
			return;
		}
	}

	/*
	 * We can flush not more than max_gvas with one hypercall. Flush the
	 * whole address space if we were asked to do more.
	 */
	max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);

	if (info->end == TLB_FLUSH_ALL) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
					 flush, NULL);
	} else if (info->end &&
		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
					 flush, NULL);
	} else {
		/* Range fits in one rep hypercall's gva_list. */
		gva_n = fill_gva_list(flush->gva_list, 0,
				      info->start, info->end);
		status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
					     gva_n, 0, flush, NULL);
	}
	goto check_status;

do_ex_hypercall:
	status = hyperv_flush_tlb_others_ex(cpus, info);

check_status:
	local_irq_restore(flags);

	if (hv_result_success(status))
		return;
do_native:
	native_flush_tlb_multi(cpus, info);
}
2018-06-21 16:32:38 +03:00
/*
 * Extended-mask TLB flush, used when some VP number in @cpus is >= 64 and
 * therefore does not fit in the plain hypercalls' 64-bit processor_mask.
 * Returns the hypercall status (HV_STATUS_INVALID_PARAMETER when the *_EX
 * hypercalls are not available or the vpset conversion fails); the caller
 * falls back to the native flush on non-success.
 *
 * Called by hyperv_flush_tlb_multi() with interrupts disabled; reuses the
 * per-cpu hypercall input page validated there.
 */
static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
				      const struct flush_tlb_info *info)
{
	int nr_bank = 0, max_gvas, gva_n;
	struct hv_tlb_flush_ex **flush_pcpu;
	struct hv_tlb_flush_ex *flush;
	u64 status;

	/* The *_EX hypercalls must be advertised by the hypervisor. */
	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
		return HV_STATUS_INVALID_PARAMETER;

	flush_pcpu = (struct hv_tlb_flush_ex **)
		     this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

	if (info->mm) {
		/*
		 * AddressSpace argument must match the CR3 with PCID bits
		 * stripped out.
		 */
		flush->address_space = virt_to_phys(info->mm->pgd);
		flush->address_space &= CR3_ADDR_MASK;
		flush->flags = 0;
	} else {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	}

	flush->hv_vp_set.valid_bank_mask = 0;

	/* Encode the CPU set as a sparse bank array of 64-bit VP masks. */
	flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
	if (nr_bank < 0)
		return HV_STATUS_INVALID_PARAMETER;

	/*
	 * We can flush not more than max_gvas with one hypercall. Flush the
	 * whole address space if we were asked to do more.
	 * The variable-length bank array shares the input page with the
	 * gva_list, so it reduces the room available for GVA entries.
	 */
	max_gvas =
		(PAGE_SIZE - sizeof(*flush) - nr_bank *
		 sizeof(flush->hv_vp_set.bank_contents[0])) /
		sizeof(flush->gva_list[0]);

	if (info->end == TLB_FLUSH_ALL) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hv_do_rep_hypercall(
				HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
				0, nr_bank, flush, NULL);
	} else if (info->end &&
		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_rep_hypercall(
				HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
				0, nr_bank, flush, NULL);
	} else {
		/* gva_list starts after the nr_bank bank entries. */
		gva_n = fill_gva_list(flush->gva_list, nr_bank,
				      info->start, info->end);
		status = hv_do_rep_hypercall(
				HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
				gva_n, nr_bank, flush, NULL);
	}

	return status;
}
2017-08-02 19:09:19 +03:00
/*
 * Install the Hyper-V hypercall-based TLB flush into pv_ops, but only when
 * the hypervisor recommends remote TLB flush via hypercall.
 */
void hyperv_setup_mmu_ops(void)
{
	if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
		return;

	pr_info("Using hypercall for remote TLB flush\n");
	pv_ops.mmu.flush_tlb_multi = hyperv_flush_tlb_multi;
	/*
	 * NOTE(review): presumably paired with the hypercall flush so that
	 * freed page-table pages are not reused before remote CPUs are done
	 * with them — confirm against tlb_remove_table() semantics.
	 */
	pv_ops.mmu.tlb_remove_table = tlb_remove_table;
}