// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V specific APIC code.
 *
 * Copyright (C) 2018, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/apic.h>

#include <asm/trace/hyperv.h>

static struct apic orig_apic;
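
/*
 * Hyper-V exposes the APIC ICR as a single synthetic MSR (HV_X64_MSR_ICR),
 * so the full 64-bit register can be read or written in one MSR access
 * instead of the two 32-bit accesses the architectural xapic layout needs.
 */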
static u64 hv_apic_icr_read(void)
{
	u64 reg_val;

	rdmsrl(HV_X64_MSR_ICR, reg_val);
	return reg_val;
}

static void hv_apic_icr_write(u32 low, u32 id)
{
	u64 reg_val;

	reg_val = SET_XAPIC_DEST_FIELD(id);
	reg_val = reg_val << 32;
	reg_val |= low;

	wrmsrl(HV_X64_MSR_ICR, reg_val);
}
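
/*
 * EOI and TPR accesses are serviced via the Hyper-V synthetic MSRs; every
 * other register falls through to the native memory-mapped APIC.
 */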
static u32 hv_apic_read(u32 reg)
{
	u32 reg_val, hi;

	switch (reg) {
	case APIC_EOI:
		rdmsr(HV_X64_MSR_EOI, reg_val, hi);
		(void)hi;
		return reg_val;
	case APIC_TASKPRI:
		rdmsr(HV_X64_MSR_TPR, reg_val, hi);
		(void)hi;
		return reg_val;
	default:
		return native_apic_mem_read(reg);
	}
}

static void hv_apic_write(u32 reg, u32 val)
{
	switch (reg) {
	case APIC_EOI:
		wrmsr(HV_X64_MSR_EOI, val, 0);
		break;
	case APIC_TASKPRI:
		wrmsr(HV_X64_MSR_TPR, val, 0);
		break;
	default:
		native_apic_mem_write(reg, val);
	}
}
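
/*
 * "Lazy EOI": when the hypervisor has set the no-EOI-required bit in the
 * VP assist page, consuming (clearing) that bit is enough and the write
 * to the synthetic EOI MSR can be skipped entirely.
 */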
static void hv_apic_eoi_write(u32 reg, u32 val)
{
	struct hv_vp_assist_page *hvp = hv_vp_assist_page[smp_processor_id()];

	if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1))
		return;

	wrmsr(HV_X64_MSR_EOI, val, 0);
}
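
/* Skip-callback for cpumask_to_vpset_skip(): excludes the sending CPU. */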
static bool cpu_is_self(int cpu)
{
	return cpu == smp_processor_id();
}

/*
 * IPI implementation on Hyper-V.
 */
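
/*
 * Send the IPI via the HVCALL_SEND_IPI_EX hypercall, which takes a
 * (possibly sparse) VP_SET and can therefore target VP numbers >= 64.
 */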
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
		bool exclude_self)
{
	struct hv_send_ipi_ex **arg;
	struct hv_send_ipi_ex *ipi_arg;
	unsigned long flags;
	int nr_bank = 0;
	u64 status = HV_STATUS_INVALID_PARAMETER;

	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
		return false;

	local_irq_save(flags);
	arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);

	ipi_arg = *arg;
	if (unlikely(!ipi_arg))
		goto ipi_mask_ex_done;

	ipi_arg->vector = vector;
	ipi_arg->reserved = 0;
	ipi_arg->vp_set.valid_bank_mask = 0;

	/*
	 * Use HV_GENERIC_SET_ALL and avoid converting cpumask to VP_SET
	 * when the IPI is sent to all currently present CPUs.
	 */
	if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
		ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;

		nr_bank = cpumask_to_vpset_skip(&(ipi_arg->vp_set), mask,
				exclude_self ? cpu_is_self : NULL);

		/*
		 * 'nr_bank <= 0' means some CPUs in cpumask can't be
		 * represented in VP_SET. Return an error and fall back to
		 * native (architectural) method of sending IPIs.
		 */
		if (nr_bank <= 0)
			goto ipi_mask_ex_done;
	} else {
		ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
	}

	status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
				     ipi_arg, NULL);

ipi_mask_ex_done:
	local_irq_restore(flags);
	return hv_result_success(status);
}
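
/*
 * Send an IPI to every CPU in @mask, optionally excluding the sender.
 * Prefers the cheaper HVCALL_SEND_IPI fast hypercall when all target VP
 * numbers fit in a 64-bit mask; returns false when the caller should
 * fall back to the architectural IPI path.
 */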
static bool __send_ipi_mask(const struct cpumask *mask, int vector,
		bool exclude_self)
{
	int cur_cpu, vcpu, this_cpu = smp_processor_id();
	struct hv_send_ipi ipi_arg;
	u64 status;
	unsigned int weight;

	trace_hyperv_send_ipi_mask(mask, vector);

	weight = cpumask_weight(mask);

	/*
	 * Do nothing if
	 *  1. the mask is empty
	 *  2. the mask only contains self when exclude_self is true
	 */
	if (weight == 0 ||
	    (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
		return true;

	if (!hv_hypercall_pg)
		return false;

	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return false;

	/*
	 * From the supplied CPU set we need to figure out if we can get away
	 * with cheaper HVCALL_SEND_IPI hypercall. This is possible when the
	 * highest VP number in the set is < 64. As VP numbers are usually in
	 * ascending order and match Linux CPU ids, here is an optimization:
	 * we check the VP number for the highest bit in the supplied set first
	 * so we can quickly find out if using HVCALL_SEND_IPI_EX hypercall is
	 * a must. We will also check all VP numbers when walking the supplied
	 * CPU set to remain correct in all cases.
	 */
	if (hv_cpu_number_to_vp_number(cpumask_last(mask)) >= 64)
		goto do_ex_hypercall;

	ipi_arg.vector = vector;
	ipi_arg.cpu_mask = 0;

	for_each_cpu(cur_cpu, mask) {
		if (exclude_self && cur_cpu == this_cpu)
			continue;
		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
		if (vcpu == VP_INVAL)
			return false;

		/*
		 * This particular version of the IPI hypercall can
		 * only target up to 64 CPUs.
		 */
		if (vcpu >= 64)
			goto do_ex_hypercall;

		__set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
	}

	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
					ipi_arg.cpu_mask);
	return hv_result_success(status);

do_ex_hypercall:
	return __send_ipi_mask_ex(mask, vector, exclude_self);
}
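
/*
 * Single-target variant: one fast hypercall with a single bit set in the
 * VP mask, deferring to the EX form when the VP number doesn't fit.
 */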
static bool __send_ipi_one(int cpu, int vector)
{
	int vp = hv_cpu_number_to_vp_number(cpu);
	u64 status;

	trace_hyperv_send_ipi_one(cpu, vector);

	if (!hv_hypercall_pg || (vp == VP_INVAL))
		return false;

	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return false;

	if (vp >= 64)
		return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);

	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
	return hv_result_success(status);
}
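
/*
 * The send_IPI* callbacks below try the hypercall-based path first and
 * fall back to the original callbacks saved in orig_apic when it cannot
 * be used or fails.
 */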
static void hv_send_ipi(int cpu, int vector)
{
	if (!__send_ipi_one(cpu, vector))
		orig_apic.send_IPI(cpu, vector);
}

static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
{
	if (!__send_ipi_mask(mask, vector, false))
		orig_apic.send_IPI_mask(mask, vector);
}

static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	if (!__send_ipi_mask(mask, vector, true))
		orig_apic.send_IPI_mask_allbutself(mask, vector);
}

static void hv_send_ipi_allbutself(int vector)
{
	hv_send_ipi_mask_allbutself(cpu_online_mask, vector);
}

static void hv_send_ipi_all(int vector)
{
	if (!__send_ipi_mask(cpu_online_mask, vector, false))
		orig_apic.send_IPI_all(vector);
}

static void hv_send_ipi_self(int vector)
{
	if (!__send_ipi_one(smp_processor_id(), vector))
		orig_apic.send_IPI_self(vector);
}
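
/*
 * Install the hypercall-based IPI callbacks and the synthetic-MSR APIC
 * accessors when the hypervisor recommends them; the original struct apic
 * is saved in orig_apic so the wrappers above can fall back to it.
 */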
void __init hv_apic_init(void)
{
	if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) {
		pr_info("Hyper-V: Using IPI hypercalls\n");
		/*
		 * Set the IPI entry points.
		 */
		orig_apic = *apic;

		apic->send_IPI = hv_send_ipi;
		apic->send_IPI_mask = hv_send_ipi_mask;
		apic->send_IPI_mask_allbutself = hv_send_ipi_mask_allbutself;
		apic->send_IPI_allbutself = hv_send_ipi_allbutself;
		apic->send_IPI_all = hv_send_ipi_all;
		apic->send_IPI_self = hv_send_ipi_self;
	}

	if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
		pr_info("Hyper-V: Using enlightened APIC (%s mode)",
			x2apic_enabled() ? "x2apic" : "xapic");

		/*
		 * When in x2apic mode, don't use the Hyper-V specific APIC
		 * accessors since the field layout in the ICR register is
		 * different in x2apic mode. Furthermore, the architectural
		 * x2apic MSRs function just as well as the Hyper-V
		 * synthetic APIC MSRs, so there's no benefit in having
		 * separate Hyper-V accessors for x2apic mode. The only
		 * exception is hv_apic_eoi_write, because it benefits from
		 * lazy EOI when available, but the same accessor works for
		 * both xapic and x2apic because the field layout is the same.
		 */
		apic_set_eoi_write(hv_apic_eoi_write);
		if (!x2apic_enabled()) {
			apic->read      = hv_apic_read;
			apic->write     = hv_apic_write;
			apic->icr_write = hv_apic_icr_write;
			apic->icr_read  = hv_apic_icr_read;
		}
	}
}