2019-05-19 15:08:55 +03:00
// SPDX-License-Identifier: GPL-2.0-only
2012-09-14 17:53:39 +04:00
# include <xen/xen.h>
2012-09-14 17:37:32 +04:00
# include <xen/events.h>
2012-09-13 16:06:52 +04:00
# include <xen/grant_table.h>
# include <xen/hvm.h>
2013-04-25 20:17:04 +04:00
# include <xen/interface/vcpu.h>
2012-09-14 17:53:39 +04:00
# include <xen/interface/xen.h>
# include <xen/interface/memory.h>
2012-09-13 16:06:52 +04:00
# include <xen/interface/hvm/params.h>
2012-08-08 21:20:18 +04:00
# include <xen/features.h>
2012-09-14 17:53:39 +04:00
# include <xen/platform_pci.h>
2012-09-13 16:06:52 +04:00
# include <xen/xenbus.h>
2012-10-03 15:28:26 +04:00
# include <xen/page.h>
2013-04-25 14:23:07 +04:00
# include <xen/interface/sched.h>
2012-10-03 19:37:09 +04:00
# include <xen/xen-ops.h>
2012-09-14 17:53:39 +04:00
# include <asm/xen/hypervisor.h>
# include <asm/xen/hypercall.h>
2013-04-25 14:23:07 +04:00
# include <asm/system_misc.h>
2016-05-12 15:19:54 +03:00
# include <asm/efi.h>
2012-09-14 17:37:32 +04:00
# include <linux/interrupt.h>
# include <linux/irqreturn.h>
2012-09-14 17:53:39 +04:00
# include <linux/module.h>
2012-09-14 14:47:52 +04:00
# include <linux/of.h>
2016-04-07 15:03:28 +03:00
# include <linux/of_fdt.h>
2012-09-14 14:47:52 +04:00
# include <linux/of_irq.h>
# include <linux/of_address.h>
2013-09-09 15:35:26 +04:00
# include <linux/cpuidle.h>
# include <linux/cpufreq.h>
2014-01-30 16:52:59 +04:00
# include <linux/cpu.h>
2015-05-06 17:14:22 +03:00
# include <linux/console.h>
2015-11-23 13:41:12 +03:00
# include <linux/pvclock_gtod.h>
2021-06-04 17:07:33 +03:00
# include <linux/reboot.h>
2015-11-23 13:41:12 +03:00
# include <linux/time64.h>
2015-11-23 13:40:12 +03:00
# include <linux/timekeeping.h>
2015-11-23 13:41:12 +03:00
# include <linux/timekeeper_internal.h>
2016-04-07 15:03:27 +03:00
# include <linux/acpi.h>
2012-09-14 17:53:39 +04:00
2012-10-03 19:37:09 +04:00
# include <linux/mm.h>
2020-04-15 11:48:53 +03:00
/*
 * Dummy start_info: common Xen code dereferences xen_start_info, so on
 * this architecture it points at a static placeholder.
 */
static struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL(xen_start_info);

/* Updated to XEN_HVM_DOMAIN by xen_early_init() when Xen is detected. */
enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL(xen_domain_type);

/* Placeholder until the real shared info page is mapped in xen_guest_init(). */
struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

/* Per-cpu pointer to this CPU's registered vcpu_info structure. */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/* Backing per-cpu allocation for the vcpu_info structures. */
static struct vcpu_info __percpu *xen_vcpu_info;

/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* IRQ used for the Xen event-channel upcall; 0 means "not found". */
static __read_mostly unsigned int xen_events_irq;

uint32_t xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);
2015-08-07 19:34:41 +03:00
/*
 * Unmap @nr foreign frames previously mapped into @vma, forwarding to
 * the auto-translation (xlate) helper.
 */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	return xen_xlate_unmap_gfn_range(vma, nr, pages);
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
2012-10-03 19:37:09 +04:00
2015-11-23 13:40:12 +03:00
/*
 * Compute the current wall-clock time as Xen's boot-time wallclock
 * (from the shared info page) plus the monotonic time since boot.
 * The version/rmb() loop retries until a consistent (even, unchanged)
 * snapshot of the hypervisor's wc fields is read.
 */
static void xen_read_wallclock(struct timespec64 *ts)
{
	u32 version;
	struct timespec64 now, ts_monotonic;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);

	/* get wallclock at system boot */
	do {
		version = wall_clock->version;
		rmb();		/* fetch version before time */
		now.tv_sec  = ((uint64_t)wall_clock->sec_hi << 32) |
			      wall_clock->sec;
		now.tv_nsec = wall_clock->nsec;
		rmb();		/* fetch time before checking version */
	} while ((wall_clock->version & 1) ||
		 (version != wall_clock->version));

	/* time since system boot */
	ktime_get_ts64(&ts_monotonic);
	*ts = timespec64_add(now, ts_monotonic);
}
2015-11-23 13:41:12 +03:00
/*
 * pvclock GTOD notifier: pushes the kernel's wall-clock time back to the
 * hypervisor via XENPF_settime64.  Registered only for the initial
 * domain (see xen_guest_init()).  @priv is the timekeeper, per the
 * pvclock_gtod notifier contract.
 */
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec64 next_sync;

	struct xen_platform_op op;
	struct timespec64 now, system_time;
	struct timekeeper *tk = priv;

	now.tv_sec = tk->xtime_sec;
	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	system_time = timespec64_add(now, tk->wall_to_monotonic);

	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11 minutes RTC synchronization time elapsed.
	 */
	if (!was_set && timespec64_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime64;
	op.u.settime64.mbz = 0;
	op.u.settime64.secs = now.tv_sec;
	op.u.settime64.nsecs = now.tv_nsec;
	op.u.settime64.system_time = timespec64_to_ns(&system_time);
	/* Best effort: the hypercall result is deliberately ignored. */
	(void)HYPERVISOR_platform_op(&op);

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};
2016-07-13 20:16:52 +03:00
/*
 * CPU hotplug "starting" callback: register this CPU's vcpu_info page
 * with the hypervisor and enable the per-cpu event-channel IRQ.
 */
static int xen_starting_cpu(unsigned int cpu)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;

	/*
	 * VCPUOP_register_vcpu_info cannot be called twice for the same
	 * vcpu, so if vcpu_info is already registered, just get out. This
	 * can happen with cpu-hotplug.
	 */
	if (per_cpu(xen_vcpu, cpu) != NULL)
		goto after_register_vcpu_info;

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

	info.mfn = percpu_to_gfn(vcpup);
	info.offset = xen_offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
				 &info);
	/* Registration failure is fatal: the CPU could not take upcalls. */
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

	if (!xen_kernel_unmapped_at_usr())
		xen_setup_runstate_info(cpu);

after_register_vcpu_info:
	enable_percpu_irq(xen_events_irq, 0);
	return 0;
}
/*
 * CPU hotplug "dying" callback: stop delivering event-channel upcalls
 * on this CPU before it goes offline.
 */
static int xen_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(xen_events_irq);

	return 0;
}
2017-04-24 20:58:38 +03:00
/*
 * Ask the hypervisor to shut this domain down with the given
 * SHUTDOWN_* @reason (reboot, poweroff, ...).  A failing hypercall is
 * treated as fatal.
 */
void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };
	int rc;

	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	BUG_ON(rc);
}
2021-06-04 17:07:33 +03:00
/* Restart-handler callback: reboot the domain through the hypervisor. */
static int xen_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	xen_reboot(SHUTDOWN_reboot);

	return NOTIFY_DONE;
}

static struct notifier_block xen_restart_nb = {
	.notifier_call = xen_restart,
	.priority = 192,	/* above the default restart-handler priority */
};
2017-04-24 20:58:38 +03:00
2013-04-25 14:23:07 +04:00
static void xen_power_off ( void )
{
2017-04-24 20:58:38 +03:00
xen_reboot ( SHUTDOWN_poweroff ) ;
2013-04-25 14:23:07 +04:00
}
2014-01-30 16:52:59 +04:00
/* Per-cpu IRQ handler: dispatch the Xen event-channel upcall. */
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_hvm_evtchn_do_upcall();

	return IRQ_HANDLED;
}
2016-04-07 15:03:28 +03:00
static __initdata struct {
	const char *compat;
	const char *prefix;
	const char *version;
	bool found;
} hyper_node = {"xen,xen", "xen,xen-", NULL, false};

/*
 * of_scan_flat_dt() callback: look at the top-level "hypervisor" node,
 * record whether it is Xen-compatible and which interface version the
 * "xen,xen-<version>" compatible string announces.  Also enables EFI
 * runtime services when a /hypervisor/uefi subnode is present.
 *
 * Always returns 0 so the FDT scan continues.
 */
static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
				      int depth, void *data)
{
	const void *s = NULL;
	int len = 0;	/* not written by of_get_flat_dt_prop() on failure */

	if (depth != 1 || strcmp(uname, "hypervisor") != 0)
		return 0;

	if (of_flat_dt_is_compatible(node, hyper_node.compat))
		hyper_node.found = true;

	s = of_get_flat_dt_prop(node, "compatible", &len);
	/*
	 * Guard against a missing "compatible" property: without the
	 * NULL check, strncmp() would be handed a NULL pointer and
	 * "len" would be read uninitialized.
	 */
	if (s != NULL && strlen(hyper_node.prefix) + 3 < len &&
	    !strncmp(hyper_node.prefix, s, strlen(hyper_node.prefix)))
		hyper_node.version = s + strlen(hyper_node.prefix);

	/*
	 * Check if Xen supports EFI by checking whether there is the
	 * "/hypervisor/uefi" node in DT. If so, runtime services are available
	 * through proxy functions (e.g. in case of Xen dom0 EFI implementation
	 * they call special hypercall which executes relevant EFI functions)
	 * and that is why they are always enabled.
	 */
	if (IS_ENABLED(CONFIG_XEN_EFI)) {
		if ((of_get_flat_dt_subnode_by_name(node, "uefi") > 0) &&
		    !efi_runtime_disabled())
			set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
	}

	return 0;
}
2012-09-14 14:47:52 +04:00
/*
 * see Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
/*
 * Early boot-time detection of Xen from the flattened device tree:
 * sets xen_domain_type, the start flags, and the preferred console.
 */
void __init xen_early_init(void)
{
	of_scan_flat_dt(fdt_find_hyper_node, NULL);
	if (!hyper_node.found) {
		pr_debug("No Xen support\n");
		return;
	}

	if (hyper_node.version == NULL) {
		pr_debug("Xen version not found\n");
		return;
	}

	pr_info("Xen %s support found\n", hyper_node.version);

	/* A detected Xen guest is always treated as an HVM domain here. */
	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();

	if (xen_feature(XENFEAT_dom0))
		xen_start_flags |= SIF_INITDOMAIN | SIF_PRIVILEGED;

	/* Default domU console is the Xen virtual console (hvc0). */
	if (!console_set_on_cmdline && !xen_initial_domain())
		add_preferred_console("hvc", 0, NULL);
}
2016-04-07 15:03:27 +03:00
/*
 * ACPI path: read the event-channel upcall configuration from the
 * HVM_PARAM_CALLBACK_IRQ parameter and register the GSI.  Leaves
 * xen_events_irq == 0 if the hypercall fails or the callback type
 * is not a PPI.
 */
static void __init xen_acpi_guest_init(void)
{
#ifdef CONFIG_ACPI
	struct xen_hvm_param a;
	int interrupt, trigger, polarity;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;

	/* Bits 63:56 encode the callback type; only PPI is supported. */
	if (HYPERVISOR_hvm_op(HVMOP_get_param, &a)
	    || (a.value >> 56) != HVM_PARAM_CALLBACK_TYPE_PPI) {
		xen_events_irq = 0;
		return;
	}

	/* Bits 7:0 = interrupt number, bit 8 = edge, bit 9 = active-low. */
	interrupt = a.value & 0xff;
	trigger = ((a.value >> 8) & 0x1) ? ACPI_EDGE_SENSITIVE
					 : ACPI_LEVEL_SENSITIVE;
	polarity = ((a.value >> 8) & 0x2) ? ACPI_ACTIVE_LOW
					  : ACPI_ACTIVE_HIGH;
	xen_events_irq = acpi_register_gsi(NULL, interrupt, trigger, polarity);
#endif
}
/*
 * Device-tree path: find the "xen,xen" node and map its first interrupt
 * as the event-channel upcall IRQ (stored in xen_events_irq).
 */
static void __init xen_dt_guest_init(void)
{
	struct device_node *xen_node;

	xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!xen_node) {
		pr_err("Xen support was detected before, but it has disappeared\n");
		return;
	}

	xen_events_irq = irq_of_parse_and_map(xen_node, 0);
	/*
	 * of_find_compatible_node() returns the node with an elevated
	 * refcount; drop it now that we are done with it (was leaked).
	 */
	of_node_put(xen_node);
}
2015-05-06 17:13:31 +03:00
/*
 * Main Xen guest initialization (early_initcall): locates the event
 * channel IRQ (ACPI or DT), maps the shared info page, allocates the
 * per-cpu vcpu_info area, sets up grant tables, and installs the CPU
 * hotplug callbacks that register each CPU with the hypervisor.
 *
 * Returns 0 when not running on Xen or on success, negative errno on
 * failure.
 */
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	struct shared_info *shared_info_page = NULL;
	int cpu;

	if (!xen_domain())
		return 0;

	if (!acpi_disabled)
		xen_acpi_guest_init();
	else
		xen_dt_guest_init();

	if (!xen_events_irq) {
		pr_err("Xen event channel interrupt not found\n");
		return -ENODEV;
	}

	/*
	 * The fdt parsing codes have set EFI_RUNTIME_SERVICES if Xen EFI
	 * parameters are found. Force enable runtime services.
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		xen_efi_runtime_setup();

	shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);

	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}

	/* Ask Xen to place the shared info page at this guest frame. */
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = virt_to_gfn(shared_info_page);
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();
	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
	xen_vcpu_info = alloc_percpu(struct vcpu_info);
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

	/* Direct vCPU id mapping for ARM guests. */
	for_each_possible_cpu(cpu)
		per_cpu(xen_vcpu_id, cpu) = cpu;

	xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
	if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
					  &xen_auto_xlat_grant_frames.vaddr,
					  xen_auto_xlat_grant_frames.count)) {
		free_percpu(xen_vcpu_info);
		return -ENOMEM;
	}
	gnttab_init();

	/*
	 * Making sure board specific code will not set up ops for
	 * cpu idle and cpu freq.
	 */
	disable_cpuidle();
	disable_cpufreq();

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error request IRQ %d\n", xen_events_irq);
		/*
		 * Fix: release the per-cpu vcpu_info area on this error
		 * path (it was previously leaked); it is safe because no
		 * hotplug callback has consumed it yet.
		 */
		free_percpu(xen_vcpu_info);
		return -EINVAL;
	}

	if (!xen_kernel_unmapped_at_usr())
		xen_time_setup_guest();

	/* Only dom0 pushes wall-clock updates back to the hypervisor. */
	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);

	return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
				 "arm/xen:starting", xen_starting_cpu,
				 xen_dying_cpu);
}
early_initcall(xen_guest_init);
2013-05-08 15:59:01 +04:00
/*
 * Late init: hook power-off and restart into the hypervisor, and for
 * domUs initialize system time from Xen's wallclock.
 */
static int __init xen_pm_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	pm_power_off = xen_power_off;
	register_restart_handler(&xen_restart_nb);

	/* domUs take their initial wall-clock time from the hypervisor */
	if (!xen_initial_domain()) {
		struct timespec64 ts;

		xen_read_wallclock(&ts);
		do_settimeofday64(&ts);
	}

	return 0;
}
late_initcall(xen_pm_init);
2012-09-14 17:37:32 +04:00
2014-05-08 19:54:02 +04:00
/* empty stubs */
/* NOTE(review): presumably required by common Xen suspend/resume code —
 * nothing arch-specific to do here; confirm against the callers. */
void xen_arch_pre_suspend(void) { }
void xen_arch_post_suspend(int suspend_cancelled) { }
void xen_timer_resume(void) { }
void xen_arch_resume(void) { }
void xen_arch_suspend(void) { }
2015-09-14 16:20:52 +03:00
/* In the hypercall.S file. */
/* Hypercall trampolines implemented in assembly; exported for modules. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op);
EXPORT_SYMBOL_GPL(privcmd_call);