2012-09-14 13:53:39 +00:00
# include <xen/xen.h>
2012-09-14 13:37:32 +00:00
# include <xen/events.h>
2012-09-13 12:06:52 +00:00
# include <xen/grant_table.h>
# include <xen/hvm.h>
2013-04-25 16:17:04 +00:00
# include <xen/interface/vcpu.h>
2012-09-14 13:53:39 +00:00
# include <xen/interface/xen.h>
# include <xen/interface/memory.h>
2012-09-13 12:06:52 +00:00
# include <xen/interface/hvm/params.h>
2012-08-08 17:20:18 +00:00
# include <xen/features.h>
2012-09-14 13:53:39 +00:00
# include <xen/platform_pci.h>
2012-09-13 12:06:52 +00:00
# include <xen/xenbus.h>
2012-10-03 12:28:26 +01:00
# include <xen/page.h>
2013-04-25 10:23:07 +00:00
# include <xen/interface/sched.h>
2012-10-03 16:37:09 +01:00
# include <xen/xen-ops.h>
2012-09-14 13:53:39 +00:00
# include <asm/xen/hypervisor.h>
# include <asm/xen/hypercall.h>
2013-04-25 10:23:07 +00:00
# include <asm/system_misc.h>
2012-09-14 13:37:32 +00:00
# include <linux/interrupt.h>
# include <linux/irqreturn.h>
2012-09-14 13:53:39 +00:00
# include <linux/module.h>
2012-09-14 10:47:52 +00:00
# include <linux/of.h>
# include <linux/of_irq.h>
# include <linux/of_address.h>
2013-09-09 11:35:26 +00:00
# include <linux/cpuidle.h>
# include <linux/cpufreq.h>
2014-01-30 12:52:59 +00:00
# include <linux/cpu.h>
2012-09-14 13:53:39 +00:00
2012-10-03 16:37:09 +01:00
# include <linux/mm.h>
2012-09-14 13:53:39 +00:00
struct start_info _xen_start_info ;
struct start_info * xen_start_info = & _xen_start_info ;
EXPORT_SYMBOL_GPL ( xen_start_info ) ;
enum xen_domain_type xen_domain_type = XEN_NATIVE ;
EXPORT_SYMBOL_GPL ( xen_domain_type ) ;
struct shared_info xen_dummy_shared_info ;
struct shared_info * HYPERVISOR_shared_info = ( void * ) & xen_dummy_shared_info ;
DEFINE_PER_CPU ( struct vcpu_info * , xen_vcpu ) ;
2013-04-25 16:17:04 +00:00
static struct vcpu_info __percpu * xen_vcpu_info ;
2012-09-14 13:53:39 +00:00
2012-10-03 12:28:26 +01:00
/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages ;
struct xen_memory_region xen_extra_mem [ XEN_EXTRA_MEM_MAX_REGIONS ] __initdata ;
2012-09-14 13:53:39 +00:00
/* TODO: to be removed */
__read_mostly int xen_have_vector_callback ;
EXPORT_SYMBOL_GPL ( xen_have_vector_callback ) ;
int xen_platform_pci_unplug = XEN_UNPLUG_ALL ;
EXPORT_SYMBOL_GPL ( xen_platform_pci_unplug ) ;
2012-09-14 13:37:32 +00:00
static __read_mostly int xen_events_irq = - 1 ;
2012-10-03 16:37:09 +01:00
/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page ( unsigned long lpfn , unsigned long fgmfn ,
unsigned int domid )
{
int rc ;
struct xen_add_to_physmap_range xatp = {
. domid = DOMID_SELF ,
. foreign_domid = domid ,
. size = 1 ,
. space = XENMAPSPACE_gmfn_foreign ,
} ;
xen_ulong_t idx = fgmfn ;
xen_pfn_t gpfn = lpfn ;
2013-02-19 22:00:58 -05:00
int err = 0 ;
2012-10-03 16:37:09 +01:00
set_xen_guest_handle ( xatp . idxs , & idx ) ;
set_xen_guest_handle ( xatp . gpfns , & gpfn ) ;
2013-02-19 22:00:58 -05:00
set_xen_guest_handle ( xatp . errs , & err ) ;
2012-10-03 16:37:09 +01:00
rc = HYPERVISOR_memory_op ( XENMEM_add_to_physmap_range , & xatp ) ;
2013-02-19 22:00:58 -05:00
if ( rc | | err ) {
pr_warn ( " Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx \n " ,
rc , err , lpfn , fgmfn ) ;
2012-10-03 16:37:09 +01:00
return 1 ;
}
return 0 ;
}
/*
 * State threaded through apply_to_page_range() by
 * xen_remap_domain_mfn_range() to its per-PTE callback.
 */
struct remap_data {
	xen_pfn_t fgmfn;		/* foreign domain's gmfn */
	pgprot_t prot;			/* protection for the new PTE */
	domid_t domid;			/* owner of the foreign frame */
	struct vm_area_struct *vma;	/* VMA being populated */
	int index;			/* next unused slot in @pages */
	struct page **pages;		/* local pages backing the mapping */
	struct xen_remap_mfn_info *info;
};
/*
 * apply_to_page_range() callback: back the next local page with the
 * foreign frame, then install the (special) PTE for it.
 */
static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_data *r = data;
	struct page *pg = r->pages[r->index++];
	unsigned long pfn = page_to_pfn(pg);
	pte_t pte = pte_mkspecial(pfn_pte(pfn, r->prot));

	if (map_foreign_page(pfn, r->fgmfn, r->domid))
		return -EFAULT;
	set_pte_at(r->vma->vm_mm, addr, ptep, pte);

	return 0;
}
2012-09-14 13:53:39 +00:00
int xen_remap_domain_mfn_range ( struct vm_area_struct * vma ,
unsigned long addr ,
2012-10-03 16:37:09 +01:00
xen_pfn_t mfn , int nr ,
pgprot_t prot , unsigned domid ,
struct page * * pages )
2012-09-14 13:53:39 +00:00
{
2012-10-03 16:37:09 +01:00
int err ;
struct remap_data data ;
/* TBD: Batching, current sole caller only does page at a time */
if ( nr > 1 )
return - EINVAL ;
data . fgmfn = mfn ;
data . prot = prot ;
data . domid = domid ;
data . vma = vma ;
data . index = 0 ;
data . pages = pages ;
err = apply_to_page_range ( vma - > vm_mm , addr , nr < < PAGE_SHIFT ,
remap_pte_fn , & data ) ;
return err ;
2012-09-14 13:53:39 +00:00
}
EXPORT_SYMBOL_GPL ( xen_remap_domain_mfn_range ) ;
2012-09-14 10:47:52 +00:00
2012-10-03 16:37:09 +01:00
int xen_unmap_domain_mfn_range ( struct vm_area_struct * vma ,
int nr , struct page * * pages )
{
int i ;
for ( i = 0 ; i < nr ; i + + ) {
struct xen_remove_from_physmap xrp ;
unsigned long rc , pfn ;
pfn = page_to_pfn ( pages [ i ] ) ;
xrp . domid = DOMID_SELF ;
xrp . gpfn = pfn ;
rc = HYPERVISOR_memory_op ( XENMEM_remove_from_physmap , & xrp ) ;
if ( rc ) {
pr_warn ( " Failed to unmap pfn:%lx rc:%ld \n " ,
pfn , rc ) ;
return rc ;
}
}
return 0 ;
}
EXPORT_SYMBOL_GPL ( xen_unmap_domain_mfn_range ) ;
2014-01-30 12:52:59 +00:00
static void xen_percpu_init ( void )
2013-04-25 16:17:04 +00:00
{
struct vcpu_register_vcpu_info info ;
struct vcpu_info * vcpup ;
int err ;
2013-05-08 11:59:01 +00:00
int cpu = get_cpu ( ) ;
2013-04-25 16:17:04 +00:00
pr_info ( " Xen: initializing cpu%d \n " , cpu ) ;
vcpup = per_cpu_ptr ( xen_vcpu_info , cpu ) ;
info . mfn = __pa ( vcpup ) > > PAGE_SHIFT ;
info . offset = offset_in_page ( vcpup ) ;
err = HYPERVISOR_vcpu_op ( VCPUOP_register_vcpu_info , cpu , & info ) ;
2013-05-08 13:02:38 +00:00
BUG_ON ( err ) ;
per_cpu ( xen_vcpu , cpu ) = vcpup ;
2013-05-08 11:59:01 +00:00
enable_percpu_irq ( xen_events_irq , 0 ) ;
2013-07-29 17:06:05 +01:00
put_cpu ( ) ;
2013-04-25 16:17:04 +00:00
}
2013-07-21 15:17:54 +00:00
static void xen_restart ( enum reboot_mode reboot_mode , const char * cmd )
2013-04-25 10:23:07 +00:00
{
struct sched_shutdown r = { . reason = SHUTDOWN_reboot } ;
int rc ;
rc = HYPERVISOR_sched_op ( SCHEDOP_shutdown , & r ) ;
if ( rc )
BUG ( ) ;
}
/* Power off the guest through the hypervisor; BUG if the hypercall fails. */
static void xen_power_off(void)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}
2014-01-30 12:52:59 +00:00
/*
 * CPU hotplug callback: initialise Xen per-cpu state as each secondary
 * CPU comes up.  Only CPU_STARTING is of interest; everything else is
 * acknowledged untouched.
 */
static int xen_cpu_notification(struct notifier_block *self,
				unsigned long action,
				void *hcpu)
{
	if (action == CPU_STARTING)
		xen_percpu_init();

	return NOTIFY_OK;
}

static struct notifier_block xen_cpu_notifier = {
	.notifier_call = xen_cpu_notification,
};
/* Per-cpu event channel interrupt handler: dispatch pending Xen events. */
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}
2012-09-14 10:47:52 +00:00
/*
* see Documentation / devicetree / bindings / arm / xen . txt for the
* documentation of the Xen Device Tree format .
*/
2012-09-13 12:06:52 +00:00
# define GRANT_TABLE_PHYSADDR 0
2012-09-14 10:47:52 +00:00
static int __init xen_guest_init ( void )
{
struct xen_add_to_physmap xatp ;
static struct shared_info * shared_info_page = 0 ;
struct device_node * node ;
int len ;
const char * s = NULL ;
const char * version = NULL ;
const char * xen_prefix = " xen,xen- " ;
2012-09-13 12:06:52 +00:00
struct resource res ;
2014-01-30 12:56:34 +00:00
phys_addr_t grant_frames ;
2012-09-14 10:47:52 +00:00
node = of_find_compatible_node ( NULL , NULL , " xen,xen " ) ;
if ( ! node ) {
pr_debug ( " No Xen support \n " ) ;
return 0 ;
}
s = of_get_property ( node , " compatible " , & len ) ;
if ( strlen ( xen_prefix ) + 3 < len & &
! strncmp ( xen_prefix , s , strlen ( xen_prefix ) ) )
version = s + strlen ( xen_prefix ) ;
if ( version = = NULL ) {
pr_debug ( " Xen version not found \n " ) ;
return 0 ;
}
2012-09-13 12:06:52 +00:00
if ( of_address_to_resource ( node , GRANT_TABLE_PHYSADDR , & res ) )
return 0 ;
2014-01-06 10:40:36 -05:00
grant_frames = res . start ;
2012-09-14 13:37:32 +00:00
xen_events_irq = irq_of_parse_and_map ( node , 0 ) ;
2014-01-30 12:56:34 +00:00
pr_info ( " Xen %s support found, events_irq=%d gnttab_frame=%pa \n " ,
version , xen_events_irq , & grant_frames ) ;
2014-01-30 12:52:59 +00:00
if ( xen_events_irq < 0 )
return - ENODEV ;
2012-09-14 10:47:52 +00:00
xen_domain_type = XEN_HVM_DOMAIN ;
2012-08-08 17:20:18 +00:00
xen_setup_features ( ) ;
if ( xen_feature ( XENFEAT_dom0 ) )
xen_start_info - > flags | = SIF_INITDOMAIN | SIF_PRIVILEGED ;
else
xen_start_info - > flags & = ~ ( SIF_INITDOMAIN | SIF_PRIVILEGED ) ;
2012-09-14 10:47:52 +00:00
if ( ! shared_info_page )
shared_info_page = ( struct shared_info * )
get_zeroed_page ( GFP_KERNEL ) ;
if ( ! shared_info_page ) {
pr_err ( " not enough memory \n " ) ;
return - ENOMEM ;
}
xatp . domid = DOMID_SELF ;
xatp . idx = 0 ;
xatp . space = XENMAPSPACE_shared_info ;
xatp . gpfn = __pa ( shared_info_page ) > > PAGE_SHIFT ;
if ( HYPERVISOR_memory_op ( XENMEM_add_to_physmap , & xatp ) )
BUG ( ) ;
HYPERVISOR_shared_info = ( struct shared_info * ) shared_info_page ;
/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
* page , we use it in the event channel upcall and in some pvclock
2013-04-25 16:17:04 +00:00
* related functions .
2012-09-14 10:47:52 +00:00
* The shared info contains exactly 1 CPU ( the boot CPU ) . The guest
* is required to use VCPUOP_register_vcpu_info to place vcpu info
2013-04-25 16:17:04 +00:00
* for secondary CPUs as they are brought up .
* For uniformity we use VCPUOP_register_vcpu_info even on cpu0 .
*/
xen_vcpu_info = __alloc_percpu ( sizeof ( struct vcpu_info ) ,
sizeof ( struct vcpu_info ) ) ;
if ( xen_vcpu_info = = NULL )
return - ENOMEM ;
2012-09-13 12:06:52 +00:00
2014-01-06 10:40:36 -05:00
if ( gnttab_setup_auto_xlat_frames ( grant_frames ) ) {
free_percpu ( xen_vcpu_info ) ;
return - ENOMEM ;
}
2012-09-13 12:06:52 +00:00
gnttab_init ( ) ;
if ( ! xen_initial_domain ( ) )
xenbus_probe ( NULL ) ;
2013-09-09 11:35:26 +00:00
/*
* Making sure board specific code will not set up ops for
* cpu idle and cpu freq .
*/
disable_cpuidle ( ) ;
disable_cpufreq ( ) ;
2014-01-30 12:52:59 +00:00
xen_init_IRQ ( ) ;
if ( request_percpu_irq ( xen_events_irq , xen_arm_callback ,
" events " , & xen_vcpu ) ) {
pr_err ( " Error request IRQ %d \n " , xen_events_irq ) ;
return - EINVAL ;
}
xen_percpu_init ( ) ;
register_cpu_notifier ( & xen_cpu_notifier ) ;
2013-05-08 11:59:01 +00:00
return 0 ;
}
2014-01-30 12:52:59 +00:00
early_initcall ( xen_guest_init ) ;
2013-05-08 11:59:01 +00:00
/*
 * Install the Xen power-off and restart handlers, but only when this
 * kernel is actually running as a Xen guest.
 */
static int __init xen_pm_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	pm_power_off = xen_power_off;
	arm_pm_restart = xen_restart;

	return 0;
}
late_initcall(xen_pm_init);
2012-09-14 13:37:32 +00:00
2012-11-06 17:06:52 -05:00
/* Hypercall stubs implemented in hypervisor.S; exported for modules. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(privcmd_call);