#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/rwlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
/* One contiguous pfn -> mfn range, kept in an rbtree indexed by pfn. */
struct xen_p2m_entry {
	unsigned long pfn;
	unsigned long mfn;
	unsigned long nr_pages;
	struct rb_node rbnode_phys;
};
/* Serialises lookups and updates of the phys_to_mach rbtree. */
static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
EXPORT_SYMBOL_GPL(phys_to_mach);
/*
 * Insert a new pfn -> mfn range into the rbtree.  Fails with -EINVAL if
 * an entry with the same starting pfn is already present.
 */
static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &phys_to_mach.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);

		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->pfn < entry->pfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_phys, parent, link);
	rb_insert_color(&new->rbnode_phys, &phys_to_mach);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
	return rc;
}
/*
 * Look up the mfn backing @pfn.  Ranges are stored, so any pfn inside a
 * registered [pfn, pfn + nr_pages) interval resolves by offsetting the
 * entry's base mfn.  Returns INVALID_P2M_ENTRY if no range covers @pfn.
 */
unsigned long __pfn_to_mfn(unsigned long pfn)
{
	struct rb_node *n = phys_to_mach.rb_node;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
		if (entry->pfn <= pfn &&
				entry->pfn + entry->nr_pages > pfn) {
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return entry->mfn + (pfn - entry->pfn);
		}
		if (pfn < entry->pfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);
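/*
 * Example (an illustrative sketch, not part of the original file): a
 * typical caller translates a guest pfn into a machine frame before
 * building a bus address for device DMA.  The dev_bus_addr variable
 * below is hypothetical.
 *
 *	unsigned long mfn = __pfn_to_mfn(page_to_pfn(page));
 *
 *	if (mfn != INVALID_P2M_ENTRY)
 *		dev_bus_addr = (dma_addr_t)mfn << XEN_PAGE_SHIFT;
 */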
/*
 * Record the machine frames of successfully mapped grant references so
 * that later pfn -> mfn lookups resolve correctly.
 */
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (map_ops[i].status)
			continue;
		set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
/* Drop the p2m entries recorded by set_foreign_p2m_mapping(). */
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
			      struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		set_phys_to_machine(unmap_ops[i].host_addr >> XEN_PAGE_SHIFT,
				    INVALID_P2M_ENTRY);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
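/*
 * Example (an illustrative sketch, not part of the original file): the
 * grant-table code is the expected caller of the two helpers above,
 * recording mappings after a successful GNTTABOP_map_grant_ref hypercall
 * and clearing them again on unmap:
 *
 *	rc = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
 *	if (rc == 0)
 *		set_foreign_p2m_mapping(map_ops, NULL, pages, count);
 *	...
 *	clear_foreign_p2m_mapping(unmap_ops, NULL, pages, count);
 */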
/*
 * Register a pfn -> mfn range of @nr_pages pages, or remove the range
 * covering @pfn when @mfn is INVALID_P2M_ENTRY.
 */
bool __set_phys_to_machine_multi(unsigned long pfn,
			unsigned long mfn, unsigned long nr_pages)
{
	int rc;
	unsigned long irqflags;
	struct xen_p2m_entry *p2m_entry;
	struct rb_node *n = phys_to_mach.rb_node;

	if (mfn == INVALID_P2M_ENTRY) {
		/* Removal: find the range covering @pfn and erase it. */
		write_lock_irqsave(&p2m_lock, irqflags);
		while (n) {
			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
			if (p2m_entry->pfn <= pfn &&
					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
				rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
				write_unlock_irqrestore(&p2m_lock, irqflags);
				kfree(p2m_entry);
				return true;
			}
			if (pfn < p2m_entry->pfn)
				n = n->rb_left;
			else
				n = n->rb_right;
		}
		write_unlock_irqrestore(&p2m_lock, irqflags);
		return true;
	}

	p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
	if (!p2m_entry) {
		pr_warn("cannot allocate xen_p2m_entry\n");
		return false;
	}
	p2m_entry->pfn = pfn;
	p2m_entry->nr_pages = nr_pages;
	p2m_entry->mfn = mfn;

	write_lock_irqsave(&p2m_lock, irqflags);
	rc = xen_add_phys_to_mach_entry(p2m_entry);
	if (rc < 0) {
		write_unlock_irqrestore(&p2m_lock, irqflags);
		kfree(p2m_entry);	/* never inserted; don't leak it */
		return false;
	}
	write_unlock_irqrestore(&p2m_lock, irqflags);
	return true;
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);
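/*
 * Example (an illustrative sketch, not part of the original file):
 * registering a four-page range and removing it again.  Passing
 * INVALID_P2M_ENTRY as the mfn deletes whichever range covers the pfn.
 *
 *	if (!__set_phys_to_machine_multi(pfn, mfn, 4))
 *		return -ENOMEM;
 *	...
 *	__set_phys_to_machine_multi(pfn, INVALID_P2M_ENTRY, 4);
 */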
static int p2m_init(void)
{
	rwlock_init(&p2m_lock);
	return 0;
}
arch_initcall(p2m_init);