#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
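
/*
 * Allocate the pages backing the swiotlb-xen bounce buffer.  If any RAM
 * lies below the 32-bit boundary, request ZONE_DMA memory so the buffer
 * stays usable for devices limited to 32-bit DMA addressing.
 */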
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
        struct memblock_region *reg;
        gfp_t flags = __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

        for_each_memblock(memory, reg) {
                if (reg->base < (phys_addr_t)0xffffffff) {
                        flags |= __GFP_DMA;
                        break;
                }
        }
        return __get_free_pages(flags, order);
}
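
/*
 * Cache maintenance is requested from Xen through the
 * GNTTABOP_cache_flush grant-table hypercall.  DMA_MAP maintenance runs
 * before the device touches the buffer (clean, or invalidate for
 * DMA_FROM_DEVICE); DMA_UNMAP maintenance runs afterwards (invalidate,
 * skipped for DMA_TO_DEVICE).  hypercall_cflush below records whether the
 * hypervisor actually implements this hypercall (probed in xen_mm_init()).
 */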
enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,
};

static bool hypercall_cflush = false;

/* functions called by SWIOTLB */
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
        struct gnttab_cache_flush cflush;
        unsigned long xen_pfn;
        size_t left = size;

        xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
        offset %= XEN_PAGE_SIZE;

        do {
                size_t len = left;

                /* buffers in highmem or foreign pages cannot cross page
                 * boundaries */
                if (len + offset > XEN_PAGE_SIZE)
                        len = XEN_PAGE_SIZE - offset;

                cflush.op = 0;
                cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
                cflush.offset = offset;
                cflush.length = len;

                if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
                        cflush.op = GNTTAB_CACHE_INVAL;
                if (op == DMA_MAP) {
                        if (dir == DMA_FROM_DEVICE)
                                cflush.op = GNTTAB_CACHE_INVAL;
                        else
                                cflush.op = GNTTAB_CACHE_CLEAN;
                }

                if (cflush.op)
                        HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

                offset = 0;
                xen_pfn++;
                left -= len;
        } while (left);
}
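
/*
 * Split the bus address into page frame and offset, then delegate to
 * dma_cache_maint(): dev_to_cpu performs the DMA_UNMAP maintenance,
 * cpu_to_dev the DMA_MAP maintenance.
 */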
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}
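
/*
 * Hooks used on the swiotlb-xen map/unmap paths.  No cache maintenance is
 * needed for coherent devices, or when the caller requested
 * DMA_ATTR_SKIP_CPU_SYNC.
 */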
void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
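
/*
 * sync_single_for_{cpu,device} counterparts: the same cache maintenance
 * as unmap/map, again skipped entirely for coherent devices.
 */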
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

bool xen_arch_need_swiotlb(struct device *dev,
                           phys_addr_t phys,
                           dma_addr_t dev_addr)
{
        unsigned int xen_pfn = XEN_PFN_DOWN(phys);
        unsigned int bfn = XEN_PFN_DOWN(dev_addr);

        /*
         * The swiotlb buffer should be used if
         *      - Xen doesn't have the cache flush hypercall
         *      - The Linux page refers to foreign memory
         *      - The device doesn't support coherent DMA request
         *
         * The Linux page may span multiple Xen pages, although it's not
         * possible to have a mix of local and foreign Xen pages.
         * Furthermore, range_straddles_page_boundary already checks
         * whether the buffer is physically contiguous in host RAM.
         *
         * Therefore we only need to check the first Xen page to know if we
         * require a bounce buffer because the device doesn't support coherent
         * memory and we are not able to flush the cache.
         */
        return (!hypercall_cflush && (xen_pfn != bfn) &&
                !is_device_dma_coherent(dev));
}
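
/*
 * Dom0 is assumed to be mapped 1:1 on ARM (see the comment below), so no
 * memory exchange with the hypervisor is required: the DMA handle is
 * simply the physical address that was passed in.
 */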
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
                                 dma_addr_t *dma_handle)
{
        if (!xen_initial_domain())
                return -EINVAL;

        /* we assume that dom0 is mapped 1:1 for now */
        *dma_handle = pstart;
        return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
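
/*
 * Nothing was exchanged when the region was created, so there is nothing
 * to undo here.
 */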
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
        return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

const struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);
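
/*
 * For the initial domain, switch the Xen DMA ops to swiotlb-xen and probe
 * for the cache flush hypercall by issuing an empty GNTTABOP_cache_flush:
 * any return value other than -ENOSYS means the hypervisor supports it.
 */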
int __init xen_mm_init(void)
{
        struct gnttab_cache_flush cflush;

        if (!xen_initial_domain())
                return 0;

        xen_swiotlb_init(1, false);
        xen_dma_ops = &xen_swiotlb_dma_ops;

        cflush.op = 0;
        cflush.a.dev_bus_addr = 0;
        cflush.offset = 0;
        cflush.length = 0;
        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
                hypercall_cflush = true;

        return 0;
}
arch_initcall(xen_mm_init);