// SPDX-License-Identifier: GPL-2.0-only
#include <linux/cpu.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

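/*
 * Allocate the pages backing the swiotlb bounce buffer.  When any memory
 * range starts below the 32-bit boundary, constrain the allocation to
 * ZONE_DMA32 (or ZONE_DMA when DMA32 is not configured), presumably so
 * that devices with 32-bit DMA masks can still reach the buffer.
 */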
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	phys_addr_t base;
	gfp_t flags = __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	u64 i;

	for_each_mem_range(i, &base, NULL) {
		if (base < (phys_addr_t)0xffffffff) {
			if (IS_ENABLED(CONFIG_ZONE_DMA32))
				flags |= __GFP_DMA32;
			else
				flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

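/* Whether Xen implements GNTTABOP_cache_flush; probed once in xen_mm_init(). */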
static bool hypercall_cflush = false;

/* buffers in highmem or foreign pages cannot cross page boundaries */
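/*
 * Walk the buffer one XEN_PAGE_SIZE chunk at a time and ask Xen to clean
 * or invalidate the cache for each chunk via the GNTTABOP_cache_flush
 * grant table operation.
 */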
static void dma_cache_maint(struct device *dev, dma_addr_t handle,
			    size_t size, u32 op)
{
	struct gnttab_cache_flush cflush;

	cflush.offset = xen_offset_in_page(handle);
	cflush.op = op;
	handle &= XEN_PAGE_MASK;

	do {
		cflush.a.dev_bus_addr = dma_to_phys(dev, handle);

		if (size + cflush.offset > XEN_PAGE_SIZE)
			cflush.length = XEN_PAGE_SIZE - cflush.offset;
		else
			cflush.length = size;

		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		cflush.offset = 0;
		handle += cflush.length;
		size -= cflush.length;
	} while (size);
}

/*
 * Dom0 is mapped 1:1, and while a Linux page can span multiple Xen pages,
 * it is not possible for it to contain a mix of local and foreign Xen
 * pages.  Calling pfn_valid on a foreign mfn will always return false, so
 * if pfn_valid returns true the page is local and we can use the native
 * dma-direct functions, otherwise we call the Xen specific version.
 */
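/*
 * A rough sketch of the local-vs-foreign dispatch the generic swiotlb-xen
 * code is expected to perform (see drivers/xen/swiotlb-xen.c for the
 * authoritative version):
 *
 *	if (pfn_valid(PFN_DOWN(dma_to_phys(dev, handle))))
 *		arch_sync_dma_for_cpu(dma_to_phys(dev, handle), size, dir);
 *	else
 *		xen_dma_sync_for_cpu(dev, handle, size, dir);
 */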
void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
			  size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
}

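/*
 * Before the device touches the buffer: clean the cache, except for
 * DMA_FROM_DEVICE transfers, where an invalidate is used instead.
 */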
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
			     size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE)
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
	else
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_CLEAN);
}

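/*
 * Report whether a buffer at @phys / @dev_addr has to be bounced through
 * the swiotlb before @dev can DMA to or from it.
 */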
bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
	unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr));

	/*
	 * The swiotlb buffer should be used if
	 *	- Xen doesn't have the cache flush hypercall
	 *	- The Linux page refers to foreign memory
	 *	- The device doesn't support coherent DMA requests
	 *
	 * The Linux page may span multiple Xen pages, although it's not
	 * possible to have a mix of local and foreign Xen pages.
	 * Furthermore, range_straddles_page_boundary() already checks
	 * whether the buffer is physically contiguous in host RAM.
	 *
	 * Therefore we only need to check the first Xen page to know if we
	 * require a bounce buffer because the device doesn't support coherent
	 * memory and we are not able to flush the cache.
	 */
	return (!hypercall_cflush && (xen_pfn != bfn) &&
		!dev_is_dma_coherent(dev));
}

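/*
 * Dom0 is mapped 1:1, so a physically contiguous allocation is already
 * contiguous in bus addresses; just hand back its physical start address.
 */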
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}

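/*
 * Nothing to tear down: xen_create_contiguous_region() does not modify
 * any mappings.
 */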
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}

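/*
 * Bring up swiotlb-xen when xen_swiotlb_detect() says it is needed, and
 * probe for the GNTTABOP_cache_flush hypercall with a zero-length flush,
 * recording the result in hypercall_cflush for xen_arch_need_swiotlb().
 */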
static int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;
	int rc;

	if (!xen_swiotlb_detect())
		return 0;

	rc = xen_swiotlb_init();
	/* we can work with the default swiotlb */
	if (rc < 0 && rc != -EEXIST)
		return rc;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);