// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run on a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
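
/*
 * Illustrative example (hypothetical numbers, not part of the driver logic):
 * with XEN_PAGE_SHIFT == 12, the guest physical address 0x12345678 lies in
 * Xen PFN 0x12345 with an in-page offset of 0x678. If pfn_to_bfn(0x12345)
 * were to return the machine frame 0x89abc, the resulting bus address would
 * be (0x89abc << 12) | 0x678 == 0x89abc678. The helpers below perform
 * exactly this split, translate and recombine step, and its inverse.
 */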

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32

/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
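
/*
 * Returns nonzero if the machine frames backing the physical range
 * [p, p + size) are not contiguous, i.e. at least one pfn_to_bfn() step
 * does not advance by exactly one frame. Such a buffer cannot be handed
 * to a device as a single DMA region.
 */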
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return is_swiotlb_buffer(paddr);
	return 0;
}
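
/*
 * Exchange the swiotlb buffer with the hypervisor, IO_TLB_SEGSIZE slabs at
 * a time, for machine-contiguous regions addressable below 2^dma_bits,
 * widening dma_bits (up to MAX_DMA_BITS) whenever the exchange fails.
 */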
static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}

#define DEFAULT_NSLABS		ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
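
/*
 * Late (post-boot) initialization: grab pages for the software IO TLB,
 * exchange them for machine-contiguous memory under 4GB via
 * xen_swiotlb_fixup(), and register the buffer with the core swiotlb code.
 * On failure the requested size is halved (down to a 2MB minimum) and the
 * whole sequence is retried a few times.
 */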
int __ref xen_swiotlb_init(void)
{
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned long bytes = swiotlb_size_or_default();
	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
	unsigned int order, repeat = 3;
	int rc = -ENOMEM;
	char *start;

	if (io_tlb_default_mem != NULL) {
		pr_warn("swiotlb buffer already initialized\n");
		return -EEXIST;
	}

retry:
	m_ret = XEN_SWIOTLB_ENOMEM;
	order = get_order(bytes);

	/*
	 * Get IO TLB memory from any location.
	 */
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		start = (void *)xen_get_swiotlb_free_pages(order);
		if (start)
			break;
		order--;
	}
	if (!start)
		goto error;
	if (order != get_order(bytes)) {
		pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
		bytes = nslabs << IO_TLB_SHIFT;
	}

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(start, nslabs);
	if (rc) {
		free_pages((unsigned long)start, order);
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	rc = swiotlb_late_init_with_tbl(start, nslabs);
	if (rc)
		return rc;
	swiotlb_set_max_segment(PAGE_SIZE);
	return 0;
error:
	if (repeat--) {
		/* Min is 2MB */
		nslabs = max(1024UL, (nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	free_pages((unsigned long)start, order);
	return rc;
}
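
/*
 * Early-boot variant for x86: the buffer comes from memblock instead of the
 * page allocator, and any failure that survives the size-halving retries is
 * fatal, since there would be no bounce buffer to fall back to.
 */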
#ifdef CONFIG_X86
void __init xen_swiotlb_init_early(void)
{
	unsigned long bytes = swiotlb_size_or_default();
	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
	unsigned int repeat = 3;
	char *start;
	int rc;

retry:
	/*
	 * Get IO TLB memory from any location.
	 */
	start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (!start)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(start, nslabs);
	if (rc) {
		memblock_free(__pa(start), PAGE_ALIGN(bytes));
		if (repeat--) {
			/* Min is 2MB */
			nslabs = max(1024UL, (nslabs >> 1));
			bytes = nslabs << IO_TLB_SHIFT;
			pr_info("Lowering to %luMB\n", bytes >> 20);
			goto retry;
		}
		panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
	}

	if (swiotlb_init_with_tbl(start, nslabs, false))
		panic("Cannot allocate SWIOTLB buffer");
	swiotlb_set_max_segment(PAGE_SIZE);
}
#endif /* CONFIG_X86 */
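
/*
 * Coherent allocations first come from xen_alloc_coherent_pages(). If the
 * resulting buffer is not already machine-contiguous and within the
 * device's coherent DMA mask, its pages are exchanged with the hypervisor
 * for a contiguous region and the page is marked XenRemapped so that the
 * free path knows to undo the exchange.
 */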
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of the
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the dma address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = dma_to_phys(hwdev, *dma_handle);
	dev_addr = xen_phys_to_dma(hwdev, phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
		*dma_handle = phys_to_dma(hwdev, *dma_handle);
		SetPageXenRemapped(virt_to_page(ret));
	}
	memset(ret, 0, size);
	return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);
	struct page *page;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return the
	 * physical address */
	phys = xen_dma_to_phys(hwdev, dev_addr);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (is_vmalloc_addr(vaddr))
		page = vmalloc_to_page(vaddr);
	else
		page = virt_to_page(vaddr);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(page))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
				attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
};