// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also providing a mechanism to have contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0 pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>

#include <trace/events/swiotlb.h>

#define MAX_DMA_BITS 32

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}
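
/* Translate a guest physical address into a DMA address usable by @dev. */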
static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}
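
/* Translate a bus (machine) address back into a guest physical address. */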
static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}
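
/* Translate a device DMA address back into a guest physical address. */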
static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
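
/*
 * Return 1 if the machine frames backing [p, p + size) are not contiguous,
 * i.e. the range straddles a discontiguity in the guest's pseudo-physical
 * to machine mapping.
 */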
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}
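
/* Return whether dma_addr points into this domain's swiotlb bounce buffer. */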
static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return is_swiotlb_buffer(dev, paddr);
	return 0;
}

#ifdef CONFIG_X86
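/*
 * Make the swiotlb buffer usable for DMA under Xen: exchange its backing
 * pages for machine-contiguous, address-restricted regions, one
 * IO_TLB_SEGSIZE chunk at a time.
 */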
int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int rc;
	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	unsigned int i, dma_bits = order + PAGE_SHIFT;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
	BUG_ON(nslabs % IO_TLB_SEGSIZE);

	i = 0;
	do {
		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT), order,
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += IO_TLB_SEGSIZE;
	} while (i < nslabs);
	return 0;
}
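
/*
 * Allocate coherent memory for @dev. If the plain page allocation is not
 * machine contiguous or not reachable within the device's coherent DMA
 * mask, exchange it with Xen for a suitable contiguous region.
 */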
static void *
xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	u64 dma_mask = dev->coherent_dma_mask;
	int order = get_order(size);
	phys_addr_t phys;
	void *ret;

	/* Align the allocation to the Xen page size */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	ret = (void *)__get_free_pages(flags, get_order(size));
	if (!ret)
		return ret;
	phys = virt_to_phys(ret);

	*dma_handle = xen_phys_to_dma(dev, phys);
	if (*dma_handle + size - 1 > dma_mask ||
	    range_straddles_page_boundary(phys, size)) {
		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
				dma_handle) != 0)
			goto out_free_pages;
		SetPageXenRemapped(virt_to_page(ret));
	}

	memset(ret, 0, size);
	return ret;

out_free_pages:
	free_pages((unsigned long)ret, get_order(size));
	return NULL;
}
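
/*
 * Free memory obtained from xen_swiotlb_alloc_coherent(), returning any
 * region that was exchanged with Xen back to its original frames first.
 */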
static void
xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t phys = virt_to_phys(vaddr);
	int order = get_order(size);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
		return;

	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);

	free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_X86 */

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    !is_swiotlb_force_bounce(dev))
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size);

	map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}
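
/*
 * Make a mapped buffer coherent for CPU access: flush CPU caches on
 * non-coherent devices and sync the bounce buffer if this mapping was
 * bounced.
 */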
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}
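
/* Counterpart of xen_swiotlb_sync_single_for_cpu() for the device side. */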
static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}
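
/*
 * Map a scatter-gather list by mapping each segment with
 * xen_swiotlb_map_page(); on failure, unmap whatever was already mapped.
 */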
static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return -EIO;
}
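
/* Sync every segment of a scatterlist for CPU access. */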
static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}
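
/* Sync every segment of a scatterlist for device access. */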
static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, default_swiotlb_limit()) <= mask;
}
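
/*
 * DMA operations for devices that go through swiotlb-xen. On x86 the
 * coherent allocation hooks must be Xen-aware; on other architectures
 * plain dma-direct allocations are used.
 */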
const struct dma_map_ops xen_swiotlb_dma_ops = {
#ifdef CONFIG_X86
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
#else
	.alloc = dma_direct_alloc,
	.free = dma_direct_free,
#endif
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
	.max_mapping_size = swiotlb_max_mapping_size,
};