/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory.  On TILEPro this is
 * uncached memory; on TILE-Gx it is hash-for-home memory.
 */
#ifdef __tilepro__
#define PAGE_HOME_DMA PAGE_HOME_UNCACHED
#else
#define PAGE_HOME_DMA PAGE_HOME_HASH
#endif

static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t gfp,
				     struct dma_attrs *attrs)
{
	u64 dma_mask = (dev && dev->coherent_dma_mask) ?
		dev->coherent_dma_mask : DMA_BIT_MASK(32);
	int node = dev ? dev_to_node(dev) : 0;
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	gfp |= __GFP_ZERO;

	/*
	 * If the mask specifies that the memory be in the first 4 GB, then
	 * we force the allocation to come from the DMA zone.  We also
	 * force the node to 0 since that's the only node where the DMA
	 * zone isn't empty.  If the mask size is smaller than 32 bits, we
	 * may still not be able to guarantee a suitable memory address, in
	 * which case we will return NULL.  But such devices are uncommon.
	 */
	if (dma_mask <= DMA_BIT_MASK(32)) {
		gfp |= GFP_DMA;
		node = 0;
	}

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);
	if (addr + size > dma_mask) {
		__homecache_free_pages(pg, order);
		return NULL;
	}

	*dma_handle = addr;

	return page_address(pg);
}

/*
 * Free memory that was allocated with tile_dma_alloc_coherent.
 */
static void tile_dma_free_coherent(struct device *dev, size_t size,
				   void *vaddr, dma_addr_t dma_handle,
				   struct dma_attrs *attrs)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}

/*
 * The map routines "map" the specified address range for DMA
 * accesses.  The memory belongs to the device after this call is
 * issued, until it is unmapped with dma_unmap_single.
 *
 * We don't need to do any mapping; we just flush the address range
 * out of the cache and return a DMA address.
 *
 * The unmap routines do whatever is necessary before the processor
 * accesses the memory again, and must be called before the driver
 * touches the memory.  We can get away with a cache invalidate if we
 * can count on nothing having been touched.
 */
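
/*
 * Illustrative sketch of the streaming-DMA usage these rules describe,
 * using the generic dma_map_single()/dma_unmap_single() wrappers that
 * end up in the map_page/unmap_page hooks below ("buf" and "len" are
 * hypothetical driver names):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... tell the device to DMA from "handle" ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * Per the rules above, the CPU must not touch "buf" between map and
 * unmap.
 */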

/* Set up a single page for DMA access. */
static void __dma_prep_page(struct page *page, unsigned long offset,
			    size_t size, enum dma_data_direction direction)
{
	/*
	 * Flush the page from cache if necessary.
	 * On tilegx, data is delivered to hash-for-home L3; on tilepro,
	 * data is delivered direct to memory.
	 *
	 * NOTE: If we were just doing DMA_TO_DEVICE we could optimize
	 * this to be a "flush" not a "finv" and keep some of the
	 * state in cache across the DMA operation, but it doesn't seem
	 * worth creating the necessary flush_buffer_xxx() infrastructure.
	 */
	int home = page_home(page);

	switch (home) {
	case PAGE_HOME_HASH:
#ifdef __tilegx__
		return;
#endif
		break;
	case PAGE_HOME_UNCACHED:
#ifdef __tilepro__
		return;
#endif
		break;
	case PAGE_HOME_IMMUTABLE:
		/* Should be going to the device only. */
		BUG_ON(direction == DMA_FROM_DEVICE ||
		       direction == DMA_BIDIRECTIONAL);
		return;
	case PAGE_HOME_INCOHERENT:
		/* Incoherent anyway, so no need to work hard here. */
		return;
	default:
		BUG_ON(home < 0 || home >= NR_CPUS);
		break;
	}

	homecache_finv_page(page);

#ifdef DEBUG_ALIGNMENT
	/* Warn if the region isn't cacheline aligned. */
	if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1)))
		pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n",
			PFN_PHYS(page_to_pfn(page)) + offset, size);
#endif
}

/* Make the page ready to be read by the core. */
static void __dma_complete_page(struct page *page, unsigned long offset,
				size_t size, enum dma_data_direction direction)
{
#ifdef __tilegx__
	switch (page_home(page)) {
	case PAGE_HOME_HASH:
		/* I/O device delivered data the way the cpu wanted it. */
		break;
	case PAGE_HOME_INCOHERENT:
		/* Incoherent anyway, so no need to work hard here. */
		break;
	case PAGE_HOME_IMMUTABLE:
		/* Extra read-only copies are not a problem. */
		break;
	default:
		/* Flush the bogus hash-for-home I/O entries to memory. */
		homecache_finv_map_page(page, PAGE_HOME_HASH);
		break;
	}
#endif
}

static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size,
				enum dma_data_direction direction)
{
	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
	unsigned long offset = dma_addr & (PAGE_SIZE - 1);
	size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

	while (size != 0) {
		__dma_prep_page(page, offset, bytes, direction);
		size -= bytes;
		++page;
		offset = 0;
		bytes = min((size_t)PAGE_SIZE, size);
	}
}

static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
				    enum dma_data_direction direction)
{
	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
	unsigned long offset = dma_addr & (PAGE_SIZE - 1);
	size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

	while (size != 0) {
		__dma_complete_page(page, offset, bytes, direction);
		size -= bytes;
		++page;
		offset = 0;
		bytes = min((size_t)PAGE_SIZE, size);
	}
}

static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			   int nents, enum dma_data_direction direction,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}

	return nents;
}

static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			      int nents, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_complete_pa_range(sg->dma_address, sg->length,
					direction);
	}
}

static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	BUG_ON(!valid_dma_direction(direction));

	BUG_ON(offset + size > PAGE_SIZE);
	__dma_prep_page(page, offset, size, direction);

	return page_to_pa(page) + offset;
}

static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	BUG_ON(!valid_dma_direction(direction));

	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
			    dma_address & (PAGE_SIZE - 1), size, direction);
}

static void tile_dma_sync_single_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 size_t size,
					 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	__dma_complete_pa_range(dma_handle, size, direction);
}

static void tile_dma_sync_single_for_device(struct device *dev,
					    dma_addr_t dma_handle, size_t size,
					    enum dma_data_direction direction)
{
	__dma_prep_pa_range(dma_handle, size, direction);
}

static void tile_dma_sync_sg_for_cpu(struct device *dev,
				     struct scatterlist *sglist, int nelems,
				     enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_cpu(dev, sg->dma_address,
					sg_dma_len(sg), direction);
	}
}

static void tile_dma_sync_sg_for_device(struct device *dev,
					struct scatterlist *sglist, int nelems,
					enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}

static inline int
tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline int
tile_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static struct dma_map_ops tile_default_dma_map_ops = {
	.alloc = tile_dma_alloc_coherent,
	.free = tile_dma_free_coherent,
	.map_page = tile_dma_map_page,
	.unmap_page = tile_dma_unmap_page,
	.map_sg = tile_dma_map_sg,
	.unmap_sg = tile_dma_unmap_sg,
	.sync_single_for_cpu = tile_dma_sync_single_for_cpu,
	.sync_single_for_device = tile_dma_sync_single_for_device,
	.sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
	.sync_sg_for_device = tile_dma_sync_sg_for_device,
	.mapping_error = tile_dma_mapping_error,
	.dma_supported = tile_dma_supported
};

struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
EXPORT_SYMBOL(tile_dma_map_ops);
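
/*
 * Minimal sketch of how these ops are reached; the generic
 * dma-mapping wrappers of this era resolve the per-device ops and call
 * the hook (the inline body below is paraphrased, not copied):
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t handle = ops->map_page(dev, page, offset, size,
 *					  direction, NULL);
 *
 * so tile_dma_map_page() runs for any device using the default ops.
 */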

/* Generic PCI DMA mapping functions */

static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t gfp,
					 struct dma_attrs *attrs)
{
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	gfp |= __GFP_ZERO;

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);

	*dma_handle = addr + get_dma_offset(dev);

	return page_address(pg);
}

/*
 * Free memory that was allocated with tile_pci_dma_alloc_coherent.
 */
static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_handle,
				       struct dma_attrs *attrs)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}

static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_prep_pa_range(sg->dma_address, sg->length, direction);

		sg->dma_address = sg->dma_address + get_dma_offset(dev);

#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}

	return nents;
}

static void tile_pci_dma_unmap_sg(struct device *dev,
				  struct scatterlist *sglist, int nents,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_complete_pa_range(sg->dma_address, sg->length,
					direction);
	}
}

static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
					unsigned long offset, size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	BUG_ON(!valid_dma_direction(direction));

	BUG_ON(offset + size > PAGE_SIZE);
	__dma_prep_page(page, offset, size, direction);

	return page_to_pa(page) + offset + get_dma_offset(dev);
}

static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				    size_t size,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	BUG_ON(!valid_dma_direction(direction));

	dma_address -= get_dma_offset(dev);

	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
			    dma_address & (PAGE_SIZE - 1), size, direction);
}

static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
					     dma_addr_t dma_handle,
					     size_t size,
					     enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	dma_handle -= get_dma_offset(dev);

	__dma_complete_pa_range(dma_handle, size, direction);
}

static void tile_pci_dma_sync_single_for_device(struct device *dev,
						dma_addr_t dma_handle,
						size_t size,
						enum dma_data_direction
						direction)
{
	dma_handle -= get_dma_offset(dev);

	__dma_prep_pa_range(dma_handle, size, direction);
}

static void tile_pci_dma_sync_sg_for_cpu(struct device *dev,
					 struct scatterlist *sglist,
					 int nelems,
					 enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_cpu(dev, sg->dma_address,
					sg_dma_len(sg), direction);
	}
}

static void tile_pci_dma_sync_sg_for_device(struct device *dev,
					    struct scatterlist *sglist,
					    int nelems,
					    enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}

static inline int
tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline int
tile_pci_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static struct dma_map_ops tile_pci_default_dma_map_ops = {
	.alloc = tile_pci_dma_alloc_coherent,
	.free = tile_pci_dma_free_coherent,
	.map_page = tile_pci_dma_map_page,
	.unmap_page = tile_pci_dma_unmap_page,
	.map_sg = tile_pci_dma_map_sg,
	.unmap_sg = tile_pci_dma_unmap_sg,
	.sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
	.sync_single_for_device = tile_pci_dma_sync_single_for_device,
	.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
	.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
	.mapping_error = tile_pci_dma_mapping_error,
	.dma_supported = tile_pci_dma_supported
};

struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
EXPORT_SYMBOL(gx_pci_dma_map_ops);
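
/*
 * Sketch of the bus-address arithmetic assumed above: get_dma_offset()
 * returns the per-device constant (typically kept in the device's
 * archdata on TILE-Gx) that converts a CPU physical address into a PCI
 * bus address, so the two address spaces relate as:
 *
 *	bus_addr = cpu_phys_addr + get_dma_offset(dev);
 *	cpu_phys_addr = bus_addr - get_dma_offset(dev);
 *
 * which is why every map routine adds the offset and every unmap/sync
 * routine subtracts it before touching the caches.
 */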

/* PCI DMA mapping functions for legacy PCI devices */

#ifdef CONFIG_SWIOTLB
static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t gfp,
					 struct dma_attrs *attrs)
{
	gfp |= GFP_DMA;
	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}

static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_addr,
				       struct dma_attrs *attrs)
{
	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}

static struct dma_map_ops pci_swiotlb_dma_ops = {
	.alloc = tile_swiotlb_alloc_coherent,
	.free = tile_swiotlb_free_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};

static struct dma_map_ops pci_hybrid_dma_ops = {
	.alloc = tile_swiotlb_alloc_coherent,
	.free = tile_swiotlb_free_coherent,
	.map_page = tile_pci_dma_map_page,
	.unmap_page = tile_pci_dma_unmap_page,
	.map_sg = tile_pci_dma_map_sg,
	.unmap_sg = tile_pci_dma_unmap_sg,
	.sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
	.sync_single_for_device = tile_pci_dma_sync_single_for_device,
	.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
	.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
	.mapping_error = tile_pci_dma_mapping_error,
	.dma_supported = tile_pci_dma_supported
};

struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
#else
struct dma_map_ops *gx_legacy_pci_dma_map_ops;
struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
#endif
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	/* Handle hybrid PCI devices with limited memory addressability. */
	if ((dma_ops == gx_pci_dma_map_ops ||
	     dma_ops == gx_hybrid_pci_dma_map_ops ||
	     dma_ops == gx_legacy_pci_dma_map_ops) &&
	    (mask <= DMA_BIT_MASK(32))) {
		if (dma_ops == gx_pci_dma_map_ops)
			set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);

		if (mask > dev->archdata.max_direct_dma_addr)
			mask = dev->archdata.max_direct_dma_addr;
	}

	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
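
/*
 * Illustrative usage (hypothetical driver code): a device that can only
 * address 32 bits of bus space negotiates its mask at probe time, which
 * on TILE-Gx triggers the hybrid-ops switch above:
 *
 *	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */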

#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
/*
 * The generic dma_get_required_mask() uses the highest physical address
 * (max_pfn) to provide the hint to PCI drivers regarding 32-bit or
 * 64-bit DMA configuration.  Since TILE-Gx has an I/O TLB/MMU that lets
 * DMA use the full 64-bit PCI address space without being limited by
 * the physical memory space, we always let PCI devices use 64-bit DMA
 * if they have that capability, by returning the 64-bit DMA mask here.
 * The device driver still has the option to use 32-bit DMA if the
 * device is not capable of 64-bit DMA.
 */
u64 dma_get_required_mask(struct device *dev)
{
	return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif
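
/*
 * Illustrative usage (hypothetical driver code): the standard mask
 * negotiation pattern that consumes the hint above, falling back to
 * 32-bit DMA only when 64-bit is unavailable:
 *
 *	if (dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32) &&
 *	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		using_64bit_dma = true;
 *	else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */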