2005-04-16 15:20:36 -07:00
/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
# include <linux/mm.h>
# include <linux/init.h>
# include <linux/pci.h>
# include <linux/string.h>
2006-06-26 13:59:05 +02:00
# include <linux/dma-mapping.h>
2007-07-24 12:39:27 +02:00
# include <linux/scatterlist.h>
2006-06-26 13:59:05 +02:00
2008-07-11 10:23:42 +09:00
# include <asm/iommu.h>
2005-04-16 15:20:36 -07:00
# include <asm/processor.h>
2006-01-11 22:44:42 +01:00
# include <asm/dma.h>
2005-04-16 15:20:36 -07:00
2006-01-11 22:44:42 +01:00
/*
 * Check whether a bus address/size range is reachable through the
 * device's DMA mask.  Returns 1 when the range is usable, 0 when it
 * overflows the mask.  The overflow is logged only for devices that
 * claim at least 32-bit DMA capability, since narrower masks are
 * expected to bounce.
 */
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	/* No device, or the whole range fits the mask: nothing to do. */
	if (!hwdev || is_buffer_dma_capable(*hwdev->dma_mask, bus, size))
		return 1;

	if (*hwdev->dma_mask >= DMA_32BIT_MASK)
		printk(KERN_ERR
			"nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
			name, (long long)bus, size,
			(long long)*hwdev->dma_mask);
	return 0;
}
2005-04-16 15:20:36 -07:00
2006-01-11 22:44:42 +01:00
static dma_addr_t
2008-04-19 19:19:56 +02:00
nommu_map_single ( struct device * hwdev , phys_addr_t paddr , size_t size ,
2006-01-11 22:44:42 +01:00
int direction )
{
2008-04-19 19:19:56 +02:00
dma_addr_t bus = paddr ;
2008-04-08 13:20:49 -03:00
WARN_ON ( size = = 0 ) ;
2006-01-11 22:44:42 +01:00
if ( ! check_addr ( " map_single " , hwdev , bus , size ) )
return bad_dma_address ;
2008-04-08 13:20:46 -03:00
flush_write_buffers ( ) ;
2006-01-11 22:44:42 +01:00
return bus ;
2005-04-16 15:20:36 -07:00
}
2006-01-11 22:44:42 +01:00
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA . This is the scatter - gather version of the
* above pci_map_single interface . Here the scatter gather list
* elements are each tagged with the appropriate dma address
* and length . They are obtained via sg_dma_ { address , length } ( SG ) .
*
* NOTE : An implementation may be able to use a smaller number of
* DMA address / length pairs than there are SG table elements .
* ( for example via virtual mapping capabilities )
* The routine returns the number of addr / length pairs actually
* used , at most nents .
*
* Device ownership issues as mentioned above for pci_map_single are
* the same here .
*/
2007-07-21 17:11:23 +02:00
static int nommu_map_sg ( struct device * hwdev , struct scatterlist * sg ,
2006-01-11 22:44:42 +01:00
int nents , int direction )
2005-04-16 15:20:36 -07:00
{
2007-07-24 12:39:27 +02:00
struct scatterlist * s ;
2006-01-11 22:44:42 +01:00
int i ;
2005-04-16 15:20:36 -07:00
2008-04-08 13:20:49 -03:00
WARN_ON ( nents = = 0 | | sg [ 0 ] . length = = 0 ) ;
2007-07-24 12:39:27 +02:00
for_each_sg ( sg , s , nents , i ) {
2007-10-22 20:02:46 +02:00
BUG_ON ( ! sg_page ( s ) ) ;
2008-04-08 13:20:47 -03:00
s - > dma_address = sg_phys ( s ) ;
2006-01-11 22:44:42 +01:00
if ( ! check_addr ( " map_sg " , hwdev , s - > dma_address , s - > length ) )
return 0 ;
s - > dma_length = s - > length ;
}
2008-04-08 13:20:46 -03:00
flush_write_buffers ( ) ;
2006-01-11 22:44:42 +01:00
return nents ;
}
2005-04-16 15:20:36 -07:00
2008-08-19 16:32:43 +02:00
/*
 * Free a coherent allocation made by dma_generic_alloc_coherent().
 * The dma_addr needs no teardown here since the mapping was identity.
 */
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_addr)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
2008-07-25 19:44:49 -07:00
struct dma_mapping_ops nommu_dma_ops = {
2008-09-24 20:48:35 +09:00
. alloc_coherent = dma_generic_alloc_coherent ,
2008-08-19 16:32:43 +02:00
. free_coherent = nommu_free_coherent ,
2006-01-11 22:44:42 +01:00
. map_single = nommu_map_single ,
. map_sg = nommu_map_sg ,
. is_phys = 1 ,
} ;
2005-04-16 15:20:36 -07:00
2006-01-11 22:44:42 +01:00
/*
 * Install the no-IOMMU DMA operations as a fallback.  Does nothing if
 * another IOMMU implementation has already registered its dma_ops.
 */
void __init no_iommu_init(void)
{
	if (dma_ops)
		return;

	/* There is no hardware IOMMU, so forcing one makes no sense. */
	force_iommu = 0;
	dma_ops = &nommu_dma_ops;
}