/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/gart.h>
#include <asm/processor.h>
#include <asm/dma.h>

static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && bus + size > *hwdev->dma_mask) {
		if (*hwdev->dma_mask >= DMA_32BIT_MASK)
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}

static dma_addr_t
nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
		 int direction)
{
	dma_addr_t bus = paddr;
	WARN_ON(size == 0);
	if (!check_addr("map_single", hwdev, bus, size))
		return bad_dma_address;
	flush_write_buffers();
	return bus;
}
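
/*
 * Illustrative only (not part of the original file): a sketch of how a
 * driver-side caller typically ends up in nommu_map_single() via the
 * generic DMA API.  The device, buffer and length names are hypothetical;
 * this shows the expected call pattern, not a definitive implementation.
 */
#if 0 /* example only, never built */
static int example_map_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t bus;

	/* With no IOMMU present, 'bus' is simply the physical address. */
	bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bus))
		return -EIO;

	/* ... hand 'bus' to the device, then unmap once the DMA is done ... */
	dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
	return 0;
}
#endif
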
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, int direction)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}
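
/*
 * Illustrative only (not part of the original file): a driver-side
 * scatter-gather mapping that lands in nommu_map_sg() when no IOMMU is
 * compiled in.  The names are hypothetical; the point is the dma_map_sg()
 * plus sg_dma_address()/sg_dma_len() pattern described in the comment
 * above the function.
 */
#if 0 /* example only, never built */
static int example_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nents)
{
	struct scatterlist *s;
	int i, mapped;

	mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -EIO;

	for_each_sg(sglist, s, mapped, i) {
		/* With nommu_map_sg() each bus address is just sg_phys(s). */
		pr_debug("sg %d: bus %Lx len %u\n", i,
			 (long long)sg_dma_address(s), sg_dma_len(s));
	}

	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
	return 0;
}
#endif
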
/*
 * Keep the historical behaviour of each architecture: i386 never reported
 * a mapping error from this path, while x86_64 flags bad_dma_address.
 */
static int nommu_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	return (dma_addr == bad_dma_address);
#endif
}

const struct dma_mapping_ops nommu_dma_ops = {
	.map_single = nommu_map_single,
	.map_sg = nommu_map_sg,
	.mapping_error = nommu_mapping_error,
	.is_phys = 1,
};
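
/*
 * For reference (illustrative, not from this file): the generic x86 DMA API
 * wrappers dispatch through the dma_ops pointer, so once no_iommu_init()
 * below installs nommu_dma_ops, streaming mappings reduce to the identity
 * translations above.  Roughly:
 *
 *	dma_map_single(hwdev, ptr, size, dir)
 *		-> dma_ops->map_single(hwdev, virt_to_phys(ptr), size, dir);
 */
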
void __init no_iommu_init(void)
{
	if (dma_ops)
		return;

	force_iommu = 0; /* no HW IOMMU */
	dma_ops = &nommu_dma_ops;
}