/*
 * Fallback functions when the main IOMMU code is not compiled in.
 * This code is roughly equivalent to i386.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/gart.h>
#include <asm/processor.h>
#include <asm/dma.h>

/*
 * Check that a bus address range [bus, bus + size) is reachable by the
 * device, i.e. fits under its DMA mask.  A NULL hwdev means there is no
 * restriction.  Returns 1 if usable, 0 otherwise; an overflow is only
 * logged when the mask is at least 32 bits (smaller masks overflow
 * routinely and the caller is expected to fall back to bounce buffers).
 */
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	/* No device or the whole range fits under the mask: OK. */
	if (!hwdev || bus + size <= *hwdev->dma_mask)
		return 1;

	if (*hwdev->dma_mask >= DMA_32BIT_MASK)
		printk(KERN_ERR
		       "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
		       name, (long long)bus, size,
		       (long long)*hwdev->dma_mask);
	return 0;
}
/*
 * "Map" a single buffer for streaming DMA.  Without an IOMMU the bus
 * address is simply the physical address of the buffer; we only have to
 * verify it is reachable by the device.  Returns bad_dma_address when
 * the buffer lies outside the device's DMA mask.
 */
static dma_addr_t
nommu_map_single(struct device *hwdev, void *ptr, size_t size,
		 int direction)
{
	dma_addr_t addr = virt_to_bus(ptr);

	return check_addr("map_single", hwdev, addr, size)
		? addr : bad_dma_address;
}
/*
 * Unmap a single streaming mapping.  Nothing was set up by
 * nommu_map_single(), so there is nothing to tear down.
 */
static void nommu_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
			       int direction)
{
}
/*
 * Map a set of buffers described by a scatterlist in streaming mode for
 * DMA.  This is the scatter-gather version of the pci_map_single
 * interface above.  Here the scatter-gather list elements are each
 * tagged with the appropriate dma address and length.  They are
 * obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements
 * (for example via virtual mapping capabilities).
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int nommu_map_sg ( struct device * hwdev , struct scatterlist * sg ,
2006-01-11 22:44:42 +01:00
int nents , int direction )
2005-04-16 15:20:36 -07:00
{
2007-07-24 12:39:27 +02:00
struct scatterlist * s ;
2006-01-11 22:44:42 +01:00
int i ;
2005-04-16 15:20:36 -07:00
2007-07-24 12:39:27 +02:00
for_each_sg ( sg , s , nents , i ) {
2007-10-22 20:02:46 +02:00
BUG_ON ( ! sg_page ( s ) ) ;
s - > dma_address = virt_to_bus ( sg_virt ( s ) ) ;
2006-01-11 22:44:42 +01:00
if ( ! check_addr ( " map_sg " , hwdev , s - > dma_address , s - > length ) )
return 0 ;
s - > dma_length = s - > length ;
}
return nents ;
}
/*
 * Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
/*
 * Unmap a scatter-gather mapping.  nommu_map_sg() allocated nothing,
 * so this is a no-op.
 */
static void nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
			   int nents, int dir)
{
}
const struct dma_mapping_ops nommu_dma_ops = {
2006-01-11 22:44:42 +01:00
. map_single = nommu_map_single ,
. unmap_single = nommu_unmap_single ,
. map_sg = nommu_map_sg ,
. unmap_sg = nommu_unmap_sg ,
. is_phys = 1 ,
} ;
/*
 * Install the no-IOMMU DMA operations as the fallback, unless some
 * other IOMMU implementation has already registered its ops.
 */
void __init no_iommu_init(void)
{
	if (dma_ops)
		return;		/* a real IOMMU driver got there first */

	force_iommu = 0;	/* no HW IOMMU */
	dma_ops = &nommu_dma_ops;
}