/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This can be sized dynamically, but we will do this
 * only when we have a guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
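
/*
 * Probe one OF "iommu" node: map the IOMMU registers, enable a 256MB
 * DVMA window starting at IOMMU_START, allocate and clear the iopte
 * page table, and set up the allocation bitmap (with cache coloring
 * on HyperSparc).
 */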
static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES * sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES >> 3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES >> 3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES * sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}
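
/*
 * Walk the device tree, set up an iommu_struct for every "iommu" node's
 * platform device, and propagate the archdata to its children.
 */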
static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte * sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}
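
/*
 * Allocate npages consecutive DVMA pages from the usemap, point the
 * matching ioptes at the given (contiguous) struct pages, and return
 * the bus address of the first page.
 */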
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}
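
/*
 * Map one kernel-virtual buffer for streaming DMA; the byte offset into
 * the first page is preserved in the returned bus address.
 */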
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}
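
/*
 * The *_gflush variants are used when flush_page_for_dma() flushes the
 * whole cache regardless of its argument, so one call suffices; the
 * *_pflush variants flush each page of the buffer individually before
 * it is mapped.
 */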
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long)vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}

static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long)page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}
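
/*
 * Undo iommu_get_one(): clear the ioptes behind the bus address range,
 * invalidate the per-page IOTLB entries, and return the range to the
 * usemap.
 */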
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
		sg->dma_address = 0x21212121;
		sg = sg_next(sg);
	}
}

#ifdef CONFIG_SBUS
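/*
 * Set up a consistent ("coherent") mapping: the pages backing the
 * kernel buffer at va are remapped at addr with dvma_prot (uncacheable
 * except on Viking-MXCC and HyperSparc), matching ioptes are installed
 * with ioperm_noc, and the DVMA base address is returned through *pba.
 */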
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}
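
/*
 * Tear down a consistent mapping: clear the ioptes covering the DVMA
 * range, flush the CPU TLBs, invalidate the IOMMU, and free the range
 * in the usemap.  The CPU-side PTEs set up by iommu_map_dma_area() are
 * not touched here.
 */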
static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif
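
/* DMA operation tables; one of them is selected in ld_mmu_iommu(). */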
static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_gflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};

static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_pflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};
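
/*
 * Install the IOMMU-based DMA ops at boot and choose the
 * consistent-mapping attributes: cacheable on Viking-MXCC and
 * HyperSparc, uncacheable PTEs/IOPTEs on everything else.
 */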
void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter what page it is */
		sparc32_dma_ops = &iommu_dma_gflush_ops;
	} else {
		sparc32_dma_ops = &iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}