/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
/*
* Generic mapping function ( not visible outside ) :
*/
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space . Needed when the kernel wants to access high addresses
* directly .
*
* NOTE ! We need to allow non - page - aligned mappings too : we will obviously
* have to convert them into an offset in a page - aligned mapping , but the
* caller shouldn ' t need to know that small detail .
*/
# define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
2005-06-30 14:48:40 +04:00
void __iomem * __ioremap ( phys_t phys_addr , phys_t size , unsigned long flags )
2005-04-17 02:20:36 +04:00
{
struct vm_struct * area ;
unsigned long offset ;
phys_t last_addr ;
void * addr ;
2006-12-08 13:38:04 +03:00
pgprot_t pgprot ;
2005-04-17 02:20:36 +04:00
phys_addr = fixup_bigphys_addr ( phys_addr , size ) ;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1 ;
if ( ! size | | last_addr < phys_addr )
return NULL ;
/*
* Map uncached objects in the low 512 mb of address space using KSEG1 ,
* otherwise map using page tables .
*/
if ( IS_LOW512 ( phys_addr ) & & IS_LOW512 ( last_addr ) & &
flags = = _CACHE_UNCACHED )
2005-06-30 14:48:40 +04:00
return ( void __iomem * ) CKSEG1ADDR ( phys_addr ) ;
2005-04-17 02:20:36 +04:00
/*
* Don ' t allow anybody to remap normal RAM that we ' re using . .
*/
if ( phys_addr < virt_to_phys ( high_memory ) ) {
char * t_addr , * t_end ;
struct page * page ;
t_addr = __va ( phys_addr ) ;
t_end = t_addr + ( size - 1 ) ;
for ( page = virt_to_page ( t_addr ) ; page < = virt_to_page ( t_end ) ; page + + )
if ( ! PageReserved ( page ) )
return NULL ;
}
2006-12-08 13:38:04 +03:00
pgprot = __pgprot ( _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
| __WRITEABLE | flags ) ;
2005-04-17 02:20:36 +04:00
/*
* Mappings have to be page - aligned
*/
offset = phys_addr & ~ PAGE_MASK ;
phys_addr & = PAGE_MASK ;
size = PAGE_ALIGN ( last_addr + 1 ) - phys_addr ;
/*
* Ok , go for it . .
*/
area = get_vm_area ( size , VM_IOREMAP ) ;
if ( ! area )
return NULL ;
addr = area - > addr ;
2006-12-08 13:38:04 +03:00
if ( ioremap_page_range ( ( unsigned long ) addr , ( unsigned long ) addr + size ,
phys_addr , pgprot ) ) {
2005-04-17 02:20:36 +04:00
vunmap ( addr ) ;
return NULL ;
}
2005-06-30 14:48:40 +04:00
return ( void __iomem * ) ( offset + ( char * ) addr ) ;
2005-04-17 02:20:36 +04:00
}
2005-02-10 15:19:59 +03:00
# define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
2005-04-17 02:20:36 +04:00
2006-10-19 17:21:47 +04:00
void __iounmap ( const volatile void __iomem * addr )
2005-04-17 02:20:36 +04:00
{
struct vm_struct * p ;
if ( IS_KSEG1 ( addr ) )
return ;
p = remove_vm_area ( ( void * ) ( PAGE_MASK & ( unsigned long __force ) addr ) ) ;
2005-02-10 15:19:59 +03:00
if ( ! p )
2005-04-17 02:20:36 +04:00
printk ( KERN_ERR " iounmap: bad address %p \n " , addr ) ;
kfree ( p ) ;
}
EXPORT_SYMBOL ( __ioremap ) ;
EXPORT_SYMBOL ( __iounmap ) ;