/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
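/*
 * For example (hypothetical caller, illustrative address/size only),
 * a driver mapping a device register block that is not page-aligned:
 *
 *	void __iomem *regs = __ioremap(0xfd000004, 0x100, 0);
 *
 * maps the whole page at 0xfd000000 and hands back the mapped base
 * plus the 0x4 offset; the cookie is then used with readl()/writel()
 * and later released with __iounmap(regs).
 */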
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
	/*
	 * If we're in the fixed PCI memory range, mapping through page
	 * tables is not only pointless, but also fundamentally broken.
	 * Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memory_fixed_range(phys_addr, size))
		return (void __iomem *)phys_addr;
#if !defined(CONFIG_PMB_FIXED)
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;
#endif

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
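	/*
	 * E.g. (illustrative values, assuming 4 KiB pages): a request of
	 * phys_addr = 0x10000234, size = 0x100 gives offset = 0x234,
	 * phys_addr = 0x10000000 and size = 0x1000, so the full page is
	 * mapped and the offset is re-applied to the returned cookie below.
	 */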
	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
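	/*
	 * Keep the original VMA base around: addr, phys_addr and size
	 * may be advanced past any PMB-covered portion below, while
	 * vunmap() and the returned cookie both need the VMA start.
	 */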
	orig_addr = addr = (unsigned long)area->addr;
#ifdef CONFIG_PMB
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
	if (unlikely(size >= 0x1000000)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr      += mapped;
			phys_addr += mapped;
			size      -= mapped;
		}
	}
#endif
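	/*
	 * Illustrative split (assuming the PMB can cover the first part
	 * with a single 16 MB entry): for size = 0x1100000, pmb_remap()
	 * may return 0x1000000, leaving the remaining 0x100000 bytes to
	 * be mapped through page tables below.
	 */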
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
	if (likely(size))
		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	unsigned long seg = PXSEG(vaddr);
	struct vm_struct *p;
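	/*
	 * Only P3 addresses have a VMA and page tables behind them;
	 * P1/P2 segment addresses and the fixed PCI window are handed
	 * out by __ioremap() as-is, so there is nothing to tear down.
	 */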
	if (seg < P3SEG || vaddr >= P3_ADDR_MAX)
		return;
	if (is_pci_memory_fixed_range(vaddr, 0))
		return;
#ifdef CONFIG_PMB
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 *
	 *                                      -- PFM.
	 */
	pmb_unmap(vaddr);
#endif
	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);