/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
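
/*
 * Fill in one PMD entry's worth of kernel PTEs: map the range starting
 * at 'address' (offset within the enclosing PMD span) to the physical
 * pages at 'phys_addr', using an uncached kernel pgprot combined with
 * the caller-supplied flags. Hitting an already-present PTE is a bug.
 */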
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
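
/*
 * Walk the PMDs covering one PGD entry, allocating PTE tables with
 * pte_alloc_kernel() as needed and handing each PMD-sized chunk to
 * remap_area_pte(). Returns -ENOMEM if a PTE table cannot be allocated.
 */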
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
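
/*
 * Top-level page table walk for ioremap: for each PGD entry covering
 * [address, address + size) allocate the PUD and PMD levels against
 * init_mm and populate the leaf PTEs, flushing caches before the walk
 * and the TLB once the mapping has been built.
 */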
int remap_area_pages(unsigned long address, unsigned long phys_addr,
		     unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;

		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	flush_tlb_all();
	return error;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return (void __iomem *)phys_to_virt(phys_addr);

	/*
	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
	 * mapped at the end of the address space (typically 0xfd000000)
	 * in a non-translatable area, so mapping through page tables for
	 * this area is not only pointless, but also fundamentally
	 * broken. Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
		return (void __iomem *)phys_addr;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_32BIT
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
	if (unlikely(size >= 0x1000000)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr      += mapped;
			phys_addr += mapped;
			size      -= mapped;
		}
	}
#endif

	if (likely(size))
		if (remap_area_pages(addr, phys_addr, size, flags)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);
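
/*
 * Typical driver usage (a minimal sketch; the base address and size
 * below are hypothetical, and the ioremap()/iounmap() wrappers from
 * <asm/io.h> are assumed to funnel into __ioremap()/__iounmap()):
 *
 *	void __iomem *regs = ioremap(0xfe600000, 0x100);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x04);
 *	...
 *	iounmap(regs);
 */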
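
/*
 * Tear down a mapping set up by __ioremap(). Addresses below P3SEG
 * (identity-mapped P1/P2 returns) and PCI window addresses were never
 * page-table mapped, so they are left alone; anything else has its PMB
 * entries purged (32-bit mode) and its vm_struct removed and freed.
 */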
void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
		return;

#ifdef CONFIG_32BIT
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 *
	 *	-- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);