2005-04-17 02:20:36 +04:00
/*
* Re - map IO memory to kernel address space so that we can access it .
* This is needed for high PCI addresses that aren ' t mapped in the
* 640 k - 1 MB IO memory area on PC ' s
*
* ( C ) Copyright 1995 1996 Linus Torvalds
*/
2008-01-30 15:34:05 +03:00
# include <linux/bootmem.h>
2005-04-17 02:20:36 +04:00
# include <linux/init.h>
2006-10-01 10:29:17 +04:00
# include <linux/io.h>
2008-01-30 15:34:05 +03:00
# include <linux/module.h>
# include <linux/slab.h>
# include <linux/vmalloc.h>
2005-04-17 02:20:36 +04:00
# include <asm/cacheflush.h>
2008-01-30 15:34:05 +03:00
# include <asm/e820.h>
# include <asm/fixmap.h>
2005-04-17 02:20:36 +04:00
# include <asm/pgtable.h>
2008-01-30 15:34:05 +03:00
# include <asm/tlbflush.h>
2005-04-17 02:20:36 +04:00
2008-01-30 15:34:05 +03:00
#ifdef CONFIG_X86_64
/*
 * Translate a kernel virtual address (direct mapping or kernel-text
 * mapping) back into a physical address.
 */
unsigned long __phys_addr(unsigned long x)
{
	return (x >= __START_KERNEL_map) ?
		x - __START_KERNEL_map + phys_base : x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif
2008-01-30 15:34:06 +03:00
int page_is_ram ( unsigned long pagenr )
{
unsigned long addr , end ;
int i ;
for ( i = 0 ; i < e820 . nr_map ; i + + ) {
/*
* Not usable memory :
*/
if ( e820 . map [ i ] . type ! = E820_RAM )
continue ;
addr = ( e820 . map [ i ] . addr + PAGE_SIZE - 1 ) > > PAGE_SHIFT ;
end = ( e820 . map [ i ] . addr + e820 . map [ i ] . size ) > > PAGE_SHIFT ;
2008-01-30 15:34:06 +03:00
/*
* Sanity check : Some BIOSen report areas as RAM that
* are not . Notably the 640 - > 1 Mb area , which is the
* PCI BIOS area .
*/
if ( addr > = ( BIOS_BEGIN > > PAGE_SHIFT ) & &
end < ( BIOS_END > > PAGE_SHIFT ) )
continue ;
2008-01-30 15:34:06 +03:00
if ( ( pagenr > = addr ) & & ( pagenr < end ) )
return 1 ;
}
return 0 ;
}
2008-01-30 15:34:05 +03:00
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts between the identity mapping and an ioremap() of the same
 * physical range.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       pgprot_t prot)
{
	unsigned long last_addr = phys_addr + size - 1;
	unsigned long vaddr, npages;
	int level, err;

	/* No change for pages after the last mapping */
	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	vaddr = (unsigned long)__va(phys_addr);

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	/*
	 * Must use an address here and not struct page because the
	 * phys addr can be a in hole between nodes and not have a
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);
	if (!err)
		global_flush_tlb();

	return err;
}
2005-04-17 02:20:36 +04:00
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space . Needed when the kernel wants to access high addresses
* directly .
*
* NOTE ! We need to allow non - page - aligned mappings too : we will obviously
* have to convert them into an offset in a page - aligned mapping , but the
* caller shouldn ' t need to know that small detail .
*/
2008-01-30 15:34:06 +03:00
static void __iomem * __ioremap ( unsigned long phys_addr , unsigned long size ,
unsigned long flags )
2005-04-17 02:20:36 +04:00
{
2008-01-30 15:34:05 +03:00
void __iomem * addr ;
struct vm_struct * area ;
2005-04-17 02:20:36 +04:00
unsigned long offset , last_addr ;
2008-01-30 15:34:05 +03:00
pgprot_t pgprot ;
2005-04-17 02:20:36 +04:00
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1 ;
if ( ! size | | last_addr < phys_addr )
return NULL ;
/*
* Don ' t remap the low PCI / ISA area , it ' s always mapped . .
*/
if ( phys_addr > = ISA_START_ADDRESS & & last_addr < ISA_END_ADDRESS )
2008-01-30 15:34:05 +03:00
return ( __force void __iomem * ) phys_to_virt ( phys_addr ) ;
2005-04-17 02:20:36 +04:00
/*
* Don ' t allow anybody to remap normal RAM that we ' re using . .
*/
2008-01-30 15:34:06 +03:00
for ( offset = phys_addr > > PAGE_SHIFT ; offset < max_pfn_mapped & &
( offset < < PAGE_SHIFT ) < last_addr ; offset + + ) {
if ( page_is_ram ( offset ) )
return NULL ;
2005-04-17 02:20:36 +04:00
}
2008-01-30 15:34:05 +03:00
pgprot = MAKE_GLOBAL ( __PAGE_KERNEL | flags ) ;
2006-10-01 10:29:17 +04:00
2005-04-17 02:20:36 +04:00
/*
* Mappings have to be page - aligned
*/
offset = phys_addr & ~ PAGE_MASK ;
phys_addr & = PAGE_MASK ;
size = PAGE_ALIGN ( last_addr + 1 ) - phys_addr ;
/*
* Ok , go for it . .
*/
2008-01-30 15:34:05 +03:00
area = get_vm_area ( size , VM_IOREMAP ) ;
2005-04-17 02:20:36 +04:00
if ( ! area )
return NULL ;
area - > phys_addr = phys_addr ;
addr = ( void __iomem * ) area - > addr ;
2008-01-30 15:34:05 +03:00
if ( ioremap_page_range ( ( unsigned long ) addr , ( unsigned long ) addr + size ,
phys_addr , pgprot ) ) {
2008-01-30 15:34:05 +03:00
remove_vm_area ( ( void * ) ( PAGE_MASK & ( unsigned long ) addr ) ) ;
2005-04-17 02:20:36 +04:00
return NULL ;
}
2008-01-30 15:34:05 +03:00
if ( ioremap_change_attr ( phys_addr , size , pgprot ) < 0 ) {
vunmap ( addr ) ;
return NULL ;
}
2005-04-17 02:20:36 +04:00
return ( void __iomem * ) ( offset + ( char __iomem * ) addr ) ;
}
/**
* ioremap_nocache - map bus memory into CPU space
* @ offset : bus address of the memory
* @ size : size of the resource to map
*
* ioremap_nocache performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb / readw / readl / writeb /
* writew / writel functions and the other mmio helpers . The returned
* address is not guaranteed to be usable directly as a virtual
2008-01-30 15:34:05 +03:00
* address .
2005-04-17 02:20:36 +04:00
*
* This version of ioremap ensures that the memory is marked uncachable
* on the CPU as well as honouring existing caching rules from things like
2008-01-30 15:34:05 +03:00
* the PCI bus . Note that there are other caches and buffers on many
2005-04-17 02:20:36 +04:00
* busses . In particular driver authors should read up on PCI writes
*
* It ' s useful if some control registers are in such an area and
* write combining or read caching is not desirable :
2008-01-30 15:34:05 +03:00
*
2005-04-17 02:20:36 +04:00
* Must be freed with iounmap .
*/
2008-01-30 15:34:05 +03:00
void __iomem * ioremap_nocache ( unsigned long phys_addr , unsigned long size )
2005-04-17 02:20:36 +04:00
{
2008-01-30 15:34:05 +03:00
return __ioremap ( phys_addr , size , _PAGE_PCD | _PAGE_PWT ) ;
2005-04-17 02:20:36 +04:00
}
2005-06-23 11:08:33 +04:00
EXPORT_SYMBOL ( ioremap_nocache ) ;
2005-04-17 02:20:36 +04:00
2008-01-30 15:34:06 +03:00
/*
 * ioremap_cache - like ioremap_nocache(), but the mapping is cacheable
 * (no extra page protection flags). Must be freed with iounmap().
 */
void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 0);
}
EXPORT_SYMBOL(ioremap_cache);
2005-12-13 09:17:09 +03:00
/**
* iounmap - Free a IO remapping
* @ addr : virtual address from ioremap_ *
*
* Caller must ensure there is only one unmapping for the same pointer .
*/
2005-04-17 02:20:36 +04:00
void iounmap ( volatile void __iomem * addr )
{
2005-12-13 09:17:09 +03:00
struct vm_struct * p , * o ;
2005-07-08 04:56:02 +04:00
if ( ( void __force * ) addr < = high_memory )
2005-04-17 02:20:36 +04:00
return ;
/*
* __ioremap special - cases the PCI / ISA range by not instantiating a
* vm_area and by simply returning an address into the kernel mapping
* of ISA space . So handle that here .
*/
if ( addr > = phys_to_virt ( ISA_START_ADDRESS ) & &
2008-01-30 15:34:05 +03:00
addr < phys_to_virt ( ISA_END_ADDRESS ) )
2005-04-17 02:20:36 +04:00
return ;
2008-01-30 15:34:05 +03:00
addr = ( volatile void __iomem * )
( PAGE_MASK & ( unsigned long __force ) addr ) ;
2005-12-13 09:17:09 +03:00
/* Use the vm area unlocked, assuming the caller
ensures there isn ' t another iounmap for the same address
in parallel . Reuse of the virtual address is prevented by
leaving it in the global lists until we ' re done with it .
cpa takes care of the direct mappings . */
read_lock ( & vmlist_lock ) ;
for ( p = vmlist ; p ; p = p - > next ) {
if ( p - > addr = = addr )
break ;
}
read_unlock ( & vmlist_lock ) ;
if ( ! p ) {
2008-01-30 15:34:05 +03:00
printk ( KERN_ERR " iounmap: bad address %p \n " , addr ) ;
2005-07-08 04:56:02 +04:00
dump_stack ( ) ;
2005-12-13 09:17:09 +03:00
return ;
2005-04-17 02:20:36 +04:00
}
2005-12-13 09:17:09 +03:00
/* Reset the direct mapping. Can block */
2008-01-30 15:34:05 +03:00
ioremap_change_attr ( p - > phys_addr , p - > size , PAGE_KERNEL ) ;
2005-12-13 09:17:09 +03:00
/* Finally remove it */
o = remove_vm_area ( ( void * ) addr ) ;
BUG_ON ( p ! = o | | o = = NULL ) ;
2008-01-30 15:34:05 +03:00
kfree ( p ) ;
2005-04-17 02:20:36 +04:00
}
2005-06-23 11:08:33 +04:00
EXPORT_SYMBOL ( iounmap ) ;
2005-04-17 02:20:36 +04:00
2008-01-30 15:34:05 +03:00
# ifdef CONFIG_X86_32
2008-01-30 15:33:45 +03:00
int __initdata early_ioremap_debug ;
static int __init early_ioremap_debug_setup ( char * str )
{
early_ioremap_debug = 1 ;
2008-01-30 15:33:45 +03:00
return 0 ;
2008-01-30 15:33:45 +03:00
}
2008-01-30 15:33:45 +03:00
early_param ( " early_ioremap_debug " , early_ioremap_debug_setup ) ;
2008-01-30 15:33:45 +03:00
2008-01-30 15:33:44 +03:00
static __initdata int after_paging_init ;
static __initdata unsigned long bm_pte [ 1024 ]
__attribute__ ( ( aligned ( PAGE_SIZE ) ) ) ;
2008-01-30 15:33:44 +03:00
static inline unsigned long * __init early_ioremap_pgd ( unsigned long addr )
2008-01-30 15:33:44 +03:00
{
return ( unsigned long * ) swapper_pg_dir + ( ( addr > > 22 ) & 1023 ) ;
}
2008-01-30 15:33:44 +03:00
static inline unsigned long * __init early_ioremap_pte ( unsigned long addr )
2008-01-30 15:33:44 +03:00
{
return bm_pte + ( ( addr > > PAGE_SHIFT ) & 1023 ) ;
}
2008-01-30 15:33:44 +03:00
/*
 * Hook the boot-time fixmap page table (bm_pte) into swapper_pg_dir so
 * that early_ioremap() works before the real page tables are set up.
 */
void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
2008-01-30 15:33:44 +03:00
/*
 * Unhook the boot-time fixmap page table again and flush the TLB.
 */
void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}
2008-01-30 15:33:44 +03:00
/*
 * Transfer any still-live boot-time fixmap mappings into the real page
 * tables once paging is fully initialized, and switch early_set_fixmap /
 * early_clear_fixmap over to the permanent set_fixmap()/clear_fixmap().
 */
void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		/*
		 * Bug fix: the old test "!*pte & _PAGE_PRESENT" parsed
		 * as "(!*pte) & 1" due to operator precedence, so it
		 * re-instated exactly the *empty* slots (mapping them to
		 * phys 0) and skipped the present ones. Only carry over
		 * slots that actually hold a present mapping:
		 */
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}
2008-01-30 15:33:44 +03:00
/*
 * Install (or clear, when @flags is empty) a single boot-time fixmap
 * PTE and flush its TLB entry.
 */
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	*pte = pgprot_val(flags) ?
		(phys & PAGE_MASK) | pgprot_val(flags) : 0;
	__flush_tlb_one(addr);
}
2008-01-30 15:33:44 +03:00
/* Route to the real set_fixmap() once paging is up, else the boot-time one. */
static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (!after_paging_init)
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
	else
		set_fixmap(idx, phys);
}
2008-01-30 15:33:44 +03:00
/* Route to the real clear_fixmap() once paging is up, else the boot-time one. */
static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (!after_paging_init)
		__early_set_fixmap(idx, 0, __pgprot(0));
	else
		clear_fixmap(idx);
}
2008-01-30 15:33:45 +03:00
int __initdata early_ioremap_nested ;
2008-01-30 15:33:47 +03:00
static int __init check_early_ioremap_leak ( void )
{
if ( ! early_ioremap_nested )
return 0 ;
printk ( KERN_WARNING
2008-01-30 15:34:05 +03:00
" Debug warning: early ioremap leak of %d areas detected. \n " ,
early_ioremap_nested ) ;
2008-01-30 15:33:47 +03:00
printk ( KERN_WARNING
2008-01-30 15:34:05 +03:00
" please boot with early_ioremap_debug and report the dmesg. \n " ) ;
2008-01-30 15:33:47 +03:00
WARN_ON ( 1 ) ;
return 1 ;
}
late_initcall ( check_early_ioremap_leak ) ;
2008-01-30 15:33:44 +03:00
/*
 * early_ioremap - boot-time ioremap using the FIX_BTMAP fixmap slots.
 * @phys_addr: physical address to map
 * @size: number of bytes to map
 *
 * Valid only while system_state == SYSTEM_BOOTING. Supports up to
 * FIX_BTMAPS_NESTING nested mappings of at most NR_FIX_BTMAPS pages
 * each. Returns a virtual address (including the sub-page offset of
 * @phys_addr), or NULL on error. Must be undone with early_iounmap().
 */
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	/*
	 * Fencepost fix: last_addr is the address of the final byte, so
	 * the mapping must extend through PAGE_ALIGN(last_addr + 1).
	 * The previous PAGE_ALIGN(last_addr) dropped one page whenever
	 * last_addr landed exactly on a page boundary (this also matches
	 * __ioremap() above).
	 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}
2008-01-30 15:33:44 +03:00
/*
 * early_iounmap - undo the most recent early_ioremap().
 * @addr: virtual address returned by early_ioremap()
 * @size: size passed to the matching early_ioremap()
 *
 * Mappings must be torn down in strict LIFO order of creation.
 */
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	/*
	 * Bug fix: this was "unsigned int", which made the underflow
	 * check "WARN_ON(nesting < 0)" below always false. Use a signed
	 * type so unbalanced early_iounmap() calls are actually caught.
	 */
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	/*
	 * Fencepost fix: the page count is ceil((offset + size) / PAGE_SIZE).
	 * The previous "PAGE_ALIGN(offset + size - 1)" left one page mapped
	 * whenever offset + size was one byte past a page boundary.
	 */
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
2008-01-30 15:33:45 +03:00
/*
 * Referenced by fixmap index-validation code for non-constant indices;
 * intended to be resolved (or not) at link time. If it is ever actually
 * reached at runtime, all we can do is warn.
 */
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}
2008-01-30 15:34:05 +03:00
# endif /* CONFIG_X86_32 */