/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/proto.h>
#include <asm/e820.h>
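/*
 * Resolve a kernel virtual address to its physical address. Kernel text
 * and data live in the __START_KERNEL_map mapping; anything else is
 * assumed to come from the direct mapping at PAGE_OFFSET. For example
 * (with a hypothetical phys_base of 0), both __START_KERNEL_map + 0x100000
 * and PAGE_OFFSET + 0x100000 resolve to physical address 0x100000.
 */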
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts: a page remapped uncached by ioremap must not keep a cached
 * alias in the kernel's direct mapping.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
		    unsigned long flags)
{
	int err = 0;
	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long)__va(phys_addr);

		/*
		 * Must use an address here and not struct page because the
		 * phys addr can be in a hole between nodes and not have a
		 * memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages,
					    __pgprot(__PAGE_KERNEL|flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}
/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;
	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped.
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);
	pgprot = __pgprot(__PAGE_KERNEL_EXEC | _PAGE_GLOBAL | flags);
	/*
	 * Mappings have to be page-aligned.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * OK, go for it. Stash the caching attribute bits in the upper
	 * vm_struct flags so iounmap() can reset the direct mapping later.
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long)addr));
		return NULL;
	}
	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	return (__force void __iomem *)(offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
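
/*
 * A minimal usage sketch (hypothetical, for illustration only): map a
 * device register block write-through and read its first 32-bit register.
 * MMIO_BASE and MMIO_SIZE are made-up placeholders, not real addresses.
 *
 *	void __iomem *regs = __ioremap(MMIO_BASE, MMIO_SIZE, _PAGE_PWT);
 *
 *	if (regs) {
 *		unsigned int v = readl(regs);
 *		iounmap(regs);
 *	}
 */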
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * buses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);
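
/*
 * A typical (hypothetical) driver pattern, sketched for illustration;
 * pdev stands in for a struct pci_dev the caller already owns:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0, regs);
 *	iounmap(regs);
 */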
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;
	if (addr <= high_memory)
		return;
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;
	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if (p->flags >> 20)
		ioremap_change_attr(p->phys_addr, p->size, 0);
	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);