2005-04-17 02:20:36 +04:00
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
2008-01-30 15:34:05 +03:00
# include <linux/bootmem.h>
2005-04-17 02:20:36 +04:00
# include <linux/init.h>
2006-10-01 10:29:17 +04:00
# include <linux/io.h>
2008-01-30 15:34:05 +03:00
# include <linux/module.h>
# include <linux/slab.h>
# include <linux/vmalloc.h>
2008-05-12 23:20:57 +04:00
# include <linux/mmiotrace.h>
2008-01-30 15:34:05 +03:00
2005-04-17 02:20:36 +04:00
# include <asm/cacheflush.h>
2008-01-30 15:34:05 +03:00
# include <asm/e820.h>
# include <asm/fixmap.h>
2005-04-17 02:20:36 +04:00
# include <asm/pgtable.h>
2008-01-30 15:34:05 +03:00
# include <asm/tlbflush.h>
2008-01-30 15:34:11 +03:00
# include <asm/pgalloc.h>
2008-03-19 03:00:17 +03:00
# include <asm/pat.h>
2005-04-17 02:20:36 +04:00
2009-09-10 21:09:38 +04:00
# include "physaddr.h"
2008-01-30 15:34:05 +03:00
2008-01-30 15:34:06 +03:00
int page_is_ram ( unsigned long pagenr )
{
2008-03-25 10:31:17 +03:00
resource_size_t addr , end ;
2008-01-30 15:34:06 +03:00
int i ;
2008-02-18 20:54:33 +03:00
/*
* A special case is the first 4 Kb of memory ;
* This is a BIOS owned area , not kernel ram , but generally
* not listed as such in the E820 table .
*/
if ( pagenr = = 0 )
return 0 ;
2008-02-18 20:58:45 +03:00
/*
* Second special case : Some BIOSen report the PC BIOS
* area ( 640 - > 1 Mb ) as ram even though it is not .
*/
if ( pagenr > = ( BIOS_BEGIN > > PAGE_SHIFT ) & &
pagenr < ( BIOS_END > > PAGE_SHIFT ) )
return 0 ;
2008-02-18 20:54:33 +03:00
2008-01-30 15:34:06 +03:00
for ( i = 0 ; i < e820 . nr_map ; i + + ) {
/*
* Not usable memory :
*/
if ( e820 . map [ i ] . type ! = E820_RAM )
continue ;
addr = ( e820 . map [ i ] . addr + PAGE_SIZE - 1 ) > > PAGE_SHIFT ;
end = ( e820 . map [ i ] . addr + e820 . map [ i ] . size ) > > PAGE_SHIFT ;
2008-01-30 15:34:06 +03:00
2008-01-30 15:34:06 +03:00
if ( ( pagenr > = addr ) & & ( pagenr < end ) )
return 1 ;
}
return 0 ;
}
2008-01-30 15:34:05 +03:00
/*
* Fix up the linear direct mapping of the kernel to avoid cache attribute
* conflicts .
*/
2008-03-19 03:00:16 +03:00
int ioremap_change_attr ( unsigned long vaddr , unsigned long size ,
unsigned long prot_val )
2008-01-30 15:34:05 +03:00
{
2008-01-30 15:34:06 +03:00
unsigned long nrpages = size > > PAGE_SHIFT ;
2008-02-01 19:49:43 +03:00
int err ;
2008-01-30 15:34:05 +03:00
2008-03-19 03:00:16 +03:00
switch ( prot_val ) {
case _PAGE_CACHE_UC :
2008-01-30 15:34:06 +03:00
default :
2008-03-19 03:00:18 +03:00
err = _set_memory_uc ( vaddr , nrpages ) ;
2008-01-30 15:34:06 +03:00
break ;
2008-03-19 03:00:24 +03:00
case _PAGE_CACHE_WC :
err = _set_memory_wc ( vaddr , nrpages ) ;
break ;
2008-03-19 03:00:16 +03:00
case _PAGE_CACHE_WB :
2008-03-19 03:00:18 +03:00
err = _set_memory_wb ( vaddr , nrpages ) ;
2008-01-30 15:34:06 +03:00
break ;
}
2008-01-30 15:34:05 +03:00
return err ;
}
2005-04-17 02:20:36 +04:00
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	/* Keep the pre-alignment values for mmiotrace reporting below. */
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 * Only refuse outright when the page is RAM, has a struct page and
	 * is not reserved; plain e820-RAM without those just warns once.
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Register the range with the PAT memtype tracker; it may hand back
	 * a different (compatible) attribute in new_prot_val.
	 */
	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			/* Error path: drop the memtype reservation again. */
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		/* Adopt the attribute the memtype tracker settled on. */
		prot_val = new_prot_val;
	}

	/* Translate the cache attribute into actual page protections. */
	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	/* Keep the kernel direct mapping's attributes consistent with ours. */
	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	/* Re-apply the sub-page offset the caller asked for. */
	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}
/**
* ioremap_nocache - map bus memory into CPU space
* @ offset : bus address of the memory
* @ size : size of the resource to map
*
* ioremap_nocache performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb / readw / readl / writeb /
* writew / writel functions and the other mmio helpers . The returned
* address is not guaranteed to be usable directly as a virtual
2008-01-30 15:34:05 +03:00
* address .
2005-04-17 02:20:36 +04:00
*
* This version of ioremap ensures that the memory is marked uncachable
* on the CPU as well as honouring existing caching rules from things like
2008-01-30 15:34:05 +03:00
* the PCI bus . Note that there are other caches and buffers on many
2005-04-17 02:20:36 +04:00
* busses . In particular driver authors should read up on PCI writes
*
* It ' s useful if some control registers are in such an area and
* write combining or read caching is not desirable :
2008-01-30 15:34:05 +03:00
*
2005-04-17 02:20:36 +04:00
* Must be freed with iounmap .
*/
2008-03-24 21:22:39 +03:00
void __iomem * ioremap_nocache ( resource_size_t phys_addr , unsigned long size )
2005-04-17 02:20:36 +04:00
{
2008-04-26 04:07:22 +04:00
/*
* Ideally , this should be :
2008-06-10 18:06:21 +04:00
* pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS ;
2008-04-26 04:07:22 +04:00
*
* Till we fix all X drivers to use ioremap_wc ( ) , we will use
* UC MINUS .
*/
unsigned long val = _PAGE_CACHE_UC_MINUS ;
return __ioremap_caller ( phys_addr , size , val ,
2008-04-28 13:12:42 +04:00
__builtin_return_address ( 0 ) ) ;
2005-04-17 02:20:36 +04:00
}
2005-06-23 11:08:33 +04:00
EXPORT_SYMBOL ( ioremap_nocache ) ;
2005-04-17 02:20:36 +04:00
2008-03-19 03:00:24 +03:00
/**
* ioremap_wc - map memory into CPU space write combined
* @ offset : bus address of the memory
* @ size : size of the resource to map
*
* This version of ioremap ensures that the memory is marked write combining .
* Write combining allows faster writes to some hardware devices .
*
* Must be freed with iounmap .
*/
2009-01-10 03:13:13 +03:00
void __iomem * ioremap_wc ( resource_size_t phys_addr , unsigned long size )
2008-03-19 03:00:24 +03:00
{
2008-06-10 18:06:21 +04:00
if ( pat_enabled )
2008-04-28 13:12:42 +04:00
return __ioremap_caller ( phys_addr , size , _PAGE_CACHE_WC ,
__builtin_return_address ( 0 ) ) ;
2008-03-19 03:00:24 +03:00
else
return ioremap_nocache ( phys_addr , size ) ;
}
EXPORT_SYMBOL ( ioremap_wc ) ;
2008-03-24 21:22:39 +03:00
/*
 * ioremap_cache - map bus memory into CPU space with write-back caching.
 *
 * Same contract as ioremap_nocache() but requests the WB cache attribute.
 * Must be freed with iounmap().
 */
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
2008-07-10 12:09:59 +04:00
/*
 * Map a physical range for /dev/mem-style access, letting the PAT
 * memtype tracker pick the attribute:
 * - WB for WB-able memory and no other conflicting mappings
 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
 * - Inherit from conflicting mappings otherwise
 */
static void __iomem *ioremap_default(resource_size_t phys_addr,
				     unsigned long size)
{
	void __iomem *p;
	unsigned long flags;
	int rc;

	rc = reserve_memtype(phys_addr, phys_addr + size,
			     _PAGE_CACHE_WB, &flags);
	if (rc < 0)
		return NULL;

	p = __ioremap_caller(phys_addr, size, flags,
			     __builtin_return_address(0));

	/*
	 * __ioremap_caller() took its own reservation, so the temporary
	 * one above can be dropped regardless of success or failure.
	 */
	free_memtype(phys_addr, phys_addr + size);
	return p;
}
2008-07-24 08:27:05 +04:00
/*
 * ioremap_prot - map bus memory with caller-supplied protection bits.
 *
 * Only the cache-attribute bits (_PAGE_CACHE_MASK) of @prot_val are
 * honoured; everything else is masked off before mapping.
 */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
2005-12-13 09:17:09 +03:00
/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	/* Addresses below high_memory were never ioremapped. */
	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	/* Strip the sub-page offset that __ioremap_caller() added. */
	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Release the PAT memtype reservation taken at map time. */
	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
2005-04-17 02:20:36 +04:00
2008-03-19 03:00:15 +03:00
/*
* Convert a physical pointer to a virtual kernel pointer for / dev / mem
* access
*/
void * xlate_dev_mem_ptr ( unsigned long phys )
{
void * addr ;
unsigned long start = phys & PAGE_MASK ;
/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
if ( page_is_ram ( start > > PAGE_SHIFT ) )
return __va ( phys ) ;
2008-07-12 09:29:02 +04:00
addr = ( void __force * ) ioremap_default ( start , PAGE_SIZE ) ;
2008-03-19 03:00:15 +03:00
if ( addr )
addr = ( void * ) ( ( unsigned long ) addr | ( phys & ~ PAGE_MASK ) ) ;
return addr ;
}
void unxlate_dev_mem_ptr ( unsigned long phys , void * addr )
{
if ( page_is_ram ( phys > > PAGE_SHIFT ) )
return ;
iounmap ( ( void __iomem * ) ( ( unsigned long ) addr & PAGE_MASK ) ) ;
return ;
}
2008-07-23 16:09:16 +04:00
/* Set by the "early_ioremap_debug" boot parameter; enables verbose tracing. */
static int __initdata early_ioremap_debug;

/* Boot-parameter handler: turn on early_ioremap debug output. */
static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
2008-01-30 15:33:45 +03:00
2008-01-30 15:33:44 +03:00
/* Nonzero once paging is fully up; switches early_set_fixmap() over to
 * the regular fixmap machinery. */
static __initdata int after_paging_init;

/* The single boot-time pte page backing all FIX_BTMAP slots. */
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

/* Walk the page tables down to the pmd covering @addr. */
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}
2008-02-10 01:24:09 +03:00
/* Return the boot-time pte slot for @addr inside the static bm_pte page. */
static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
2009-03-07 08:34:19 +03:00
/* Cached virtual base address of each FIX_BTMAP slot. */
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

/*
 * Set up the boot-time fixmap area used by early_ioremap(): precompute
 * the slot base addresses and hook the static bm_pte page into the page
 * tables under the FIX_BTMAP range.
 */
void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
			FIX_BTMAP_BEGIN);
	}
}
2008-01-30 15:33:44 +03:00
/*
 * Called once paging is fully initialized; from here on early_set_fixmap()
 * routes through the regular __set_fixmap()/clear_fixmap() machinery.
 */
void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}
2008-01-30 15:33:44 +03:00
/*
 * Install (or, with empty @flags, remove) a boot-time fixmap pte for slot
 * @idx pointing at physical address @phys, then flush the TLB entry.
 */
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Zero protection bits mean "tear the mapping down". */
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
2008-01-30 15:33:44 +03:00
/*
 * Dispatch a fixmap update to either the regular fixmap code (after
 * paging is up) or the boot-time bm_pte implementation (before).
 */
static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}
2008-01-30 15:33:44 +03:00
/* Counterpart of early_set_fixmap(): remove the mapping for slot @idx. */
static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
2008-10-29 08:46:04 +03:00
static void __iomem * prev_map [ FIX_BTMAPS_SLOTS ] __initdata ;
2008-09-14 13:33:12 +04:00
static unsigned long prev_size [ FIX_BTMAPS_SLOTS ] __initdata ;
2009-03-07 08:34:19 +03:00
2008-01-30 15:33:47 +03:00
static int __init check_early_ioremap_leak ( void )
{
2008-09-14 13:33:12 +04:00
int count = 0 ;
int i ;
for ( i = 0 ; i < FIX_BTMAPS_SLOTS ; i + + )
if ( prev_map [ i ] )
count + + ;
if ( ! count )
2008-01-30 15:33:47 +03:00
return 0 ;
2008-07-08 20:50:22 +04:00
WARN ( 1 , KERN_WARNING
2008-01-30 15:34:05 +03:00
" Debug warning: early ioremap leak of %d areas detected. \n " ,
2008-09-14 13:33:12 +04:00
count ) ;
2008-01-30 15:33:47 +03:00
printk ( KERN_WARNING
2008-07-08 20:50:22 +04:00
" please boot with early_ioremap_debug and report the dmesg. \n " ) ;
2008-01-30 15:33:47 +03:00
return 1 ;
}
late_initcall ( check_early_ioremap_leak ) ;
2009-03-07 08:34:19 +03:00
/*
 * Boot-time ioremap: map @size bytes at @phys_addr through one of the
 * FIX_BTMAP fixmap slots with protection @prot.  Usable only while
 * system_state == SYSTEM_BOOTING; pair with early_iounmap().
 *
 * Returns the virtual address (including the sub-page offset of
 * @phys_addr) or NULL when no slot is free or the request is invalid.
 */
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	/* Find a free bootmap slot. */
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		/* Fixed message: previously misspelled "early_iomap". */
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) not found slot\n",
			 (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	/* Remember the size so early_iounmap() can cross-check it. */
	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..  Fixmap indices count downwards, so the slot's
	 * first page has the highest index.
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}
2008-09-08 02:21:15 +04:00
/* Remap an IO device: boot-time mapping with uncached IO protections. */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}
/* Remap memory: boot-time mapping with normal cacheable protections. */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
2008-10-29 08:46:04 +03:00
/*
 * Tear down a mapping created by early_ioremap()/early_memremap().
 * @addr and @size must match the values passed to (and returned by)
 * the mapping call; mismatches warn and leave the slot alone.
 */
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	/* Locate the slot that handed out this address. */
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	/*
	 * Must mirror the map side's PAGE_ALIGN(last_addr + 1), i.e. round
	 * offset + size UP to whole pages.  The previous "- 1" undercounted
	 * by one page whenever offset + size was exactly one byte past a
	 * page boundary, leaking a fixmap pte.
	 */
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}