/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
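
/*
 * Common helper: remap a physical I/O range into the vmalloc area with
 * the requested protection. The caller address is stored with the
 * vm_struct for diagnostics (e.g. /proc/vmallocinfo).
 */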
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
				      pgprot_t prot, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * Page align the mapping address and size, taking account of any
	 * offset.
	 */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	/*
	 * Don't allow wraparound, zero size or outside PHYS_MASK.
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped.
	 */
	if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	return (void __iomem *)(offset + addr);
}
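
/* Exported entry point: remap an I/O range with the given protection. */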
void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	/*
	 * We could get an address outside vmalloc range in case
	 * of ioremap_cache() reusing a RAM mapping.
	 */
	if (VMALLOC_START <= addr && addr < VMALLOC_END)
		vunmap((void *)addr);
}
EXPORT_SYMBOL(__iounmap);
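
/* Cacheable variant: RAM already has a cacheable linear mapping, so reuse it. */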
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
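
/*
 * Statically allocated page tables used to map the fixmap/early ioremap
 * region before the normal page allocator is available.
 */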
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t * __init early_ioremap_pud(unsigned long addr)
{
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	pud_t *pud = early_ioremap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	pmd_t *pmd = early_ioremap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}
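
/*
 * Wire the static bm_* tables into the kernel page tables for the
 * FIX_BTMAP range, then hand over to the generic early_ioremap code.
 */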
void __init early_ioremap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p\n",
			pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));
		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}

	early_ioremap_setup();
}
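
/*
 * Install (non-zero pgprot) or clear (zero pgprot) a single boot-time
 * fixmap entry; clearing also flushes the TLB for that page.
 */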
void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}