// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>

/*
 * Remap a physical device region into the vmalloc area with the given
 * page protections, recording @caller for /proc/vmallocinfo.
 *
 * Returns a virtual address (adjusted for any sub-page offset of
 * @phys_addr) on success, or NULL on failure.  RAM may not be mapped
 * through this path; use ioremap_cache() for normal memory.
 */
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
				      pgprot_t prot, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * Page align the mapping address and size, taking account of any
	 * offset.
	 */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	/*
	 * Don't allow wraparound, zero size or outside PHYS_MASK.
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped.
	 */
	if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
	if (err) {
		/* Tear down the partially-populated mapping on failure. */
		vunmap((void *)addr);
		return NULL;
	}

	/* Re-apply the intra-page offset the caller originally requested. */
	return (void __iomem *)(offset + addr);
}
/*
 * Map a physical device region with the supplied protections, tagging
 * the mapping with our immediate caller's return address.
 */
void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);
2019-09-02 10:06:23 +02:00
void iounmap ( volatile void __iomem * io_addr )
2012-03-05 11:49:29 +00:00
{
2013-10-24 15:54:17 +01:00
unsigned long addr = ( unsigned long ) io_addr & PAGE_MASK ;
2012-03-05 11:49:29 +00:00
2013-10-24 15:54:17 +01:00
/*
* We could get an address outside vmalloc range in case
* of ioremap_cache ( ) reusing a RAM mapping .
*/
2017-02-09 19:45:34 +08:00
if ( is_vmalloc_addr ( ( void * ) addr ) )
2013-10-24 15:54:17 +01:00
vunmap ( ( void * ) addr ) ;
2012-03-05 11:49:29 +00:00
}
2019-09-02 10:06:23 +02:00
EXPORT_SYMBOL ( iounmap ) ;
2013-10-24 15:54:17 +01:00
/*
 * Map a physical region with cacheable (PROT_NORMAL) attributes.
 * Normal RAM is already mapped cacheable in the linear map, so reuse
 * that alias rather than creating a duplicate vmalloc mapping.
 */
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
2014-04-07 15:39:52 -07:00
2014-11-21 21:50:42 +00:00
/*
* Must be called after early_fixmap_init
*/
2014-04-07 15:39:52 -07:00
void __init early_ioremap_init ( void )
{
early_ioremap_setup ( ) ;
}