/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>
/*
 * Is @paddr inside the hardware-uncached address window?
 *
 * ARCompact has a fixed uncached region starting at
 * ARC_UNCACHED_ADDR_SPACE; later ISAs describe the peripheral window
 * with the [perip_base, perip_end] bounds (inclusive on both ends).
 * Accesses in these ranges bypass the cache, so ioremap() can skip
 * building an MMU mapping for them.
 */
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
	if (is_isa_arcompact())
		return paddr >= ARC_UNCACHED_ADDR_SPACE;

	return paddr >= perip_base && paddr <= perip_end;
}
2016-03-16 15:04:39 +05:30
void __iomem * ioremap ( phys_addr_t paddr , unsigned long size )
2013-01-18 15:12:20 +05:30
{
2016-03-16 15:04:39 +05:30
phys_addr_t end ;
2013-01-18 15:12:20 +05:30
/* Don't allow wraparound or zero size */
end = paddr + size - 1 ;
if ( ! size | | ( end < paddr ) )
return NULL ;
2016-03-16 15:04:39 +05:30
/*
* If the region is h / w uncached , MMU mapping can be elided as optim
* The cast to u32 is fine as this region can only be inside 4 GB
*/
2015-10-24 19:31:16 +05:30
if ( arc_uncached_addr_space ( paddr ) )
2016-03-16 15:04:39 +05:30
return ( void __iomem * ) ( u32 ) paddr ;
2013-01-18 15:12:20 +05:30
2013-01-22 16:48:45 +05:30
return ioremap_prot ( paddr , size , PAGE_KERNEL_NO_CACHE ) ;
}
EXPORT_SYMBOL ( ioremap ) ;
/*
 * ioremap with explicit access flags.
 *
 * Cache-semantics wise it is the same as ioremap - "forced" uncached.
 * However unlike vanilla ioremap, which bypasses the ARC MMU for
 * addresses in the hardware uncached region, this one always goes
 * through the MMU, as the caller might need finer access control
 * (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
{
	struct vm_struct *area;
	unsigned long vbase;
	phys_addr_t offset, last;
	pgprot_t prot = __pgprot(flags);

	/* Reject zero size and address-space wraparound */
	last = paddr + size - 1;
	if (!size || last < paddr)
		return NULL;

	/* An early platform driver might end up here before vmalloc works */
	if (!slab_is_available())
		return NULL;

	/* force uncached regardless of what the caller asked for */
	prot = pgprot_noncached(prot);

	/* Mappings have to be page-aligned; remember the sub-page offset */
	offset = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK;
	size = PAGE_ALIGN(last + 1) - paddr;

	/* Grab a chunk of kernel virtual address space and wire it up */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	area->phys_addr = paddr;
	vbase = (unsigned long)area->addr;

	if (ioremap_page_range(vbase, vbase + size, paddr, prot)) {
		vunmap((void __force *)vbase);
		return NULL;
	}

	return (void __iomem *)(offset + (char __iomem *)vbase);
}
EXPORT_SYMBOL(ioremap_prot);
2013-01-18 15:12:20 +05:30
void iounmap ( const void __iomem * addr )
{
2015-10-24 19:31:16 +05:30
/* weird double cast to handle phys_addr_t > 32 bits */
if ( arc_uncached_addr_space ( ( phys_addr_t ) ( u32 ) addr ) )
2013-01-18 15:12:20 +05:30
return ;
vfree ( ( void * ) ( PAGE_MASK & ( unsigned long __force ) addr ) ) ;
}
EXPORT_SYMBOL ( iounmap ) ;