// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/io-workarounds.h>
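
/* Allocation watermark for early (pre-slab) ioremaps, set up by MMU init. */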
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
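
/*
 * ioremap() - map a physical range as uncached, guarded I/O memory.
 * If an I/O-workaround bus is active, the mapping is routed through
 * iowa_ioremap() so accesses can be intercepted; otherwise it goes
 * straight to __ioremap_caller().
 */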
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap);
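
/*
 * ioremap_wc() - same as ioremap(), but write-combining: cache-inhibited
 * without the guarded attribute, so stores may be gathered.
 */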
void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap_wc);
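
/*
 * ioremap_coherent() - map a physical range as normal cacheable memory,
 * for devices that are coherent with the CPU caches.
 */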
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
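
/*
 * ioremap_prot() - map a physical range with caller-supplied PTE flags.
 * The flags are sanitised before use: write access implies dirty, and
 * user/exec permissions are stripped so they cannot leak into a kernel
 * mapping.
 */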
void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	pte = pte_mkprivileged(pte);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
EXPORT_SYMBOL(ioremap_prot);
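
/*
 * ioremap_range() - map a physically contiguous range at effective
 * address @ea. Once the slab allocator is up, the generic
 * ioremap_page_range() is used (and torn down again on failure);
 * before that, pages are mapped one at a time with map_kernel_page(),
 * which can still allocate page tables from early memory.
 */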
int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot)
{
	unsigned long i;

	if (slab_is_available()) {
		int err = ioremap_page_range(ea, ea + size, pa, prot);

		if (err)
			unmap_kernel_range(ea, size);
		return err;
	}

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, prot);

		if (WARN_ON_ONCE(err))	/* Should clean up */
			return err;
	}

	return 0;
}
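
/*
 * do_ioremap() - common helper: carve a virtual area out of the IOREMAP
 * window and map @pa into it. @offset restores the sub-page offset the
 * caller stripped when page-aligning the address. Returns NULL and
 * frees the area on failure.
 */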
void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
			 pgprot_t prot, void *caller)
{
	struct vm_struct *area;
	int ret;

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
	if (area == NULL)
		return NULL;

	area->phys_addr = pa;
	ret = ioremap_range((unsigned long)area->addr, pa, size, prot);
	if (!ret)
		return (void __iomem *)area->addr + offset;

	free_vm_area(area);
	return NULL;
}