/*
 * High memory support for Xtensa architecture
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2014 Cadence Design Systems Inc.
 */

#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;
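
/*
 * With a D-cache way larger than PAGE_SIZE the cache is virtually
 * aliasing: one physical page can be cached at several virtual
 * addresses ("colors").  The pkmap state below is replicated per
 * color so that a highmem page is always mapped at a virtual address
 * of the same color as its physical address.
 */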
#if DCACHE_WAY_SIZE > PAGE_SIZE
unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];

static void __init kmap_waitqueues_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
		init_waitqueue_head(pkmap_map_wait_arr + i);
}
#else
static inline void kmap_waitqueues_init(void)
{
}
#endif
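
/*
 * Pick the fixmap slot for an atomic kmap.  Each CPU owns KM_TYPE_NR
 * consecutive types, and each type is widened to DCACHE_N_COLORS
 * slots so the virtual address can be given the same cache color as
 * the page being mapped.  For example, assuming KM_TYPE_NR == 20 and
 * DCACHE_N_COLORS == 4, type 2 on CPU 1 with color 3 picks slot
 * (2 + 20 * 1) * 4 + 3 == 91.
 */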
static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{
	return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
		color;
}

void *kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	preempt_disable();
	pagefault_disable();
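	/* Lowmem pages are permanently mapped; no fixmap slot is needed. */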
	if (!PageHighMem(page))
		return page_address(page);

	idx = kmap_idx(kmap_atomic_idx_push(),
		       DCACHE_ALIAS(page_to_phys(page)));
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
	set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	if (kvaddr >= (void *)FIXADDR_START &&
	    kvaddr < (void *)FIXADDR_TOP) {
		int idx = kmap_idx(kmap_atomic_idx(),
				   DCACHE_ALIAS((unsigned long)kvaddr));

		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
		local_flush_tlb_kernel_range((unsigned long)kvaddr,
					     (unsigned long)kvaddr + PAGE_SIZE);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
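
/*
 * Typical use (a minimal sketch: 'page' and 'buf' are placeholders,
 * and callers normally unmap through the generic kunmap_atomic()
 * wrapper rather than calling __kunmap_atomic() directly):
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(buf, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 *
 * Sleeping is not allowed between the two calls, since pagefaults
 * and preemption stay disabled while the mapping is live.
 */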

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
	kmap_waitqueues_init();
}