/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * Provides kernel-static versions of atomic kmap functions originally
 * found as inlines in include/asm-sparc/highmem.h.  These became
 * needed as kmap_atomic() and kunmap_atomic() started getting
 * called from within modules.
 * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
 * modules because they are loaded with btfixup-ped functions.
 */
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text.  Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock.  Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/export.h>

#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

void *__kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));

/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void *) vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else
		flush_cache_all();
#endif

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);

		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	/* Ordinary lowmem address: translate it directly. */
	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);

	/* Address inside the SRMMU nocache area. */
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);

	/* Otherwise it must be a fixmap slot set up by kmap_atomic(). */
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}