/*
 *  highmem.c: virtual kernel memory mappings for high memory
 *
 *  Provides kernel-static versions of atomic kmap functions originally
 *  found as inlines in include/asm-sparc/highmem.h.  These became
 *  needed as kmap_atomic() and kunmap_atomic() started getting
 *  called from within modules.
 *  -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 *  But kmap_atomic() and kunmap_atomic() cannot be inlined in
 *  modules because they are loaded with btfixup-ped functions.
 */
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text.  Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock.  Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
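
/*
 * A minimal usage sketch, not part of this file: an atomic kmap must be
 * dropped on the same CPU without sleeping in between, much like a
 * spinlock-protected region.  The helper below is hypothetical and only
 * illustrates the kmap_atomic()/kunmap_atomic() pairing described above,
 * assuming the generic KM_USER0/KM_USER1 slot types; no sleeping calls
 * may occur between the map and the unmap.
 *
 *	static void copy_page_example(struct page *dst, struct page *src)
 *	{
 *		char *vdst, *vsrc;
 *
 *		vsrc = kmap_atomic(src, KM_USER0);
 *		vdst = kmap_atomic(dst, KM_USER1);
 *		memcpy(vdst, vsrc, PAGE_SIZE);
 *		kunmap_atomic(vdst, KM_USER1);
 *		kunmap_atomic(vsrc, KM_USER0);
 *	}
 */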
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));

/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void *) vaddr;
}
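
/*
 * A hedged note on the slot arithmetic above, not a statement about this
 * port's exact fixmap layout: with kmap type t on CPU n,
 * idx = t + KM_TYPE_NR * n, so every CPU owns a disjoint window of
 * KM_TYPE_NR fixmap slots starting at index FIX_KMAP_BEGIN + KM_TYPE_NR * n.
 * pagefault_disable() keeps the task from migrating, and since each CPU
 * only ever touches its own window, the set_pte() above needs no lock.
 */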

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);

/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif

	pagefault_enable();
}

/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
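
/*
 * A hedged summary of the range checks above, assuming the usual sparc32
 * ordering SRMMU_NOCACHE_VADDR < PKMAP_BASE < FIXADDR_START: addresses in
 * lowmem map straight back via virt_to_page(); addresses in the SRMMU
 * nocache pool are resolved through the physical address returned by
 * __nocache_pa(); anything above that must be an atomic-kmap fixmap slot,
 * whose pte still records the mapped page.
 */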