2005-04-17 02:20:36 +04:00
# include <linux/highmem.h>
2005-06-23 11:08:33 +04:00
# include <linux/module.h>
2005-04-17 02:20:36 +04:00
/*
 * Map a page into the kernel's virtual address space.
 *
 * Lowmem pages are permanently mapped, so their direct-mapped address
 * is returned as-is; highmem pages go through the global kmap pool via
 * kmap_high().  May sleep — must not be called from atomic context.
 */
void *kmap(struct page *page)
{
	might_sleep();
	if (PageHighMem(page))
		return kmap_high(page);
	return page_address(page);
}
/*
 * Undo a kmap().  Only highmem pages have kmap-pool state to release;
 * lowmem pages keep their permanent direct mapping.  Not legal from
 * interrupt context (kunmap_high may need to take a sleeping lock).
 */
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (PageHighMem(page))
		kunmap_high(page);
}
/*
* kmap_atomic / kunmap_atomic is significantly faster than kmap / kunmap because
* no global lock is needed and because the kmap code must perform a global TLB
* invalidation when the kmap pool wraps .
*
* However, when holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short , tight code paths only .
*/
void * kmap_atomic ( struct page * page , enum km_type type )
{
enum fixed_addresses idx ;
unsigned long vaddr ;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
2006-12-07 07:32:20 +03:00
pagefault_disable ( ) ;
2007-02-10 12:46:36 +03:00
idx = type + KM_TYPE_NR * smp_processor_id ( ) ;
BUG_ON ( ! pte_none ( * ( kmap_pte - idx ) ) ) ;
2005-04-17 02:20:36 +04:00
if ( ! PageHighMem ( page ) )
return page_address ( page ) ;
vaddr = __fix_to_virt ( FIX_KMAP_BEGIN + idx ) ;
set_pte ( kmap_pte - idx , mk_pte ( page , kmap_prot ) ) ;
return ( void * ) vaddr ;
}
void kunmap_atomic ( void * kvaddr , enum km_type type )
{
unsigned long vaddr = ( unsigned long ) kvaddr & PAGE_MASK ;
enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id ( ) ;
/*
2006-10-01 10:29:35 +04:00
* Force other mappings to Oops if they ' ll try to access this pte
* without first remap it . Keeping stale mappings around is a bad idea
* also , in case the page changes cacheability attributes or becomes
* a protected page in a hypervisor .
2005-04-17 02:20:36 +04:00
*/
2006-12-07 07:32:22 +03:00
if ( vaddr = = __fix_to_virt ( FIX_KMAP_BEGIN + idx ) )
kpte_clear_flush ( kmap_pte - idx , vaddr ) ;
else {
# ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON ( vaddr < PAGE_OFFSET ) ;
BUG_ON ( vaddr > = ( unsigned long ) high_memory ) ;
# endif
}
2005-04-17 02:20:36 +04:00
2006-12-07 07:32:20 +03:00
pagefault_enable ( ) ;
2005-04-17 02:20:36 +04:00
}
2005-06-26 01:58:19 +04:00
/* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it .
*/
void * kmap_atomic_pfn ( unsigned long pfn , enum km_type type )
{
enum fixed_addresses idx ;
unsigned long vaddr ;
2006-12-07 07:32:20 +03:00
pagefault_disable ( ) ;
2005-06-26 01:58:19 +04:00
idx = type + KM_TYPE_NR * smp_processor_id ( ) ;
vaddr = __fix_to_virt ( FIX_KMAP_BEGIN + idx ) ;
set_pte ( kmap_pte - idx , pfn_pte ( pfn , kmap_prot ) ) ;
return ( void * ) vaddr ;
}
2005-04-17 02:20:36 +04:00
/*
 * Translate a kernel virtual address — either a lowmem direct mapping
 * or an address handed out by kmap_atomic() — back to its struct page.
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long addr = (unsigned long)ptr;
	pte_t *slot_pte;

	if (addr < FIXADDR_START)
		return virt_to_page(ptr);

	slot_pte = kmap_pte - (virt_to_fix(addr) - FIX_KMAP_BEGIN);
	return pte_page(*slot_pte);
}
2005-06-23 11:08:33 +04:00
/* Export the highmem mapping API to loadable modules. */
EXPORT_SYMBOL ( kmap ) ;
EXPORT_SYMBOL ( kunmap ) ;
EXPORT_SYMBOL ( kmap_atomic ) ;
EXPORT_SYMBOL ( kunmap_atomic ) ;
EXPORT_SYMBOL ( kmap_atomic_to_page ) ;