#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
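
/*
 * Illustrative sketch, not part of the original file: the classic
 * kmap()/kunmap() pairing for touching a highmem page from process
 * context.  The helper name is hypothetical, and <linux/string.h> is
 * assumed for memset().
 */
static inline void example_zero_highpage(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep, so process context only */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);
}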

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must perform
 * a global TLB invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	BUG_ON(!pte_none(*(kmap_pte - idx)));

	if (!PageHighMem(page))
		return page_address(page);

	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}
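
/*
 * Illustrative sketch, not part of the original file: an atomic kmap
 * must be paired with kunmap_atomic() on the same km_type slot, with no
 * sleeping in between while pagefaults are disabled.  The helper name is
 * hypothetical; KM_USER0 comes from asm/kmap_types.h, <linux/string.h>
 * is assumed for memcpy(), and dst must hold at least PAGE_SIZE bytes.
 */
static inline void example_copy_from_highpage(void *dst, struct page *page)
{
	char *src = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src, KM_USER0);	/* re-enables pagefaults */
}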

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it.  Keeping stale mappings around is a
	 * bad idea also, in case the page changes cacheability attributes
	 * or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN + idx))
		kpte_clear_flush(kmap_pte - idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
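
/*
 * Illustrative sketch, not part of the original file: kmap_atomic_pfn()
 * suits callers such as crash-dump readers that touch physical frames
 * which may lack a struct page.  The helper name and the KM_PTE0 slot
 * choice are illustrative only; offset must stay within the page.
 */
static inline u32 example_read_pfn_word(unsigned long pfn, unsigned long offset)
{
	char *vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
	u32 val = *(u32 *)(vaddr + offset);

	kunmap_atomic(vaddr, KM_PTE0);
	return val;
}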

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
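
/*
 * Illustrative sketch, not part of the original file: recovering the
 * struct page behind a live atomic kmap, e.g. to inspect its flags.
 * The helper name is hypothetical.
 */
static inline int example_mapped_page_dirty(void *kvaddr)
{
	struct page *page = kmap_atomic_to_page(kvaddr);

	return PageDirty(page);
}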

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);