2005-04-17 02:20:36 +04:00
# include <linux/module.h>
# include <linux/highmem.h>
2010-02-20 15:23:22 +03:00
# include <linux/sched.h>
2009-06-19 17:05:26 +04:00
# include <linux/smp.h>
2009-04-25 13:25:34 +04:00
# include <asm/fixmap.h>
2005-04-17 02:20:36 +04:00
# include <asm/tlbflush.h>
2009-04-25 13:25:34 +04:00
/* Cached pointer to the PTE of the first kmap fixmap slot; set in kmap_init(). */
static pte_t * kmap_pte ;
/* PFN bounds of the highmem region — presumably assigned by platform memory setup; not written in this file. */
unsigned long highstart_pfn , highend_pfn ;
2005-04-17 02:20:36 +04:00
/*
 * Map a page into the kernel's virtual address space.
 *
 * Lowmem pages already have a permanent kernel mapping, so they are
 * returned directly.  Highmem pages are mapped via kmap_high() and the
 * new mapping's TLB entry is flushed.  May sleep; never call this from
 * atomic context — use __kmap_atomic() there.
 */
void *__kmap(struct page *page)
{
	void *vaddr;

	might_sleep();

	if (!PageHighMem(page))
		return page_address(page);

	vaddr = kmap_high(page);
	flush_tlb_one((unsigned long)vaddr);

	return vaddr;
}
EXPORT_SYMBOL(__kmap);
2005-04-17 02:20:36 +04:00
/*
 * Undo a mapping established by __kmap().
 *
 * Lowmem pages were never specially mapped, so only highmem pages need
 * kunmap_high().  Must not be called from interrupt context.
 */
void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());

	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);
2005-04-17 02:20:36 +04:00
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must
 * perform a global TLB invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
void * __kmap_atomic ( struct page * page , enum km_type type )
{
enum fixed_addresses idx ;
unsigned long vaddr ;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
2006-12-07 07:32:20 +03:00
pagefault_disable ( ) ;
2005-04-17 02:20:36 +04:00
if ( ! PageHighMem ( page ) )
return page_address ( page ) ;
2009-04-01 02:23:25 +04:00
debug_kmap_atomic ( type ) ;
2005-04-17 02:20:36 +04:00
idx = type + KM_TYPE_NR * smp_processor_id ( ) ;
vaddr = __fix_to_virt ( FIX_KMAP_BEGIN + idx ) ;
# ifdef CONFIG_DEBUG_HIGHMEM
2009-03-30 16:49:44 +04:00
BUG_ON ( ! pte_none ( * ( kmap_pte - idx ) ) ) ;
2005-04-17 02:20:36 +04:00
# endif
2009-04-25 13:25:34 +04:00
set_pte ( kmap_pte - idx , mk_pte ( page , PAGE_KERNEL ) ) ;
2005-04-17 02:20:36 +04:00
local_flush_tlb_one ( ( unsigned long ) vaddr ) ;
return ( void * ) vaddr ;
}
2009-04-25 13:25:34 +04:00
EXPORT_SYMBOL ( __kmap_atomic ) ;
2005-04-17 02:20:36 +04:00
/*
 * Tear down a mapping created by __kmap_atomic() and re-enable
 * pagefaults.  The real PTE clearing only happens under
 * CONFIG_DEBUG_HIGHMEM; otherwise the slot is simply reused by the
 * next kmap_atomic() of the same type on this CPU.
 */
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	/* Lowmem address: __kmap_atomic() never installed a fixmap PTE. */
	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	/* The address handed back must match the slot @type selects. */
	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Force other mappings to Oops if they'll try to access this
	 * pte without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
2005-04-17 02:20:36 +04:00
2005-07-12 00:45:51 +04:00
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void * kmap_atomic_pfn ( unsigned long pfn , enum km_type type )
{
enum fixed_addresses idx ;
unsigned long vaddr ;
2006-12-07 07:32:20 +03:00
pagefault_disable ( ) ;
2005-07-12 00:45:51 +04:00
2009-04-01 02:23:25 +04:00
debug_kmap_atomic ( type ) ;
2005-07-12 00:45:51 +04:00
idx = type + KM_TYPE_NR * smp_processor_id ( ) ;
vaddr = __fix_to_virt ( FIX_KMAP_BEGIN + idx ) ;
2009-04-25 13:25:34 +04:00
set_pte ( kmap_pte - idx , pfn_pte ( pfn , PAGE_KERNEL ) ) ;
2005-07-12 00:45:51 +04:00
flush_tlb_one ( vaddr ) ;
return ( void * ) vaddr ;
}
2005-04-17 02:20:36 +04:00
/*
 * Translate a kernel virtual address back to its struct page.
 *
 * Addresses below the fixmap area are ordinary lowmem mappings and go
 * through virt_to_page(); fixmap addresses are resolved via the cached
 * kmap PTE array.
 */
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long addr = (unsigned long)ptr;
	pte_t *ptep;

	if (addr < FIXADDR_START)
		return virt_to_page(ptr);

	ptep = kmap_pte - (virt_to_fix(addr) - FIX_KMAP_BEGIN);
	return pte_page(*ptep);
}
2009-04-25 13:25:34 +04:00
/*
 * One-time boot setup: cache the PTE of the first kmap fixmap slot so
 * the kmap_atomic() paths can index slots with plain pointer
 * arithmetic.
 */
void __init kmap_init(void)
{
	kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
}