#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
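
/*
 * Illustrative, hypothetical usage sketch (the helper name and callers are
 * assumptions, not part of this API): the classic kmap()/kunmap() pairing,
 * which is only valid from sleepable process context.
 */
#if 0
static void example_copy_from_highpage(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap(page);	/* may sleep if the kmap pool is exhausted */

	memcpy(dst, vaddr, len);	/* len must not exceed PAGE_SIZE */
	kunmap(page);			/* kunmap() takes the page, not the vaddr */
}
#endif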

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	set_pte(kmap_pte - idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
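
/*
 * Illustrative, hypothetical usage sketch (helper name is an assumption):
 * kmap_atomic() may be used from any context, but nothing between the map
 * and the matching kunmap_atomic() is allowed to sleep.
 */
#if 0
static void example_zero_highpage(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* disables preemption and pagefaults */

	memset(vaddr, 0, PAGE_SIZE);		/* short, non-sleeping work only */
	kunmap_atomic(vaddr);			/* takes the kernel vaddr, not the page */
}
#endif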

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
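
/*
 * Illustrative, hypothetical usage sketch (helper name, pfn and offset are
 * assumptions): kmap_atomic_pfn() is for frames that have no struct page,
 * e.g. a reserved or device-owned physical frame.
 */
#if 0
static u32 example_read_pfn(unsigned long pfn, unsigned int offset)
{
	void *vaddr = kmap_atomic_pfn(pfn);	/* map the raw frame */
	u32 val;

	val = *(u32 *)(vaddr + offset);		/* offset must stay within one page */
	kunmap_atomic(vaddr);
	return val;
}
#endif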

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is a bad idea also, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
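
/*
 * Illustrative, hypothetical usage sketch (helper name is an assumption):
 * atomic kmaps nest in strict stack (LIFO) order, because
 * kmap_atomic_idx_push()/kmap_atomic_idx_pop() manage a per-CPU index stack.
 */
#if 0
static void example_copy_highpage(struct page *dst, struct page *src)
{
	void *d = kmap_atomic(dst);
	void *s = kmap_atomic(src);

	memcpy(d, s, PAGE_SIZE);
	kunmap_atomic(s);	/* unmap in reverse (LIFO) order of mapping */
	kunmap_atomic(d);
}
#endif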

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before memblock_free_all()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
}