// SPDX-License-Identifier: GPL-2.0-only
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/memblock.h>

void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* Claim the next atomic-kmap nesting slot for this CPU. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	/* The fixmap slot must be empty; install the new mapping. */
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	set_pte(kmap_pte - idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);
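
/*
 * Illustrative usage sketch (not part of this file): callers normally reach
 * kmap_atomic_high_prot() through kmap_atomic()/kmap_atomic_prot(), which
 * handle lowmem pages directly and disable preemption and pagefaults before
 * falling back to this highmem path. A minimal pattern, assuming the caller
 * already holds a struct page *page and a hypothetical buffer buf/len:
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(vaddr, buf, len);
 *	kunmap_atomic(vaddr);
 *
 * The mapping is per-CPU and stack-like, so the code between map and unmap
 * must be short and must not sleep.
 */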

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
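
/*
 * Hypothetical usage sketch: a caller that only knows the raw PFN of a frame
 * with no struct page behind it (e.g. a reserved region) can still get a
 * temporary kernel mapping. The names pfn and status are made up for
 * illustration:
 *
 *	u32 *regs = kmap_atomic_pfn(pfn);
 *	u32 status = regs[0];
 *	kunmap_atomic(regs);
 *
 * The usual kmap_atomic() constraints apply: per-CPU, LIFO ordering, and no
 * sleeping while the mapping is held.
 */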

void kunmap_atomic_high(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	/*
	 * Only addresses inside the fixmap-based atomic kmap window have a
	 * pte to tear down; lowmem addresses fall through (and are sanity
	 * checked below when CONFIG_DEBUG_HIGHMEM is enabled).
	 */
	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif
}
EXPORT_SYMBOL(kunmap_atomic_high);
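
/*
 * Sketch of the stack discipline implied by kmap_atomic_idx_push()/_pop()
 * above (hypothetical caller with two nested mappings on one CPU):
 *
 *	void *a = kmap_atomic(page_a);
 *	void *b = kmap_atomic(page_b);
 *	...
 *	kunmap_atomic(b);
 *	kunmap_atomic(a);
 *
 * Unmapping out of LIFO order would pop the wrong per-CPU fixmap slot; the
 * WARN_ON_ONCE() under CONFIG_DEBUG_HIGHMEM above is there to catch exactly
 * that mismatch.
 */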

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before memblock_free_all().
	 */
	reset_all_zones_managed_pages();

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				zone_end_pfn);
	}
}
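
/*
 * For reference, the printk() above produces one boot-log line per highmem
 * zone of roughly this shape (the node id and PFN range shown here are
 * made-up sample values):
 *
 *	Initializing HighMem for node 0 (00036000:0007ffe0)
 */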