// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;
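
/*
 * Map a (possibly highmem) page into kernel virtual address space.
 * Lowmem pages already have a permanent mapping and are returned via
 * page_address(); highmem pages go through kmap_high() and may sleep,
 * so this must not be called from atomic context.
 */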
void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
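
/*
 * Atomically map a highmem page into a per-CPU fixmap slot. Pagefaults
 * and preemption stay disabled until the matching __kunmap_atomic(),
 * so the caller must not sleep while the mapping is held.
 */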
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
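
/*
 * Tear down a mapping created by kmap_atomic()/kmap_atomic_pfn() and
 * re-enable pagefaults and preemption. Addresses below FIXADDR_START
 * were never fixmap mappings, so only the re-enables are needed.
 */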
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	int idx;

	if (vaddr < FIXADDR_START)
		goto out;

#ifdef CONFIG_DEBUG_HIGHMEM
	idx = KM_TYPE_NR * smp_processor_id() + kmap_atomic_idx();

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	flush_tlb_one(vaddr);
#else
	(void) idx; /* to kill a warning */
#endif
	kmap_atomic_idx_pop();
out:
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
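
/*
 * Translate an address returned by kmap_atomic() back to its struct
 * page by reading the fixmap pte; plain lowmem addresses are handled
 * with virt_to_page().
 */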
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
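
/*
 * Set up the page tables covering the persistent kmap (PKMAP) area and
 * record the pte used by kmap_high() in pkmap_page_table.
 */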
static void __init kmap_pages_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE * LAST_PKMAP, swapper_pg_dir);

	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	pud = (pud_t *)pgd;
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
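
/*
 * Initialize highmem support: the persistent kmap area and the fixmap
 * pte used by kmap_atomic().
 */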
void __init kmap_init(void)
{
	unsigned long vaddr;

	kmap_pages_init();

	vaddr = __fix_to_virt(FIX_KMAP_BEGIN);

	kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
}