/*
 * xtensa mmu stuff
 *
 * Extracted from init.c
 */

#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/cache.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/initialize_mmu.h>
#include <asm/io.h>

#if defined(CONFIG_HIGHMEM)
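/*
 * Allocate zeroed PTE tables covering n_pages pages starting at vaddr,
 * hook them into the kernel page tables, and return the first PTE.
 */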
static void *__init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);
	pte_t *pte;
	unsigned long i;

	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
		 __func__, vaddr, n_pages);

	pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t));

	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);
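
	/* Point each PMD entry at its newly allocated PTE table. */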
	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}

	return pte;
}
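
/* Set up kernel page tables for the fixmap address range. */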
static void __init fixedrange_init(void)
{
	init_pmd(__fix_to_virt(0), __end_of_fixed_addresses);
}
#endif

void __init paging_init(void)
{
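	/* Start with an empty kernel page directory. */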
	memset(swapper_pg_dir, 0, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	fixedrange_init();
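
	/* The PKMAP area gets its own PTE table, used by kmap()/kunmap(). */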
	pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);

	kmap_init();
#endif
}

/*
 * Flush the MMU and reset associated registers to default values.
 */
void init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensures that valid values exist in the register.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array.  For nonexistent PGSZID<w> fields,
	 * zero is the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
	/*
	 * Update the IO area mapping in case xtensa_kio_paddr has changed.
	 */
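	/* The low bits of the vaddr argument select the TLB way (way 6 here). */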
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
#endif
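
	/* Invalidate any stale TLB entries. */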
	local_flush_tlb_all();

	/* Set rasid register to a known value. */
	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped).  This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(PGTABLE_START);
}