/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	raw_spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}
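
/*
 * For example (illustrative addresses only), two 16MB entries with
 * identical flags, one mapping vpn 0xa0000000 to ppn 0x40000000 and the
 * next mapping vpn 0xa1000000 to ppn 0x41000000, are mergeable: the
 * second begins exactly where the first ends in both the virtual and
 * the physical address space.
 */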

static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);

	return false;
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	raw_spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}
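
/*
 * Why the jump_to_uncached()/back_to_cached() dance above: rewriting a
 * PMB entry can change the very translation the CPU is currently
 * fetching instructions through, so the register writes are issued from
 * the uncached segment where execution does not depend on the entry
 * being modified.
 */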

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			raw_spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				raw_spin_lock_nested(&pmbp->lock,
						     SINGLE_DEPTH_NESTING);
				pmbp->link = pmbe;
				raw_spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}
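
/*
 * Usage sketch (illustrative values, not taken from a real board setup):
 * bolting 64MB of RAM at physical 0x0c000000 into the fixed P1 window
 * would look roughly like
 *
 *	pmb_bolt_mapping(0x8c000000, 0x0c000000, SZ_64M, PAGE_KERNEL);
 *
 * which the loop above satisfies with a single 64MB entry, falling back
 * to several smaller linked entries when the size is not an exact PMB
 * page size.
 */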

void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}
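
/*
 * Note that this path is only taken for large (16MB and up) windows and
 * only when PMB iomapping has been enabled on the command line. The
 * returned cookie is the PMB-backed virtual address plus the sub-page
 * offset of the original physical address, and it is torn down again
 * through pmb_unmap() below, which matches on the mapping's VPN.
 */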

int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		raw_spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;
			raw_spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		raw_spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size	= SZ_16M;
		pmbe->flags	&= ~PMB_SZ_MASK;
		pmbe->flags	|= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);
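
/*
 * PMB-backed I/O mappings are therefore opt-in: booting with "pmb=iomap"
 * on the kernel command line sets pmb_iomapping_enabled, which
 * pmb_remap_caller() checks before attempting to bolt a mapping.
 */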

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;

	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif