/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);
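/*
 * All slots are managed through a static pool: pmb_entry_list holds the
 * software copy of each of the 16 hardware entries, and pmb_map is the
 * allocation bitmap guarding them (bit N set means slot N is in use).
 */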
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;
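/*
 * Each hardware entry is programmed through a pair of memory-mapped
 * array registers: mk_pmb_addr() yields the address-array slot (VPN and
 * valid bit) and mk_pmb_data() the data-array slot (PPN, size and
 * caching flags) for a given entry index.
 */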
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

	/*
	 * find_first_zero_bit() returns NR_PMB_ENTRIES when the map is
	 * full, so the bounds check must be inclusive.
	 */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map))
		goto repeat;

	return pos;
}
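/*
 * pmb_alloc() either grabs the first free slot (entry == PMB_NO_ENTRY)
 * or claims a specific one; the latter is how the boot-time
 * synchronization code below reuses the slots already programmed by
 * the boot loader.
 */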
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		if (test_and_set_bit(entry, &pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;
}
static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn	= 0;
	pmbe->ppn	= 0;
	pmbe->flags	= 0;
	pmbe->entry	= 0;

	clear_bit(pos, &pmb_map);
}
/*
 * Must be run in P2 (uncached) for __set_pmb_entry(): reprogramming
 * the PMB array registers from a cached, PMB-translated mapping is
 * unsafe.
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	__raw_writel(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	__raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos));
}
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}
static void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	back_to_cached();
}
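/*
 * Supported mapping sizes, ordered largest first so that pmb_remap()
 * can cover a region greedily with the fewest possible entries.
 */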
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= 0x20000000, .flag = PMB_SZ_512M, },
	{ .size	= 0x08000000, .flag = PMB_SZ_128M, },
	{ .size	= 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size	= 0x01000000, .flag = PMB_SZ_16M,  },
};
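/*
 * Hypothetical usage sketch (the addresses and pgprot below are
 * illustrative only, not taken from any real board file): map 64MB of
 * device memory, uncached, at a fixed virtual address in the
 * PMB-mappable region:
 *
 *	long mapped = pmb_remap(0xb0000000, 0xa8000000,
 *				0x04000000, PAGE_KERNEL_NOCACHE);
 *	if (mapped < 0)
 *		return mapped;	 (no free PMB entries, etc.)
 *
 * On success the return value is the number of bytes actually mapped.
 */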
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;
	u64 flags;

	flags = pgprot_val(prot);

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}
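/*
 * Worked example: for a 48MB request the loop above skips 512, 128 and
 * 64MB (all larger than the remainder) and lays down a 16MB entry, then
 * retries the same size twice more until the region is covered. The
 * three entries end up linked together, so pmb_unmap() on the first
 * virtual address tears down the whole mapping.
 */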
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, &pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
			/*
			 * Only an exact VPN match counts; don't let a
			 * stale pointer to an unrelated entry leak out
			 * of the loop.
			 */
			pmbe = NULL;
		}
	}

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}
static void __pmb_unmap(struct pmb_entry *pmbe)
{
	BUG_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}
static inline void
pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn)
{
	unsigned int size;
	const char *sz_str;

	size = data_val & PMB_SZ_MASK;

	sz_str = (size == PMB_SZ_16M)  ? " 16MB":
		 (size == PMB_SZ_64M)  ? " 64MB":
		 (size == PMB_SZ_128M) ? "128MB":
					 "512MB";

	pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
		vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
		(data_val & PMB_C) ? "" : "un");
}
static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
static int pmb_synchronize_mappings(void)
{
	unsigned int applied = 0;
	int i;

	pr_info("PMB: boot mappings:\n");

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			__raw_writel(addr_val & ~PMB_V, addr);
			__raw_writel(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data_val |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data_val &= ~PMB_WT;
#else
			data_val &= ~(PMB_C | PMB_WT);
#endif
			__raw_writel(data_val, data);
		}

		flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		pmb_log_mapping(data_val, vpn, ppn);

		applied++;
	}

	return (applied == 0);
}
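/*
 * Note the return convention above: pmb_synchronize_mappings() yields
 * nonzero only when no boot mappings were applied, in which case
 * pmb_init() below resets PMB_IRMCR and flushes the TLB itself.
 */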
int pmb_init(void)
{
	int ret;

	jump_to_uncached();

	/*
	 * Sync our software copy of the PMB mappings with those in
	 * hardware. The mappings in the hardware PMB were either set up
	 * by the bootloader or very early on by the kernel.
	 */
	ret = pmb_synchronize_mappings();
	if (unlikely(ret == 0)) {
		back_to_cached();
		return 0;
	}

	__raw_writel(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);

	back_to_cached();

	return 0;
}
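/*
 * PASCR.SE set means the CPU is running with 32-bit (space-extended)
 * physical addressing via the PMB; when it is clear we are in legacy
 * 29-bit mode.
 */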
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);
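/*
 * The table can then be inspected at runtime; assuming debugfs is
 * mounted in the usual place and that sh_debugfs_root is the arch's
 * "sh" directory:
 *
 *	# cat /sys/kernel/debug/sh/pmb
 */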
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend	= pmb_sysdev_suspend,
	.resume		= pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif