/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define HPTE_LOCK_BIT 3

static DEFINE_SPINLOCK(native_tlbie_lock);
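
/*
 * Issue a broadcast tlbie for a single page.  The VA operand is built
 * from the page size encoding (plus the L bit for large pages) and the
 * segment size; on MMU_FTR_TLBIE_206 hardware the feature fixup selects
 * the new-form PPC_TLBIE encoding instead of the old two-operand form.
 */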
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		va |= ssize << 8;
		asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0),
					       %2)
			     : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
			     : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0),
					       %2)
			     : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
			     : "memory");
		break;
	}
}
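
/*
 * Local-form counterpart of __tlbie(): invalidate the translation on
 * this CPU only.  The tlbiel instruction is emitted as a raw .long
 * encoding (0x7c000224), with the L bit placed at bit 21 of the opcode.
 */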
static inline void __tlbiel(unsigned long va, int psize, int ssize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		va |= ssize << 8;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r" (va) : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r" (va) : "memory");
		break;
	}
}
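
/*
 * Flush one page translation, using tlbiel when the flush is local and
 * both the CPU and the page size support it, otherwise a broadcast tlbie
 * serialised by native_tlbie_lock on hardware that cannot handle
 * concurrent tlbie from several processors.
 */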
static inline void tlbie(unsigned long va, int psize, int ssize, int local)
{
	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		spin_lock(&native_tlbie_lock);
	asm volatile("ptesync" : : : "memory");
	if (use_local) {
		__tlbiel(va, psize, ssize);
		asm volatile("ptesync" : : : "memory");
	} else {
		__tlbie(va, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	}
	if (lock_tlbie && !use_local)
		spin_unlock(&native_tlbie_lock);
}
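
/*
 * Per-HPTE lock: atomically set HPTE_LOCK_BIT in the first doubleword
 * of the entry, spinning with cpu_relax() until the bit is free.
 */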
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}
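
/* Drop the per-HPTE lock taken by native_lock_hpte(). */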
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}
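
/*
 * Insert an HPTE into the given hash group: claim the first invalid
 * slot, write the second doubleword, then set the first doubleword
 * with the valid bit (which also releases the per-HPTE lock).  Returns
 * the slot index, with bit 3 set for the secondary hash, or -1 if the
 * group is full.
 */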
static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			       unsigned long pa, unsigned long rflags,
			       unsigned long vflags, int psize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, va=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, va, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
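
/*
 * Evict one valid, non-bolted entry from a hash group, starting the
 * search at a pseudo-random slot derived from the timebase.  Returns
 * the freed slot index, or -1 if no suitable entry was found.
 */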
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
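
/*
 * Update the protection bits of the HPTE at @slot if it still matches
 * the expected virtual address, then flush the page from the TLB in
 * either case.  Returns 0 on a hit, -1 on a miss.
 */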
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long va, int psize, int ssize,
				 int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_v(va, psize, ssize);

	DBG_LOW("    update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
		va, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, ssize, local);

	return ret;
}
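
/*
 * Search the primary hash group for a bolted kernel mapping and return
 * the slot of the matching valid HPTE, or -1 if none is found.
 */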
static long native_hpte_find(unsigned long va, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_v(va, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hptep->v;

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vsid, va;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	va = hpt_va(ea, vsid, ssize);

	slot = native_hpte_find(va, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, ssize, 0);
}
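
/*
 * Invalidate the HPTE at @slot if it still matches @va, then flush the
 * translation from the TLB.  Runs with interrupts disabled.
 */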
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
				   int psize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(va=%016lx, hash: %x)\n", va, slot);

	want_v = hpte_encode_v(va, psize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(va, psize, ssize, local);

	local_irq_restore(flags);
}

#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
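
/*
 * Recover the page size, segment size and virtual address from a raw
 * HPTE and its slot number.  Used by native_hpte_clear(), which has no
 * Linux PTE to consult when building the tlbie argument.
 */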
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *ssize, unsigned long *va)
{
	unsigned long hpte_r = hpte->r;
	unsigned long hpte_v = hpte->v;
	unsigned long avpn;
	int i, size, shift, penc;

	if (!(hpte_v & HPTE_V_LARGE))
		size = MMU_PAGE_4K;
	else {
		for (i = 0; i < LP_BITS; i++) {
			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
				break;
		}
		penc = LP_MASK(i+1) >> LP_SHIFT;
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* 4K pages are not represented by LP */
			if (size == MMU_PAGE_4K)
				continue;

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			if (penc == mmu_psize_defs[size].penc)
				break;
		}
	}

	/* This works for all page sizes, and for 256M and 1T segments */
	shift = mmu_psize_defs[size].shift;
	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;

	if (shift < 23) {
		unsigned long vpi, vsid, pteg;

		pteg = slot / HPTES_PER_GROUP;
		if (hpte_v & HPTE_V_SECONDARY)
			pteg = ~pteg;
		switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
		case MMU_SEGSIZE_256M:
			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
			break;
		case MMU_SEGSIZE_1T:
			vsid = avpn >> 40;
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			break;
		default:
			avpn = vpi = size = 0;
		}
		avpn |= (vpi << mmu_psize_defs[size].shift);
	}

	*va = avpn;
	*psize = size;
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v, va;
	unsigned long pteg_count;
	int psize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &ssize, &va);
			hptep->v = 0;
			__tlbie(va, psize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush: we batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long va, hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		va = batch->vaddr[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
			hash = hpt_hash(va, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_v(va, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (cpu_has_feature(CPU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbiel(va, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync" : : : "memory");
	} else {
		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);

		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbie(va, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");

		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_PPC_PSERIES
/* Disable TLB batching on nighthawk */
static inline int tlb_batching_enabled(void)
{
	struct device_node *root = of_find_node_by_path("/");
	int enabled = 1;

	if (root) {
		const char *model = of_get_property(root, "model", NULL);
		if (model && !strcmp(model, "IBM,9076-N81"))
			enabled = 0;
		of_node_put(root);
	}

	return enabled;
}
#else
static inline int tlb_batching_enabled(void)
{
	return 1;
}
#endif
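
/* Wire up the native (bare-metal) HPTE callbacks in ppc_md. */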
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;

	if (tlb_batching_enabled())
		ppc_md.flush_hash_range = native_flush_hash_range;
}