/*
 * Copyright IBM Corporation, 2015
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/mm.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
/*
 * index from 0 - 15
 */
bool __rpte_sub_valid(real_pte_t rpte, unsigned long index)
{
	unsigned long g_idx;
	unsigned long ptev = pte_val(rpte.pte);

	g_idx = (ptev & H_PAGE_COMBO_VALID) >> H_PAGE_F_GIX_SHIFT;
	index = index >> 2;
	if (g_idx & (0x1 << index))
		return true;
	else
		return false;
}
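
/*
 * Explanatory note (added, derived from the code above): a 64K linux page
 * has 16 4K subpages, tracked here as four groups of four. H_PAGE_COMBO_VALID
 * reuses the H_PAGE_F_SECOND/H_PAGE_F_GIX bits as one "group valid" bit per
 * group, so index >> 2 selects the group and the bit test reports whether
 * any subpage in that group has been hashed.
 */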
/*
 * index from 0 - 15
 */
static unsigned long mark_subptegroup_valid(unsigned long ptev, unsigned long index)
{
	unsigned long g_idx;

	if (!(ptev & H_PAGE_COMBO))
		return ptev;
	index = index >> 2;
	g_idx = 0x1 << index;
	return ptev | (g_idx << H_PAGE_F_GIX_SHIFT);
}
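
/*
 * Worked example (illustrative, not from the original source): for
 * index == 9, index >> 2 == 2, so mark_subptegroup_valid() sets group
 * bit 2 and __rpte_sub_valid() will subsequently report subpages 8-11
 * as valid. The tracking is deliberately coarse: one bit covers four
 * subpages.
 */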
int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
		   pte_t *ptep, unsigned long trap, unsigned long flags,
		   int ssize, int subpg_prot)
{
	real_pte_t rpte;
	unsigned long *hidxp;
	unsigned long hpte_group;
	unsigned int subpg_index;
	unsigned long rflags, pa, hidx;
	unsigned long old_pte, new_pte, subpg_pte;
	unsigned long vpn, hash, slot;
	unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;

	/*
	 * atomically mark the linux large page PTE busy and dirty
	 */
	do {
		pte_t pte = READ_ONCE(*ptep);

		old_pte = pte_val(pte);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;
		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access. Since this is a 4K insert of a 64K page size
		 * also add H_PAGE_COMBO
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED | H_PAGE_COMBO;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
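
	/*
	 * Note (added): pte_xchg() is a cmpxchg-style helper; it stores
	 * new_pte only if the PTE still equals old_pte and returns false
	 * otherwise, so the loop above retries whenever another thread
	 * raced us between the READ_ONCE() and the exchange. H_PAGE_BUSY
	 * in new_pte acts as the lock bit.
	 */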
	/*
	 * Handle the subpage protection bits
	 */
	subpg_pte = new_pte & ~subpg_prot;
	rflags = htab_convert_pte_flags(subpg_pte);

	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {

		/*
		 * No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
	}

	subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
	vpn  = hpt_vpn(ea, vsid, ssize);
	rpte = __real_pte(__pte(old_pte), ptep);
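
	/*
	 * Note (added): subpg_index is the 4K subpage number (0-15) within
	 * the 64K page, since shift is 12 and PAGE_SIZE is 64K here.
	 * __real_pte() also picks up the second half of the PTE page
	 * (ptep + PTRS_PER_PTE), where the per-subpage hash slot indexes
	 * (hidx) live.
	 */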
	/*
	 * None of the sub 4K pages are hashed
	 */
	if (!(old_pte & H_PAGE_HASHPTE))
		goto htab_insert_hpte;
	/*
	 * Check if the pte was already inserted into the hash table
	 * as a 64K HW page, and invalidate the 64K HPTE if so.
	 */
	if (!(old_pte & H_PAGE_COMBO)) {
		flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
		/*
		 * clear the old slot details from the old and new pte.
		 * On hash insert failure we use old pte value and we don't
		 * want slot information there if we have an insert failure.
		 */
		old_pte &= ~(H_PAGE_HASHPTE | H_PAGE_F_GIX | H_PAGE_F_SECOND);
		new_pte &= ~(H_PAGE_HASHPTE | H_PAGE_F_GIX | H_PAGE_F_SECOND);
		goto htab_insert_hpte;
	}
	/*
	 * Check for sub page valid and update
	 */
	if (__rpte_sub_valid(rpte, subpg_index)) {
		int ret;

		hash = hpt_hash(vpn, shift, ssize);
		hidx = __rpte_to_hidx(rpte, subpg_index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
						 MMU_PAGE_4K, MMU_PAGE_4K,
						 ssize, flags);
		/*
		 * If we failed because typically the HPTE wasn't really here,
		 * we try an insertion.
		 */
		if (ret == -1)
			goto htab_insert_hpte;

		*ptep = __pte(new_pte & ~H_PAGE_BUSY);
		return 0;
	}
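
	/*
	 * Note (added): hidx encodes both pieces needed to locate the HPTE:
	 * _PTEIDX_SECONDARY says the entry went into the secondary bucket
	 * (so the hash must be inverted, hash = ~hash), and _PTEIDX_GROUP_IX
	 * is the slot number (0-7) within that 8-entry HPTE group.
	 */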
htab_insert_hpte:
	/*
	 * handle H_PAGE_4K_PFN case
	 */
	if (old_pte & H_PAGE_4K_PFN) {
		/*
		 * All the sub 4K pages have the same
		 * physical address.
		 */
		pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT;
	} else {
		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
		pa += (subpg_index << shift);
	}
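	/*
	 * Note (added): normally each 4K subpage maps a distinct 4K piece
	 * of the 64K physical page, hence the subpg_index << shift
	 * adjustment. With H_PAGE_4K_PFN the PTE maps only a single 4K
	 * frame, so every subpage resolves to that same physical address.
	 */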
	hash = hpt_hash(vpn, shift, ssize);
repeat:
	hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
	/* Insert into the hash table, primary slot */
	slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
					MMU_PAGE_4K, MMU_PAGE_4K, ssize);
	/*
	 * Primary is full, try the secondary
	 */
	if (unlikely(slot == -1)) {
		hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
						rflags, HPTE_V_SECONDARY,
						MMU_PAGE_4K, MMU_PAGE_4K,
						ssize);
		if (slot == -1) {
			if (mftb() & 0x1)
				hpte_group = ((hash & htab_hash_mask) *
					      HPTES_PER_GROUP) & ~0x7UL;
			mmu_hash_ops.hpte_remove(hpte_group);
			/*
			 * FIXME!! Should we try the group from which we removed?
			 */
			goto repeat;
		}
	}
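
	/*
	 * Note (added): when both buckets are full, mftb() reads the
	 * timebase register, so its low bit serves as a cheap pseudo-random
	 * coin flip between evicting from the primary and the secondary
	 * group before retrying the insert.
	 */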
	/*
	 * Hypervisor failure. Restore old pte and return -1
	 * similar to __hash_page_*
	 */
	if (unlikely(slot == -2)) {
		*ptep = __pte(old_pte);
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
		return -1;
	}
	/*
	 * Insert slot number & secondary bit in PTE second half,
	 * clear H_PAGE_BUSY and set appropriate HPTE slot bit.
	 * Since we have H_PAGE_BUSY set on ptep, we can be sure
	 * nobody is updating hidx.
	 */
	hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
	rpte.hidx &= ~(0xfUL << (subpg_index << 2));
	*hidxp = rpte.hidx | (slot << (subpg_index << 2));
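	/*
	 * Note (added): the second half of the PTE page holds one unsigned
	 * long per PTE, carved into sixteen 4-bit fields, one per 4K
	 * subpage. subpg_index << 2 is the field's bit offset; the old
	 * nibble is cleared and the new slot value written in.
	 */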
	new_pte = mark_subptegroup_valid(new_pte, subpg_index);
	new_pte |= H_PAGE_HASHPTE;
	/*
	 * check __real_pte for details on matching smp_rmb()
	 */
	smp_wmb();
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}

int __hash_page_64K(unsigned long ea, unsigned long access,
		    unsigned long vsid, pte_t *ptep, unsigned long trap,
		    unsigned long flags, int ssize)
{
	unsigned long hpte_group;
	unsigned long rflags, pa;
	unsigned long old_pte, new_pte;
	unsigned long vpn, hash, slot;
	unsigned long shift = mmu_psize_defs[MMU_PAGE_64K].shift;

	/*
	 * atomically mark the linux large page PTE busy and dirty
	 */
	do {
		pte_t pte = READ_ONCE(*ptep);

		old_pte = pte_val(pte);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;
		/*
		 * Check if PTE has the cache-inhibit bit set
		 * If so, bail out and refault as a 4k page
		 */
		if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) &&
		    unlikely(pte_ci(pte)))
			return 0;
		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access.
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
	rflags = htab_convert_pte_flags(new_pte);

	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	vpn = hpt_vpn(ea, vsid, ssize);
	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
		/*
		 * There MIGHT be an HPTE for this pte
		 */
		hash = hpt_hash(vpn, shift, ssize);
		if (old_pte & H_PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;

		if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K,
					       MMU_PAGE_64K, ssize,
					       flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}
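
	/*
	 * Note (added): if hpte_updatepp() returned -1 the HPTE was not
	 * found (e.g. it was evicted), so clearing _PAGE_HPTEFLAGS above
	 * makes the insert path below run and create a fresh 64K HPTE.
	 */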
	if (likely(!(old_pte & H_PAGE_HASHPTE))) {

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
		hash = hpt_hash(vpn, shift, ssize);

repeat:
		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

		/* Insert into the hash table, primary slot */
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
						MMU_PAGE_64K, MMU_PAGE_64K,
						ssize);
		/*
		 * Primary is full, try the secondary
		 */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
			slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
							rflags,
							HPTE_V_SECONDARY,
							MMU_PAGE_64K,
							MMU_PAGE_64K, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;
				mmu_hash_ops.hpte_remove(hpte_group);
				/*
				 * FIXME!! Should we try the group from which we removed?
				 */
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   MMU_PAGE_64K, MMU_PAGE_64K, old_pte);
			return -1;
		}
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
		new_pte |= (slot << H_PAGE_F_GIX_SHIFT) &
			(H_PAGE_F_SECOND | H_PAGE_F_GIX);
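		/*
		 * Note (added, assumes the usual hpte_insert() return
		 * convention): the returned slot carries the slot number
		 * within the group in its low 3 bits plus a fourth bit
		 * flagging the secondary bucket, so shifting by
		 * H_PAGE_F_GIX_SHIFT and masking with F_SECOND | F_GIX
		 * records that 4-bit value in the PTE for later lookups.
		 */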
	}
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}