2013-10-30 20:05:11 +05:30
/*
* Machine check exception handling CPU - side for power7 and power8
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2 of the License , or
* ( at your option ) any later version .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*
* You should have received a copy of the GNU General Public License
* along with this program ; if not , write to the Free Software
* Foundation , Inc . , 59 Temple Place - Suite 330 , Boston , MA 02111 - 1307 , USA .
*
* Copyright 2013 IBM Corporation
* Author : Mahesh Salgaonkar < mahesh @ linux . vnet . ibm . com >
*/
# undef DEBUG
# define pr_fmt(fmt) "mce_power: " fmt
# include <linux/types.h>
# include <linux/ptrace.h>
# include <asm/mmu.h>
# include <asm/mce.h>
2013-12-16 10:46:24 +05:30
# include <asm/machdep.h>
2013-10-30 20:05:11 +05:30
2014-12-19 08:41:05 +05:30
/*
 * Flush the TLB on ISA 2.06 style cores (POWER7/POWER8) using tlbiel.
 *
 * num_sets: number of TLB congruence-class sets to invalidate.
 * action:   TLB_INVAL_SCOPE_GLOBAL to invalidate all TLB entries, or
 *           TLB_INVAL_SCOPE_LPID to invalidate only entries belonging
 *           to the current LPID; any other value is a caller bug (BUG()).
 */
static void flush_tlb_206(unsigned int num_sets, unsigned int action)
{
	unsigned long rb;
	unsigned int i;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		rb = TLBIEL_INVAL_SET;
		break;
	case TLB_INVAL_SCOPE_LPID:
		rb = TLBIEL_INVAL_SET_LPID;
		break;
	default:
		BUG();
		break;
	}

	/* Order prior page table updates before the invalidations. */
	asm volatile("ptesync" : : : "memory");
	for (i = 0; i < num_sets; i++) {
		/* Invalidate one set, then advance RB to the next set index. */
		asm volatile("tlbiel %0" : : "r" (rb));
		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
	}
	/* Ensure the invalidations have completed before returning. */
	asm volatile("ptesync" : : : "memory");
}
/*
 * Generic routines to flush TLB on POWER processors. These routines
 * are used as flush_tlb hook in the cpu_spec.
 *
 * action => TLB_INVAL_SCOPE_GLOBAL:  Invalidate all TLBs.
 *	     TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
 */
/* cpu_spec->flush_tlb hook for POWER7: flush all POWER7 TLB sets. */
void __flush_tlb_power7(unsigned int action)
{
	flush_tlb_206(POWER7_TLB_SETS, action);
}
/* cpu_spec->flush_tlb hook for POWER8: flush all POWER8 TLB sets. */
void __flush_tlb_power8(unsigned int action)
{
	flush_tlb_206(POWER8_TLB_SETS, action);
}
2016-02-19 11:16:24 +11:00
void __flush_tlb_power9 ( unsigned int action )
{
2016-04-29 23:26:05 +10:00
if ( radix_enabled ( ) )
flush_tlb_206 ( POWER9_TLB_SETS_RADIX , action ) ;
2016-02-19 11:16:24 +11:00
flush_tlb_206 ( POWER9_TLB_SETS_HASH , action ) ;
}
2013-10-30 20:05:11 +05:30
/* flush SLBs and reload */
2016-05-03 08:59:27 +02:00
# ifdef CONFIG_PPC_STD_MMU_64
2013-10-30 20:05:11 +05:30
/*
 * Invalidate the whole SLB and, when running as the host kernel,
 * repopulate it from the shadow SLB buffer.
 */
static void flush_and_reload_slb(void)
{
	struct slb_shadow *slb;
	unsigned long i, n;

	/* Invalidate all SLBs */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));

#ifdef CONFIG_KVM_BOOK3S_HANDLER
	/*
	 * If machine check is hit when in guest or in transition, we will
	 * only flush the SLBs and continue.
	 */
	if (get_paca()->kvm_hstate.in_guest)
		return;
#endif

	/* For host kernel, reload the SLBs from shadow SLB buffer. */
	slb = get_slb_shadow();
	if (!slb)
		return;

	/* Shadow buffer fields are big-endian; cap entries at SLB_MIN_SIZE. */
	n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);

	/* Load up the SLB entries from shadow SLB */
	for (i = 0; i < n; i++) {
		unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
		unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);

		/* Low 12 bits of RB carry the SLB entry index. */
		rb = (rb & ~0xFFFul) | i;
		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
	}
}
2016-04-29 23:26:07 +10:00
# endif
2013-10-30 20:05:11 +05:30
/*
 * Handle a D-side (load/store) machine check described by DSISR.
 *
 * dsisr:           the DSISR value captured at the machine check.
 * slb_error_bits:  processor-specific mask of DSISR SLB error bits
 *                  (differs between POWER7 and POWER8).
 *
 * Returns 1 if every set error bit was recognized and handled,
 * 0 if any unrecognized error bits remain.
 */
static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
{
	long handled = 1;

	/*
	 * flush and reload SLBs for SLB errors and flush TLBs for TLB errors.
	 * reset the error bits whenever we handle them so that at the end
	 * we can check whether we handled all of them or not.
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	if (dsisr & slb_error_bits) {
		flush_and_reload_slb();
		/* reset error bits */
		dsisr &= ~(slb_error_bits);
	}
	if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
		if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
			cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
		/* reset error bits */
		dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
	}
#endif
	/* Any other errors we don't understand? */
	if (dsisr & 0xffffffffUL)
		handled = 0;

	return handled;
}
/*
 * POWER7 D-side machine check: delegate to the common DSISR handler
 * with the POWER7 SLB error bit mask.
 */
static long mce_handle_derror_p7(uint64_t dsisr)
{
	const uint64_t p7_slb_bits = P7_DSISR_MC_SLB_ERRORS;

	return mce_handle_derror(dsisr, p7_slb_bits);
}
/*
 * Handle SRR1 instruction-fetch machine check reasons that are common
 * to POWER7 and POWER8 (SLB parity/multihit, TLB multihit).
 *
 * Returns 1 if the reason was recognized and handled, 0 otherwise.
 */
static long mce_handle_common_ierror(uint64_t srr1)
{
	long handled = 0;

	switch (P7_SRR1_MC_IFETCH(srr1)) {
	case 0:
		break;
#ifdef CONFIG_PPC_STD_MMU_64
	case P7_SRR1_MC_IFETCH_SLB_PARITY:
	case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
		/* flush and reload SLBs for SLB errors. */
		flush_and_reload_slb();
		handled = 1;
		break;
	case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
		/* Only handled if this CPU provides a TLB flush hook. */
		if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
			cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
			handled = 1;
		}
		break;
#endif
	default:
		break;
	}

	return handled;
}
/*
 * Handle a POWER7 I-side (instruction fetch) machine check: run the
 * common handler, then handle the P7-specific "SLB both" reason.
 *
 * Returns 1 if handled, 0 otherwise.
 */
static long mce_handle_ierror_p7(uint64_t srr1)
{
	long handled = 0;

	handled = mce_handle_common_ierror(srr1);

#ifdef CONFIG_PPC_STD_MMU_64
	if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
		flush_and_reload_slb();
		handled = 1;
	}
#endif
	return handled;
}
2013-10-30 20:05:40 +05:30
/*
 * Decode the SRR1 ifetch machine check reasons common to POWER7 and
 * POWER8 into *mce_err. Unrecognized reasons leave *mce_err untouched.
 */
static void mce_get_common_ierror(struct mce_error_info *mce_err, uint64_t srr1)
{
	uint64_t reason = P7_SRR1_MC_IFETCH(srr1);

	if (reason == P7_SRR1_MC_IFETCH_SLB_PARITY) {
		mce_err->error_type = MCE_ERROR_TYPE_SLB;
		mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
	} else if (reason == P7_SRR1_MC_IFETCH_SLB_MULTIHIT) {
		mce_err->error_type = MCE_ERROR_TYPE_SLB;
		mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
	} else if (reason == P7_SRR1_MC_IFETCH_TLB_MULTIHIT) {
		mce_err->error_type = MCE_ERROR_TYPE_TLB;
		mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
	} else if (reason == P7_SRR1_MC_IFETCH_UE ||
		   reason == P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL) {
		mce_err->error_type = MCE_ERROR_TYPE_UE;
		mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
	} else if (reason == P7_SRR1_MC_IFETCH_UE_TLB_RELOAD) {
		mce_err->error_type = MCE_ERROR_TYPE_UE;
		mce_err->u.ue_error_type =
			MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
	}
}
/*
 * Decode a POWER7 ifetch machine check: common decode first, then the
 * P7-only "SLB both" reason, which maps to an indeterminate SLB error.
 */
static void mce_get_ierror_p7(struct mce_error_info *mce_err, uint64_t srr1)
{
	mce_get_common_ierror(mce_err, srr1);

	if (P7_SRR1_MC_IFETCH(srr1) != P7_SRR1_MC_IFETCH_SLB_BOTH)
		return;

	mce_err->error_type = MCE_ERROR_TYPE_SLB;
	mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
}
/*
 * Decode a POWER7 D-side machine check from DSISR into *mce_err.
 * Bits are tested in priority order; the first match wins and any
 * unrecognized DSISR leaves *mce_err untouched.
 */
static void mce_get_derror_p7(struct mce_error_info *mce_err, uint64_t dsisr)
{
	if (dsisr & P7_DSISR_MC_UE) {
		mce_err->error_type = MCE_ERROR_TYPE_UE;
		mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
		return;
	}
	if (dsisr & P7_DSISR_MC_UE_TABLEWALK) {
		mce_err->error_type = MCE_ERROR_TYPE_UE;
		mce_err->u.ue_error_type =
			MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
		return;
	}
	if (dsisr & P7_DSISR_MC_ERAT_MULTIHIT) {
		mce_err->error_type = MCE_ERROR_TYPE_ERAT;
		mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
		return;
	}
	if (dsisr & P7_DSISR_MC_SLB_MULTIHIT) {
		mce_err->error_type = MCE_ERROR_TYPE_SLB;
		mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
		return;
	}
	if (dsisr & P7_DSISR_MC_SLB_PARITY_MFSLB) {
		mce_err->error_type = MCE_ERROR_TYPE_SLB;
		mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
		return;
	}
	if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
		mce_err->error_type = MCE_ERROR_TYPE_TLB;
		mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
		return;
	}
	if (dsisr & P7_DSISR_MC_SLB_MULTIHIT_PARITY) {
		mce_err->error_type = MCE_ERROR_TYPE_SLB;
		mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
	}
}
2013-12-16 10:46:24 +05:30
/*
 * Try to recover from an uncorrectable error (UE).
 *
 * On specific SCOM read via MMIO we may get a machine check
 * exception with SRR0 pointing inside opal. If that is the
 * case OPAL may have recovery address to re-read SCOM data in
 * different way and hence we can recover from this MC.
 *
 * Returns 1 if the platform recovered the error, 0 otherwise.
 */
static long mce_handle_ue_error(struct pt_regs *regs)
{
	if (ppc_md.mce_check_early_recovery &&
	    ppc_md.mce_check_early_recovery(regs))
		return 1;

	return 0;
}
2013-10-30 20:05:11 +05:30
/*
 * Early real-mode machine check handler for POWER7: dispatch to the
 * D-side (load/store) or I-side (ifetch) handler based on SRR1,
 * decode the error for reporting, and save an MCE event.
 *
 * Returns 1 if the error was handled/recovered, 0 otherwise.
 */
long __machine_check_early_realmode_p7(struct pt_regs *regs)
{
	uint64_t srr1, nip, addr;
	long handled = 1;
	struct mce_error_info mce_error_info = { 0 };

	/* At a machine check, regs->msr carries the SRR1 value. */
	srr1 = regs->msr;
	nip = regs->nip;

	/*
	 * Handle memory errors depending whether this was a load/store or
	 * ifetch exception. Also, populate the mce error_type and
	 * type-specific error_type from either SRR1 or DSISR, depending
	 * whether this was a load/store or ifetch exception
	 */
	if (P7_SRR1_MC_LOADSTORE(srr1)) {
		handled = mce_handle_derror_p7(regs->dsisr);
		mce_get_derror_p7(&mce_error_info, regs->dsisr);
		/* Faulting data address for D-side errors. */
		addr = regs->dar;
	} else {
		handled = mce_handle_ierror_p7(srr1);
		mce_get_ierror_p7(&mce_error_info, srr1);
		/* Faulting instruction address for I-side errors. */
		addr = regs->nip;
	}

	/* Handle UE error. */
	if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
		handled = mce_handle_ue_error(regs);

	save_mce_event(regs, handled, &mce_error_info, nip, addr);
	return handled;
}
2013-10-30 20:05:26 +05:30
2013-10-30 20:05:40 +05:30
/*
 * Decode a POWER8 ifetch machine check: common decode first, then the
 * P8-only ERAT multihit reason.
 */
static void mce_get_ierror_p8(struct mce_error_info *mce_err, uint64_t srr1)
{
	mce_get_common_ierror(mce_err, srr1);

	if (P7_SRR1_MC_IFETCH(srr1) != P8_SRR1_MC_IFETCH_ERAT_MULTIHIT)
		return;

	mce_err->error_type = MCE_ERROR_TYPE_ERAT;
	mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
}
/*
 * Decode a POWER8 D-side machine check from DSISR: common P7 decode,
 * plus the P8-only secondary ERAT multihit bit.
 */
static void mce_get_derror_p8(struct mce_error_info *mce_err, uint64_t dsisr)
{
	mce_get_derror_p7(mce_err, dsisr);

	if (!(dsisr & P8_DSISR_MC_ERAT_MULTIHIT_SEC))
		return;

	mce_err->error_type = MCE_ERROR_TYPE_ERAT;
	mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
}
2013-10-30 20:05:26 +05:30
/*
 * Handle a POWER8 I-side (instruction fetch) machine check: run the
 * common handler, then handle the P8-specific ERAT multihit reason
 * (recovered by flushing and reloading the SLB).
 *
 * Returns 1 if handled, 0 otherwise.
 */
static long mce_handle_ierror_p8(uint64_t srr1)
{
	long handled = 0;

	handled = mce_handle_common_ierror(srr1);

#ifdef CONFIG_PPC_STD_MMU_64
	if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
		flush_and_reload_slb();
		handled = 1;
	}
#endif
	return handled;
}
/*
 * POWER8 D-side machine check: delegate to the common DSISR handler
 * with the POWER8 SLB error bit mask.
 */
static long mce_handle_derror_p8(uint64_t dsisr)
{
	const uint64_t p8_slb_bits = P8_DSISR_MC_SLB_ERRORS;

	return mce_handle_derror(dsisr, p8_slb_bits);
}
/*
 * Early real-mode machine check handler for POWER8: dispatch to the
 * D-side (load/store) or I-side (ifetch) handler based on SRR1,
 * decode the error for reporting, and save an MCE event.
 *
 * Returns 1 if the error was handled/recovered, 0 otherwise.
 */
long __machine_check_early_realmode_p8(struct pt_regs *regs)
{
	uint64_t srr1, nip, addr;
	long handled = 1;
	struct mce_error_info mce_error_info = { 0 };

	/* At a machine check, regs->msr carries the SRR1 value. */
	srr1 = regs->msr;
	nip = regs->nip;

	if (P7_SRR1_MC_LOADSTORE(srr1)) {
		handled = mce_handle_derror_p8(regs->dsisr);
		mce_get_derror_p8(&mce_error_info, regs->dsisr);
		/* Faulting data address for D-side errors. */
		addr = regs->dar;
	} else {
		handled = mce_handle_ierror_p8(srr1);
		mce_get_ierror_p8(&mce_error_info, srr1);
		/* Faulting instruction address for I-side errors. */
		addr = regs->nip;
	}

	/* Handle UE error. */
	if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
		handled = mce_handle_ue_error(regs);

	save_mce_event(regs, handled, &mce_error_info, nip, addr);
	return handled;
}