/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
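
/*
 * Match this CPU against the erratum's MIDR range, then consult
 * REVIDR_EL1: if the entry carries a fixed_revs list and the bits for the
 * current variant/revision are set in REVIDR, the erratum has been fixed
 * in this part and the CPU is reported as unaffected. Must be called on
 * the CPU being checked, hence the SCOPE_LOCAL_CPU/preemptible() warning.
 */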
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
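
/*
 * Qualcomm Kryo parts don't share a single MIDR part number, so only the
 * implementer, the architecture field and the top nibble of the part
 * number are compared against the table's model.
 */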
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU
	 *    anyway reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
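
/*
 * Clearing SCTLR_EL1.UCT causes EL0 reads of CTR_EL0 on this CPU to trap
 * to EL1, where the kernel can emulate the system-wide safe value instead
 * of exposing the mismatched one.
 */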
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
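
/*
 * Slot allocator for the EL2 vectors page: starts at -1 so the first
 * atomic_inc_return() hands out slot 0. The counter is global because
 * other users of the vectors page (the EL2 vector hardening code,
 * presumably) allocate from the same BP_HARDEN_EL2_SLOTS pool.
 */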
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
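
/*
 * A SZ_2K vectors slot holds 16 vector entries of 0x80 bytes each, so the
 * hardening sequence is copied to the head of every entry of the slot,
 * then the copy is made visible to the instruction stream.
 */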
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
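
/*
 * Register the per-CPU hardening callback and pick a hyp vectors slot for
 * it. CPUs using the same callback can share a slot, so look for an
 * existing match under bp_lock before allocating a new one.
 */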
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs
	 * start/end if we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

#include <uapi/linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
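
/*
 * On Falkor, branch-predictor hardening works by stuffing the CPU's
 * return-address stack: sixteen bl instructions (presumably matching the
 * stack's depth) overwrite any attacker-controlled entries, with x30
 * saved and restored around the sequence. detect_harden_bp_fw() installs
 * this in place of the generic firmware call.
 */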
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return -1;
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return -1;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}

#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
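
/*
 * Alternative callback used from assembly: patches the single instruction
 * at the callsite into an hvc or smc to match whichever SMCCC conduit the
 * firmware registered, leaving the original instruction (typically a nop)
 * in place when there is no conduit.
 */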
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
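
/*
 * Flip the mitigation on or off. CPUs with the architected PSTATE.SSBS
 * bit toggle it directly; note the inversion, as SSBS set means
 * speculative store bypass is *permitted*, so enabling the mitigation
 * clears the bit. Everyone else falls back to the firmware's
 * ARCH_WORKAROUND_2 call.
 */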
void arm64_set_ssbd_mitigation(bool state)
{
	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}
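
/*
 * Probe for the SSBD mitigation on the local CPU: CPUs with SSBS handle
 * it in hardware; otherwise firmware is asked whether ARCH_WORKAROUND_2
 * is implemented and needed, and the result is combined with the ssbd=
 * command-line policy recorded in ssbd_state.
 */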
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (this_cpu_has_cap(ARM64_SSBS)) {
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;

	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */
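
/*
 * Clearing SCTLR_EL1.UCI makes EL0 cache maintenance instructions on this
 * CPU trap to EL1, where the kernel can emulate them.
 */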
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
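
/*
 * For example, ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4) expands to
 * ERRATA_MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4): a local-CPU erratum
 * entry whose .matches covers Cortex-A53 r0p0 through r0p4.
 */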

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{ /* sentinel */ }
};

static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	/* forced off */
	if (__nospectre_v2) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		return false;
	}

	if (need_wa < 0)
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");

	return (need_wa > 0);
}
#endif

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI

static const struct midr_range arm64_repeat_tlbi_cpus[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
#endif
	{},
};

#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif
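
/*
 * Erratum 1003 needs two different matchers, hence a match_list walked by
 * cpucap_multi_entry_cap_matches(): Falkor V1 is matched by an exact
 * variant/revision, while Kryo parts need the looser is_kryo_midr() check.
 */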
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1188873",
		.capability = ARM64_WORKAROUND_1188873,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1165522",
		.capability = ARM64_WORKAROUND_1165522,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
	{
	}
};
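
/*
 * Backs the spectre_v1 file in /sys/devices/system/cpu/vulnerabilities/,
 * overriding the generic weak default. arm64 unconditionally reports the
 * __user pointer sanitization mitigation.
 */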
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}