2019-06-03 07:44:50 +02:00
// SPDX-License-Identifier: GPL-2.0-only
2014-11-14 15:54:09 +00:00
/*
* Contains CPU specific errata definitions
*
* Copyright ( C ) 2014 ARM Ltd .
*/
2018-06-05 13:50:07 +02:00
# include <linux/arm-smccc.h>
2014-11-14 15:54:09 +00:00
# include <linux/types.h>
2019-04-12 15:39:32 -05:00
# include <linux/cpu.h>
2014-11-14 15:54:09 +00:00
# include <asm/cpu.h>
# include <asm/cputype.h>
# include <asm/cpufeature.h>
2020-02-18 19:58:39 +00:00
# include <asm/kvm_asm.h>
2019-04-09 16:26:21 +01:00
# include <asm/smp_plat.h>
2014-11-14 15:54:09 +00:00
2014-11-14 15:54:10 +00:00
static bool __maybe_unused
2016-04-22 12:25:31 +01:00
is_affected_midr_range ( const struct arm64_cpu_capabilities * entry , int scope )
2014-11-14 15:54:10 +00:00
{
2018-03-06 17:15:34 +00:00
const struct arm64_midr_revidr * fix ;
u32 midr = read_cpuid_id ( ) , revidr ;
2016-04-22 12:25:31 +01:00
WARN_ON ( scope ! = SCOPE_LOCAL_CPU | | preemptible ( ) ) ;
2018-03-26 15:12:44 +01:00
if ( ! is_midr_in_range ( midr , & entry - > midr_range ) )
2018-03-06 17:15:34 +00:00
return false ;
midr & = MIDR_REVISION_MASK | MIDR_VARIANT_MASK ;
revidr = read_cpuid ( REVIDR_EL1 ) ;
for ( fix = entry - > fixed_revs ; fix & & fix - > revidr_mask ; fix + + )
if ( midr = = fix - > midr_rv & & ( revidr & fix - > revidr_mask ) )
return false ;
return true ;
2014-11-14 15:54:10 +00:00
}
2018-03-26 15:12:45 +01:00
/*
 * Check whether the local CPU's MIDR appears anywhere in
 * entry->midr_range_list.  Runs on the CPU in question, preemption disabled.
 */
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	u32 midr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	midr = read_cpuid_id();
	return is_midr_in_range_list(midr, entry->midr_range_list);
}
2017-12-13 14:19:37 -08:00
/*
 * Match any Kryo part: compare the local MIDR against entry->midr_range.model
 * after masking off everything except the implementer, the architecture
 * field and the top nibble of the part number.
 */
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id() &
		(MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK);

	return model == entry->midr_range.model;
}
2016-09-09 14:07:16 +01:00
static bool
2018-07-04 23:07:46 +01:00
has_mismatched_cache_type ( const struct arm64_cpu_capabilities * entry ,
int scope )
2016-09-09 14:07:16 +01:00
{
2018-10-09 14:47:06 +01:00
u64 mask = arm64_ftr_reg_ctrel0 . strict_mask ;
u64 sys = arm64_ftr_reg_ctrel0 . sys_val & mask ;
u64 ctr_raw , ctr_real ;
2018-07-04 23:07:46 +01:00
2016-09-09 14:07:16 +01:00
WARN_ON ( scope ! = SCOPE_LOCAL_CPU | | preemptible ( ) ) ;
2018-10-09 14:47:06 +01:00
/*
* We want to make sure that all the CPUs in the system expose
* a consistent CTR_EL0 to make sure that applications behaves
* correctly with migration .
*
* If a CPU has CTR_EL0 . IDC but does not advertise it via CTR_EL0 :
*
* 1 ) It is safe if the system doesn ' t support IDC , as CPU anyway
* reports IDC = 0 , consistent with the rest .
*
* 2 ) If the system has IDC , it is still safe as we trap CTR_EL0
* access on this CPU via the ARM64_HAS_CACHE_IDC capability .
*
* So , we need to make sure either the raw CTR_EL0 or the effective
* CTR_EL0 matches the system ' s copy to allow a secondary CPU to boot .
*/
ctr_raw = read_cpuid_cachetype ( ) & mask ;
ctr_real = read_cpuid_effective_cachetype ( ) & mask ;
return ( ctr_real ! = sys ) & & ( ctr_raw ! = sys ) ;
2016-09-09 14:07:16 +01:00
}
2018-03-26 15:12:28 +01:00
static void
2019-10-17 18:42:58 +01:00
cpu_enable_trap_ctr_access ( const struct arm64_cpu_capabilities * cap )
2016-09-09 14:07:16 +01:00
{
2018-10-09 14:47:07 +01:00
u64 mask = arm64_ftr_reg_ctrel0 . strict_mask ;
2019-10-17 18:42:58 +01:00
bool enable_uct_trap = false ;
2018-10-09 14:47:07 +01:00
/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
if ( ( read_cpuid_cachetype ( ) & mask ) ! =
( arm64_ftr_reg_ctrel0 . sys_val & mask ) )
2019-10-17 18:42:58 +01:00
enable_uct_trap = true ;
/* ... or if the system is affected by an erratum */
if ( cap - > capability = = ARM64_WORKAROUND_1542419 )
enable_uct_trap = true ;
if ( enable_uct_trap )
2018-10-09 14:47:07 +01:00
sysreg_clear_set ( sctlr_el1 , SCTLR_EL1_UCT , 0 ) ;
2016-09-09 14:07:16 +01:00
}
2019-04-29 13:03:57 +01:00
#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	/*
	 * Keep the range-list check first: it carries the SCOPE_LOCAL_CPU
	 * WARN_ON and must always run.
	 */
	if (!is_affected_midr_range_list(entry, scope))
		return false;

	/* The workaround is only applied when the kernel runs at EL2 (VHE) */
	return is_kernel_in_hyp_mode();
}
#endif
2018-08-07 13:53:41 +01:00
/*
 * Trap EL0 cache maintenance instructions on this CPU by clearing
 * SCTLR_EL1.UCI, so the kernel can emulate them (workaround enable hook).
 */
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
2018-03-26 15:12:43 +01:00
/* Capability matched by a single MIDR variant/revision range */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

/* Capability matched by any variant/revision of @model */
#define CAP_MIDR_ALL_VERSIONS(model)		\
	.matches = is_affected_midr_range,	\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* Single-entry fixed-revision list consumed by is_affected_midr_range() */
#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

/* Local-CPU erratum over a MIDR variant/revision range */
#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

/* Capability matched by a NULL-terminated list of MIDR ranges */
#define CAP_MIDR_RANGE_LIST(list)		\
	.matches = is_affected_midr_range_list,	\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given a model */
#define ERRATA_MIDR_ALL_VERSIONS(model)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,	\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,	\
	CAP_MIDR_RANGE_LIST(midr_list)
2019-04-09 16:26:21 +01:00
/* ThunderX2-family parts (Broadcom Vulcan heritage), all revisions */
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};
/*
 * The TX2 erratum-219 TVM workaround is needed only when the CPU is in the
 * ThunderX2 family, hyp mode is available, and at least one CPU reports a
 * non-zero MPIDR affinity level 0.
 */
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int cpu;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	/* Look for any CPU with a non-zero affinity-0 field */
	for_each_possible_cpu(cpu) {
		u64 aff0 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);

		if (aff0 != 0)
			return true;
	}

	return false;
}
2019-10-17 18:42:58 +01:00
/*
 * Erratum 1542419 matches when the local CPU is any Neoverse-N1 revision
 * AND its CTR_EL0 advertises DIC.
 */
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	const struct midr_range n1_range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
	u32 midr = read_cpuid_id();

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	return has_dic && is_midr_in_range(midr, &n1_range);
}
2018-04-10 11:36:43 +01:00
2018-11-19 11:27:28 +00:00
# ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
2019-10-29 16:27:38 -07:00
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list [ ] = {
2018-11-19 11:27:28 +00:00
# ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
2019-10-29 16:27:38 -07:00
{
ERRATA_MIDR_REV ( MIDR_QCOM_FALKOR_V1 , 0 , 0 )
} ,
{
. midr_range . model = MIDR_QCOM_KRYO ,
. matches = is_kryo_midr ,
} ,
2018-11-19 11:27:28 +00:00
# endif
# ifdef CONFIG_ARM64_ERRATUM_1286807
2019-10-29 16:27:38 -07:00
{
ERRATA_MIDR_RANGE ( MIDR_CORTEX_A76 , 0 , 0 , 3 , 0 ) ,
} ,
2018-11-19 11:27:28 +00:00
# endif
{ } ,
} ;
# endif
2018-11-30 17:18:01 +00:00
# ifdef CONFIG_CAVIUM_ERRATUM_27456
2019-01-08 16:19:01 +00:00
const struct midr_range cavium_erratum_27456_cpus [ ] = {
2018-11-30 17:18:01 +00:00
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
MIDR_RANGE ( MIDR_THUNDERX , 0 , 0 , 1 , 1 ) ,
/* Cavium ThunderX, T81 pass 1.0 */
MIDR_REV ( MIDR_THUNDERX_81XX , 0 , 0 ) ,
{ } ,
} ;
# endif
# ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus [ ] = {
/* Cavium ThunderX, T88 pass 1.x - 2.2 */
MIDR_RANGE ( MIDR_THUNDERX , 0 , 0 , 1 , 2 ) ,
/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
MIDR_REV_RANGE ( MIDR_THUNDERX_81XX , 0 , 0 , 2 ) ,
/* Cavium ThunderX, T83 pass 1.0 */
MIDR_REV ( MIDR_THUNDERX_83XX , 0 , 0 ) ,
{ } ,
} ;
# endif
2018-11-30 17:18:02 +00:00
# ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list [ ] = {
{
ERRATA_MIDR_REV ( MIDR_QCOM_FALKOR_V1 , 0 , 0 ) ,
} ,
{
. midr_range . model = MIDR_QCOM_KRYO ,
. matches = is_kryo_midr ,
} ,
{ } ,
} ;
# endif
2018-11-30 17:18:00 +00:00
# ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache [ ] = {
2014-11-14 15:54:12 +00:00
# if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined ( CONFIG_ARM64_ERRATUM_827319 ) | | \
defined ( CONFIG_ARM64_ERRATUM_824069 )
2018-11-30 17:18:00 +00:00
/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
MIDR_REV_RANGE ( MIDR_CORTEX_A53 , 0 , 0 , 2 ) ,
# endif
# ifdef CONFIG_ARM64_ERRATUM_819472
/* Cortex-A53 r0p[01] : ARM errata 819472 */
MIDR_REV_RANGE ( MIDR_CORTEX_A53 , 0 , 0 , 1 ) ,
2014-11-14 15:54:12 +00:00
# endif
2018-11-30 17:18:00 +00:00
{ } ,
} ;
# endif
2019-05-23 11:24:50 +01:00
# ifdef CONFIG_ARM64_ERRATUM_1418040
/*
* - 1188873 affects r0p0 to r2p0
* - 1418040 affects r0p0 to r3p1
*/
static const struct midr_range erratum_1418040_list [ ] = {
/* Cortex-A76 r0p0 to r3p1 */
MIDR_RANGE ( MIDR_CORTEX_A76 , 0 , 0 , 3 , 1 ) ,
/* Neoverse-N1 r0p0 to r3p1 */
MIDR_RANGE ( MIDR_NEOVERSE_N1 , 0 , 0 , 3 , 1 ) ,
2020-06-30 23:30:54 +05:30
/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
MIDR_RANGE ( MIDR_QCOM_KRYO_4XX_GOLD , 0xc , 0xe , 0xf , 0xf ) ,
2019-04-15 13:03:54 +01:00
{ } ,
} ;
# endif
2019-10-31 14:47:23 -07:00
# ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list [ ] = {
/* Cortex-A53 r0p[01234] */
MIDR_REV_RANGE ( MIDR_CORTEX_A53 , 0 , 0 , 4 ) ,
/* Brahma-B53 r0p[0] */
MIDR_REV ( MIDR_BRAHMA_B53 , 0 , 0 ) ,
2020-11-05 00:22:13 +01:00
/* Kryo2XX Silver rAp4 */
MIDR_REV ( MIDR_QCOM_KRYO_2XX_SILVER , 0xa , 0x4 ) ,
2019-10-31 14:47:23 -07:00
{ } ,
} ;
# endif
2019-10-31 14:47:25 -07:00
# ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list [ ] = {
{
/* Cortex-A53 r0p[01234] */
. matches = is_affected_midr_range ,
ERRATA_MIDR_REV_RANGE ( MIDR_CORTEX_A53 , 0 , 0 , 4 ) ,
MIDR_FIXED ( 0x4 , BIT ( 8 ) ) ,
} ,
{
/* Brahma-B53 r0p[0] */
. matches = is_affected_midr_range ,
ERRATA_MIDR_REV ( MIDR_BRAHMA_B53 , 0 , 0 ) ,
} ,
{ } ,
} ;
# endif
2020-05-04 10:48:58 +01:00
# ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list [ ] = {
2019-12-16 11:56:29 +00:00
# ifdef CONFIG_ARM64_ERRATUM_1165522
/* Cortex A76 r0p0 to r2p0 */
MIDR_RANGE ( MIDR_CORTEX_A76 , 0 , 0 , 2 , 0 ) ,
2019-12-16 11:56:31 +00:00
# endif
2020-05-04 10:48:58 +01:00
# ifdef CONFIG_ARM64_ERRATUM_1319367
MIDR_ALL_VERSIONS ( MIDR_CORTEX_A57 ) ,
MIDR_ALL_VERSIONS ( MIDR_CORTEX_A72 ) ,
# endif
2019-12-16 11:56:31 +00:00
# ifdef CONFIG_ARM64_ERRATUM_1530923
/* Cortex A55 r0p0 to r2p0 */
MIDR_RANGE ( MIDR_CORTEX_A55 , 0 , 0 , 2 , 0 ) ,
2020-06-30 23:30:55 +05:30
/* Kryo4xx Silver (rdpe => r1p0) */
MIDR_REV ( MIDR_QCOM_KRYO_4XX_SILVER , 0xd , 0xe ) ,
2019-12-16 11:56:29 +00:00
# endif
{ } ,
} ;
# endif
2020-06-30 23:30:54 +05:30
# ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225 [ ] = {
/* Cortex-A76 r0p0 - r3p1 */
MIDR_RANGE ( MIDR_CORTEX_A76 , 0 , 0 , 3 , 1 ) ,
/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
MIDR_RANGE ( MIDR_QCOM_KRYO_4XX_GOLD , 0xc , 0xe , 0xf , 0xf ) ,
2020-07-08 22:13:40 -07:00
{ } ,
2020-06-30 23:30:54 +05:30
} ;
# endif
2018-11-30 17:18:00 +00:00
/*
 * Master table of arm64 errata workarounds, consumed by the cpufeature
 * framework. Each entry pairs a capability with a matcher and an optional
 * per-CPU enable hook.
 */
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		/* NOTE: .matches below overrides the one set by the macro */
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
	{
		/* Sentinel — must remain last */
	}
};