Merge branch 'topic/ppc-kvm' into next
This brings in two series from Paul, one of which touches KVM code and may need to be merged into the kvm-ppc tree to resolve conflicts.
commit a26cf1c9fe
@@ -138,4 +138,7 @@ extern int __ucmpdi2(u64, u64);
void _mcount(void);
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);

void pnv_power9_force_smt4_catch(void);
void pnv_power9_force_smt4_release(void);

#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
@@ -131,41 +131,48 @@ static inline void cpu_feature_keys_init(void) { }

/* CPU kernel features */

/* Retain the 32b definitions all use bottom half of word */
/* Definitions for features that we have on both 32-bit and 64-bit chips */
#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x00000001)
#define CPU_FTR_L2CR ASM_CONST(0x00000002)
#define CPU_FTR_SPEC7450 ASM_CONST(0x00000004)
#define CPU_FTR_ALTIVEC ASM_CONST(0x00000008)
#define CPU_FTR_TAU ASM_CONST(0x00000010)
#define CPU_FTR_CAN_DOZE ASM_CONST(0x00000020)
#define CPU_FTR_USE_TB ASM_CONST(0x00000040)
#define CPU_FTR_L2CSR ASM_CONST(0x00000080)
#define CPU_FTR_601 ASM_CONST(0x00000100)
#define CPU_FTR_DBELL ASM_CONST(0x00000200)
#define CPU_FTR_CAN_NAP ASM_CONST(0x00000400)
#define CPU_FTR_L3CR ASM_CONST(0x00000800)
#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00001000)
#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00002000)
#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x00004000)
#define CPU_FTR_NO_DPM ASM_CONST(0x00008000)
#define CPU_FTR_476_DD2 ASM_CONST(0x00010000)
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x00020000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x00040000)
#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00080000)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x00100000)
#define CPU_FTR_PPC_LE ASM_CONST(0x00200000)
#define CPU_FTR_REAL_LE ASM_CONST(0x00400000)
#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00800000)
#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x01000000)
#define CPU_FTR_SPE ASM_CONST(0x02000000)
#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x04000000)
#define CPU_FTR_LWSYNC ASM_CONST(0x08000000)
#define CPU_FTR_NOEXECUTE ASM_CONST(0x10000000)
#define CPU_FTR_INDEXED_DCR ASM_CONST(0x20000000)
#define CPU_FTR_EMB_HV ASM_CONST(0x40000000)
#define CPU_FTR_ALTIVEC ASM_CONST(0x00000002)
#define CPU_FTR_DBELL ASM_CONST(0x00000004)
#define CPU_FTR_CAN_NAP ASM_CONST(0x00000008)
#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00000010)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x00000020)
#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00000040)
#define CPU_FTR_LWSYNC ASM_CONST(0x00000080)
#define CPU_FTR_NOEXECUTE ASM_CONST(0x00000100)
#define CPU_FTR_EMB_HV ASM_CONST(0x00000200)

/* Definitions for features that only exist on 32-bit chips */
#ifdef CONFIG_PPC32
#define CPU_FTR_601 ASM_CONST(0x00001000)
#define CPU_FTR_L2CR ASM_CONST(0x00002000)
#define CPU_FTR_SPEC7450 ASM_CONST(0x00004000)
#define CPU_FTR_TAU ASM_CONST(0x00008000)
#define CPU_FTR_CAN_DOZE ASM_CONST(0x00010000)
#define CPU_FTR_USE_RTC ASM_CONST(0x00020000)
#define CPU_FTR_L3CR ASM_CONST(0x00040000)
#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00080000)
#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00100000)
#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x00200000)
#define CPU_FTR_NO_DPM ASM_CONST(0x00400000)
#define CPU_FTR_476_DD2 ASM_CONST(0x00800000)
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x01000000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x02000000)
#define CPU_FTR_PPC_LE ASM_CONST(0x04000000)
#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x08000000)
#define CPU_FTR_SPE ASM_CONST(0x10000000)
#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x20000000)
#define CPU_FTR_INDEXED_DCR ASM_CONST(0x40000000)

#else /* CONFIG_PPC32 */
/* Define these to 0 for the sake of tests in common code */
#define CPU_FTR_601 (0)
#define CPU_FTR_PPC_LE (0)
#endif

/*
 * Add the 64-bit processor unique features in the top half of the word;
 * Definitions for the 64-bit processor unique features;
 * on 32-bit, make the names available but defined to be 0.
 */
#ifdef __powerpc64__

@@ -174,37 +181,40 @@ static inline void cpu_feature_keys_init(void) { }
#define LONG_ASM_CONST(x) 0
#endif

#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000100000000)
#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000200000000)
#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000400000000)
#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000800000000)
#define CPU_FTR_ARCH_300 LONG_ASM_CONST(0x0000001000000000)
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000004000000000)
#define CPU_FTR_SMT LONG_ASM_CONST(0x0000008000000000)
#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000010000000000)
#define CPU_FTR_PURR LONG_ASM_CONST(0x0000020000000000)
#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000040000000000)
#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000080000000000)
#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000100000000000)
#define CPU_FTR_VSX LONG_ASM_CONST(0x0000200000000000)
#define CPU_FTR_SAO LONG_ASM_CONST(0x0000400000000000)
#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000800000000000)
#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0001000000000000)
#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0002000000000000)
#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0004000000000000)
#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0008000000000000)
#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0010000000000000)
#define CPU_FTR_PKEY LONG_ASM_CONST(0x0020000000000000)
#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0040000000000000)
#define CPU_FTR_TM LONG_ASM_CONST(0x0080000000000000)
#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000)
#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000)
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x8000000000000000)
#define CPU_FTR_REAL_LE LONG_ASM_CONST(0x0000000000001000)
#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000000002000)
#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000000004000)
#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000000008000)
#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000000010000)
#define CPU_FTR_ARCH_300 LONG_ASM_CONST(0x0000000000020000)
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000000000040000)
#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000000000080000)
#define CPU_FTR_SMT LONG_ASM_CONST(0x0000000000100000)
#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000000000200000)
#define CPU_FTR_PURR LONG_ASM_CONST(0x0000000000400000)
#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000000000800000)
#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000)
#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000)
#define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000)
#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000)
#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000)
#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000)
#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000)
#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0000000080000000)
#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0000000100000000)
#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0000000200000000)
#define CPU_FTR_PKEY LONG_ASM_CONST(0x0000000400000000)
#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0000000800000000)
#define CPU_FTR_TM LONG_ASM_CONST(0x0000001000000000)
#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0000004000000000)
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0000008000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0000010000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x0000020000000000)
#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x0000040000000000)
#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000)
#define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
#define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)

#ifndef __ASSEMBLY__

@@ -285,21 +295,19 @@ static inline void cpu_feature_keys_init(void) { }
#endif

#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \
	CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE)
#define CPU_FTRS_603 (CPU_FTR_COMMON | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
	CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_USE_RTC)
#define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_604 (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | CPU_FTR_PPC_LE)
#define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_PPC_LE)
#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_740 (CPU_FTR_COMMON | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
	CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
	CPU_FTR_PPC_LE)
#define CPU_FTRS_750 (CPU_FTR_COMMON | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
	CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
	CPU_FTR_PPC_LE)
#define CPU_FTRS_750CL (CPU_FTRS_750)

@@ -308,125 +316,118 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX)
#define CPU_FTRS_750GX (CPU_FTRS_750FX)
#define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
	CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_7400 (CPU_FTR_COMMON | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
	CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
	CPU_FTR_NEED_PAIRED_STWCX | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
	CPU_FTR_NEED_PAIRED_STWCX | \
	CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
	CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
	CPU_FTR_NEED_PAIRED_STWCX | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455 (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
	CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \
	CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7447 (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7447A (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7448 (CPU_FTR_COMMON | \
	CPU_FTR_USE_TB | \
	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
	CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_82XX (CPU_FTR_COMMON | \
	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB)
#define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE)
#define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
	CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP)
	CPU_FTR_MAYBE_CAN_NAP)
#define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \
	CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
	CPU_FTR_MAYBE_CAN_NAP | \
	CPU_FTR_COMMON)
#define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \
	CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
	CPU_FTR_MAYBE_CAN_NAP | \
	CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_USE_TB)
#define CPU_FTRS_8XX (CPU_FTR_USE_TB | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON)
#define CPU_FTRS_8XX (CPU_FTR_NOEXECUTE)
#define CPU_FTRS_40X (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_44X (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_440x6 (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
	CPU_FTR_INDEXED_DCR)
#define CPU_FTRS_47X (CPU_FTRS_440x6)
#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
#define CPU_FTRS_E200 (CPU_FTR_SPE_COMP | \
	CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
	CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \
	CPU_FTR_DEBUG_LVL_EXC)
#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | \
	CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
	CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | \
	CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
	CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
	CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
#define CPU_FTRS_E500MC (CPU_FTR_NODSISRALIGN | \
	CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
	CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
/*
 * e5500/e6500 erratum A-006958 is a timebase bug that can use the
 * same workaround as CPU_FTR_CELL_TB_BUG.
 */
#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
	CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
#define CPU_FTRS_E5500 (CPU_FTR_NODSISRALIGN | \
	CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
	CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG)
#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
	CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
#define CPU_FTRS_E6500 (CPU_FTR_NODSISRALIGN | \
	CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
	CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)

/* 64-bit CPUs */
#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
#define CPU_FTRS_POWER4 (CPU_FTR_LWSYNC | \
	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
	CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \
	CPU_FTR_STCX_CHECKS_ADDRESS)
#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
#define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \
	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
	CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
	CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
	CPU_FTR_HVMODE | CPU_FTR_DABRX)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
#define CPU_FTRS_POWER5 (CPU_FTR_LWSYNC | \
	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
	CPU_FTR_MMCRA | CPU_FTR_SMT | \
	CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
	CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
#define CPU_FTRS_POWER6 (CPU_FTR_LWSYNC | \
	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
	CPU_FTR_MMCRA | CPU_FTR_SMT | \
	CPU_FTR_COHERENT_ICACHE | \

@@ -434,7 +435,7 @@ static inline void cpu_feature_keys_init(void) { }
	CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
	CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
	CPU_FTR_DABRX)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
#define CPU_FTRS_POWER7 (CPU_FTR_LWSYNC | \
	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
	CPU_FTR_MMCRA | CPU_FTR_SMT | \
	CPU_FTR_COHERENT_ICACHE | \

@@ -443,7 +444,7 @@ static inline void cpu_feature_keys_init(void) { }
	CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	CPU_FTR_CFAR | CPU_FTR_HVMODE | \
	CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX | CPU_FTR_PKEY)
#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
#define CPU_FTRS_POWER8 (CPU_FTR_LWSYNC | \
	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
	CPU_FTR_MMCRA | CPU_FTR_SMT | \
	CPU_FTR_COHERENT_ICACHE | \

@@ -455,7 +456,7 @@ static inline void cpu_feature_keys_init(void) { }
	CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_PKEY)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
#define CPU_FTRS_POWER9 (CPU_FTR_LWSYNC | \
	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
	CPU_FTR_MMCRA | CPU_FTR_SMT | \
	CPU_FTR_COHERENT_ICACHE | \

@@ -470,15 +471,17 @@ static inline void cpu_feature_keys_init(void) { }
	(~CPU_FTR_SAO))
#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_P9_TM_HV_ASSIST | \
	CPU_FTR_P9_TM_XER_SO_BUG)
#define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \
	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
	CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
	CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
	CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
#define CPU_FTRS_PA6T (CPU_FTR_LWSYNC | \
	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
	CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_PPCAS_ARCH_V2)

#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E

@@ -489,7 +492,8 @@ static inline void cpu_feature_keys_init(void) { }
	CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
	CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \
	CPU_FTRS_PA6T | CPU_FTR_VSX | CPU_FTRS_POWER9 | \
	CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1)
	CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \
	CPU_FTRS_POWER9_DD2_2)
#endif
#else
enum {
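For readers skimming the reorganized feature words above: at run time these masks are simply ANDed against the CPU's feature word. The following stand-alone C sketch is illustrative only (the constants are copied from the common set above, but cpu_has_feature_demo() and cpu_features are invented stand-ins for the kernel's cpu_has_feature() and cur_cpu_spec->cpu_features):

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel's ASM_CONST in the C-only case. */
#define ASM_CONST(x) x##ULL
#define CPU_FTR_LWSYNC ASM_CONST(0x00000080)
#define CPU_FTR_NOEXECUTE ASM_CONST(0x00000100)

/* Stand-in for cur_cpu_spec->cpu_features, filled in at boot in the kernel. */
static uint64_t cpu_features = CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE;

static int cpu_has_feature_demo(uint64_t feature)
{
	return (cpu_features & feature) != 0;
}

int main(void)
{
	printf("lwsync: %d\n", cpu_has_feature_demo(CPU_FTR_LWSYNC));
	return 0;
}

The split into common, 32-bit-only, and 64-bit-only bit ranges keeps each namespace within one word while freeing bits in the common range.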
@@ -108,6 +108,8 @@

/* book3s_hv */

#define BOOK3S_INTERRUPT_HV_SOFTPATCH 0x1500

/*
 * Special trap used to indicate to host that this is a
 * passthrough interrupt that could not be handled

@@ -241,6 +241,10 @@ extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
	unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);

@@ -472,6 +472,49 @@ static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
	set_bit_le(i, map);
}

static inline u64 sanitize_msr(u64 msr)
{
	msr &= ~MSR_HV;
	msr |= MSR_ME;
	return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr = vcpu->arch.cr_tm;
	vcpu->arch.xer = vcpu->arch.xer_tm;
	vcpu->arch.lr = vcpu->arch.lr_tm;
	vcpu->arch.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	memcpy(vcpu->arch.gpr, vcpu->arch.gpr_tm,
	       sizeof(vcpu->arch.gpr));
	vcpu->arch.fp = vcpu->arch.fp_tm;
	vcpu->arch.vr = vcpu->arch.vr_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr_tm = vcpu->arch.cr;
	vcpu->arch.xer_tm = vcpu->arch.xer;
	vcpu->arch.lr_tm = vcpu->arch.lr;
	vcpu->arch.ctr_tm = vcpu->arch.ctr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	memcpy(vcpu->arch.gpr_tm, vcpu->arch.gpr,
	       sizeof(vcpu->arch.gpr));
	vcpu->arch.fp_tm = vcpu->arch.fp;
	vcpu->arch.vr_tm = vcpu->arch.vr;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */
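sanitize_msr() above scrubs a guest-supplied MSR image before it is installed: the hypervisor bit is cleared so the guest can never claim HV state, and machine-check enable is forced on. A minimal user-space sketch of the same transformation; the two bit positions below are illustrative stand-ins for the values defined in asm/reg.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the asm/reg.h definitions. */
#define MSR_HV (1ULL << 60)
#define MSR_ME (1ULL << 12)

static uint64_t sanitize_msr_demo(uint64_t msr)
{
	msr &= ~MSR_HV; /* guest must never run in hypervisor state */
	msr |= MSR_ME;  /* machine checks must stay enabled */
	return msr;
}

int main(void)
{
	uint64_t guest_msr = MSR_HV; /* bogus input: HV set, ME clear */
	printf("sanitized: 0x%llx\n",
	       (unsigned long long)sanitize_msr_demo(guest_msr));
	return 0;
}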
@@ -119,6 +119,7 @@ struct kvmppc_host_state {
	u8 host_ipi;
	u8 ptid; /* thread number within subcore when split */
	u8 tid; /* thread number within whole core */
	u8 fake_suspend;
	struct kvm_vcpu *kvm_vcpu;
	struct kvmppc_vcore *kvm_vcore;
	void __iomem *xics_phys;

@@ -610,6 +610,7 @@ struct kvm_vcpu_arch {
	u64 tfhar;
	u64 texasr;
	u64 tfiar;
	u64 orig_texasr;

	u32 cr_tm;
	u64 xer_tm;

@@ -32,6 +32,7 @@
#include <asm/accounting.h>
#include <asm/hmi.h>
#include <asm/cpuidle.h>
#include <asm/atomic.h>

register struct paca_struct *local_paca asm("r13");

@@ -177,6 +178,8 @@ struct paca_struct {
	u8 thread_mask;
	/* Mask to denote subcore sibling threads */
	u8 subcore_sibling_mask;
	/* Flag to request this thread not to stop */
	atomic_t dont_stop;
	/*
	 * Pointer to an array which contains pointer
	 * to the sibling threads' paca.

@@ -40,6 +40,7 @@ static inline int pnv_npu2_handle_fault(struct npu_context *context,
}

static inline void pnv_tm_init(void) { }
static inline void pnv_power9_force_smt4(void) { }
#endif

#endif /* _ASM_POWERNV_H */

@@ -232,6 +232,7 @@
#define PPC_INST_MSGSYNC 0x7c0006ec
#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_MSGCLRP 0x7c00015c
#define PPC_INST_MTMSRD 0x7c000164
#define PPC_INST_MTTMR 0x7c0003dc
#define PPC_INST_NOP 0x60000000
#define PPC_INST_PASTE 0x7c20070d

@@ -239,8 +240,10 @@
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_POPCNTD 0x7c0003f4
#define PPC_INST_POPCNTW 0x7c0002f4
#define PPC_INST_RFEBB 0x4c000124
#define PPC_INST_RFCI 0x4c000066
#define PPC_INST_RFDI 0x4c00004e
#define PPC_INST_RFID 0x4c000024
#define PPC_INST_RFMCI 0x4c00004c
#define PPC_INST_MFSPR 0x7c0002a6
#define PPC_INST_MFSPR_DSCR 0x7c1102a6

@@ -278,6 +281,7 @@
#define PPC_INST_TRECHKPT 0x7c0007dd
#define PPC_INST_TRECLAIM 0x7c00075d
#define PPC_INST_TABORT 0x7c00071d
#define PPC_INST_TSR 0x7c0005dd

#define PPC_INST_NAP 0x4c000364
#define PPC_INST_SLEEP 0x4c0003a4

@@ -156,6 +156,8 @@
#define PSSCR_SD 0x00400000 /* Status Disable */
#define PSSCR_PLS 0xf000000000000000 /* Power-saving Level Status */
#define PSSCR_GUEST_VIS 0xf0000000000003ff /* Guest-visible PSSCR fields */
#define PSSCR_FAKE_SUSPEND 0x00000400 /* Fake-suspend bit (P9 DD2.2) */
#define PSSCR_FAKE_SUSPEND_LG 10 /* Fake-suspend bit position */

/* Floating Point Status and Control Register (FPSCR) Fields */
#define FPSCR_FX 0x80000000 /* FPU exception summary */

@@ -237,7 +239,12 @@
#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
#define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
#define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
#define TEXASR_ABORT __MASK(63-31) /* terminated by tabort or treclaim */
#define TEXASR_SUSP __MASK(63-32) /* tx failed in suspended state */
#define TEXASR_HV __MASK(63-34) /* MSR[HV] when failure occurred */
#define TEXASR_PR __MASK(63-35) /* MSR[PR] when failure occurred */
#define TEXASR_FS __MASK(63-36) /* TEXASR Failure Summary */
#define TEXASR_EXACT __MASK(63-37) /* TFIAR value is exact */
#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
#define SPRN_TIDR 144 /* Thread ID register */
#define SPRN_CTRLF 0x088
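The TEXASR_* masks above use the kernel's __MASK(63 - n) idiom: PowerPC documentation numbers register bits big-endian (bit 0 is the most significant), so IBM bit n of a 64-bit register corresponds to shift count 63 - n in conventional notation. A small sketch of the convention (the macro body mirrors the kernel's, shown here purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define __MASK(X) (1ULL << (X))

#define TEXASR_FS    __MASK(63 - 36) /* IBM bit 36: Failure Summary */
#define TEXASR_ABORT __MASK(63 - 31) /* IBM bit 31: aborted by tabort/treclaim */

int main(void)
{
	/* IBM bit 36 of a 64-bit register is the ordinary bit 27. */
	printf("TEXASR_FS = 1 << %d = 0x%llx\n",
	       63 - 36, (unsigned long long)TEXASR_FS);
	printf("FS set in combined mask? %d\n",
	       (TEXASR_ABORT | TEXASR_FS) & TEXASR_FS ? 1 : 0);
	return 0;
}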
@@ -47,7 +47,7 @@ struct div_result {
/* Accessor functions for the timebase (RTC on 601) registers. */
/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
#ifdef CONFIG_6xx
#define __USE_RTC() (!cpu_has_feature(CPU_FTR_USE_TB))
#define __USE_RTC() (cpu_has_feature(CPU_FTR_USE_RTC))
#else
#define __USE_RTC() 0
#endif

@@ -568,6 +568,7 @@ int main(void)
	OFFSET(VCPU_TFHAR, kvm_vcpu, arch.tfhar);
	OFFSET(VCPU_TFIAR, kvm_vcpu, arch.tfiar);
	OFFSET(VCPU_TEXASR, kvm_vcpu, arch.texasr);
	OFFSET(VCPU_ORIG_TEXASR, kvm_vcpu, arch.orig_texasr);
	OFFSET(VCPU_GPR_TM, kvm_vcpu, arch.gpr_tm);
	OFFSET(VCPU_FPRS_TM, kvm_vcpu, arch.fp_tm.fpr);
	OFFSET(VCPU_VRS_TM, kvm_vcpu, arch.vr_tm.vr);

@@ -650,6 +651,7 @@ int main(void)
	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
	HSTATE_FIELD(HSTATE_PTID, ptid);
	HSTATE_FIELD(HSTATE_TID, tid);
	HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
	HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]);
	HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]);
	HSTATE_FIELD(HSTATE_MMCRA, host_mmcr[2]);

@@ -759,6 +761,7 @@ int main(void)
	OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
	OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas);
	OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr);
	OFFSET(PACA_DONT_STOP, paca_struct, dont_stop);
#define STOP_SPR(x, f) OFFSET(x, paca_struct, stop_sprs.f)
	STOP_SPR(STOP_PID, pid);
	STOP_SPR(STOP_LDBAR, ldbar);

@@ -226,7 +226,7 @@ BEGIN_FTR_SECTION
	beq 1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
	lwz r6,CPU_SPEC_FEATURES(r4)
	andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
	andis. r0,r6,CPU_FTR_L3_DISABLE_NAP@h
	beq 1f
	li r7,CPU_FTR_CAN_NAP
	andc r6,r6,r7

@@ -162,7 +162,7 @@ _GLOBAL(__setup_cpu_e5500)
 * the feature on the primary core, avoid doing it on the
 * secondary core.
 */
	andis. r6, r3, CPU_FTR_EMB_HV@h
	andi. r6, r3, CPU_FTR_EMB_HV
	beq 2f
	rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
	stw r3, CPU_SPEC_FEATURES(r4)

@@ -553,11 +553,30 @@ static struct cpu_spec __initdata cpu_specs[] = {
		.machine_check_early = __machine_check_early_realmode_p9,
		.platform = "power9",
	},
	{ /* Power9 DD 2.1 or later (see DD2.0 above) */
	{ /* Power9 DD 2.1 */
		.pvr_mask = 0xffffefff,
		.pvr_value = 0x004e0201,
		.cpu_name = "POWER9 (raw)",
		.cpu_features = CPU_FTRS_POWER9_DD2_1,
		.cpu_user_features = COMMON_USER_POWER9,
		.cpu_user_features2 = COMMON_USER2_POWER9,
		.mmu_features = MMU_FTRS_POWER9,
		.icache_bsize = 128,
		.dcache_bsize = 128,
		.num_pmcs = 6,
		.pmc_type = PPC_PMC_IBM,
		.oprofile_cpu_type = "ppc64/power9",
		.oprofile_type = PPC_OPROFILE_INVALID,
		.cpu_setup = __setup_cpu_power9,
		.cpu_restore = __restore_cpu_power9,
		.machine_check_early = __machine_check_early_realmode_p9,
		.platform = "power9",
	},
	{ /* Power9 DD2.2 or later */
		.pvr_mask = 0xffff0000,
		.pvr_value = 0x004e0000,
		.cpu_name = "POWER9 (raw)",
		.cpu_features = CPU_FTRS_POWER9_DD2_1,
		.cpu_features = CPU_FTRS_POWER9_DD2_2,
		.cpu_user_features = COMMON_USER_POWER9,
		.cpu_user_features2 = COMMON_USER2_POWER9,
		.mmu_features = MMU_FTRS_POWER9,

@@ -54,8 +54,7 @@ struct dt_cpu_feature {
};

#define CPU_FTRS_BASE \
	(CPU_FTR_USE_TB | \
	CPU_FTR_LWSYNC | \
	(CPU_FTR_LWSYNC | \
	CPU_FTR_FPU_UNAVAILABLE |\
	CPU_FTR_NODSISRALIGN |\
	CPU_FTR_NOEXECUTE |\

@@ -590,6 +589,8 @@ static struct dt_cpu_feature_match __initdata
	{"virtual-page-class-key-protection", feat_enable, 0},
	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
	{"transactional-memory-v3", feat_enable_tm, 0},
	{"tm-suspend-hypervisor-assist", feat_enable, CPU_FTR_P9_TM_HV_ASSIST},
	{"tm-suspend-xer-so-bug", feat_enable, CPU_FTR_P9_TM_XER_SO_BUG},
	{"idle-nap", feat_enable_idle_nap, 0},
	{"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
	{"idle-stop", feat_enable_idle_stop, 0},

@@ -709,6 +710,9 @@ static __init void cpufeatures_cpu_quirks(void)
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
	else if ((version & 0xffffefff) == 0x004e0201)
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	else if ((version & 0xffffefff) == 0x004e0202)
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST |
			CPU_FTR_P9_TM_XER_SO_BUG;
}

static void __init cpufeatures_setup_finished(void)

@@ -1273,7 +1273,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
	bne+ denorm_assist
#endif

	KVMTEST_PR(0x1500)
	KVMTEST_HV(0x1500)
	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)

@@ -1285,7 +1285,7 @@ EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
EXC_VIRT_NONE(0x5500, 0x100)
#endif

TRAMP_KVM_SKIP(PACA_EXGEN, 0x1500)
TRAMP_KVM_HV(PACA_EXGEN, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)

@@ -339,6 +339,7 @@ power_enter_stop:
	bne .Lhandle_esl_ec_set
	PPC_STOP
	li r3,0 /* Since we didn't lose state, return 0 */
	std r3, PACA_REQ_PSSCR(r13)

	/*
	 * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so

@@ -429,11 +430,29 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
 * r3 contains desired PSSCR register value.
 */
_GLOBAL(power9_idle_stop)
BEGIN_FTR_SECTION
	lwz r5, PACA_DONT_STOP(r13)
	cmpwi r5, 0
	bne 1f
	std r3, PACA_REQ_PSSCR(r13)
	sync
	lwz r5, PACA_DONT_STOP(r13)
	cmpwi r5, 0
	bne 1f
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	mtspr SPRN_PSSCR,r3
	LOAD_REG_ADDR(r4,power_enter_stop)
	b pnv_powersave_common
	/* No return */
1:
	/*
	 * We get here when TM / thread reconfiguration bug workaround
	 * code wants to get the CPU into SMT4 mode, and therefore
	 * we are being asked not to stop.
	 */
	li r3, 0
	std r3, PACA_REQ_PSSCR(r13)
	blr /* return 0 for wakeup cause / SRR1 value */

/*
 * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,

@@ -584,6 +603,8 @@ FTR_SECTION_ELSE_NESTED(71)
	mfspr r5, SPRN_PSSCR
	rldicl r5,r5,4,60
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
	li r0, 0 /* clear requested_psscr to say we're awake */
	std r0, PACA_REQ_PSSCR(r13)
	cmpd cr4,r5,r4
	bge cr4,pnv_wakeup_tb_loss /* returns to caller */
@@ -99,26 +99,28 @@ static struct vdso_patch_def vdso_patches[] = {
		CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE,
		"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
	},
#ifdef CONFIG_PPC32
	{
		CPU_FTR_USE_TB, 0,
		CPU_FTR_USE_RTC, CPU_FTR_USE_RTC,
		"__kernel_gettimeofday", NULL
	},
	{
		CPU_FTR_USE_TB, 0,
		CPU_FTR_USE_RTC, CPU_FTR_USE_RTC,
		"__kernel_clock_gettime", NULL
	},
	{
		CPU_FTR_USE_TB, 0,
		CPU_FTR_USE_RTC, CPU_FTR_USE_RTC,
		"__kernel_clock_getres", NULL
	},
	{
		CPU_FTR_USE_TB, 0,
		CPU_FTR_USE_RTC, CPU_FTR_USE_RTC,
		"__kernel_get_tbfreq", NULL
	},
	{
		CPU_FTR_USE_TB, 0,
		CPU_FTR_USE_RTC, CPU_FTR_USE_RTC,
		"__kernel_time", NULL
	},
#endif
};

/*

@@ -74,9 +74,15 @@ kvm-hv-y += \
	book3s_64_mmu_hv.o \
	book3s_64_mmu_radix.o

kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
	book3s_hv_tm.o

kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
	book3s_hv_rm_xics.o book3s_hv_rm_xive.o

kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
	book3s_hv_tm_builtin.o

ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
	book3s_hv_hmi.o \

@@ -84,6 +90,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
	book3s_hv_rm_mmu.o \
	book3s_hv_ras.o \
	book3s_hv_builtin.o \
	$(kvm-book3s_64-builtin-tm-objs-y) \
	$(kvm-book3s_64-builtin-xics-objs-y)
endif

@@ -1206,6 +1206,19 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
			r = RESUME_GUEST;
		}
		break;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
		/*
		 * This occurs for various TM-related instructions that
		 * we need to emulate on POWER9 DD2.2. We have already
		 * handled the cases where the guest was in real-suspend
		 * mode and was transitioning to transactional state.
		 */
		r = kvmhv_p9_tm_emulation(vcpu);
		break;
#endif

	case BOOK3S_INTERRUPT_HV_RM_HARD:
		r = RESUME_PASSTHROUGH;
		break;

@@ -1978,7 +1991,9 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
	 * turn off the HFSCR bit, which causes those instructions to trap.
	 */
	vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
	if (!cpu_has_feature(CPU_FTR_TM))
	if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
		vcpu->arch.hfscr |= HFSCR_TM;
	else if (!cpu_has_feature(CPU_FTR_TM_COMP))
		vcpu->arch.hfscr &= ~HFSCR_TM;
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		vcpu->arch.hfscr &= ~HFSCR_MSGP;

@@ -2242,6 +2257,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
	tpaca->kvm_hstate.fake_suspend = 0;
	/* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
	smp_wmb();
	tpaca->kvm_hstate.kvm_vcore = vc;

@@ -787,12 +787,18 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Branch around the call if both CPU_FTR_TM and
	 * CPU_FTR_P9_TM_HV_ASSIST are off.
	 */
BEGIN_FTR_SECTION
	b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	bl kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
91:
#endif

	/* Load guest PMU registers */

@@ -915,11 +921,14 @@ BEGIN_FTR_SECTION
	mtspr SPRN_ACOP, r6
	mtspr SPRN_CSIGR, r7
	mtspr SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld r5, VCPU_TID(r4)
	ld r6, VCPU_PSSCR(r4)
	lbz r8, HSTATE_FAKE_SUSPEND(r13)
	oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
	rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld r7, VCPU_HFSCR(r4)
	mtspr SPRN_TIDR, r5
	mtspr SPRN_PSSCR, r6

@@ -1370,6 +1379,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	std r3, VCPU_CTR(r9)
	std r4, VCPU_XER(r9)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq kvmppc_tm_emul
#endif

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq kvmppc_hdsi

@@ -1729,12 +1744,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	bl kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Branch around the call if both CPU_FTR_TM and
	 * CPU_FTR_P9_TM_HV_ASSIST are off.
	 */
BEGIN_FTR_SECTION
	b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	bl kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
91:
#endif

	/* Increment yield count if they have a VPA */

@@ -2054,6 +2075,42 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	mtlr r0
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Softpatch interrupt for transactional memory emulation cases
 * on POWER9 DD2.2. This is early in the guest exit path - we
 * haven't saved registers or done a treclaim yet.
 */
kvmppc_tm_emul:
	/* Save instruction image in HEIR */
	mfspr r3, SPRN_HEIR
	stw r3, VCPU_HEIR(r9)

	/*
	 * The cases we want to handle here are those where the guest
	 * is in real suspend mode and is trying to transition to
	 * transactional mode.
	 */
	lbz r0, HSTATE_FAKE_SUSPEND(r13)
	cmpwi r0, 0 /* keep exiting guest if in fake suspend */
	bne guest_exit_cont
	rldicl r3, r11, 64 - MSR_TS_S_LG, 62
	cmpwi r3, 1 /* or if not in suspend state */
	bne guest_exit_cont

	/* Call C code to do the emulation */
	mr r3, r9
	bl kvmhv_p9_tm_emulation_early
	nop
	ld r9, HSTATE_KVM_VCPU(r13)
	li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	cmpwi r3, 0
	beq guest_exit_cont /* continue exiting if not handled */
	ld r10, VCPU_PC(r9)
	ld r11, VCPU_MSR(r9)
	b fast_interrupt_c_return /* go back to guest if handled */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing

@@ -2587,13 +2644,19 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
	bl kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Branch around the call if both CPU_FTR_TM and
	 * CPU_FTR_P9_TM_HV_ASSIST are off.
	 */
BEGIN_FTR_SECTION
	b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	ld r9, HSTATE_KVM_VCPU(r13)
	bl kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
91:
#endif

	/*

@@ -2700,12 +2763,18 @@ kvm_end_cede:
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Branch around the call if both CPU_FTR_TM and
	 * CPU_FTR_P9_TM_HV_ASSIST are off.
	 */
BEGIN_FTR_SECTION
	b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	bl kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
91:
#endif

	/* load up FP state */

@@ -3032,6 +3101,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
kvmppc_save_tm:
	mflr r0
	std r0, PPC_LR_STKOFF(r1)
	stdu r1, -PPC_MIN_STKFRM(r1)

	/* Turn on TM. */
	mfmsr r8

@@ -3046,6 +3116,24 @@ kvmppc_save_tm:
	std r1, HSTATE_HOST_R1(r13)
	li r3, TM_CAUSE_KVM_RESCHED

BEGIN_FTR_SECTION
	lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi r0, 0
	beq 3f
	rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq 4f
BEGIN_FTR_SECTION_NESTED(96)
	bl pnv_power9_force_smt4_catch
END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
	nop
	b 6f
3:
	/* Emulation of the treclaim instruction needs TEXASR before treclaim */
	mfspr r6, SPRN_TEXASR
	std r6, VCPU_ORIG_TEXASR(r9)
6:
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li r5, 0
	mtmsrd r5, 1

@@ -3057,6 +3145,43 @@ kvmppc_save_tm:
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std r9, PACATMSCRATCH(r13)

	/* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */
BEGIN_FTR_SECTION
	lbz r9, HSTATE_FAKE_SUSPEND(r13)
	cmpwi r9, 0
	beq 2f
	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 */
	/* Reload stack pointer and TOC. */
	ld r1, HSTATE_HOST_R1(r13)
	ld r2, PACATOC(r13)
	/* Set MSR RI now we have r1 and r13 back. */
	li r5, MSR_RI
	mtmsrd r5, 1
	HMT_MEDIUM
	ld r6, HSTATE_DSCR(r13)
	mtspr SPRN_DSCR, r6
BEGIN_FTR_SECTION_NESTED(96)
	bl pnv_power9_force_smt4_release
END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
	nop

4:
	mfspr r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li r0, PSSCR_FAKE_SUSPEND
	andc r3, r3, r0
	mtspr SPRN_PSSCR, r3
	ld r9, HSTATE_KVM_VCPU(r13)
	/* Don't save TEXASR, use value from last exit in real suspend state */
	b 11f
2:
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)

	ld r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */

@@ -3127,13 +3252,15 @@ kvmppc_save_tm:
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr r7, SPRN_TEXASR
	std r7, VCPU_TEXASR(r9)
11:
	mfspr r5, SPRN_TFHAR
	mfspr r6, SPRN_TFIAR
	mfspr r7, SPRN_TEXASR
	std r5, VCPU_TFHAR(r9)
	std r6, VCPU_TFIAR(r9)
	std r7, VCPU_TEXASR(r9)

	addi r1, r1, PPC_MIN_STKFRM
	ld r0, PPC_LR_STKOFF(r1)
	mtlr r0
	blr

@@ -3168,6 +3295,8 @@ kvmppc_restore_tm:
	mtspr SPRN_TFIAR, r6
	mtspr SPRN_TEXASR, r7

	li r0, 0
	stb r0, HSTATE_FAKE_SUSPEND(r13)
	ld r5, VCPU_MSR(r4)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr /* TM not active in guest */

@@ -3181,6 +3310,15 @@ kvmppc_restore_tm:
	oris r7, r7, (TEXASR_FS)@h
	mtspr SPRN_TEXASR, r7

	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b .Ldo_tm_fake_load
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and

@@ -3253,10 +3391,24 @@ kvmppc_restore_tm:
	/* Set the MSR RI since we have our registers back. */
	li r5, MSR_RI
	mtmsrd r5, 1

9:
	ld r0, PPC_LR_STKOFF(r1)
	mtlr r0
	blr

.Ldo_tm_fake_load:
	cmpwi r5, 1 /* check for suspended state */
	bgt 10f
	stb r5, HSTATE_FAKE_SUSPEND(r13)
	b 9b /* and return */
10:	stdu r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	mr r3, r4
	bl kvmhv_emulate_tm_rollback
	nop
	ld r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */
	addi r1, r1, PPC_MIN_STKFRM
	b 9b
#endif

/*
arch/powerpc/kvm/book3s_hv_tm.c (new file, 216 lines)
@@ -0,0 +1,216 @@
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
	u64 texasr, tfiar;
	u64 msr = vcpu->arch.shregs.msr;

	tfiar = vcpu->arch.pc & ~0x3ull;
	texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
		texasr |= TEXASR_SUSP;
	if (msr & MSR_PR) {
		texasr |= TEXASR_PR;
		tfiar |= 1;
	}
	vcpu->arch.tfiar = tfiar;
	/* Preserve ROT and TL fields of existing TEXASR */
	vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
}

/*
 * This gets called on a softpatch interrupt on POWER9 DD2.2 processors.
 * We expect to find a TM-related instruction to be emulated. The
 * instruction image is in vcpu->arch.emul_inst. If the guest was in
 * TM suspended or transactional state, the checkpointed state has been
 * reclaimed and is in the vcpu struct. The CPU is in virtual mode in
 * host context.
 */
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 msr = vcpu->arch.shregs.msr;
	u64 newmsr, bescr;
	int ra, rs;

	switch (instr & 0xfc0007ff) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.pc - 4;
		vcpu->arch.pc = vcpu->arch.shregs.srr0;
		return RESUME_GUEST;

	case PPC_INST_RFEBB:
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_EBB_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		bescr = vcpu->arch.bescr;
		/* expect to see a S->T transition requested */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       ((bescr >> 30) & 3) == 2));
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		vcpu->arch.bescr = bescr;
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.pc - 4;
		vcpu->arch.pc = vcpu->arch.ebbrr;
		return RESUME_GUEST;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		/* check this is a Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		return RESUME_GUEST;

	case PPC_INST_TSR:
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
		/* L=1 => tresume, L=0 => tsuspend */
		if (instr & (1 << 21)) {
			if (MSR_TM_SUSPENDED(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		} else {
			if (MSR_TM_TRANSACTIONAL(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
		}
		vcpu->arch.shregs.msr = msr;
		return RESUME_GUEST;

	case PPC_INST_TRECLAIM:
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If no transaction active, generate TM bad thing */
		if (!MSR_TM_ACTIVE(msr)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}
		/* If failure was not previously recorded, recompute TEXASR */
		if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
			ra = (instr >> 16) & 0x1f;
			if (ra)
				ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
			emulate_tx_failure(vcpu, ra);
		}

		copy_from_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
		return RESUME_GUEST;

	case PPC_INST_TRECHKPT:
		/* XXX do we need to check for PR=0 here? */
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If transaction active or TEXASR[FS] = 0, bad thing */
		if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}

		copy_to_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
		vcpu->arch.shregs.msr = msr | MSR_TS_S;
		return RESUME_GUEST;
	}

	/* What should we do here? We didn't recognize the instruction */
	WARN_ON_ONCE(1);
	return RESUME_GUEST;
}
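The switch in kvmhv_p9_tm_emulation() keys on instr & 0xfc0007ff, which keeps the primary opcode field plus the extended-opcode and Rc bits while masking out the register operand fields; operands are then extracted with shifts, as in rs = (instr >> 21) & 0x1f. A small stand-alone sketch of that decode (the PPC_INST_TSR value is copied from the ppc-opcode.h hunk above; the sample encoding is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PPC_INST_TSR 0x7c0005dd /* from the ppc-opcode.h hunk above */

int main(void)
{
	/* Hypothetical image: tsr. with the L bit (1 << 21) set, i.e. tresume */
	uint32_t instr = PPC_INST_TSR | (1u << 21);

	/* The L bit lies outside the 0xfc0007ff mask, so the match still hits. */
	if ((instr & 0xfc0007ff) == PPC_INST_TSR)
		printf("tsr. with L=%u\n", (instr >> 21) & 1);
	return 0;
}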
arch/powerpc/kvm/book3s_hv_tm_builtin.c (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

/*
 * This handles the cases where the guest is in real suspend mode
 * and we want to get back to the guest without dooming the transaction.
 * The caller has checked that the guest is in real-suspend mode
 * (MSR[TS] = S and the fake-suspend flag is not set).
 */
int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 newmsr, msr, bescr;
	int rs;

	switch (instr & 0xfc0007ff) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))
			return 0;
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.pc - 4;
		vcpu->arch.pc = vcpu->arch.shregs.srr0;
		return 1;

	case PPC_INST_RFEBB:
		/* check for PR=1 and arch 2.06 bit set in PCR */
		msr = vcpu->arch.shregs.msr;
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
			return 0;
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB) ||
		    ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))
			return 0;
		bescr = mfspr(SPRN_BESCR);
		/* expect to see a S->T transition requested */
		if (((bescr >> 30) & 3) != 2)
			return 0;
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		mtspr(SPRN_BESCR, bescr);
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.pc - 4;
		vcpu->arch.pc = mfspr(SPRN_EBBRR);
		return 1;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		msr = vcpu->arch.shregs.msr;
		/* check this is a Sx -> T1 transition */
		if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))
			return 0;
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		return 1;

	case PPC_INST_TSR:
		/* we know the MSR has the TS field = S (0b01) here */
		msr = vcpu->arch.shregs.msr;
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
			return 0;
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM))
			return 0;
		/* L=1 => tresume => set TS to T (0b10) */
		if (instr & (1 << 21))
			vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		/* Set CR0 to 0b0010 */
		vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000;
		return 1;
	}

	return 0;
}
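
The return value above is a contract with the real-mode caller: 1 means the Sx -> T1 transition was completed and the guest can be re-entered directly, 0 means fall back to the full kvmhv_p9_tm_emulation() path in virtual mode. A hypothetical C rendering of that caller (an assumption: the series appears to do this from real-mode assembly rather than C):

static int softpatch_realmode(struct kvm_vcpu *vcpu)
{
	if (kvmhv_p9_tm_emulation_early(vcpu))
		return 1;	/* state fixed up, return straight to the guest */
	return 0;		/* exit to virtual mode for full emulation */
}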

/*
 * This is called when we are returning to a guest in TM transactional
 * state. We roll the guest state back to the checkpointed state.
 */
void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shregs.msr &= ~MSR_TS_MASK;	/* go to N state */
	vcpu->arch.pc = vcpu->arch.tfhar;
	copy_from_checkpoint(vcpu);
	vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
}
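
One pattern recurs across both files: the CR0 nibble handed back to the guest. Following the ISA convention, these handlers set CR0 to 0b0 || MSR[TS] || 0b0 to report the previous transactional state, with 0b0010 for a tsr. taken from suspend and 0b1010 for the rollback's emulated tbegin. failure. A quick reference (macro names are illustrative, an editor's summary rather than commit code):

/* CR0 nibbles observed by the guest after emulated TM instructions */
#define TM_CR0_PREV_SUSPENDED		0x2	/* 0 || TS=0b01 || 0 */
#define TM_CR0_PREV_TRANSACTIONAL	0x4	/* 0 || TS=0b10 || 0 */
#define TM_CR0_TBEGIN_FAILED		0xa	/* 0b1010, set by the rollback */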

@ -646,10 +646,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = hv_enabled &&
			(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
			(!!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
			 cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
	default:
		r = 0;
		break;
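
This hunk widens KVM_CAP_PPC_HTM: an HV-enabled host now reports the capability when it relies on the POWER9 TM assist (CPU_FTR_P9_TM_HV_ASSIST), not only when it advertises native PPC_FEATURE2_HTM. Userspace sees the result through KVM_CHECK_EXTENSION; a minimal, self-contained check (standard KVM ioctl usage, not part of this diff):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	/* nonzero means the guest can be offered transactional memory */
	printf("KVM_CAP_PPC_HTM: %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM));
	return 0;
}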
@ -24,6 +24,7 @@
#include <asm/code-patching.h>
#include <asm/smp.h>
#include <asm/runlatch.h>
#include <asm/dbell.h>

#include "powernv.h"
#include "subcore.h"
@ -387,6 +388,86 @@ void power9_idle(void)
	power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * This is used in working around bugs in thread reconfiguration
 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 * memory and the way that XER[SO] is checkpointed.
 * This function forces the core into SMT4 by asking all other threads
 * not to stop, and by sending a message to any that are in a stop state.
 * Must be called with preemption disabled.
 *
 * DO NOT call this unless cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG) is
 * true; otherwise this function will hang the system, due to the
 * optimization in power9_idle_stop.
 */
void pnv_power9_force_smt4_catch(void)
{
	int cpu, cpu0, thr;
	struct paca_struct *tpaca;
	int awake_threads = 1;	/* this thread is awake */
	int poke_threads = 0;
	int need_awake = threads_per_core;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);
	tpaca = &paca[cpu0];
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_inc(&tpaca[thr].dont_stop);
	}
	/* order setting dont_stop vs testing requested_psscr */
	mb();
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (!tpaca[thr].requested_psscr)
			++awake_threads;
		else
			poke_threads |= (1 << thr);
	}

	/* If at least 3 threads are awake, the core is in SMT4 already */
	if (awake_threads < need_awake) {
		/* We have to wake some threads; we'll use msgsnd */
		for (thr = 0; thr < threads_per_core; ++thr) {
			if (poke_threads & (1 << thr)) {
				ppc_msgsnd_sync();
				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
					   tpaca[thr].hw_cpu_id);
			}
		}
		/* now spin until at least 3 threads are awake */
		do {
			for (thr = 0; thr < threads_per_core; ++thr) {
				if ((poke_threads & (1 << thr)) &&
				    !tpaca[thr].requested_psscr) {
					++awake_threads;
					poke_threads &= ~(1 << thr);
				}
			}
		} while (awake_threads < need_awake);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);

void pnv_power9_force_smt4_release(void)
{
	int cpu, cpu0, thr;
	struct paca_struct *tpaca;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);
	tpaca = &paca[cpu0];

	/* clear all the dont_stop flags */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_dec(&tpaca[thr].dont_stop);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
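
The comment block above gives the calling rules (preemption disabled, CPU_FTR_P9_TM_XER_SO_BUG required). A sketch of the intended bracket pattern on the caller's side, assuming a KVM path that must hold the core in SMT4 while it reclaims or checkpoints transactional state (illustrative, not a hunk from this commit):

/* Illustrative caller: hold the core in SMT4 around TM state surgery */
static void smt4_guarded_tm_fixup(void (*fixup)(void *), void *arg)
{
	preempt_disable();
	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG))
		pnv_power9_force_smt4_catch();

	fixup(arg);	/* e.g. treclaim/trechkpt work for a guest vCPU */

	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG))
		pnv_power9_force_smt4_release();
	preempt_enable();
}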

#ifdef CONFIG_HOTPLUG_CPU
static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{