/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>

#define KVM_MAGIC_PAGE		(-4096)
2010-07-29 14:48:03 +02:00
# ifdef C O N F I G _ 6 4 B I T
# define L L 6 4 ( r e g , o f f s , r e g 2 ) l d r e g , ( o f f s ) ( r e g 2 )
# define S T L 6 4 ( r e g , o f f s , r e g 2 ) s t d r e g , ( o f f s ) ( r e g 2 )
# else
# define L L 6 4 ( r e g , o f f s , r e g 2 ) l w z r e g , ( o f f s + 4 ) ( r e g 2 )
# define S T L 6 4 ( r e g , o f f s , r e g 2 ) s t w r e g , ( o f f s + 4 ) ( r e g 2 )
# endif
# define S C R A T C H _ S A V E \
/ * Enable c r i t i c a l s e c t i o n . W e a r e c r i t i c a l i f \
shared- > c r i t i c a l = = r1 * / \
STL6 4 ( r1 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ C R I T I C A L , 0 ) ; \
\
/* Save state */ \
PPC_ S T L r31 , ( K V M _ M A G I C _ P A G E + K V M _ M A G I C _ S C R A T C H 1 ) ( 0 ) ; \
PPC_ S T L r30 , ( K V M _ M A G I C _ P A G E + K V M _ M A G I C _ S C R A T C H 2 ) ( 0 ) ; \
mfcr r31 ; \
stw r31 , ( K V M _ M A G I C _ P A G E + K V M _ M A G I C _ S C R A T C H 3 ) ( 0 ) ;
# define S C R A T C H _ R E S T O R E \
/* Restore state */ \
PPC_ L L r31 , ( K V M _ M A G I C _ P A G E + K V M _ M A G I C _ S C R A T C H 1 ) ( 0 ) ; \
lwz r30 , ( K V M _ M A G I C _ P A G E + K V M _ M A G I C _ S C R A T C H 3 ) ( 0 ) ; \
mtcr r30 ; \
PPC_ L L r30 , ( K V M _ M A G I C _ P A G E + K V M _ M A G I C _ S C R A T C H 2 ) ( 0 ) ; \
\
/ * Disable c r i t i c a l s e c t i o n . W e a r e c r i t i c a l i f \
shared- > c r i t i c a l = = r1 a n d r2 i s a l w a y s ! = r1 * / \
STL6 4 ( r2 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ C R I T I C A L , 0 ) ;
2010-07-29 14:48:04 +02:00
2011-12-01 20:22:53 +00:00
.global kvm_template_start
kvm_template_start :
2010-07-29 14:48:04 +02:00
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd :
SCRATCH_ S A V E
/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
LL6 4 ( r31 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ M S R , 0 )
lis r30 , ( ~ ( M S R _ E E | M S R _ R I ) ) @h
ori r30 , r30 , ( ~ ( M S R _ E E | M S R _ R I ) ) @l
and r31 , r31 , r30
/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg :
2010-08-05 15:44:41 +02:00
ori r30 , r0 , 0
andi. r30 , r30 , ( M S R _ E E | M S R _ R I )
2010-07-29 14:48:04 +02:00
or r31 , r31 , r30
/* Put MSR back into magic page */
STL6 4 ( r31 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ M S R , 0 )
/* Check if we have to fetch an interrupt */
lwz r31 , ( K V M _ M A G I C _ P A G E + K V M _ M A G I C _ I N T ) ( 0 )
cmpwi r31 , 0
beq+ n o _ c h e c k
/* Check if we may trigger an interrupt */
andi. r30 , r30 , M S R _ E E
beq n o _ c h e c k
SCRATCH_ R E S T O R E
/* Nag hypervisor */
2010-08-05 15:44:41 +02:00
kvm_emulate_mtmsrd_orig_ins :
2010-07-29 14:48:04 +02:00
tlbsync
b k v m _ e m u l a t e _ m t m s r d _ b r a n c h
no_check :
SCRATCH_ R E S T O R E
/* Go back to caller */
kvm_emulate_mtmsrd_branch :
b .
kvm_emulate_mtmsrd_end :
.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs :
.long ( kvm_ e m u l a t e _ m t m s r d _ b r a n c h - k v m _ e m u l a t e _ m t m s r d ) / 4
.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs :
.long ( kvm_ e m u l a t e _ m t m s r d _ r e g - k v m _ e m u l a t e _ m t m s r d ) / 4
2010-08-05 15:44:41 +02:00
.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs :
.long ( kvm_ e m u l a t e _ m t m s r d _ o r i g _ i n s - k v m _ e m u l a t e _ m t m s r d ) / 4
2010-07-29 14:48:04 +02:00
.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len :
.long ( kvm_ e m u l a t e _ m t m s r d _ e n d - k v m _ e m u l a t e _ m t m s r d ) / 4
2010-07-29 14:48:05 +02:00
2012-05-20 23:21:53 +00:00
# define M S R _ S A F E _ B I T S ( M S R _ E E | M S R _ R I )
2010-07-29 14:48:05 +02:00
# define M S R _ C R I T I C A L _ B I T S ~ M S R _ S A F E _ B I T S
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr :
SCRATCH_ S A V E
/* Fetch old MSR in r31 */
LL6 4 ( r31 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ M S R , 0 )
/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1 :
2010-08-05 11:26:04 +02:00
ori r30 , r0 , 0
xor r31 , r30 , r31
2010-07-29 14:48:05 +02:00
/* Check if we need to really do mtmsr */
LOAD_ R E G _ I M M E D I A T E ( r30 , M S R _ C R I T I C A L _ B I T S )
and. r31 , r31 , r30
/* No critical bits changed? Maybe we can stay in the guest. */
beq m a y b e _ s t a y _ i n _ g u e s t
do_mtmsr :
SCRATCH_ R E S T O R E
/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins :
mtmsr r0
b k v m _ e m u l a t e _ m t m s r _ b r a n c h
maybe_stay_in_guest :
2010-08-05 11:26:04 +02:00
/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2 :
ori r30 , r0 , 0
2011-10-13 15:17:08 +05:30
/* Put MSR into magic page because we don't call mtmsr */
STL6 4 ( r30 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ M S R , 0 )
2010-07-29 14:48:05 +02:00
/* Check if we have to fetch an interrupt */
lwz r31 , ( K V M _ M A G I C _ P A G E + K V M _ M A G I C _ I N T ) ( 0 )
cmpwi r31 , 0
beq+ n o _ m t m s r
/* Check if we may trigger an interrupt */
2010-08-05 11:26:04 +02:00
andi. r31 , r30 , M S R _ E E
2011-10-13 15:17:08 +05:30
bne d o _ m t m s r
2010-07-29 14:48:05 +02:00
no_mtmsr :
SCRATCH_ R E S T O R E
/* Go back to caller */
kvm_emulate_mtmsr_branch :
b .
kvm_emulate_mtmsr_end :
.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs :
.long ( kvm_ e m u l a t e _ m t m s r _ b r a n c h - k v m _ e m u l a t e _ m t m s r ) / 4
.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs :
.long ( kvm_ e m u l a t e _ m t m s r _ r e g 1 - k v m _ e m u l a t e _ m t m s r ) / 4
.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs :
.long ( kvm_ e m u l a t e _ m t m s r _ r e g 2 - k v m _ e m u l a t e _ m t m s r ) / 4
.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs :
.long ( kvm_ e m u l a t e _ m t m s r _ o r i g _ i n s - k v m _ e m u l a t e _ m t m s r ) / 4
.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len :
.long ( kvm_ e m u l a t e _ m t m s r _ e n d - k v m _ e m u l a t e _ m t m s r ) / 4
2010-07-29 14:48:06 +02:00
2019-09-11 21:57:46 +10:00
# ifdef C O N F I G _ B O O K E
2011-11-08 18:23:28 -06:00
/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee :
2010-07-29 14:48:06 +02:00
2011-11-08 18:23:28 -06:00
SCRATCH_ S A V E
/* Fetch old MSR in r31 */
LL6 4 ( r31 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ M S R , 0 )
2010-07-29 14:48:06 +02:00
2011-11-08 18:23:28 -06:00
/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg :
ori r30 , r0 , 0
rlwimi r31 , r30 , 0 , M S R _ E E
/ *
* If M S R [ E E ] i s n o w s e t , c h e c k f o r a p e n d i n g i n t e r r u p t .
* We c o u l d s k i p t h i s i f M S R [ E E ] w a s a l r e a d y o n , b u t t h a t
* should b e r a r e , s o d o n ' t b o t h e r .
* /
andi. r30 , r30 , M S R _ E E
2010-07-29 14:48:06 +02:00
2011-11-08 18:23:28 -06:00
/* Put MSR into magic page because we don't call wrtee */
STL6 4 ( r31 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ M S R , 0 )
beq n o _ w r t e e
/* Check if we have to fetch an interrupt */
lwz r30 , ( K V M _ M A G I C _ P A G E + K V M _ M A G I C _ I N T ) ( 0 )
cmpwi r30 , 0
bne d o _ w r t e e
no_wrtee :
SCRATCH_ R E S T O R E
/* Go back to caller */
kvm_emulate_wrtee_branch :
b .
do_wrtee :
SCRATCH_ R E S T O R E
/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins :
wrtee r0
b k v m _ e m u l a t e _ w r t e e _ b r a n c h
kvm_emulate_wrtee_end :
.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs :
.long ( kvm_ e m u l a t e _ w r t e e _ b r a n c h - k v m _ e m u l a t e _ w r t e e ) / 4
.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs :
.long ( kvm_ e m u l a t e _ w r t e e _ r e g - k v m _ e m u l a t e _ w r t e e ) / 4
.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs :
.long ( kvm_ e m u l a t e _ w r t e e _ o r i g _ i n s - k v m _ e m u l a t e _ w r t e e ) / 4
.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len :
.long ( kvm_ e m u l a t e _ w r t e e _ e n d - k v m _ e m u l a t e _ w r t e e ) / 4
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0 :
2010-07-29 14:48:06 +02:00
SCRATCH_ S A V E
/* Fetch old MSR in r31 */
LL6 4 ( r31 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ M S R , 0 )
/* Remove MSR_EE from old MSR */
2011-11-08 18:23:28 -06:00
rlwinm r31 , r31 , 0 , ~ M S R _ E E
2010-07-29 14:48:06 +02:00
/* Write new MSR value back */
STL6 4 ( r31 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ M S R , 0 )
SCRATCH_ R E S T O R E
/* Go back to caller */
2011-11-08 18:23:28 -06:00
kvm_emulate_wrteei_0_branch :
2010-07-29 14:48:06 +02:00
b .
2011-11-08 18:23:28 -06:00
kvm_emulate_wrteei_0_end :
2010-07-29 14:48:06 +02:00
2011-11-08 18:23:28 -06:00
.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs :
.long ( kvm_ e m u l a t e _ w r t e e i _ 0 _ b r a n c h - k v m _ e m u l a t e _ w r t e e i _ 0 ) / 4
2010-08-03 10:39:35 +02:00
2011-11-08 18:23:28 -06:00
.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len :
.long ( kvm_ e m u l a t e _ w r t e e i _ 0 _ e n d - k v m _ e m u l a t e _ w r t e e i _ 0 ) / 4
2010-08-03 10:39:35 +02:00
2019-09-11 21:57:46 +10:00
# endif / * C O N F I G _ B O O K E * /
# ifdef C O N F I G _ P P C _ B O O K 3 S _ 3 2
2010-08-03 10:39:35 +02:00
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin :
SCRATCH_ S A V E
LL6 4 ( r31 , K V M _ M A G I C _ P A G E + K V M _ M A G I C _ M S R , 0 )
andi. r31 , r31 , M S R _ D R | M S R _ I R
beq k v m _ e m u l a t e _ m t s r i n _ r e g 1
SCRATCH_ R E S T O R E
kvm_emulate_mtsrin_orig_ins :
nop
b k v m _ e m u l a t e _ m t s r i n _ b r a n c h
kvm_emulate_mtsrin_reg1 :
/* rX >> 26 */
rlwinm r30 ,r0 ,6 ,2 6 ,2 9
kvm_emulate_mtsrin_reg2 :
stw r0 , ( K V M _ M A G I C _ P A G E + K V M _ M A G I C _ S R ) ( r30 )
SCRATCH_ R E S T O R E
/* Go back to caller */
kvm_emulate_mtsrin_branch :
b .
kvm_emulate_mtsrin_end :
.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs :
.long ( kvm_ e m u l a t e _ m t s r i n _ b r a n c h - k v m _ e m u l a t e _ m t s r i n ) / 4
.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs :
.long ( kvm_ e m u l a t e _ m t s r i n _ r e g 1 - k v m _ e m u l a t e _ m t s r i n ) / 4
.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs :
.long ( kvm_ e m u l a t e _ m t s r i n _ r e g 2 - k v m _ e m u l a t e _ m t s r i n ) / 4
.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs :
.long ( kvm_ e m u l a t e _ m t s r i n _ o r i g _ i n s - k v m _ e m u l a t e _ m t s r i n ) / 4
.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len :
.long ( kvm_ e m u l a t e _ m t s r i n _ e n d - k v m _ e m u l a t e _ m t s r i n ) / 4
2011-12-01 20:22:53 +00:00
2019-09-11 21:57:46 +10:00
# endif / * C O N F I G _ P P C _ B O O K 3 S _ 3 2 * /
/*
 * NOTE(review): the following commit-message text was pasted into the
 * source as raw prose (it would not assemble); it is preserved here as
 * a comment because it documents why kvm_tmp lives in .text:
 *
 * powerpc/kvm: Move kvm_tmp into .text, shrink to 64K
 *
 * In some configurations of KVM, guests binary patch themselves to
 * avoid/reduce trapping into the hypervisor. For some instructions this
 * requires replacing one instruction with a sequence of instructions.
 *
 * For those cases we need to write the sequence of instructions
 * somewhere and then patch the location of the original instruction to
 * branch to the sequence. That requires that the location of the
 * sequence be within 32MB of the original instruction.
 *
 * The current solution for this is that we create a 1MB array in BSS,
 * write sequences into there, and then free the remainder of the array.
 *
 * This has a few problems:
 *  - it confuses kmemleak.
 *  - it confuses lockdep.
 *  - it requires mapping kvm_tmp executable, which can cause adjacent
 *    areas to also be mapped executable if we're using 16M pages for the
 *    linear mapping.
 *  - the 32MB limit can be exceeded if the kernel is big enough,
 *    especially with STRICT_KERNEL_RWX enabled, which then prevents the
 *    patching from working at all.
 *
 * We can fix all those problems by making kvm_tmp just a region of
 * regular .text. However currently it's 1MB in size, and we don't want
 * to waste 1MB of text. In practice however I only see ~30KB of kvm_tmp
 * being used even for an allyes_config. So shrink kvm_tmp to 64K, which
 * ought to be enough for everyone, and move it into .text.
 *
 * Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
 * Link: https://lore.kernel.org/r/20190911115746.12433-1-mpe@ellerman.id.au
 */
.balign 4
.global kvm_tmp
kvm_tmp :
.space ( 6 4 * 1 0 2 4 )
.global kvm_tmp_end
kvm_tmp_end :
2011-12-01 20:22:53 +00:00
.global kvm_template_end
kvm_template_end :