/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define KVM_MAGIC_PAGE		(-4096)
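
/*
 * The magic (shared) page is mapped at the top of the guest's effective
 * address space: -4096 sign-extends to 0x...fffff000.  Because a base
 * register of 0 in a PowerPC load/store means a literal zero, accesses
 * of the form (KVM_MAGIC_PAGE + offset)(0) reach the page with absolute
 * addressing and without needing a spare register.
 */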
#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
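
/*
 * Note: the shared page's MSR field is 64 bits wide; a 32-bit kernel
 * only cares about the low word, which on these big-endian targets
 * lives at offset + 4.
 */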
#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);
#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if			\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
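
/*
 * While shared->critical holds the guest's r1, the host treats the vcpu
 * as being inside one of these templates and defers interrupt delivery,
 * so the scratch fields above cannot be clobbered under us.  Storing r2
 * ends the critical section because r2 is never equal to r1.
 */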
.global kvm_template_start
kvm_template_start:
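
/*
 * Everything between kvm_template_start and kvm_template_end is copied
 * into guest-allocated memory by the paravirt patching code in
 * arch/powerpc/kernel/kvm.c, which the guest runs at boot.  The *_offs
 * words after each template give, in instructions, the slots the
 * patcher rewrites: "reg" slots receive the register the original
 * instruction used, "orig_ins" receives the original privileged
 * instruction itself, and "branch" is fixed up to return behind the
 * patched call site.
 */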
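
/*
 * mtmsrd rX, 1 only changes MSR_EE and MSR_RI, so the emulation below
 * merges those two bits from rX into the cached MSR in the magic page
 * and only drops back into the hypervisor when interrupts become
 * enabled while one is pending.
 */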
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
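	/*
	 * "ori rD, rS, 0" is a plain register move; r0 here is only a
	 * placeholder that the patcher replaces with the register the
	 * original mtmsrd actually used.
	 */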
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
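	/*
	 * tlbsync is a placeholder as well: the patcher copies the
	 * guest's original mtmsrd into this slot, and executing that
	 * privileged instruction traps into the host, which can then
	 * inject the pending interrupt.
	 */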
	tlbsync
	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4
.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4
.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
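
/*
 * Full mtmsr emulation: if only the safe bits (MSR_EE | MSR_RI) differ
 * from the cached MSR, update the copy in the magic page and stay in
 * the guest; a change to any critical bit requires the real (trapping)
 * mtmsr.
 */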
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31
	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0
	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:
	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0
	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr
no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
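
/*
 * BookE wrtee/wrteei only move MSR_EE.  As with mtmsr, the new value is
 * folded into the cached MSR in the magic page, and the real (trapping)
 * wrtee only runs when interrupts get enabled while one is pending.
 */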
/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE
	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0
	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
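
/*
 * wrteei 0 is simpler still: clearing MSR_EE can never make a pending
 * interrupt deliverable, so no interrupt check and no trap path are
 * needed.
 */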
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE
	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:
.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4
.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4
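
/*
 * mtsrin writes a book3s_32 segment register.  While translation is off
 * (MSR_IR/MSR_DR clear) the new value can simply be cached in the magic
 * page's sr[] array; with translation on, the original (trapping)
 * mtsrin runs instead so the host can update the real segment register.
 */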
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* r30 = (rX >> 26) & 0x3c, the byte offset of sr[rX >> 28] */
	rlwinm	r30, r0, 6, 26, 29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4
.global kvm_template_end
kvm_template_end: