/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
/* Byte offsets of the ESID/VSID words of shadow-SLB entry 'num' inside the
 * slb_shadow save area (each entry is 0x10 bytes: ESID at +0, VSID at +8). */
#define SHADOW_SLB_ESID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)

/* Mark bolted shadow-SLB entry 'num' as invalid in the save area so the
 * hypervisor will not re-insert it while we run the guest.
 * Clobbers r0, r9; expects r12 = slb_shadow pointer. */
#define UNBOLT_SLB_ENTRY(num) \
	ld	r9, SHADOW_SLB_ESID(num)(r12);	\
	/* Invalid? Skip. */			\
	rldicl.	r0, r9, 37, 63;			\
	beq	slb_entry_skip_ ## num;		\
	xoris	r9, r9, SLB_ESID_V@h;		\
	std	r9, SHADOW_SLB_ESID(num)(r12);	\
slb_entry_skip_ ## num:
/* Re-validate bolted shadow-SLB entry 'num' and re-insert it into the SLB
 * via slbmte on the way back to the host.  An all-zero ESID word means the
 * slot was never populated and is skipped.
 * Clobbers r9, r10; expects r11 = slb_shadow pointer. */
#define REBOLT_SLB_ENTRY(num) \
	ld	r10, SHADOW_SLB_ESID(num)(r11);	\
	cmpdi	r10, 0;				\
	beq	slb_exit_skip_ ## num;		\
	oris	r10, r10, SLB_ESID_V@h;		\
	ld	r9, SHADOW_SLB_VSID(num)(r11);	\
	slbmte	r9, r10;			\
	std	r10, SHADOW_SLB_ESID(num)(r11);	\
slb_exit_skip_ ## num:
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR    = ~IR|DR		(instruction relocation off, data on)
	 * R13    = PACA
	 * R1     = host R1
	 * R2     = host R2
	 * R9     = guest IP
	 * R10    = guest MSR
	 * all other GPRs = free
	 * PACA[KVM_CR]  = guest CR
	 * PACA[KVM_XER] = guest XER
	 */

	/* Stage guest IP/MSR; the final RFI below switches to guest context */
	mtsrr0	r9
	mtsrr1	r10

	/* Activate guest mode, so faults get handled by KVM */
	li	r11, KVM_GUEST_MODE_GUEST
	stb	r11, PACA_KVM_IN_GUEST(r13)

	/* Remove LPAR shadow entries */

#if SLB_NUM_BOLTED == 3

	ld	r12, PACA_SLBSHADOWPTR(r13)

	/* Save off the first entry so we can slbie it later */
	ld	r10, SHADOW_SLB_ESID(0)(r12)
	ld	r11, SHADOW_SLB_VSID(0)(r12)

	/* Remove bolted entries */
	UNBOLT_SLB_ENTRY(0)
	UNBOLT_SLB_ENTRY(1)
	UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

	/* Flush SLB */

	slbia

	/* slbia leaves class-1 entry 0 translations cached; explicitly slbie
	 * the first bolted entry we saved above.  (NOTE(review): comments below
	 * say r0 but the value is built in r10 — the instruction stream is kept
	 * as-is.) */
	/* r0 = esid & ESID_MASK */
	rldicr	r10, r10, 0, 35
	/* r0 |= CLASS_BIT(VSID) */
	rldic	r12, r11, 56 - 36, 36
	or	r10, r10, r12
	slbie	r10

	isync

	/* Fill SLB with our shadow */

	/* r12 = end of guest shadow SLB array = &kvm_slb[slb_max] */
	lbz	r12, PACA_KVM_SLB_MAX(r13)
	mulli	r12, r12, 16
	addi	r12, r12, PACA_KVM_SLB
	add	r12, r12, r13

	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
	li	r11, PACA_KVM_SLB
	add	r11, r11, r13

slb_loop_enter:

	ld	r10, 0(r11)		/* ESID word of this shadow entry */

	/* Skip entries without the valid bit set */
	rldicl.	r0, r10, 37, 63
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)		/* VSID word */
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter

slb_do_enter:

	/* Enter guest: restore every guest GPR from the PACA save area */

	ld	r0, (PACA_KVM_R0)(r13)
	ld	r1, (PACA_KVM_R1)(r13)
	ld	r2, (PACA_KVM_R2)(r13)
	ld	r3, (PACA_KVM_R3)(r13)
	ld	r4, (PACA_KVM_R4)(r13)
	ld	r5, (PACA_KVM_R5)(r13)
	ld	r6, (PACA_KVM_R6)(r13)
	ld	r7, (PACA_KVM_R7)(r13)
	ld	r8, (PACA_KVM_R8)(r13)
	ld	r9, (PACA_KVM_R9)(r13)
	ld	r10, (PACA_KVM_R10)(r13)
	ld	r12, (PACA_KVM_R12)(r13)

	lwz	r11, (PACA_KVM_CR)(r13)
	mtcr	r11

	lwz	r11, (PACA_KVM_XER)(r13)
	mtxer	r11

	/* r11 and finally r13 last — after this no PACA access is possible */
	ld	r11, (PACA_KVM_R11)(r13)
	ld	r13, (PACA_KVM_R13)(r13)

	RFI
kvmppc_handler_trampoline_enter_end:
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:

	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0     = guest R13
	 * R12               = exit handler id
	 * R13               = PACA
	 * PACA.KVM.SCRATCH0 = guest R12
	 * PACA.KVM.SCRATCH1 = guest CR
	 *
	 */

	/* Save registers */

	std	r0, PACA_KVM_R0(r13)
	std	r1, PACA_KVM_R1(r13)
	std	r2, PACA_KVM_R2(r13)
	std	r3, PACA_KVM_R3(r13)
	std	r4, PACA_KVM_R4(r13)
	std	r5, PACA_KVM_R5(r13)
	std	r6, PACA_KVM_R6(r13)
	std	r7, PACA_KVM_R7(r13)
	std	r8, PACA_KVM_R8(r13)
	std	r9, PACA_KVM_R9(r13)
	std	r10, PACA_KVM_R10(r13)
	std	r11, PACA_KVM_R11(r13)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, PACA_KVM_HOST_R1(r13)
	ld	r2, PACA_KVM_HOST_R2(r13)

	/* Save guest PC and MSR in GPRs */
	mfsrr0	r3
	mfsrr1	r4

	/* Get scratch'ed off registers */
	mfspr	r9, SPRN_SPRG_SCRATCH0
	std	r9, PACA_KVM_R13(r13)

	ld	r8, PACA_KVM_SCRATCH0(r13)
	std	r8, PACA_KVM_R12(r13)

	lwz	r7, PACA_KVM_SCRATCH1(r13)
	stw	r7, PACA_KVM_CR(r13)

	/* Save more register state */

	mfxer	r6
	stw	r6, PACA_KVM_XER(r13)

	mfdar	r5
	mfdsisr	r6

	/*
	 * In order for us to easily get the last instruction
	 * we got the #vmexit at, we exploit the fact that the
	 * virtual layout is still the same here, so we can just
	 * ld from the guest's PC address
	 */

	/* We only load the last instruction when it's safe */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst

	b	no_ld_last_inst

ld_last_inst:
	/* Save off the guest instruction we're at */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r9, KVM_GUEST_MODE_SKIP
	stb	r9, PACA_KVM_IN_GUEST(r13)

	/*    1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR			/* Enable paging for data */
	mtmsr	r11
	/*    2) fetch the instruction */
	li	r0, KVM_INST_FETCH_FAILED	/* In case lwz faults */
	lwz	r0, 0(r3)
	/*    3) disable paging again */
	mtmsr	r9

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, PACA_KVM_IN_GUEST(r13)

	/* Restore bolted entries from the shadow and fix it along the way */

	/* We don't store anything in entry 0, so we don't need to take care of it */
	slbia
	isync

#if SLB_NUM_BOLTED == 3

	ld	r11, PACA_SLBSHADOWPTR(r13)

	REBOLT_SLB_ENTRY(0)
	REBOLT_SLB_ENTRY(1)
	REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

slb_do_exit:

	/* Register usage at this point:
	 *
	 * R0  = guest last inst
	 * R1  = host R1
	 * R2  = host R2
	 * R3  = guest PC
	 * R4  = guest MSR
	 * R5  = guest DAR
	 * R6  = guest DSISR
	 * R12 = exit handler id
	 * R13 = PACA
	 * PACA.KVM.* = guest *
	 *
	 */

	/* RFI into the highmem handler */
	mfmsr	r7
	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI	/* Enable paging */
	mtsrr1	r7
	ld	r8, PACA_KVM_VMHANDLER(r13)	/* Highmem handler address */
	mtsrr0	r8

	RFI
kvmppc_handler_trampoline_exit_end: