/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
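
/*
 * Layout of the SLB shadow save area: one 16-byte slot per bolted
 * entry, with the ESID at offset 0x0 and the VSID at offset 0x8.
 */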
#define SHADOW_SLB_ESID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
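
/*
 * Drop a bolted host translation: clear the valid bit (SLB_ESID_V)
 * of shadow entry "num" and write it back; entries that are already
 * invalid are skipped.
 */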
#define UNBOLT_SLB_ENTRY(num) \
	ld	r9, SHADOW_SLB_ESID(num)(r12); \
	/* Invalid? Skip. */; \
	rldicl.	r0, r9, 37, 63; \
	beq	slb_entry_skip_ ## num; \
	xoris	r9, r9, SLB_ESID_V@h; \
	std	r9, SHADOW_SLB_ESID(num)(r12); \
slb_entry_skip_ ## num:
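
/*
 * Undo UNBOLT_SLB_ENTRY: set the valid bit on shadow entry "num"
 * again and reinstall it in the SLB with slbmte; empty shadow slots
 * are skipped.
 */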
#define REBOLT_SLB_ENTRY(num) \
	ld	r10, SHADOW_SLB_ESID(num)(r11); \
	cmpdi	r10, 0; \
	beq	slb_exit_skip_ ## num; \
	oris	r10, r10, SLB_ESID_V@h; \
	ld	r9, SHADOW_SLB_VSID(num)(r11); \
	slbmte	r9, r10; \
	std	r10, SHADOW_SLB_ESID(num)(r11); \
slb_exit_skip_ ## num:
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:
	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R9 = guest IP
	 * R10 = guest MSR
	 * R11 = free
	 * R12 = free
	 * PACA[PACA_EXMC + EX_R9] = guest R9
	 * PACA[PACA_EXMC + EX_R10] = guest R10
	 * PACA[PACA_EXMC + EX_R11] = guest R11
	 * PACA[PACA_EXMC + EX_R12] = guest R12
	 * PACA[PACA_EXMC + EX_R13] = guest R13
	 * PACA[PACA_EXMC + EX_CCR] = guest CR
	 * PACA[PACA_EXMC + EX_R3] = guest XER
	 */
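
	/* Stage the guest PC and MSR in SRR0/SRR1; the RFI at the end
	 * of the entry path switches to them atomically */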
	mtsrr0	r9
	mtsrr1	r10

	mtspr	SPRN_SPRG_SCRATCH0, r0
/* Remove LPAR shadow entries */
#if SLB_NUM_BOLTED == 3

	ld	r12, PACA_SLBSHADOWPTR(r13)

	/* Save off the first entry so we can slbie it later */
	ld	r10, SHADOW_SLB_ESID(0)(r12)
	ld	r11, SHADOW_SLB_VSID(0)(r12)

	/* Remove bolted entries */
	UNBOLT_SLB_ENTRY(0)
	UNBOLT_SLB_ENTRY(1)
	UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif
/* Flush SLB */
slbia
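
	/* slbia leaves SLB entry 0 intact, so the first bolted entry
	 * (its ESID/VSID were saved in r10/r11 above) still needs an
	 * explicit slbie */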
	/* r10 = esid & ESID_MASK */
	rldicr	r10, r10, 0, 35
	/* r10 |= CLASS_BIT(VSID) */
	rldic	r12, r11, 56 - 36, 36
	or	r10, r10, r12
	slbie	r10

	isync
/* Fill SLB with our shadow */
	lbz	r12, PACA_KVM_SLB_MAX(r13)
	mulli	r12, r12, 16
	addi	r12, r12, PACA_KVM_SLB
	add	r12, r12, r13
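	/* r12 now points just past the last used shadow SLB entry and
	 * serves as the loop bound below */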
/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
	li	r11, PACA_KVM_SLB
	add	r11, r11, r13

slb_loop_enter:
	ld	r10, 0(r11)

	rldicl.	r0, r10, 37, 63
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter

slb_do_enter:
/* Enter guest */
	mfspr	r0, SPRN_SPRG_SCRATCH0

	ld	r9, (PACA_EXMC+EX_R9)(r13)
	ld	r10, (PACA_EXMC+EX_R10)(r13)
	ld	r12, (PACA_EXMC+EX_R12)(r13)

	lwz	r11, (PACA_EXMC+EX_CCR)(r13)
	mtcr	r11

	ld	r11, (PACA_EXMC+EX_R3)(r13)
	mtxer	r11

	ld	r11, (PACA_EXMC+EX_R11)(r13)
	ld	r13, (PACA_EXMC+EX_R13)(r13)

	RFI
kvmppc_handler_trampoline_enter_end:
/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0 = guest R13
	 * R01           = host R1
	 * R02           = host R2
	 * R10           = guest PC
	 * R11           = guest MSR
	 * R12           = exit handler id
	 * R13           = PACA
	 * PACA.exmc.CCR = guest CR
	 * PACA.exmc.R9  = guest R1
	 * PACA.exmc.R10 = guest R10
	 * PACA.exmc.R11 = guest R11
	 * PACA.exmc.R12 = guest R12
	 * PACA.exmc.R13 = guest R2
	 *
	 */
/* Save registers */
	std	r0, (PACA_EXMC+EX_SRR0)(r13)
	std	r9, (PACA_EXMC+EX_R3)(r13)
	std	r10, (PACA_EXMC+EX_LR)(r13)
	std	r11, (PACA_EXMC+EX_DAR)(r13)
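
	/* r0, r9, the guest PC (r10) and the guest MSR (r11) are
	 * parked in spare EXMC slots so they survive the instruction
	 * fetch and SLB rebolting below */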
	/*
	 * To easily fetch the last instruction, the one we took the
	 * #vmexit at, we exploit the fact that the virtual layout is
	 * still the same here, so we can simply ld from the guest's
	 * PC address.
	 */
/* We only load the last instruction when it's safe */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst

	b	no_ld_last_inst
ld_last_inst:
/* Save off the guest instruction we're at */
/* 1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR		/* Enable paging for data */
	mtmsr	r11

	/* 2) fetch the instruction */
	lwz	r0, 0(r10)

	/* 3) disable paging again */
	mtmsr	r9
no_ld_last_inst:
	/* Restore bolted entries from the shadow, fixing them up along the way */
	/* We don't store anything in entry 0, so we don't need to take care of it */
	slbia
	isync

#if SLB_NUM_BOLTED == 3

	ld	r11, PACA_SLBSHADOWPTR(r13)

	REBOLT_SLB_ENTRY(0)
	REBOLT_SLB_ENTRY(1)
	REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif
slb_do_exit:
/* Restore registers */
	ld	r11, (PACA_EXMC+EX_DAR)(r13)
	ld	r10, (PACA_EXMC+EX_LR)(r13)
	ld	r9, (PACA_EXMC+EX_R3)(r13)
/* Save last inst */
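	/* The EX_LR slot is free for this: the guest PC it held was
	 * just pulled back into r10 above */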
	stw	r0, (PACA_EXMC+EX_LR)(r13)
/* Save DAR and DSISR before going to paged mode */
	mfdar	r0
	std	r0, (PACA_EXMC+EX_DAR)(r13)
	mfdsisr	r0
	stw	r0, (PACA_EXMC+EX_DSISR)(r13)
/* RFI into the highmem handler */
	mfmsr	r0
	ori	r0, r0, MSR_IR|MSR_DR|MSR_RI	/* Enable paging */
	mtsrr1	r0
	ld	r0, PACASAVEDMSR(r13)		/* Highmem handler address */
	mtsrr0	r0

	mfspr	r0, SPRN_SPRG_SCRATCH0

	RFI
kvmppc_handler_trampoline_exit_end: