/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
/* Real mode helpers */
# if d e f i n e d ( C O N F I G _ P P C _ B O O K 3 S _ 6 4 )
# define G E T _ S H A D O W _ V C P U ( r e g ) \
addi r e g , r13 , P A C A _ K V M _ S V C P U
# elif d e f i n e d ( C O N F I G _ P P C _ B O O K 3 S _ 3 2 )
# define G E T _ S H A D O W _ V C P U ( r e g ) \
tophys( r e g , r2 ) ; \
lwz r e g , ( T H R E A D + T H R E A D _ K V M _ S V C P U ) ( r e g ) ; \
tophys( r e g , r e g )
# endif
/* Disable for nested KVM */
# define U S E _ Q U I C K _ L A S T _ I N S T
/* Get helper functions for subarch specific functionality */
# if d e f i n e d ( C O N F I G _ P P C _ B O O K 3 S _ 6 4 )
# include " b o o k 3 s _ 6 4 _ s l b . S "
# elif d e f i n e d ( C O N F I G _ P P C _ B O O K 3 S _ 3 2 )
# include " b o o k 3 s _ 3 2 _ s r . S "
# endif
/ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* *
* Entry c o d e *
* *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * /
.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter :
/ * Required s t a t e :
*
* MSR = ~ I R | D R
* R1 3 = P A C A
* R1 = h o s t R 1
* R2 = h o s t R 2
* R1 0 = g u e s t M S R
* all o t h e r v o l a t i l e G P R S = f r e e
* SVCPU[ C R ] = g u e s t C R
* SVCPU[ X E R ] = g u e s t X E R
* SVCPU[ C T R ] = g u e s t C T R
* SVCPU[ L R ] = g u e s t L R
* /
/* r3 = shadow vcpu */
GET_ S H A D O W _ V C P U ( r3 )
/* Move SRR0 and SRR1 into the respective regs */
PPC_ L L r9 , S V C P U _ P C ( r3 )
mtsrr0 r9
mtsrr1 r10
/* Activate guest mode, so faults get handled by KVM */
li r11 , K V M _ G U E S T _ M O D E _ G U E S T
stb r11 , S V C P U _ I N _ G U E S T ( r3 )
/* Switch to guest segment. This is subarch specific. */
LOAD_ G U E S T _ S E G M E N T S
/* Enter guest */
PPC_ L L r4 , ( S V C P U _ C T R ) ( r3 )
PPC_ L L r5 , ( S V C P U _ L R ) ( r3 )
lwz r6 , ( S V C P U _ C R ) ( r3 )
lwz r7 , ( S V C P U _ X E R ) ( r3 )
mtctr r4
mtlr r5
mtcr r6
mtxer r7
PPC_ L L r0 , ( S V C P U _ R 0 ) ( r3 )
PPC_ L L r1 , ( S V C P U _ R 1 ) ( r3 )
PPC_ L L r2 , ( S V C P U _ R 2 ) ( r3 )
PPC_ L L r4 , ( S V C P U _ R 4 ) ( r3 )
PPC_ L L r5 , ( S V C P U _ R 5 ) ( r3 )
PPC_ L L r6 , ( S V C P U _ R 6 ) ( r3 )
PPC_ L L r7 , ( S V C P U _ R 7 ) ( r3 )
PPC_ L L r8 , ( S V C P U _ R 8 ) ( r3 )
PPC_ L L r9 , ( S V C P U _ R 9 ) ( r3 )
PPC_ L L r10 , ( S V C P U _ R 1 0 ) ( r3 )
PPC_ L L r11 , ( S V C P U _ R 1 1 ) ( r3 )
PPC_ L L r12 , ( S V C P U _ R 1 2 ) ( r3 )
PPC_ L L r13 , ( S V C P U _ R 1 3 ) ( r3 )
PPC_ L L r3 , ( S V C P U _ R 3 ) ( r3 )
RFI
kvmppc_handler_trampoline_enter_end :
/ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* *
* Exit c o d e *
* *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * /
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit :
/ * Register u s a g e a t t h i s p o i n t :
*
* SPRG_ S C R A T C H 0 = g u e s t R 1 3
* R1 2 = e x i t h a n d l e r i d
* R1 3 = s h a d o w v c p u - S H A D O W _ V C P U _ O F F [ =PACA o n P P C 6 4 ]
* SVCPU. S C R A T C H 0 = g u e s t R 1 2
* SVCPU. S C R A T C H 1 = g u e s t C R
*
* /
/* Save registers */
PPC_ S T L r0 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 0 ) ( r13 )
PPC_ S T L r1 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 1 ) ( r13 )
PPC_ S T L r2 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 2 ) ( r13 )
PPC_ S T L r3 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 3 ) ( r13 )
PPC_ S T L r4 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 4 ) ( r13 )
PPC_ S T L r5 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 5 ) ( r13 )
PPC_ S T L r6 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 6 ) ( r13 )
PPC_ S T L r7 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 7 ) ( r13 )
PPC_ S T L r8 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 8 ) ( r13 )
PPC_ S T L r9 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 9 ) ( r13 )
PPC_ S T L r10 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 1 0 ) ( r13 )
PPC_ S T L r11 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 1 1 ) ( r13 )
/* Restore R1/R2 so we can handle faults */
PPC_ L L r1 , ( S H A D O W _ V C P U _ O F F + S V C P U _ H O S T _ R 1 ) ( r13 )
PPC_ L L r2 , ( S H A D O W _ V C P U _ O F F + S V C P U _ H O S T _ R 2 ) ( r13 )
/* Save guest PC and MSR */
mfsrr0 r3
mfsrr1 r4
PPC_ S T L r3 , ( S H A D O W _ V C P U _ O F F + S V C P U _ P C ) ( r13 )
PPC_ S T L r4 , ( S H A D O W _ V C P U _ O F F + S V C P U _ S H A D O W _ S R R 1 ) ( r13 )
/* Get scratch'ed off registers */
mfspr r9 , S P R N _ S P R G _ S C R A T C H 0
PPC_ L L r8 , ( S H A D O W _ V C P U _ O F F + S V C P U _ S C R A T C H 0 ) ( r13 )
lwz r7 , ( S H A D O W _ V C P U _ O F F + S V C P U _ S C R A T C H 1 ) ( r13 )
PPC_ S T L r9 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 1 3 ) ( r13 )
PPC_ S T L r8 , ( S H A D O W _ V C P U _ O F F + S V C P U _ R 1 2 ) ( r13 )
stw r7 , ( S H A D O W _ V C P U _ O F F + S V C P U _ C R ) ( r13 )
/* Save more register state */
mfxer r5
mfdar r6
mfdsisr r7
mfctr r8
mflr r9
stw r5 , ( S H A D O W _ V C P U _ O F F + S V C P U _ X E R ) ( r13 )
PPC_ S T L r6 , ( S H A D O W _ V C P U _ O F F + S V C P U _ F A U L T _ D A R ) ( r13 )
stw r7 , ( S H A D O W _ V C P U _ O F F + S V C P U _ F A U L T _ D S I S R ) ( r13 )
PPC_ S T L r8 , ( S H A D O W _ V C P U _ O F F + S V C P U _ C T R ) ( r13 )
PPC_ S T L r9 , ( S H A D O W _ V C P U _ O F F + S V C P U _ L R ) ( r13 )
/ *
* In o r d e r f o r u s t o e a s i l y g e t t h e l a s t i n s t r u c t i o n ,
* we g o t t h e #v m e x i t a t , w e e x p l o i t t h e f a c t t h a t t h e
* virtual l a y o u t i s s t i l l t h e s a m e h e r e , s o w e c a n j u s t
* ld f r o m t h e g u e s t ' s P C a d d r e s s
* /
/* We only load the last instruction when it's safe */
cmpwi r12 , B O O K 3 S _ I N T E R R U P T _ D A T A _ S T O R A G E
beq l d _ l a s t _ i n s t
cmpwi r12 , B O O K 3 S _ I N T E R R U P T _ P R O G R A M
beq l d _ l a s t _ i n s t
2010-04-20 02:49:49 +02:00
cmpwi r12 , B O O K 3 S _ I N T E R R U P T _ A L I G N M E N T
beq- l d _ l a s t _ i n s t
2010-04-16 00:11:35 +02:00
b n o _ l d _ l a s t _ i n s t
ld_last_inst :
/* Save off the guest instruction we're at */
/* In case lwz faults */
li r0 , K V M _ I N S T _ F E T C H _ F A I L E D
# ifdef U S E _ Q U I C K _ L A S T _ I N S T
/ * Set g u e s t m o d e t o ' j u m p o v e r i n s t r u c t i o n ' s o i f l w z f a u l t s
* we' l l j u s t c o n t i n u e a t t h e n e x t I P . * /
li r9 , K V M _ G U E S T _ M O D E _ S K I P
stb r9 , ( S H A D O W _ V C P U _ O F F + S V C P U _ I N _ G U E S T ) ( r13 )
/* 1) enable paging for data */
mfmsr r9
ori r11 , r9 , M S R _ D R / * E n a b l e p a g i n g f o r d a t a * /
mtmsr r11
sync
/* 2) fetch the instruction */
lwz r0 , 0 ( r3 )
/* 3) disable paging again */
mtmsr r9
sync
# endif
stw r0 , ( S H A D O W _ V C P U _ O F F + S V C P U _ L A S T _ I N S T ) ( r13 )
no_ld_last_inst :
/* Unset guest mode */
li r9 , K V M _ G U E S T _ M O D E _ N O N E
stb r9 , ( S H A D O W _ V C P U _ O F F + S V C P U _ I N _ G U E S T ) ( r13 )
/* Switch back to host MMU */
LOAD_ H O S T _ S E G M E N T S
/ * Register u s a g e a t t h i s p o i n t :
*
* R1 = h o s t R 1
* R2 = h o s t R 2
* R1 2 = e x i t h a n d l e r i d
* R1 3 = s h a d o w v c p u - S H A D O W _ V C P U _ O F F [ =PACA o n P P C 6 4 ]
* SVCPU. * = g u e s t *
*
* /
/* RFI into the highmem handler */
mfmsr r7
ori r7 , r7 , M S R _ I R | M S R _ D R | M S R _ R I | M S R _ M E / * E n a b l e p a g i n g * /
mtsrr1 r7
/* Load highmem handler address */
PPC_ L L r8 , ( S H A D O W _ V C P U _ O F F + S V C P U _ V M H A N D L E R ) ( r13 )
mtsrr0 r8
RFI
kvmppc_handler_trampoline_exit_end :