/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3
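
/*
 * kernel_entry builds the pt_regs frame on the kernel stack: it saves
 * x0-x29, lr, the aborted SP, the aborted PC (ELR_EL1) and the aborted
 * PSTATE (SPSR_EL1), so the C handlers can be passed a struct pt_regs.
 */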
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	mov	tsk, sp
	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TI_ADDR_LIMIT]
	ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	*/
	.endm
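
/*
 * kernel_exit undoes kernel_entry: it restores ELR_EL1/SPSR_EL1 and the
 * general purpose registers from the pt_regs frame, unwinds the stack
 * and returns from the exception with eret.
 */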
	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if_not ARM64_WORKAROUND_845719
	nop
	nop
#ifdef CONFIG_PID_IN_CONTEXTIDR
	nop
#endif
alternative_else
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
1:
alternative_endif
#endif
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel
	.endm

	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm
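
/*
 * irq_stack_entry switches from the task stack to this CPU's IRQ stack
 * (it is a no-op if we are not currently on a task stack);
 * irq_stack_exit switches back to the original stack preserved in x19.
 */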
	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the current thread_info, if the top
	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
	 * should switch to the irq stack.
	 */
	and	x25, x19, #~(THREAD_SIZE - 1)
	cmp	x25, tsk
	b.ne	9998f
	this_cpu_ptr irq_stack, x25, x26
	mov	x26, #IRQ_STACK_START_SP
	add	x26, x25, x26
	/* switch to the irq stack */
	mov	sp, x26

	/*
	 * Add a dummy stack frame, this non-standard format is fixed up
	 * by unwind_frame()
	 */
	stp	x29, x19, [sp, #-16]!
	mov	x29, sp

9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.text

/*
 * Exception vectors.
 */
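/*
 * The table below is 2KB aligned (.align 11) and each ventry expands to
 * a 128-byte slot, giving the 16-entry layout that VBAR_EL1 expects.
 */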
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
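/*
 * el1_sync demultiplexes synchronous exceptions taken from EL1 using the
 * exception class field of ESR_EL1 and branches to the matching handler.
 */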
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1

el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort

el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr

el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1

el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
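/*
 * el0_sync demultiplexes synchronous exceptions taken from 64-bit EL0
 * (syscalls, aborts, FP/ASIMD traps, alignment faults, debug) using the
 * exception class field of ESR_EL1.
 */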
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user

el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user

el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user

el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user

el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user

el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
2014-09-29 14:44:01 +04:00
	bl	do_undefinstr
	b	ret_to_user
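	/*
	 * Note: do_undefinstr() is handed the saved pt_regs (sp). It typically
	 * tries any registered undef hooks (instruction emulation) first; if
	 * nothing claims the instruction, the task gets a SIGILL.
	 */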
2016-06-28 20:07:32 +03:00
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
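	/*
	 * Note: x25 is assumed to still hold the ESR_EL1 syndrome read at
	 * el0_sync (earlier in this file), so do_sysinstr(esr, regs) can
	 * decode which system/cache maintenance instruction trapped.
	 */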
2012-03-05 15:49:27 +04:00
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
2014-04-29 22:04:06 +04:00
	bl	do_debug_exception
	enable_dbg
2014-05-30 23:34:15 +04:00
	ct_user_exit
2014-04-29 22:04:06 +04:00
	b	ret_to_user
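	/*
	 * Note: unlike the other EL0 paths, do_debug_exception() is invoked
	 * with debug exceptions and interrupts still masked; debug is only
	 * re-enabled (enable_dbg above) after the handler has run, so we
	 * cannot recursively take a debug exception while handling this one.
	 */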
2012-03-05 15:49:27 +04:00
el0_inv:
2014-04-29 22:04:06 +04:00
	enable_dbg
2014-05-30 23:34:15 +04:00
	ct_user_exit
2012-03-05 15:49:27 +04:00
	mov	x0, sp
	mov	x1, #BAD_SYNC
2015-07-07 20:00:49 +03:00
	mov	x2, x25
2014-09-29 14:44:01 +04:00
	bl	bad_mode
	b	ret_to_user
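	/*
	 * Note: bad_mode(regs, BAD_SYNC, esr) is the catch-all for synchronous
	 * exception classes this handler does not know how to deal with.
	 */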
2012-03-05 15:49:27 +04:00
ENDPROC(el0_sync)
	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
2013-11-12 21:11:53 +04:00
2014-05-30 23:34:15 +04:00
	ct_user_exit
2012-03-05 15:49:27 +04:00
	irq_handler
2013-11-12 21:11:53 +04:00
2012-03-05 15:49:27 +04:00
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
2015-07-20 17:14:53 +03:00
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
2012-03-05 15:49:27 +04:00
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
2015-07-20 17:14:53 +03:00
	add	x8, x1, x10
2012-03-05 15:49:27 +04:00
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
2015-12-04 14:02:25 +03:00
	and	x9, x9, #~(THREAD_SIZE - 1)
	msr	sp_el0, x9
2012-03-05 15:49:27 +04:00
ret
ENDPROC(cpu_switch_to)
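	/*
	 * Note: the and/msr pair above caches the incoming task's thread_info
	 * in sp_el0 by rounding its kernel stack pointer down to a THREAD_SIZE
	 * boundary (thread_info lives at the base of the stack). A consumer
	 * such as get_thread_info can then be a single system register read;
	 * a minimal sketch, not the real definition (which lives elsewhere):
	 *
	 *	.macro	get_thread_info, rd
	 *	mrs	\rd, sp_el0
	 *	.endm
	 */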
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
2015-08-19 17:57:09 +03:00
	str	x0, [sp, #S_X0]			// returned x0
2015-06-06 00:28:03 +03:00
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
2012-03-05 15:49:27 +04:00
	and	x2, x1, #_TIF_WORK_MASK
2015-08-19 17:57:09 +03:00
	cbnz	x2, work_pending
2014-04-29 22:04:06 +04:00
	enable_step_tsk x1, x2
2015-08-19 17:57:09 +03:00
	kernel_exit 0
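	/*
	 * Note: on this fast path the returned x0 has already been written
	 * back into pt_regs, so kernel_exit restores it along with the rest of
	 * the frame. enable_step_tsk is only expected to touch MDSCR_EL1 when
	 * the task is actually single-stepping (TIF_SINGLESTEP); a simplified
	 * sketch of that idea, not the real definition:
	 *
	 *	tbz	\flgs, #TIF_SINGLESTEP, 1f	// common case: skip mdscr_el1
	 *	mrs	\tmp, mdscr_el1
	 *	orr	\tmp, \tmp, #1			// MDSCR_EL1.SS
	 *	msr	mdscr_el1, \tmp
	 * 1:
	 */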
2015-06-06 00:28:03 +03:00
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
2015-08-19 17:57:09 +03:00
	b	__sys_trace_return_skipped	// we already saved x0
2012-03-05 15:49:27 +04:00
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
2015-12-04 15:42:29 +03:00
#ifdef CONFIG_TRACE_IRQFLAGS
2016-07-14 23:48:14 +03:00
	bl	trace_hardirqs_on		// enabled while in userspace
2015-12-04 15:42:29 +03:00
#endif
2016-07-14 23:48:14 +03:00
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for single-step
	b	finish_ret_to_user
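	/*
	 * Note: do_notify_resume() handles pending signals, rescheduling and
	 * the rest of the _TIF_WORK_MASK work in C and is expected to return
	 * with interrupts disabled again; trace_hardirqs_on is called here
	 * because interrupts will effectively be enabled once we are back in
	 * userspace.
	 */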
2012-03-05 15:49:27 +04:00
/*
 * "slow" syscall return path.
 */
2012-09-10 19:11:46 +04:00
ret_to_user:
2012-03-05 15:49:27 +04:00
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
2016-07-14 23:48:14 +03:00
finish_ret_to_user:
2014-04-29 22:04:06 +04:00
	enable_step_tsk x1, x2
2015-08-19 17:57:09 +03:00
	kernel_exit 0
2012-03-05 15:49:27 +04:00
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
2012-10-05 15:31:20 +04:00
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
2012-03-05 15:49:27 +04:00
	b	ret_to_user
ENDPROC(ret_from_fork)
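	/*
	 * Note: for a kernel thread, copy_thread() is expected to have stashed
	 * the thread function in x19 and its argument in x20 (restored by
	 * cpu_switch_to above); a user task has x19 == 0 and simply falls
	 * through to ret_to_user.
	 */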
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
2014-04-29 22:04:06 +04:00
	enable_dbg_and_irq
2014-05-30 23:34:15 +04:00
	ct_user_exit 1
2012-03-05 15:49:27 +04:00
2014-04-30 13:51:29 +04:00
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
2012-03-05 15:49:27 +04:00
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
2014-09-29 14:44:01 +04:00
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
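	/*
	 * Note: scno was zero-extended from w8, so a userspace syscall(-1) or
	 * any other out-of-range number fails the unsigned b.hs check and ends
	 * up in ni_sys; valid numbers index the table at scno * 8.
	 */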
2012-03-05 15:49:27 +04:00
ni_sys:
	mov	x0, sp
2014-09-29 14:44:01 +04:00
	bl	do_ni_syscall
	b	ret_fast_syscall
2012-03-05 15:49:27 +04:00
ENDPROC(el0_svc)
	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
2014-11-28 08:26:35 +03:00
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
2014-04-30 13:51:30 +04:00
	bl	syscall_trace_enter
2014-11-28 08:26:35 +03:00
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
2012-03-05 15:49:27 +04:00
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
2014-09-29 14:44:01 +04:00
	b.hs	__ni_sys_trace
2012-03-05 15:49:27 +04:00
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
2014-09-29 14:44:01 +04:00
	blr	x16				// call sys_* routine
2012-03-05 15:49:27 +04:00
__sys_trace_return:
2014-11-28 08:26:35 +03:00
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
2014-04-30 13:51:30 +04:00
	mov	x0, sp
	bl	syscall_trace_exit
2012-03-05 15:49:27 +04:00
	b	ret_to_user
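	/*
	 * Note: if syscall_trace_enter() returns -1 the syscall itself is
	 * skipped and the value already stored in the saved x0 slot (-ENOSYS
	 * by default for a user-issued syscall(-1), or whatever the tracer
	 * wrote there) is what userspace sees as the return value.
	 */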
2014-09-29 14:44:01 +04:00
__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return
2016-07-08 19:35:50 +03:00
	.popsection				// .entry.text
2012-03-05 15:49:27 +04:00
/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
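	/*
	 * Note: rt_sigreturn needs the full saved register frame rather than
	 * ordinary syscall arguments, so the wrapper passes sp (the pt_regs
	 * on the kernel stack) as its only argument.
	 */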