/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif
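
/*
 * TCR_EL1.TG0 and TG1 select the translation granule for TTBR0 and TTBR1
 * walks independently; the flags above set both to the kernel's configured
 * page size.
 */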
#define TCR_SMP_FLAGS	TCR_SHARED
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
#define MAIR(attr, mt)	((attr) << ((mt) * 8))
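
/*
 * MAIR() places an 8-bit attribute encoding in the MAIR_EL1 byte selected
 * by the memory type index: MAIR(0xff, MT_NORMAL), for example, puts 0xff
 * (Normal memory, inner/outer write-back) in byte MT_NORMAL, so PTEs with
 * AttrIndx == MT_NORMAL resolve to that attribute.
 */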
/*
 *	cpu_do_idle()
 *
 *	Idle the processor (wait for interrupt).
 */
ENTRY(cpu_do_idle)
	dsb	sy				// WFI may enter a low-power mode
	wfi
	ret
ENDPROC(cpu_do_idle)

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 */
ENTRY(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, cpacr_el1
	mrs	x6, tcr_el1
	mrs	x7, vbar_el1
	mrs	x8, mdscr_el1
	mrs	x9, oslsr_el1
	mrs	x10, sctlr_el1
	mrs	x11, tpidr_el1
	mrs	x12, sp_el0
	stp	x2, x3, [x0]
	stp	x4, xzr, [x0, #16]
	stp	x5, x6, [x0, #32]
	stp	x7, x8, [x0, #48]
	stp	x9, x10, [x0, #64]
	stp	x11, x12, [x0, #80]
	ret
ENDPROC(cpu_do_suspend)
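
/*
 * The 96-byte save layout above is a contract with cpu_do_resume below,
 * which reloads the same offsets pairwise (e.g. cpacr_el1/tcr_el1 from
 * [x0, #32]); any register added here must also be restored there.
 */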

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
	.pushsection ".idmap.text", "ax"
ENTRY(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x5, tcr_el1
	bfi	x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9

	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
	disable_daif
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
	msr	tpidr_el1, x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	isb
	ret
ENDPROC(cpu_do_resume)
	.popsection
#endif

/*
 *	cpu_do_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys.
 *
 *	- pgd_phys - physical address of new TTB
 */
ENTRY(cpu_do_switch_mm)
	mrs	x2, ttbr1_el1
	mmid	x1, x1				// get mm->context.id
	bfi	x2, x1, #48, #16		// set the ASID
	msr	ttbr1_el1, x2			// in TTBR1 (since TCR.A1 is set)
	isb
	msr	ttbr0_el1, x0			// now update TTBR0
	isb
	post_ttbr_update_workaround
	ret
ENDPROC(cpu_do_switch_mm)
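
/*
 * Ordering note: with TCR_EL1.A1 set, the ASID is taken from TTBR1_EL1,
 * so the sequence above installs the new ASID first, synchronises with an
 * ISB, and only then switches TTBR0_EL1 to the new user tables. This
 * avoids a window in which walks of the old tables could allocate TLB
 * entries tagged with the new ASID.
 */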
	.pushsection ".idmap.text", "ax"

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
ENTRY(idmap_cpu_replace_ttbr1)
	save_and_disable_daif flags=x2
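
	/*
	 * If the swapper page tables were modified without TLB maintenance,
	 * the architecture permits conflicting TLB entries for the same VA,
	 * which may raise TLB conflict aborts. We cannot break-before-make
	 * the live kernel mapping, so we run from the idmap instead: point
	 * TTBR1_EL1 at empty_zero_page (which contains no valid entries),
	 * invalidate the TLBs, and only then install the new, fully staged
	 * tables passed in by the caller.
	 */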
	adrp	x1, empty_zero_page
	msr	ttbr1_el1, x1
	isb

	tlbi	vmalle1
	dsb	nsh
	isb

	msr	ttbr1_el1, x0
	isb

	restore_daif x2

	ret
ENDPROC(idmap_cpu_replace_ttbr1)
	.popsection

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on. Return in x0 the
 *	value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "ax"
ENTRY(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	mov	x0, #3 << 20
	msr	cpacr_el1, x0			// Enable FP/ASIMD
	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x0			// access to the DCC from EL0
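	/*
	 * The boot protocol leaves PSTATE.D set because the debug registers
	 * are UNKNOWN out of reset and could raise spurious debug exceptions.
	 * MDSCR_EL1 has just been reset above; once the ISB below completes
	 * that write, it is safe to unmask debug exceptions here, in one
	 * place for every CPU, long before the scheduler is up.
	 */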
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	/*
	 * Memory region attributes for LPAE:
	 *
	 *   n = AttrIndx[2:0]
	 *			n	MAIR
	 *   DEVICE_nGnRnE	000	00000000
	 *   DEVICE_nGnRE	001	00000100
	 *   DEVICE_GRE		010	00001100
	 *   NORMAL_NC		011	01000100
	 *   NORMAL		100	11111111
	 *   NORMAL_WT		101	10111011
	 */
	ldr	x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
		     MAIR(0x04, MT_DEVICE_nGnRE) | \
		     MAIR(0x0c, MT_DEVICE_GRE) | \
		     MAIR(0x44, MT_NORMAL_NC) | \
		     MAIR(0xff, MT_NORMAL) | \
		     MAIR(0xbb, MT_NORMAL_WT)
	msr	mair_el1, x5
	/*
	 * Prepare SCTLR
	 */
	adr	x5, crval
	ldp	w5, w6, [x5]
	mrs	x0, sctlr_el1
	bic	x0, x0, x5			// clear bits
	orr	x0, x0, x6			// set bits
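	/*
	 * crval (at the end of this file) holds a clear mask and a set mask,
	 * so x0 becomes (sctlr_el1 & ~clear) | set: every bit we care about
	 * gets a known value, while EE/E0E, configured in el2_setup, pass
	 * through both masks untouched.
	 */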
	/*
	 * Set/prepare TCR and TTBR. TCR_TxSZ(VA_BITS) sizes the address
	 * range for both user and kernel.
	 */
	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
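	/*
	 * When RAM sits at a high physical offset, the idmap may need a
	 * larger input address range than the kernel VA configuration
	 * provides; tcr_set_idmap_t0sz widens T0SZ for the identity mapping
	 * only, and only when actually needed.
	 */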
	tcr_set_idmap_t0sz	x10, x9

	/*
	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
	 * TCR_EL1.
	 */
	mrs	x9, ID_AA64MMFR0_EL1
	bfi	x10, x9, #32, #3
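	/*
	 * The bfi above copies the low three bits of x9 (the PARange field,
	 * ID_AA64MMFR0_EL1[3:0]) into x10 bits [34:32], i.e. TCR_EL1.IPS.
	 */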
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Hardware update of the Access and Dirty bits.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, #0xf
	cbz	x9, 2f
	cmp	x9, #2
	b.lt	1f
	orr	x10, x10, #TCR_HD		// hardware Dirty flag update
1:	orr	x10, x10, #TCR_HA		// hardware Access flag update
2:
#endif	/* CONFIG_ARM64_HW_AFDBM */
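	/*
	 * ID_AA64MMFR1_EL1.HAFDBS (bits [3:0]) reads 0 when hardware updates
	 * are unsupported, 1 for Access flag only, and 2 for Access and
	 * Dirty: the cbz above skips both orr instructions, and the b.lt
	 * skips only TCR_HD.
	 */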
	msr	tcr_el1, x10
	ret					// return to head.S
ENDPROC(__cpu_setup)

	/*
	 * We set the desired value explicitly, including those of the
	 * reserved bits. The values of bits EE & E0E were set early in
	 * el2_setup, which are left untouched below.
	 *
	 *                 n n            T
	 *       U E      WT T UD     US IHBS
	 *       CE0      XWHW CZ     ME TEEA S
	 * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
	 * 0011 0... 1101 ..0. ..0. 10.. .... ....	< hardware reserved
	 * .... .1.. .... 01.1 11.1 ..01 0.01 1101	< software settings
	 */
	.type	crval, #object
crval:
	.word	0xfcffffff			// clear
	.word	0x34d5d91d			// set

	.popsection