/* $Id: trampoline.S,v 1.26 2002/02/09 19:49:30 davem Exp $
 * trampoline.S: Jump start slave processors on sparc64.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
.data
.align 8
call_method :
.asciz " call- m e t h o d "
.align 8
itlb_load :
.asciz " SUNW,i t l b - l o a d "
.align 8
dtlb_load :
.asciz " SUNW,d t l b - l o a d "
.text
.align 8
.globl sparc6 4 _ c p u _ s t a r t u p , s p a r c64 _ c p u _ s t a r t u p _ e n d
sparc64_cpu_startup :
flushw
BRANCH_ I F _ C H E E T A H _ B A S E ( g 1 ,g 5 ,c h e e t a h _ s t a r t u p )
BRANCH_ I F _ C H E E T A H _ P L U S _ O R _ F O L L O W O N ( g 1 ,g 5 ,c h e e t a h _ p l u s _ s t a r t u p )
ba,p t % x c c , s p i t f i r e _ s t a r t u p
nop
cheetah_plus_startup :
/* Preserve OBP chosen DCU and DCR register settings. */
ba,p t % x c c , c h e e t a h _ g e n e r i c _ s t a r t u p
nop
cheetah_startup :
mov D C R _ B P E | D C R _ R P E | D C R _ S I | D C R _ I F P O E | D C R _ M S , % g 1
wr % g 1 , % a s r18
sethi % u h i ( D C U _ M E | D C U _ R E | D C U _ H P E | D C U _ S P E | D C U _ S L | D C U _ W E ) , % g 5
or % g 5 , % u l o ( D C U _ M E | D C U _ R E | D C U _ H P E | D C U _ S P E | D C U _ S L | D C U _ W E ) , % g 5
sllx % g 5 , 3 2 , % g 5
or % g 5 , D C U _ D M | D C U _ I M | D C U _ D C | D C U _ I C , % g 5
stxa % g 5 , [ % g 0 ] A S I _ D C U _ C O N T R O L _ R E G
membar #S y n c
cheetah_generic_startup :
mov T S B _ E X T E N S I O N _ P , % g 3
stxa % g 0 , [ % g 3 ] A S I _ D M M U
stxa % g 0 , [ % g 3 ] A S I _ I M M U
membar #S y n c
mov T S B _ E X T E N S I O N _ S , % g 3
stxa % g 0 , [ % g 3 ] A S I _ D M M U
membar #S y n c
mov T S B _ E X T E N S I O N _ N , % g 3
stxa % g 0 , [ % g 3 ] A S I _ D M M U
stxa % g 0 , [ % g 3 ] A S I _ I M M U
membar #S y n c
/* Disable STICK_INT interrupts. */
sethi % h i ( 0 x80 0 0 0 0 0 0 ) , % g 5
sllx % g 5 , 3 2 , % g 5
wr % g 5 , % a s r25
ba,p t % x c c , s t a r t u p _ c o n t i n u e
nop
spitfire_startup :
mov ( L S U _ C O N T R O L _ I C | L S U _ C O N T R O L _ D C | L S U _ C O N T R O L _ I M | L S U _ C O N T R O L _ D M ) , % g 1
stxa % g 1 , [ % g 0 ] A S I _ L S U _ C O N T R O L
membar #S y n c
startup_continue:
	wrpr		%g0, 15, %pil

	/* Set bit 63 of %tick_cmpr so no tick-compare interrupt
	 * fires before the clock code programs it for real.
	 */
	sethi		%hi(0x80000000), %g2
	sllx		%g2, 32, %g2
	wr		%g2, 0, %tick_cmpr

	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
	 * We lock 2 consecutive entries if we are 'bigkernel'.
	 */
	mov		%o0, %l0

	/* Take the PROM entry spinlock; only one CPU may be inside
	 * the firmware at a time.  The membar must NOT sit in the
	 * branch delay slot (Spitfire-class store-buffer erratum,
	 * UltraSPARC-IIi Erratum 51).
	 */
	sethi		%hi(prom_entry_lock), %g2
1:	ldstub		[%g2 + %lo(prom_entry_lock)], %g1
	membar		#StoreLoad | #StoreStore
	brnz,pn		%g1, 1b
	 nop

	/* Switch onto the PROM's stack (saved at p1275buf + 0x10)
	 * for the duration of the firmware calls; our own %sp is
	 * preserved in %l1.
	 */
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x10], %l2
	mov		%sp, %l1
	add		%l2, -(192 + 128), %sp
	flushw

	/* Build the "call-method" argument frame for SUNW,itlb-load:
	 * 5 input arguments, 1 return value.
	 */
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]	! service name
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]	! num input args
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]	! num return values
	sethi		%hi(itlb_load), %g2
	or		%g2, %lo(itlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]	! method name
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]	! MMU ihandle
	sethi		%hi(KERNBASE), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]	! vaddr to lock
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]	! TTE data

	/* TLB entry index: 15 on Cheetah, else 63.  */
	mov		15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		63, %g2
1:
	stx		%g2, [%sp + 2047 + 128 + 0x38]

	/* Enter the firmware; its entry point is at p1275buf + 0x08,
	 * %o0 points at the argument frame.
	 */
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	/* If not 'bigkernel', skip the second ITLB entry.  */
	sethi		%hi(bigkernel), %g2
	lduw		[%g2 + %lo(bigkernel)], %g2
	cmp		%g2, 0
	be,pt		%icc, do_dtlb
	 nop

	/* Second ITLB entry: same frame, but vaddr and TTE shifted
	 * up by 4MB (0x400000), locked one index lower.
	 */
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(itlb_load), %g2
	or		%g2, %lo(itlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(KERNBASE + 0x400000), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	sethi		%hi(0x400000), %g1
	add		%g2, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]

	/* TLB entry index: 14 on Cheetah, else 62.  */
	mov		14, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		62, %g2
1:
	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0
do_dtlb:
	/* Lock the same KERNBASE mapping into the DTLB, mirroring
	 * the ITLB sequence above but with "SUNW,dtlb-load".
	 */
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]	! service name
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]	! num input args
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]	! num return values
	sethi		%hi(dtlb_load), %g2
	or		%g2, %lo(dtlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]	! method name
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]	! MMU ihandle
	sethi		%hi(KERNBASE), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]	! vaddr to lock
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]	! TTE data

	/* TLB entry index: 15 on Cheetah, else 63.  */
	mov		15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		63, %g2
1:
	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	/* If not 'bigkernel', the single DTLB entry suffices.  */
	sethi		%hi(bigkernel), %g2
	lduw		[%g2 + %lo(bigkernel)], %g2
	cmp		%g2, 0
	be,pt		%icc, do_unlock
	 nop

	/* Second DTLB entry covering KERNBASE + 4MB, one index
	 * lower, TTE data advanced by 0x400000.
	 */
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(dtlb_load), %g2
	or		%g2, %lo(dtlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(KERNBASE + 0x400000), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	sethi		%hi(0x400000), %g1
	add		%g2, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]

	/* TLB entry index: 14 on Cheetah, else 62.  */
	mov		14, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		62, %g2
1:
	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0
do_unlock:
	/* Release the PROM entry lock and restore our own stack.  */
	sethi		%hi(prom_entry_lock), %g2
	stb		%g0, [%g2 + %lo(prom_entry_lock)]
	membar		#StoreStore | #StoreLoad

	mov		%l1, %sp
	flushw

	mov		%l0, %o0

	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr		%g0, 0, %fprs

	/* XXX Buggy PROM... */
	srl		%o0, 0, %o0
	ldx		[%o0], %g6

	wr		%g0, ASI_P, %asi

	/* Clear the primary and secondary context registers.  */
	mov		PRIMARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_DMMU
	membar		#Sync
	mov		SECONDARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_DMMU
	membar		#Sync

	/* Point %sp at the top of the thread area referenced by
	 * %g6, leaving room for an initial stack frame.
	 */
	mov		1, %g5
	sllx		%g5, THREAD_SHIFT, %g5
	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add		%g6, %g5, %sp
	mov		0, %fp

	wrpr		%g0, 0, %wstate
	wrpr		%g0, 0, %tl

	/* Setup the trap globals, then we can resurface. */
	rdpr		%pstate, %o1
	mov		%g6, %o2
	wrpr		%o1, PSTATE_AG, %pstate
	sethi		%hi(sparc64_ttable_tl0), %g5
	wrpr		%g5, %tba
	mov		%o2, %g6

	wrpr		%o1, PSTATE_MG, %pstate
#define KERN_HIGHBITS		((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
#define KERN_LOWBITS		(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)

	mov		TSB_REG, %g1
	stxa		%g0, [%g1] ASI_DMMU
	membar		#Sync
	mov		TLB_SFSR, %g1
	sethi		%uhi(KERN_HIGHBITS), %g2
	or		%g2, %ulo(KERN_HIGHBITS), %g2
	sllx		%g2, 32, %g2
	or		%g2, KERN_LOWBITS, %g2

	/* Select the VPTE base for this CPU type.  */
	BRANCH_IF_ANY_CHEETAH(g3,g7,9f)

	ba,pt		%xcc, 1f
	 nop

9:
	sethi		%uhi(VPTE_BASE_CHEETAH), %g3
	or		%g3, %ulo(VPTE_BASE_CHEETAH), %g3
	ba,pt		%xcc, 2f
	 sllx		%g3, 32, %g3

1:
	sethi		%uhi(VPTE_BASE_SPITFIRE), %g3
	or		%g3, %ulo(VPTE_BASE_SPITFIRE), %g3
	sllx		%g3, 32, %g3

2:
	clr		%g7
#undef KERN_HIGHBITS
#undef KERN_LOWBITS

	wrpr		%o1, 0x0, %pstate
	ldx		[%g6 + TI_TASK], %g4

	wrpr		%g0, 0, %wstate

	call		init_irqwork_curcpu
	 nop

	/* Start using proper page size encodings in ctx register.  */
	sethi		%hi(sparc64_kern_pri_context), %g3
	ldx		[%g3 + %lo(sparc64_kern_pri_context)], %g2
	mov		PRIMARY_CONTEXT, %g1
	stxa		%g2, [%g1] ASI_DMMU
	membar		#Sync

	/* Re-enable interrupts, hand the trap table to the PROM,
	 * then enter the generic SMP callin path.  cpu_idle()
	 * should never return; cpu_panic() catches it if it does.
	 */
	rdpr		%pstate, %o1
	or		%o1, PSTATE_IE, %o1
	wrpr		%o1, 0, %pstate

	call		prom_set_trap_table
	 sethi		%hi(sparc64_ttable_tl0), %o0

	call		smp_callin
	 nop
	call		cpu_idle
	 mov		0, %o0
	call		cpu_panic
	 nop

1:	b,a,pt		%xcc, 1b

	.align		8
sparc64_cpu_startup_end: