sparc32: Fix truncated relocation errors when linking large kernels

Use jumps instead of branches when jumping from one section to another,
to avoid branches to addresses farther away than 22-bit offsets can
reach, which results in errors such as

arch/sparc/kernel/signal_32.o:(.fixup+0x0): relocation truncated to fit: R_SPARC_WDISP22 against `.text'

This is the same approach that was taken for sparc64 in commit
52eb053b7191 ("[SPARC64]: Fix linkage of enormous kernels.")

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202405080936.tWaJdO3P-lkp@intel.com/
Closes: https://lore.kernel.org/oe-kbuild-all/202406240441.5zaoshVX-lkp@intel.com/
Link: https://lore.kernel.org/r/20240710092341.457591-1-andreas@gaisler.com
Signed-off-by: Andreas Larsson <andreas@gaisler.com>
This commit is contained in:
Andreas Larsson 2024-07-10 11:23:41 +02:00
parent e51f125b10
commit a7ec177ebc
2 changed files with 15 additions and 6 deletions

View File

@ -95,7 +95,8 @@ __asm__ __volatile__( \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"b 2b\n\t" \
"sethi %%hi(2b), %0\n\t" \
"jmpl %0 + %%lo(2b), %%g0\n\t" \
" mov %3, %0\n\t" \
".previous\n\n\t" \
".section __ex_table,#alloc\n\t" \
@ -163,8 +164,9 @@ __asm__ __volatile__( \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"sethi %%hi(2b), %0\n\t" \
"clr %1\n\t" \
"b 2b\n\t" \
"jmpl %0 + %%lo(2b), %%g0\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
".section __ex_table,#alloc\n\t" \

View File

@ -118,9 +118,12 @@ current_pc:
mov %o7, %g3
tst %o0
be no_sun4u_here
bne 2f
mov %g4, %o7 /* Previous %o7. */
sethi %hi(no_sun4u_here), %l1
jmpl %l1 + %lo(no_sun4u_here), %g0
nop
2:
mov %o0, %l0 ! stash away romvec
mov %o0, %g7 ! put it here too
mov %o1, %l1 ! stash away debug_vec too
@ -195,7 +198,8 @@ halt_notsup:
sub %o0, %l6, %o0
call %o1
nop
ba halt_me
sethi %hi(halt_me), %o0
jmpl %o0 + %lo(halt_me), %g0
nop
not_a_sun4:
@ -431,8 +435,11 @@ leon_init:
#ifdef CONFIG_SMP
ldub [%g2 + %lo(boot_cpu_id)], %g1
cmp %g1, 0xff ! unset means first CPU
bne leon_smp_cpu_startup ! continue only with master
be 1f
sethi %hi(leon_smp_cpu_startup), %g1
jmpl %g1 + %lo(leon_smp_cpu_startup), %g0
nop
1:
#endif
/* Get CPU-ID from most significant 4-bit of ASR17 */
rd %asr17, %g1