From 05642cf7289c5562e5939d2ee8a0529d310010b8 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Tue, 7 Jan 2020 09:16:40 +0000
Subject: [PATCH] powerpc/32: don't restore r0, r6-r8 on exception entry path
 after trace_hardirqs_off()

Since commit b86fb88855ea ("powerpc/32: implement fast entry for
syscalls on non BOOKE") and commit 1a4b739bbb4f ("powerpc/32:
implement fast entry for syscalls on BOOKE"), syscalls don't use the
exception entry path anymore. It is therefore pointless to restore
r0 and r6-r8 after calling trace_hardirqs_off().

While at it, drop the '2:' label which is unused and misleading.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/d2c6dc65d27e83964eb05f16a126161ab6455eea.1578388585.git.christophe.leroy@c-s.fr
---
 arch/powerpc/kernel/entry_32.S | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ad000cbb5252..afab378c3d28 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -246,9 +246,8 @@ reenable_mmu:
 	 * r3 can be different from GPR3(r1) at this point, r9 and r11
 	 * contains the old MSR and handler address respectively,
 	 * r4 & r5 can contain page fault arguments that need to be passed
-	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
-	 * they aren't useful past this point (aren't syscall arguments),
-	 * the rest is restored from the exception frame.
+	 * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
+	 * clobbered as they aren't useful past this point.
 	 */
 
 	stwu	r1,-32(r1)
@@ -262,16 +261,12 @@ reenable_mmu:
 	 * lockdep
 	 */
1:	bl	trace_hardirqs_off
-2:	lwz	r5,24(r1)
+	lwz	r5,24(r1)
 	lwz	r4,20(r1)
 	lwz	r3,16(r1)
 	lwz	r11,12(r1)
 	lwz	r9,8(r1)
 	addi	r1,r1,32
-	lwz	r0,GPR0(r1)
-	lwz	r6,GPR6(r1)
-	lwz	r7,GPR7(r1)
-	lwz	r8,GPR8(r1)
 	mtctr	r11
 	mtlr	r9
 	bctr			/* jump to handler */