/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Tony Xie <tony.xie@rock-chips.com>
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/page.h>
.data
/*
 * This code will be copied from DDR to SRAM for system resume,
 * so it lives in the ".data" section.
 */
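/*
 * A minimal sketch (an illustration, not the actual kernel code) of
 * how the suspend path might stage this blob into SRAM; "sram_base"
 * is an assumed ioremap()ed SRAM mapping:
 *
 *	extern void rockchip_slp_cpu_resume(void);
 *	extern unsigned long rk3288_bootram_sz;
 *
 *	memcpy(sram_base, rockchip_slp_cpu_resume, rk3288_bootram_sz);
 *
 * The SoC's wake-up vector is then pointed at the SRAM copy.
 */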
.align 2
ENTRY(rockchip_slp_cpu_resume)
	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1	@ set svc, irqs off
	mrc	p15, 0, r1, c0, c0, 5	@ read MPIDR
	and	r1, r1, #0xf		@ extract CPU id (Aff0)
	cmp	r1, #0
	/* only cpu0 continues to run; the other cpus halt here */
	beq	cpu0run
secondary_loop:
	wfe
	b	secondary_loop
cpu0run:
	ldr	r3, rkpm_bootdata_l2ctlr_f
	cmp	r3, #0			@ skip if no saved L2CTLR
	beq	sp_set
	ldr	r3, rkpm_bootdata_l2ctlr
	mcr	p15, 1, r3, c9, c0, 2	@ restore L2CTLR
sp_set:
	ldr	sp, rkpm_bootdata_cpusp
	ldr	r1, rkpm_bootdata_cpu_code
	bx	r1
ENDPROC(rockchip_slp_cpu_resume)
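/*
 * This stub runs from SRAM with the MMU off, so it may only reference
 * the rkpm_bootdata_* words copied alongside it below. The final
 * "bx r1" hands control to the physical address stored in
 * rkpm_bootdata_cpu_code, typically the kernel's cpu_resume entry.
 */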
/* Parameters filled in by the kernel */
/* Flag for whether to restore L2CTLR on resume */
	.globl rkpm_bootdata_l2ctlr_f
rkpm_bootdata_l2ctlr_f:
	.long 0
/* Saved L2CTLR to restore on resume */
	.globl rkpm_bootdata_l2ctlr
rkpm_bootdata_l2ctlr:
	.long 0
/* CPU resume SP addr */
	.globl rkpm_bootdata_cpusp
rkpm_bootdata_cpusp:
	.long 0
/* CPU resume function (physical address) */
	.globl rkpm_bootdata_cpu_code
rkpm_bootdata_cpu_code:
	.long 0
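/*
 * A hedged sketch of how the kernel side might fill these words before
 * suspend; "bootram_phys" (physical SRAM base) and "read_l2ctlr()" are
 * assumptions for illustration, while __pa_symbol() and cpu_resume are
 * the usual kernel facilities:
 *
 *	rkpm_bootdata_l2ctlr_f = 1;
 *	rkpm_bootdata_l2ctlr   = read_l2ctlr();
 *	rkpm_bootdata_cpusp    = bootram_phys + SZ_4K - 8;
 *	rkpm_bootdata_cpu_code = __pa_symbol(cpu_resume);
 */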
/* Size in bytes of the resume blob above, for the kernel to copy */
ENTRY(rk3288_bootram_sz)
	.word	. - rockchip_slp_cpu_resume