2012-04-20 14:45:54 +01:00
#
# arch/arm64/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995-2001 by Russell King
# Linker flags for the final vmlinux link.
LDFLAGS_vmlinux	:= -p --no-undefined -X
# Deliberately recursive (=): TEXT_OFFSET is assigned later in this file.
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS		:= -9
2016-01-26 09:13:44 +01:00
i f n e q ( $( CONFIG_RELOCATABLE ) , )
arm64: relocatable: suppress R_AARCH64_ABS64 relocations in vmlinux
The linker routines that we rely on to produce a relocatable PIE binary
treat it as a shared ELF object in some ways, i.e., it emits symbol based
R_AARCH64_ABS64 relocations into the final binary since doing so would be
appropriate when linking a shared library that is subject to symbol
preemption. (This means that an executable can override certain symbols
that are exported by a shared library it is linked with, and that the
shared library *must* update all its internal references as well, and point
them to the version provided by the executable.)
Symbol preemption does not occur for OS hosted PIE executables, let alone
for vmlinux, and so we would prefer to get rid of these symbol based
relocations. This would allow us to simplify the relocation routines, and
to strip the .dynsym, .dynstr and .hash sections from the binary. (Note
that these are tiny, and are placed in the .init segment, but they clutter
up the vmlinux binary.)
Note that these R_AARCH64_ABS64 relocations are only emitted for absolute
references to symbols defined in the linker script, all other relocatable
quantities are covered by anonymous R_AARCH64_RELATIVE relocations that
simply list the offsets to all 64-bit values in the binary that need to be
fixed up based on the offset between the link time and run time addresses.
Fortunately, GNU ld has a -Bsymbolic option, which is intended for shared
libraries to allow them to ignore symbol preemption, and unconditionally
bind all internal symbol references to its own definitions. So set it for
our PIE binary as well, and get rid of the associated sections and the
relocation code that processes them.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
[will: fixed conflict with __dynsym_offset linker script entry]
Signed-off-by: Will Deacon <will.deacon@arm.com>
2016-07-24 14:00:13 +02:00
LDFLAGS_vmlinux += -pie -Bsymbolic
2016-01-26 09:13:44 +01:00
e n d i f
2016-08-22 11:58:36 +01:00
# Work around Cortex-A53 erratum 843419 via the linker when supported;
# warn (rather than fail) if this ld cannot apply the fix.
ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
ifeq ($(call ld-option, --fix-cortex-a53-843419),)
$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
else
LDFLAGS_vmlinux	+= --fix-cortex-a53-843419
endif
endif
2012-04-20 14:45:54 +01:00
KBUILD_DEFCONFIG := defconfig
2015-02-03 16:14:13 +00:00
# Check for binutils support for specific extensions
lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)

# If LSE atomics were configured but the assembler cannot emit them,
# warn the user (the lseinstr define stays empty in that case).
ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
  ifeq ($(lseinstr),)
$(warning LSE atomics not supported by binutils)
  endif
endif

KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
2016-01-21 22:56:26 -05:00
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
2016-01-25 16:52:16 -07:00
KBUILD_CFLAGS += $( call cc-option, -mpc-relative-literal-loads)
2015-02-03 16:14:13 +00:00
KBUILD_AFLAGS += $( lseinstr)
2013-10-11 14:52:08 +01:00
# Select big- or little-endian code generation, linking and UTS machine name.
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS	+= -mbig-endian
AS		+= -EB
LD		+= -EB
UTS_MACHINE	:= aarch64_be
else
KBUILD_CPPFLAGS	+= -mlittle-endian
AS		+= -EL
LD		+= -EL
UTS_MACHINE	:= aarch64
endif
2012-04-20 14:45:54 +01:00
CHECKFLAGS += -D__aarch64__
2015-11-24 12:37:35 +01:00
# Build modules with the large code model when configured to do so.
ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
KBUILD_CFLAGS_MODULE	+= -mcmodel=large
endif
2015-11-24 12:37:35 +01:00
# Link modules against the PLT linker script when module PLTs are enabled.
ifeq ($(CONFIG_ARM64_MODULE_PLTS), y)
KBUILD_LDFLAGS_MODULE	+= -T $(srctree)/arch/arm64/kernel/module.lds
endif
2012-04-20 14:45:54 +01:00
# Default value
# First object linked into the kernel image.
head-y := arch/arm64/kernel/head.o
# The byte offset of the kernel image in RAM from the start of RAM.
ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
# Pick a random, page-aligned offset below 2 MiB for fuzz-testing loaders.
TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
		 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
		 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
else
TEXT_OFFSET := 0x00080000
endif
2012-04-20 14:45:54 +01:00
2015-10-12 18:52:58 +03:00
# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
# in 32-bit arithmetic (the low 32 bits are all zero, appended by printf).
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
			(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
			+ (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \
			- (1 << (64 - 32 - 3)) )) )
2012-04-20 14:45:54 +01:00
export	TEXT_OFFSET GZFLAGS

# Architecture directories pulled into the top-level build.
core-y		+= arch/arm64/kernel/ arch/arm64/mm/
core-$(CONFIG_NET) += arch/arm64/net/
core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y		:= arch/arm64/lib/ $(libs-y)
core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
2012-04-20 14:45:54 +01:00
# Default target when executing plain make
KBUILD_IMAGE	:= Image.gz
KBUILD_DTBS	:= dtbs

all:	$(KBUILD_IMAGE) $(KBUILD_DTBS)

boot := arch/arm64/boot
2015-07-16 21:26:16 +01:00
Image: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

# Image.gz etc. are produced from Image by the boot Makefile.
Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

zinstall install:
	$(Q)$(MAKE) $(build)=$(boot) $@

%.dtb: scripts
	$(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@

PHONY += dtbs dtbs_install

dtbs: prepare scripts
	$(Q)$(MAKE) $(build)=$(boot)/dts

dtbs_install:
	$(Q)$(MAKE) $(dtbinst)=$(boot)/dts

PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@

# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
	$(Q)$(MAKE) $(clean)=$(boot)
	$(Q)$(MAKE) $(clean)=$(boot)/dts
2012-04-20 14:45:54 +01:00
arm64: fix vdso-offsets.h dependency
arm64/kernel/{vdso,signal}.c include vdso-offsets.h, as well as any
file that includes asm/vdso.h. Therefore, vdso-offsets.h must be
generated before these files are compiled.
The current rules in arm64/kernel/Makefile do not actually enforce
this, because even though $(obj)/vdso is listed as a prerequisite for
vdso-offsets.h, this does not result in the intended effect of
building the vdso subdirectory (before all the other objects). As a
consequence, depending on the order in which the rules are followed,
vdso-offsets.h is updated or not before arm64/kernel/{vdso,signal}.o
are built. The current rules also impose an unnecessary dependency on
vdso-offsets.h for all arm64/kernel/*.o, resulting in unnecessary
rebuilds. This is made obvious when using make -j:
touch arch/arm64/kernel/vdso/gettimeofday.S && make -j$NCPUS arch/arm64/kernel
will sometimes result in none of arm64/kernel/*.o being
rebuilt, sometimes all of them, or even just some of them.
It is quite difficult to ensure that a header is generated before it
is used with recursive Makefiles by using normal rules. Instead,
arch-specific generated headers are normally built in the archprepare
recipe in the arch Makefile (see for instance arch/ia64/Makefile).
Unfortunately, asm-offsets.h is included in gettimeofday.S, and must
therefore be generated before vdso-offsets.h, which is not the case if
archprepare is used. For this reason, a rule run after archprepare has
to be used.
This commit adds rules in arm64/Makefile to build vdso-offsets.h
during the prepare step, ensuring that vdso-offsets.h is generated
before building anything. It also removes the now-unnecessary
dependencies on vdso-offsets.h in arm64/kernel/Makefile. Finally, it
removes the duplication of asm-offsets.h between arm64/kernel/vdso/
and include/generated/ and makes include/generated/vdso-offsets.h a
target in arm64/kernel/vdso/Makefile.
Cc: Will Deacon <will.deacon@arm.com>
Cc: Michal Marek <mmarek@suse.com>
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2016-05-12 17:39:15 +01:00
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
2012-04-20 14:45:54 +01:00
# Text shown by "make help" for the arch-specific targets.
define archhelp
  echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
  echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
  echo  '* dtbs          - Build device tree blobs for enabled boards'
  echo  '  dtbs_install  - Install dtbs to $(INSTALL_DTBS_PATH)'
  echo  '  install       - Install uncompressed kernel'
  echo  '  zinstall      - Install compressed kernel'
  echo  '                  Install using (your) ~/bin/installkernel or'
  echo  '                  (distribution) /sbin/installkernel or'
  echo  '                  install to $$(INSTALL_PATH) and run lilo'
endef