xref: /OK3568_Linux_fs/kernel/arch/arm64/Makefile (revision 4882a59341e53eb6f0b4789bf948001014eff981)
#
# arch/arm64/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995-2001 by Russell King

# Linker flags for the final vmlinux link.
LDFLAGS_vmlinux	:=--no-undefined -X

ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
# for relative relocs, since this leads to better Image compression
# with the relocation offsets always being zero.
LDFLAGS_vmlinux		+= -shared -Bsymbolic -z notext \
			$(call ld-option, --no-apply-dynamic-relocs)
endif

ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
  ifeq ($(call ld-option, --fix-cortex-a53-843419),)
$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
  else
LDFLAGS_vmlinux	+= --fix-cortex-a53-843419
  endif
endif
30*4882a593Smuzhiyun
ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
  ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
$(warning LSE atomics not supported by binutils)
  endif
endif

# Probe whether the compiler accepts the "K" inline-asm constraint with a
# 32-bit logical-immediate operand; define CONFIG_CC_HAS_K_CONSTRAINT=1 if so.
cc_has_k_constraint := $(call try-run,echo				\
	'int main(void) {						\
		asm volatile("and w0, w0, %w0" :: "K" (4294967295));	\
		return 0;						\
	}' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)

ifeq ($(CONFIG_BROKEN_GAS_INST),y)
$(warning Detected assembler with broken .inst; disassembly will be unreliable)
endif
46*4882a593Smuzhiyun
# Baseline compiler/assembler flags for all arm64 kernel objects.
KBUILD_CFLAGS	+= -mgeneral-regs-only	\
		   $(compat_vdso) $(cc_has_k_constraint)
KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
KBUILD_AFLAGS	+= $(compat_vdso)

KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS	+= $(call cc-option,-mabi=lp64)

# Avoid generating .eh_frame* sections.
KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables -fno-unwind-tables
KBUILD_AFLAGS	+= -fno-asynchronous-unwind-tables -fno-unwind-tables
58*4882a593Smuzhiyun
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
# The per-task canary offset must come from asm-offsets.h, which is only
# generated by prepare0 — hence the extra prepare hook.
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
	$(eval KBUILD_CFLAGS += -mstack-protector-guard=sysreg		  \
				-mstack-protector-guard-reg=sp_el0	  \
				-mstack-protector-guard-offset=$(shell	  \
			awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
					include/generated/asm-offsets.h))
endif
68*4882a593Smuzhiyun
# Ensure that if the compiler supports branch protection we default it
# off, this will be overridden if we are using branch protection.
branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)

ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
# We enable additional protection for leaf functions as there is some
# narrow potential for ROP protection benefits and no substantial
# performance impact has been observed.
ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=pac-ret+leaf+bti
else
branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
endif
# -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the
# compiler to generate them and consequently to break the single image contract
# we pass it only to the assembler. This option is utilized only in case of non
# integrated assemblers.
ifeq ($(CONFIG_AS_HAS_PAC), y)
asm-arch := armv8.3-a
endif
endif

KBUILD_CFLAGS += $(branch-prot-flags-y)
93*4882a593Smuzhiyun
ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
# make sure to pass the newest target architecture to -march.
asm-arch := armv8.4-a
endif

ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
# make sure to pass the newest target architecture to -march.
asm-arch := armv8.5-a
endif

# If a target architecture was selected above, pass it to the assembler
# and expose it to C code via the ARM64_ASM_ARCH macro.
ifdef asm-arch
KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
		   -DARM64_ASM_ARCH='"$(asm-arch)"'
endif
108*4882a593Smuzhiyun
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
# Reserve x18 for the shadow call stack pointer.
KBUILD_CFLAGS	+= -ffixed-x18
endif

ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS	+= -mbig-endian
CHECKFLAGS	+= -D__AARCH64EB__
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
KBUILD_LDFLAGS	+= -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
UTS_MACHINE	:= aarch64_be
else
KBUILD_CPPFLAGS	+= -mlittle-endian
CHECKFLAGS	+= -D__AARCH64EL__
# Same as above, prefer ELF but fall back to linux target if needed.
KBUILD_LDFLAGS	+= -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
UTS_MACHINE	:= aarch64
endif

ifeq ($(CONFIG_LD_IS_LLD), y)
KBUILD_LDFLAGS	+= -z norelro
endif

CHECKFLAGS	+= -D__aarch64__

ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
  KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
  CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
138*4882a593Smuzhiyun
# Default value
head-y		:= arch/arm64/kernel/head.o

# Shadow scale shift: 4 for software tag-based KASAN, 3 for generic KASAN.
ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4
else ifeq ($(CONFIG_KASAN_GENERIC), y)
KASAN_SHADOW_SCALE_SHIFT := 3
endif

KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)

core-y		+= arch/arm64/
libs-y		:= arch/arm64/lib/ $(libs-y)
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
155*4882a593Smuzhiyun
# Default target when executing plain make
boot		:= arch/arm64/boot
KBUILD_IMAGE	:= $(boot)/Image.gz

# Don't compile Image in mixed build with "all" target
ifndef KBUILD_MIXED_TREE
all:	Image.gz
endif


Image: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

zinstall install:
	$(Q)$(MAKE) $(build)=$(boot) $@

PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
	$(if $(CONFIG_COMPAT_VDSO), \
		$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)

# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
	$(Q)$(MAKE) $(clean)=$(boot)
184*4882a593Smuzhiyun
ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
	$(if $(CONFIG_COMPAT_VDSO),$(Q)$(MAKE) \
		$(build)=arch/arm64/kernel/vdso32  \
		include/generated/vdso32-offsets.h)
endif
199*4882a593Smuzhiyun
# Text printed by the top-level "make help" for arm64-specific targets.
define archhelp
  echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
  echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
  echo  '  install       - Install uncompressed kernel'
  echo  '  zinstall      - Install compressed kernel'
  echo  '                  Install using (your) ~/bin/installkernel or'
  echo  '                  (distribution) /sbin/installkernel or'
  echo  '                  install to $$(INSTALL_PATH) and run lilo'
endef
209*4882a593Smuzhiyun
MAKE_MODULES ?= y

# Rockchip boot image: build the matching dtb and Image.lz4 (plus modules
# for in-tree builds when CONFIG_MODULES and MAKE_MODULES are both "y"),
# then pack them with scripts/mkimg.
%.img:
ifeq ("$(CONFIG_MODULES)$(MAKE_MODULES)$(srctree)","yy$(objtree)")
	$(Q)$(MAKE) rockchip/$*.dtb Image.lz4 modules
else
	$(Q)$(MAKE) rockchip/$*.dtb Image.lz4
endif
	$(Q)$(srctree)/scripts/mkimg --dtb $*.dtb

CLEAN_DIRS += out
CLEAN_FILES += boot.img kernel.img resource.img zboot.img