xref: /OK3568_Linux_fs/kernel/arch/Kconfig (revision 4882a59341e53eb6f0b4789bf948001014eff981)
# SPDX-License-Identifier: GPL-2.0
#
# General architecture dependent options
#

#
# Note: arch/$(SRCARCH)/Kconfig needs to be included first so that it can
# override the default values in this file.
#
source "arch/$(SRCARCH)/Kconfig"

menu "General architecture-dependent options"

config CRASH_CORE
	bool

config KEXEC_CORE
	select CRASH_CORE
	bool

config KEXEC_ELF
	bool

config HAVE_IMA_KEXEC
	bool

config SET_FS
	bool

config HOTPLUG_SMT
	bool

config GENERIC_ENTRY
	bool

config OPROFILE
	tristate "OProfile system profiling"
	depends on PROFILING
	depends on HAVE_OPROFILE
	select RING_BUFFER
	select RING_BUFFER_ALLOW_SWAP
	help
	  OProfile is a profiling system capable of profiling the
	  whole system, including the kernel, kernel modules, libraries,
	  and applications.

	  If unsure, say N.

config OPROFILE_EVENT_MULTIPLEX
	bool "OProfile multiplexing support (EXPERIMENTAL)"
	default n
	depends on OPROFILE && X86
	help
	  The number of hardware counters is limited. The multiplexing
	  feature enables OProfile to gather more events than the hardware
	  provides counters for. This is realized by switching between
	  events at a user-specified time interval.

	  If unsure, say N.

config HAVE_OPROFILE
	bool

config OPROFILE_NMI_TIMER
	def_bool y
	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !PPC64

config KPROBES
	bool "Kprobes"
	depends on MODULES
	depends on HAVE_KPROBES
	select KALLSYMS
	help
	  Kprobes allows you to trap at almost any kernel address and
	  execute a callback function.  register_kprobe() establishes
	  a probepoint and specifies the callback.  Kprobes is useful
	  for kernel debugging, non-intrusive instrumentation and testing.
	  If in doubt, say "N".

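#
# A minimal sketch of the register_kprobe() usage described above (module
# context assumed; the probed symbol name is illustrative):
#
#	#include <linux/module.h>
#	#include <linux/kprobes.h>
#
#	static int pre(struct kprobe *p, struct pt_regs *regs)
#	{
#		pr_info("hit %s\n", p->symbol_name);
#		return 0;
#	}
#
#	static struct kprobe kp = {
#		.symbol_name = "kernel_clone",	/* illustrative target */
#		.pre_handler = pre,
#	};
#
#	static int __init kp_init(void) { return register_kprobe(&kp); }
#	static void __exit kp_exit(void) { unregister_kprobe(&kp); }
#	module_init(kp_init);
#	module_exit(kp_exit);
#	MODULE_LICENSE("GPL");
#
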
config JUMP_LABEL
	bool "Optimize very unlikely/likely branches"
	depends on HAVE_ARCH_JUMP_LABEL
	depends on CC_HAS_ASM_GOTO
	help
	 This option enables a transparent branch optimization that
	 makes certain almost-always-true or almost-always-false branch
	 conditions even cheaper to execute within the kernel.

	 Certain performance-sensitive kernel code, such as trace points,
	 scheduler functionality, networking code and KVM have such
	 branches and include support for this optimization technique.

	 If it is detected that the compiler has support for "asm goto",
	 the kernel will compile such branches with just a nop
	 instruction. When the condition flag is toggled to true, the
	 nop will be converted to a jump instruction to execute the
	 conditional block of instructions.

	 This technique lowers overhead and stress on the branch prediction
	 of the processor and generally makes the kernel faster. Updating
	 the condition is slower, but such updates are rare.

	 ( On 32-bit x86, the necessary options added to the compiler
	   flags may increase the size of the kernel slightly. )

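#
# A minimal sketch of the static-key interface built on this option (the
# feature name and the rare-path helper are illustrative):
#
#	#include <linux/jump_label.h>
#
#	static DEFINE_STATIC_KEY_FALSE(my_feature);
#
#	void hot_path(void)
#	{
#		if (static_branch_unlikely(&my_feature))
#			do_rare_thing();	/* compiled as a patched nop/jmp */
#	}
#
#	/* slow path, flips the branch: static_branch_enable(&my_feature); */
#
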
config STATIC_KEYS_SELFTEST
	bool "Static key selftest"
	depends on JUMP_LABEL
	help
	  Boot time self-test of the branch patching code.

config STATIC_CALL_SELFTEST
	bool "Static call selftest"
	depends on HAVE_STATIC_CALL
	help
	  Boot time self-test of the call patching code.

config OPTPROBES
	def_bool y
	depends on KPROBES && HAVE_OPTPROBES
	select TASKS_RCU if PREEMPTION

config KPROBES_ON_FTRACE
	def_bool y
	depends on KPROBES && HAVE_KPROBES_ON_FTRACE
	depends on DYNAMIC_FTRACE_WITH_REGS
	help
	 If the function tracer is enabled and the arch supports full
	 passing of pt_regs to function tracing, then kprobes can
	 optimize on top of function tracing.

config UPROBES
	def_bool n
	depends on ARCH_SUPPORTS_UPROBES
	help
	  Uprobes is the user-space counterpart to kprobes: they
	  enable instrumentation applications (such as 'perf probe')
	  to establish unintrusive probes in user-space binaries and
	  libraries, by executing handler functions when the probes
	  are hit by user-space applications.

	  ( These probes come in the form of single-byte breakpoints,
	    managed by the kernel and kept transparent to the probed
	    application. )

config HAVE_64BIT_ALIGNED_ACCESS
	def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
	help
	  Some architectures require 64 bit accesses to be 64 bit
	  aligned, which also requires structs containing 64 bit values
	  to be 64 bit aligned too. This includes some 32 bit
	  architectures which can do 64 bit accesses, as well as 64 bit
	  architectures without unaligned access.

	  This symbol should be selected by an architecture if 64 bit
	  accesses are required to be 64 bit aligned in this way even
	  though it is not a 64 bit architecture.

	  See Documentation/core-api/unaligned-memory-access.rst for
	  more information on the topic of unaligned memory accesses.

config HAVE_EFFICIENT_UNALIGNED_ACCESS
	bool
	help
	  Some architectures are unable to perform unaligned accesses
	  without the use of get_unaligned/put_unaligned. Others are
	  unable to perform such accesses efficiently (e.g. trap on
	  unaligned access and require fixing it up in the exception
	  handler.)

	  This symbol should be selected by an architecture if it can
	  perform unaligned accesses efficiently to allow different
	  code paths to be selected for these cases. Some network
	  drivers, for example, could opt to not fix up alignment
	  problems with received packets if doing so would not help
	  much.

	  See Documentation/core-api/unaligned-memory-access.rst for more
	  information on the topic of unaligned memory accesses.

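#
# For reference, portable code reaches misaligned data through the
# accessors named above (a sketch; the helper and field offset are
# illustrative):
#
#	#include <asm/unaligned.h>
#
#	static u32 read_field(const u8 *pkt)
#	{
#		/* correct on every arch; a plain load where accesses are cheap */
#		return get_unaligned((const u32 *)(pkt + 1));
#	}
#
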
config ARCH_USE_BUILTIN_BSWAP
	bool
	help
	 Modern versions of GCC (since 4.4) have builtin functions
	 for handling byte-swapping. Using these, instead of the old
	 inline assembler that the architecture code provides in the
	 __arch_bswapXX() macros, allows the compiler to see what's
	 happening and offers more opportunity for optimisation. In
	 particular, the compiler will be able to combine the byteswap
	 with a nearby load or store and use load-and-swap or
	 store-and-swap instructions if the architecture has them. It
	 should almost *never* result in code which is worse than the
	 hand-coded assembler in <asm/swab.h>.  But just in case it
	 does, the use of the builtins is optional.

	 Any architecture with load-and-swap or store-and-swap
	 instructions should set this. And it shouldn't hurt to set it
	 on architectures that don't have such instructions.

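#
# A sketch of the effect (not the exact generic implementation): with the
# option set, the swab helpers can expand to the builtin, which the
# optimizer may then fuse with an adjacent load or store:
#
#	static inline __u32 swab32_example(__u32 v)
#	{
#	#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#		return __builtin_bswap32(v);	/* visible to the optimizer */
#	#else
#		return __arch_swab32(v);	/* opaque arch inline asm */
#	#endif
#	}
#
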
config KRETPROBES
	def_bool y
	depends on KPROBES && HAVE_KRETPROBES

config USER_RETURN_NOTIFIER
	bool
	depends on HAVE_USER_RETURN_NOTIFIER
	help
	  Provide a kernel-internal notification when a cpu is about to
	  switch to user mode.

config HAVE_IOREMAP_PROT
	bool

config HAVE_KPROBES
	bool

config HAVE_KRETPROBES
	bool

config HAVE_OPTPROBES
	bool

config HAVE_KPROBES_ON_FTRACE
	bool

config HAVE_FUNCTION_ERROR_INJECTION
	bool

config HAVE_NMI
	bool

#
# An arch should select this if it provides all these things:
#
#	task_pt_regs()		in asm/processor.h or asm/ptrace.h
#	arch_has_single_step()	if there is hardware single-step support
#	arch_has_block_step()	if there is hardware block-step support
#	asm/syscall.h		supplying asm-generic/syscall.h interface
#	linux/regset.h		user_regset interfaces
#	CORE_DUMP_USE_REGSET	#define'd in linux/elf.h
#	TIF_SYSCALL_TRACE	calls tracehook_report_syscall_{entry,exit}
#	TIF_NOTIFY_RESUME	calls tracehook_notify_resume()
#	signal delivery		calls tracehook_signal_handler()
#
config HAVE_ARCH_TRACEHOOK
	bool

config HAVE_DMA_CONTIGUOUS
	bool

config GENERIC_SMP_IDLE_THREAD
	bool

config GENERIC_IDLE_POLL_SETUP
	bool

config ARCH_HAS_FORTIFY_SOURCE
	bool
	help
	  An architecture should select this when it can successfully
	  build and run with CONFIG_FORTIFY_SOURCE.

#
# Select if the arch provides a historic keepinit alias for the retain_initrd
# command line option
#
config ARCH_HAS_KEEPINITRD
	bool

# Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h
config ARCH_HAS_SET_MEMORY
	bool

# Select if arch has all set_direct_map_invalid/default() functions
config ARCH_HAS_SET_DIRECT_MAP
	bool

#
# Select if the architecture provides the arch_dma_set_uncached symbol to
# either provide an uncached segment alias for a DMA allocation, or
# to remap the page tables in place.
#
config ARCH_HAS_DMA_SET_UNCACHED
	bool

#
# Select if the architecture provides the arch_dma_clear_uncached symbol
# to undo an in-place page table remap for uncached access.
#
config ARCH_HAS_DMA_CLEAR_UNCACHED
	bool

# Select if arch init_task must go in the __init_task_data section
config ARCH_TASK_STRUCT_ON_STACK
	bool

# Select if arch has its private alloc_task_struct() function
config ARCH_TASK_STRUCT_ALLOCATOR
	bool

config HAVE_ARCH_THREAD_STRUCT_WHITELIST
	bool
	depends on !ARCH_TASK_STRUCT_ALLOCATOR
	help
	  An architecture should select this to provide hardened usercopy
	  knowledge about what region of the thread_struct should be
	  whitelisted for copying to userspace. Normally this is only the
	  FPU registers. Specifically, arch_thread_struct_whitelist()
	  should be implemented. Without this, the entire thread_struct
	  field in task_struct will be left whitelisted.

# Select if arch has its private alloc_thread_stack() function
config ARCH_THREAD_STACK_ALLOCATOR
	bool

# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
config ARCH_WANTS_DYNAMIC_TASK_STRUCT
	bool

config ARCH_32BIT_OFF_T
	bool
	depends on !64BIT
	help
	  All new 32-bit architectures should have a 64-bit off_t type on
	  the userspace side, corresponding to the loff_t kernel type. This
	  is the requirement for modern ABIs. Some existing architectures
	  still support 32-bit off_t. This option is enabled for all such
	  architectures explicitly.

config HAVE_ASM_MODVERSIONS
	bool
	help
	  This symbol should be selected by an architecture if it provides
	  <asm/asm-prototypes.h> to support the module versioning for symbols
	  exported from assembly code.

config HAVE_REGS_AND_STACK_ACCESS_API
	bool
	help
	  This symbol should be selected by an architecture if it supports
	  the API needed to access registers and stack entries from pt_regs,
	  declared in asm/ptrace.h. For example, the kprobes-based event
	  tracer needs this API.

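#
# A sketch of the accessors this advertises (as consumed by the kprobes
# event tracer; the register name and probe context are illustrative):
#
#	#include <asm/ptrace.h>
#
#	void inspect(struct pt_regs *regs)
#	{
#		int off = regs_query_register_offset("sp");
#		unsigned long sp  = regs_get_register(regs, off);
#		unsigned long top = regs_get_kernel_stack_nth(regs, 0);
#		pr_info("sp=%lx stack[0]=%lx\n", sp, top);
#	}
#
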
config HAVE_RSEQ
	bool
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	help
	  This symbol should be selected by an architecture if it
	  supports an implementation of restartable sequences.

config HAVE_FUNCTION_ARG_ACCESS_API
	bool
	help
	  This symbol should be selected by an architecture if it supports
	  the API needed to access function arguments from pt_regs,
	  declared in asm/ptrace.h.

config HAVE_HW_BREAKPOINT
	bool
	depends on PERF_EVENTS

config HAVE_MIXED_BREAKPOINTS_REGS
	bool
	depends on HAVE_HW_BREAKPOINT
	help
	  Depending on the arch implementation of hardware breakpoints,
	  some of them have separate registers for data and instruction
	  breakpoint addresses, while others have mixed registers to store
	  them but define the access type in a control register.
	  Select this option if your arch implements breakpoints in the
	  latter fashion.

config HAVE_USER_RETURN_NOTIFIER
	bool

config HAVE_PERF_EVENTS_NMI
	bool
	help
	  System hardware can generate an NMI using the perf event
	  subsystem. It also supports counting CPU cycle events to
	  determine how many clock cycles elapsed in a given period.

config HAVE_HARDLOCKUP_DETECTOR_PERF
	bool
	depends on HAVE_PERF_EVENTS_NMI
	help
	  The arch chooses to use the generic perf-NMI-based hardlockup
	  detector. Must define HAVE_PERF_EVENTS_NMI.

config HAVE_NMI_WATCHDOG
	depends on HAVE_NMI
	bool
	help
	  The arch provides a low level NMI watchdog. It provides
	  asm/nmi.h, and defines its own arch_touch_nmi_watchdog().

config HAVE_HARDLOCKUP_DETECTOR_ARCH
	bool
	select HAVE_NMI_WATCHDOG
	help
	  The arch chooses to provide its own hardlockup detector, which is
	  a superset of HAVE_NMI_WATCHDOG. It also conforms to the config
	  interfaces and parameters provided by the hardlockup detector
	  subsystem.

config HAVE_PERF_REGS
	bool
	help
	  Support selective register dumps for perf events. This includes
	  a bit-mapping of each register and a unique architecture id.

config HAVE_PERF_USER_STACK_DUMP
	bool
	help
	  Support user stack dumps for perf event samples. This needs
	  access to the user stack pointer, which is not unified across
	  architectures.

config HAVE_ARCH_JUMP_LABEL
	bool

config HAVE_ARCH_JUMP_LABEL_RELATIVE
	bool

config MMU_GATHER_TABLE_FREE
	bool

config MMU_GATHER_RCU_TABLE_FREE
	bool
	select MMU_GATHER_TABLE_FREE

config MMU_GATHER_PAGE_SIZE
	bool

config MMU_GATHER_NO_RANGE
	bool

config MMU_GATHER_NO_GATHER
	bool
	depends on MMU_GATHER_TABLE_FREE

config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
	bool
	help
	  Temporary select until all architectures can be converted to have
	  irqs disabled over activate_mm. Architectures that do IPI based TLB
	  shootdowns should enable this.

config ARCH_HAVE_NMI_SAFE_CMPXCHG
	bool

config HAVE_ALIGNED_STRUCT_PAGE
	bool
	help
	  This makes sure that struct pages are double word aligned and that
	  e.g. the SLUB allocator can perform double word atomic operations
	  on a struct page for better performance. However, selecting this
	  might increase the size of a struct page by a word.

config HAVE_CMPXCHG_LOCAL
	bool

config HAVE_CMPXCHG_DOUBLE
	bool

config ARCH_WEAK_RELEASE_ACQUIRE
	bool

config ARCH_WANT_IPC_PARSE_VERSION
	bool

config ARCH_WANT_COMPAT_IPC_PARSE_VERSION
	bool

config ARCH_WANT_OLD_COMPAT_IPC
	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
	bool

config HAVE_ARCH_SECCOMP
	bool
	help
	  An arch should select this symbol to support seccomp mode 1 (the fixed
	  syscall policy), and must provide an override for __NR_seccomp_sigreturn,
	  and compat syscalls if the asm-generic/seccomp.h defaults need adjustment:
	  - __NR_seccomp_read_32
	  - __NR_seccomp_write_32
	  - __NR_seccomp_exit_32
	  - __NR_seccomp_sigreturn_32

config HAVE_ARCH_SECCOMP_FILTER
	bool
	select HAVE_ARCH_SECCOMP
	help
	  An arch should select this symbol if it provides all of these things:
	  - all the requirements for HAVE_ARCH_SECCOMP
	  - syscall_get_arch()
	  - syscall_get_arguments()
	  - syscall_rollback()
	  - syscall_set_return_value()
	  - SIGSYS siginfo_t support
	  - secure_computing is called from a ptrace_event()-safe context
	  - secure_computing return value is checked and a return value of -1
	    results in the system call being skipped immediately.
	  - seccomp syscall wired up

config SECCOMP
	prompt "Enable seccomp to safely execute untrusted bytecode"
	def_bool y
	depends on HAVE_ARCH_SECCOMP
	help
	  This kernel feature is useful for number crunching applications
	  that may need to handle untrusted bytecode during their
	  execution. By using pipes or other transports made available
	  to the process as file descriptors supporting the read/write
	  syscalls, it's possible to isolate those applications in their
	  own address space using seccomp. Once seccomp is enabled via
	  prctl(PR_SET_SECCOMP) or the seccomp() syscall, it cannot be
	  disabled and the task is only allowed to execute a few safe
	  syscalls defined by each seccomp mode.

	  If unsure, say Y.

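#
# The userspace side of mode 1, as described above (a minimal sketch):
#
#	#include <sys/prctl.h>
#	#include <linux/seccomp.h>
#	#include <unistd.h>
#
#	int main(void)
#	{
#		prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
#		/* from here on only read/write/exit/sigreturn are allowed */
#		write(1, "sandboxed\n", 10);
#		_exit(0);
#	}
#
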
config SECCOMP_FILTER
	def_bool y
	depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
	help
	  Enable tasks to build secure computing environments defined
	  in terms of Berkeley Packet Filter programs which implement
	  task-defined system call filtering policies.

	  See Documentation/userspace-api/seccomp_filter.rst for details.

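#
# A minimal filter sketch (the denied syscall is illustrative; real
# policies usually allow-list rather than deny-list):
#
#	#include <stddef.h>
#	#include <sys/prctl.h>
#	#include <sys/syscall.h>
#	#include <linux/seccomp.h>
#	#include <linux/filter.h>
#
#	struct sock_filter filter[] = {
#		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
#			 offsetof(struct seccomp_data, nr)),
#		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_uname, 0, 1),
#		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | 1 /* EPERM */),
#		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
#	};
#	struct sock_fprog prog = {
#		.len = sizeof(filter) / sizeof(filter[0]),
#		.filter = filter,
#	};
#
#	/* prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
#	 * prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); */
#
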
config HAVE_ARCH_STACKLEAK
	bool
	help
	  An architecture should select this if it has the code which
	  fills the used part of the kernel stack with the STACKLEAK_POISON
	  value before returning from system calls.

config HAVE_STACKPROTECTOR
	bool
	help
	  An arch should select this symbol if:
	  - it has implemented a stack canary (e.g. __stack_chk_guard)

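#
# The arch-side contract is small (a sketch, close to but not verbatim
# from the generic code): a global canary plus the hook the compiler
# calls on mismatch:
#
#	unsigned long __stack_chk_guard __read_mostly;
#
#	void __stack_chk_fail(void)
#	{
#		panic("stack-protector: kernel stack corrupted in %pB",
#		      __builtin_return_address(0));
#	}
#
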
config STACKPROTECTOR
	bool "Stack Protector buffer overflow detection"
	depends on HAVE_STACKPROTECTOR
	depends on $(cc-option,-fstack-protector)
	default y
	help
	  This option turns on the "stack-protector" GCC feature. This
	  feature puts, at the beginning of functions, a canary value on
	  the stack just before the return address, and validates
	  the value just before actually returning.  Stack-based buffer
	  overflows (that need to overwrite this return address) now also
	  overwrite the canary, which gets detected and the attack is then
	  neutralized via a kernel panic.

	  Functions will have the stack-protector canary logic added if they
	  have an 8-byte or larger character array on the stack.

	  This feature requires gcc version 4.2 or above, or a distribution
	  gcc with the feature backported ("-fstack-protector").

	  On an x86 "defconfig" build, this feature adds canary checks to
	  about 3% of all kernel functions, which increases kernel code size
	  by about 0.3%.

config STACKPROTECTOR_STRONG
	bool "Strong Stack Protector"
	depends on STACKPROTECTOR
	depends on $(cc-option,-fstack-protector-strong)
	default y
	help
	  Functions will have the stack-protector canary logic added in any
	  of the following conditions:

	  - local variable's address used as part of the right hand side of an
	    assignment or function argument
	  - local variable is an array (or union containing an array),
	    regardless of array type or length
	  - uses register local variables

	  This feature requires gcc version 4.9 or above, or a distribution
	  gcc with the feature backported ("-fstack-protector-strong").

	  On an x86 "defconfig" build, this feature adds canary checks to
	  about 20% of all kernel functions, which increases the kernel code
	  size by about 2%.

config ARCH_SUPPORTS_SHADOW_CALL_STACK
	bool
	help
	  An architecture should select this if it supports Clang's Shadow
	  Call Stack and implements runtime support for shadow stack
	  switching.

config SHADOW_CALL_STACK
	bool "Clang Shadow Call Stack"
	depends on CC_IS_CLANG && ARCH_SUPPORTS_SHADOW_CALL_STACK
	depends on DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
	help
	  This option enables Clang's Shadow Call Stack, which uses a
	  shadow stack to protect function return addresses from being
	  overwritten by an attacker. More information can be found in
	  Clang's documentation:

	    https://clang.llvm.org/docs/ShadowCallStack.html

	  Note that security guarantees in the kernel differ from the
	  ones documented for user space. The kernel must store addresses
	  of shadow stacks in memory, which means an attacker capable of
	  reading and writing arbitrary memory may be able to locate them
	  and hijack control flow by modifying the stacks.

config LTO
	bool
	help
	  Selected if the kernel will be built using the compiler's LTO feature.

config LTO_CLANG
	bool
	select LTO
	help
	  Selected if the kernel will be built using Clang's LTO feature.

config ARCH_SUPPORTS_LTO_CLANG
	bool
	help
	  An architecture should select this option if it supports:
	  - compiling with Clang,
	  - compiling inline assembly with Clang's integrated assembler,
	  - and linking with LLD.

config ARCH_SUPPORTS_LTO_CLANG_THIN
	bool
	help
	  An architecture should select this option if it can support Clang's
	  ThinLTO mode.

config HAS_LTO_CLANG
	def_bool y
	# Clang >= 11: https://github.com/ClangBuiltLinux/linux/issues/510
	depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD
	depends on $(success,test $(LLVM) -eq 1)
	depends on $(success,test $(LLVM_IAS) -eq 1)
	depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
	depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
	depends on ARCH_SUPPORTS_LTO_CLANG
	depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
	depends on !KASAN || KASAN_HW_TAGS
	depends on !GCOV_KERNEL
	help
	  The compiler and Kconfig options support building with Clang's
	  LTO.

choice
	prompt "Link Time Optimization (LTO)"
	default LTO_NONE
	help
	  This option enables Link Time Optimization (LTO), which allows the
	  compiler to optimize binaries globally.

	  If unsure, select LTO_NONE. Note that LTO is very resource-intensive
	  so it's disabled by default.

config LTO_NONE
	bool "None"
	help
	  Build the kernel normally, without Link Time Optimization (LTO).

config LTO_CLANG_FULL
	bool "Clang Full LTO (EXPERIMENTAL)"
	depends on HAS_LTO_CLANG
	depends on !COMPILE_TEST
	select LTO_CLANG
	help
	  This option enables Clang's full Link Time Optimization (LTO), which
	  allows the compiler to optimize the kernel globally. If you enable
	  this option, the compiler generates LLVM bitcode instead of ELF
	  object files, and the actual compilation from bitcode happens at
	  the LTO link step, which may take several minutes depending on the
	  kernel configuration. More information can be found from LLVM's
	  documentation:

	    https://llvm.org/docs/LinkTimeOptimization.html

	  During link time, this option can use a large amount of RAM, and
	  may take much longer than the ThinLTO option.

config LTO_CLANG_THIN
	bool "Clang ThinLTO (EXPERIMENTAL)"
	depends on HAS_LTO_CLANG && ARCH_SUPPORTS_LTO_CLANG_THIN
	select LTO_CLANG
	help
	  This option enables Clang's ThinLTO, which allows for parallel
	  optimization and faster incremental compiles compared to the
	  CONFIG_LTO_CLANG_FULL option. More information can be found
	  from Clang's documentation:

	    https://clang.llvm.org/docs/ThinLTO.html

	  If unsure, say Y.
endchoice

config CFI_CLANG
	bool "Use Clang's Control Flow Integrity (CFI)"
	depends on LTO_CLANG && KALLSYMS
	help
	  This option enables Clang's Control Flow Integrity (CFI), which adds
	  runtime checking for indirect function calls.

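#
# What the check catches, illustratively (the function names are made up):
#
#	static void do_thing(long arg);
#
#	void (*fp)(int) = (void (*)(int))do_thing; /* prototype mismatch */
#	fp(0);	/* indirect call through a wrong-typed pointer: CFI traps
#		 * (or just warns, with CFI_PERMISSIVE below) */
#
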
config CFI_CLANG_SHADOW
	bool "Use CFI shadow to speed up cross-module checks"
	default y
	depends on CFI_CLANG && MODULES
	help
	  If you select this option, the kernel builds a fast look-up table of
	  CFI check functions in loaded modules to reduce overhead.

config CFI_PERMISSIVE
	bool "Use CFI in permissive mode"
	depends on CFI_CLANG
	help
	  When selected, Control Flow Integrity (CFI) violations result in a
	  warning instead of a kernel panic. This option is useful for finding
	  CFI violations during development.

config HAVE_ARCH_WITHIN_STACK_FRAMES
	bool
	help
	  An architecture should select this if it can walk the kernel stack
	  frames to determine if an object is part of either the arguments
	  or local variables (i.e. that it excludes saved return addresses,
	  and similar) by implementing an inline arch_within_stack_frames(),
	  which is used by CONFIG_HARDENED_USERCOPY.

config HAVE_CONTEXT_TRACKING
	bool
	help
	  Provide kernel/user boundaries probes necessary for subsystems
	  that need it, such as userspace RCU extended quiescent state.
	  Syscalls need to be wrapped inside user_exit()-user_enter(), either
	  optimized behind a static key or through the slow path using the
	  TIF_NOHZ flag. Exception handlers must be wrapped as well. Irqs
	  are already protected inside rcu_irq_enter/rcu_irq_exit() but
	  preemption or signal handling on irq exit still need to be protected.

config HAVE_TIF_NOHZ
	bool
	help
	  Arch relies on TIF_NOHZ and the syscall slow path to implement
	  context tracking calls to user_enter()/user_exit().

config HAVE_VIRT_CPU_ACCOUNTING
	bool

config ARCH_HAS_SCALED_CPUTIME
	bool

config HAVE_VIRT_CPU_ACCOUNTING_GEN
	bool
	default y if 64BIT
	help
	  With VIRT_CPU_ACCOUNTING_GEN, cputime_t becomes 64-bit.
	  Before enabling this option, arch code must be audited
	  to ensure there are no races in concurrent read/write of
	  cputime_t. For example, reading/writing 64-bit cputime_t on
	  some 32-bit arches may require multiple accesses, so proper
	  locking is needed to protect against concurrent accesses.

config HAVE_IRQ_TIME_ACCOUNTING
	bool
	help
	  Archs need to ensure they use a high enough resolution clock to
	  support irq time accounting and then call enable_sched_clock_irqtime().

config HAVE_MOVE_PUD
	bool
	help
	  Architectures that select this are able to move page tables at the
	  PUD level. If there are only 3 page table levels, the move effectively
	  happens at the PGD level.

config HAVE_MOVE_PMD
	bool
	help
	  Archs that select this are able to move page tables at the PMD level.

config HAVE_ARCH_TRANSPARENT_HUGEPAGE
	bool

config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	bool

config HAVE_ARCH_HUGE_VMAP
	bool

config ARCH_WANT_HUGE_PMD_SHARE
	bool

config HAVE_ARCH_SOFT_DIRTY
	bool

config HAVE_MOD_ARCH_SPECIFIC
	bool
	help
	  The arch uses struct mod_arch_specific to store data.  Many arches
	  just need a simple module loader without arch specific data - those
	  should not enable this.

config MODULES_USE_ELF_RELA
	bool
	help
	  Modules only use ELF RELA relocations.  Modules with ELF REL
	  relocations will give an error.

config MODULES_USE_ELF_REL
	bool
	help
	  Modules only use ELF REL relocations.  Modules with ELF RELA
	  relocations will give an error.

config HAVE_IRQ_EXIT_ON_IRQ_STACK
	bool
	help
	  The architecture executes not only the irq handler on the irq stack
	  but also irq_exit(). This way we can process softirqs on this irq
	  stack instead of switching to a new one when we call __do_softirq()
	  at the end of a hardirq.
	  This spares a stack switch and improves cache usage on softirq
	  processing.

config PGTABLE_LEVELS
	int
	default 2

config ARCH_HAS_ELF_RANDOMIZE
	bool
	help
	  An architecture supports choosing randomized locations for
	  stack, mmap, brk, and ET_DYN. Defined functions:
	  - arch_mmap_rnd()
	  - arch_randomize_brk()

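#
# Shape of one of those hooks (a sketch modeled on existing arches, not
# verbatim from any of them):
#
#	unsigned long arch_mmap_rnd(void)
#	{
#		unsigned long rnd;
#
#		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
#		return rnd << PAGE_SHIFT;
#	}
#
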
config HAVE_ARCH_MMAP_RND_BITS
	bool
	help
	  An arch should select this symbol if it supports setting a variable
	  number of bits for use in establishing the base address for mmap
	  allocations, has MMU enabled and provides values for both:
	  - ARCH_MMAP_RND_BITS_MIN
	  - ARCH_MMAP_RND_BITS_MAX

config HAVE_EXIT_THREAD
	bool
	help
	  An architecture implements exit_thread.

config ARCH_MMAP_RND_BITS_MIN
	int

config ARCH_MMAP_RND_BITS_MAX
	int

config ARCH_MMAP_RND_BITS_DEFAULT
	int

config ARCH_MMAP_RND_BITS
	int "Number of bits to use for ASLR of mmap base address" if EXPERT
	range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
	default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
	default ARCH_MMAP_RND_BITS_MIN
	depends on HAVE_ARCH_MMAP_RND_BITS
	help
	  This value can be used to select the number of bits to use to
	  determine the random offset to the base address of vma regions
	  resulting from mmap allocations. This value will be bounded
	  by the architecture's minimum and maximum supported values.

	  This value can be changed after boot using the
	  /proc/sys/vm/mmap_rnd_bits tunable.

config HAVE_ARCH_MMAP_RND_COMPAT_BITS
	bool
	help
	  An arch should select this symbol if it supports running applications
	  in compatibility mode, supports setting a variable number of bits for
	  use in establishing the base address for mmap allocations, has MMU
	  enabled and provides values for both:
	  - ARCH_MMAP_RND_COMPAT_BITS_MIN
	  - ARCH_MMAP_RND_COMPAT_BITS_MAX

config ARCH_MMAP_RND_COMPAT_BITS_MIN
	int

config ARCH_MMAP_RND_COMPAT_BITS_MAX
	int

config ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
	int

config ARCH_MMAP_RND_COMPAT_BITS
	int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
	range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
	default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
	default ARCH_MMAP_RND_COMPAT_BITS_MIN
	depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
	help
	  This value can be used to select the number of bits to use to
	  determine the random offset to the base address of vma regions
	  resulting from mmap allocations for compatible applications. This
	  value will be bounded by the architecture's minimum and maximum
	  supported values.

	  This value can be changed after boot using the
	  /proc/sys/vm/mmap_rnd_compat_bits tunable.

config HAVE_ARCH_COMPAT_MMAP_BASES
	bool
	help
	  This allows 64-bit applications to invoke the 32-bit mmap() syscall
	  and, vice versa, 32-bit applications to call the 64-bit mmap().
	  Required for applications doing different bitness syscalls.

# This allows the use of a set of generic functions to determine mmap base
# address by giving priority to top-down scheme only if the process
# is not in legacy mode (compat task, unlimited stack size or
# sysctl_legacy_va_layout).
# An architecture that selects this option can provide its own version of:
# - STACK_RND_MASK
config ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
	bool
	depends on MMU
	select ARCH_HAS_ELF_RANDOMIZE

config HAVE_STACK_VALIDATION
	bool
	help
	  Architecture supports the 'objtool check' host tool command, which
	  performs compile-time stack metadata validation.

config HAVE_RELIABLE_STACKTRACE
	bool
	help
	  Architecture has either save_stack_trace_tsk_reliable() or
	  arch_stack_walk_reliable() function which only returns a stack trace
	  if it can guarantee the trace is reliable.

config HAVE_ARCH_HASH
	bool
	default n
	help
	  If this is set, the architecture provides an <asm/hash.h>
	  file which provides platform-specific implementations of some
	  functions in <linux/hash.h> or fs/namei.c.

config HAVE_ARCH_NVRAM_OPS
	bool

config ISA_BUS_API
	def_bool ISA

#
# ABI hall of shame
#
config CLONE_BACKWARDS
	bool
	help
	  Architecture has tls passed as the 4th argument of clone(2),
	  not the 5th one.

config CLONE_BACKWARDS2
	bool
	help
	  Architecture has the first two arguments of clone(2) swapped.

config CLONE_BACKWARDS3
	bool
	help
	  Architecture has tls passed as the 3rd argument of clone(2),
	  not the 5th one.

config ODD_RT_SIGACTION
	bool
	help
	  Architecture has unusual rt_sigaction(2) arguments.

config OLD_SIGSUSPEND
	bool
	help
	  Architecture has the old sigsuspend(2) syscall, of the
	  one-argument variety.

config OLD_SIGSUSPEND3
	bool
	help
	  Even weirder antique ABI - three-argument sigsuspend(2).

config OLD_SIGACTION
	bool
	help
	  Architecture has the old sigaction(2) syscall.  Nope, not the same
	  as OLD_SIGSUSPEND | OLD_SIGSUSPEND3 - alpha has sigsuspend(2),
	  but a fairly different variant of sigaction(2), thanks to OSF/1
	  compatibility...

config COMPAT_OLD_SIGACTION
	bool

config COMPAT_32BIT_TIME
	bool "Provide system calls for 32-bit time_t"
	default !64BIT || COMPAT
	help
	  This enables 32-bit time_t support in addition to 64-bit time_t support.
	  This is relevant on all 32-bit architectures, and on 64-bit architectures
	  as part of compat syscall handling.

config ARCH_NO_PREEMPT
	bool

config ARCH_SUPPORTS_RT
	bool

config CPU_NO_EFFICIENT_FFS
	def_bool n

config HAVE_ARCH_VMAP_STACK
	def_bool n
	help
	  An arch should select this symbol if it can support kernel stacks
	  in vmalloc space.  This means:

	  - vmalloc space must be large enough to hold many kernel stacks.
	    This may rule out many 32-bit architectures.

	  - Stacks in vmalloc space need to work reliably.  For example, if
	    vmap page tables are created on demand, either this mechanism
	    needs to work while the stack points to a virtual address with
	    unpopulated page tables or arch code (switch_to() and switch_mm(),
	    most likely) needs to ensure that the stack's page table entries
	    are populated before running on a possibly unpopulated stack.

	  - If the stack overflows into a guard page, something reasonable
	    should happen.  The definition of "reasonable" is flexible, but
	    instantly rebooting without logging anything would be unfriendly.

config VMAP_STACK
	default y
	bool "Use a virtually-mapped stack"
	depends on HAVE_ARCH_VMAP_STACK
	depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
	help
	  Enable this if you want to use virtually-mapped kernel stacks
	  with guard pages.  This causes kernel stack overflows to be
	  caught immediately rather than causing difficult-to-diagnose
	  corruption.

	  To use this with software KASAN modes, the architecture must support
	  backing virtual mappings with real shadow memory, and KASAN_VMALLOC
	  must be enabled.

config ARCH_OPTIONAL_KERNEL_RWX
	def_bool n

config ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
	def_bool n

config ARCH_HAS_STRICT_KERNEL_RWX
	def_bool n

config STRICT_KERNEL_RWX
	bool "Make kernel text and rodata read-only" if ARCH_OPTIONAL_KERNEL_RWX
	depends on ARCH_HAS_STRICT_KERNEL_RWX
	default !ARCH_OPTIONAL_KERNEL_RWX || ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
	help
	  If this is set, kernel text and rodata memory will be made read-only,
	  and non-text memory will be made non-executable. This provides
	  protection against certain security exploits (e.g. executing the heap
	  or modifying text).

	  These features are considered standard security practice these days.
	  You should say Y here in almost all cases.

config ARCH_HAS_STRICT_MODULE_RWX
	def_bool n

config STRICT_MODULE_RWX
	bool "Set loadable kernel module data as NX and text as RO" if ARCH_OPTIONAL_KERNEL_RWX
	depends on ARCH_HAS_STRICT_MODULE_RWX && MODULES
	default !ARCH_OPTIONAL_KERNEL_RWX || ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
	help
	  If this is set, module text and rodata memory will be made read-only,
	  and non-text memory will be made non-executable. This provides
	  protection against certain security exploits (e.g. writing to text).

# select if the architecture provides an asm/dma-direct.h header
config ARCH_HAS_PHYS_TO_DMA
	bool

config HAVE_ARCH_COMPILER_H
	bool
	help
	  An architecture can select this if it provides an
	  asm/compiler.h header that should be included after
	  linux/compiler-*.h in order to override macro definitions that those
	  headers generally provide.

config HAVE_ARCH_PREL32_RELOCATIONS
	bool
	help
	  May be selected by an architecture if it supports place-relative
	  32-bit relocations, both in the toolchain and in the module loader,
	  in which case relative references can be used in special sections
	  for PCI fixup, initcalls, etc., which are only half the size on 64-bit
	  architectures, and don't require runtime relocation on relocatable
	  kernels.

config ARCH_USE_MEMREMAP_PROT
	bool

config LOCK_EVENT_COUNTS
	bool "Locking event counts collection"
	depends on DEBUG_FS
	help
	  Enable light-weight counting of various locking related events
	  in the system with minimal performance impact. This reduces
	  the chance of application behavior change because of timing
	  differences. The counts are reported via debugfs.

# Select if the architecture has support for applying RELR relocations.
config ARCH_HAS_RELR
	bool

config RELR
	bool "Use RELR relocation packing"
	depends on ARCH_HAS_RELR && TOOLS_SUPPORT_RELR
	default y
	help
	  Store the kernel's dynamic relocations in the RELR relocation packing
	  format. Requires a compatible linker (LLD supports this feature), as
	  well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy
	  are compatible).

config ARCH_HAS_MEM_ENCRYPT
	bool

config ARCH_HAS_CC_PLATFORM
	bool

config HAVE_SPARSE_SYSCALL_NR
	bool
	help
	  An architecture should select this if its syscall numbering is sparse
	  to save space. For example, the MIPS architecture has a syscall array
	  with entries at the 4000, 5000 and 6000 locations. This option turns
	  on syscall-related optimizations for a given architecture.

config ARCH_HAS_VDSO_DATA
	bool

config HAVE_STATIC_CALL
	bool

config HAVE_STATIC_CALL_INLINE
	bool
	depends on HAVE_STATIC_CALL

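#
# A sketch of the static_call() interface these symbols back (the callee
# names are illustrative):
#
#	#include <linux/static_call.h>
#
#	static int slow_impl(int x) { return x + 1; }
#	DEFINE_STATIC_CALL(my_call, slow_impl);
#
#	/* the call site is patched directly, no indirect branch: */
#	int r = static_call(my_call)(41);
#
#	/* retarget at runtime: static_call_update(my_call, &fast_impl); */
#
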
config ARCH_WANT_LD_ORPHAN_WARN
	bool
	help
	  An arch should select this symbol once all linker sections are explicitly
	  included, size-asserted, or discarded in the linker scripts. This is
	  important because we never want expected sections to be placed heuristically
	  by the linker, since the locations of such sections can change between linker
	  versions.

config ARCH_SPLIT_ARG64
	bool
	help
	  If a 32-bit architecture requires 64-bit arguments to be split into
	  pairs of 32-bit arguments, select this option.

source "kernel/gcov/Kconfig"

source "scripts/gcc-plugins/Kconfig"

endmenu