# arch/arm64/Kconfig (OK3568 vendor kernel, revision 4882a59341e53eb6f0b4789bf948001014eff981)
# SPDX-License-Identifier: GPL-2.0-only
config ARM64
	def_bool y
	select ACPI_CCA_REQUIRED if ACPI
	select ACPI_GENERIC_GSI if ACPI
	select ACPI_GTDT if ACPI
	select ACPI_IORT if ACPI
	select ACPI_REDUCED_HARDWARE_ONLY if ACPI
	select ACPI_MCFG if (ACPI && PCI)
	select ACPI_SPCR_TABLE if ACPI
	select ACPI_PPTT if ACPI
	select ARCH_HAS_DEBUG_WX
	select ARCH_BINFMT_ELF_STATE
	select ARCH_HAS_DEBUG_VIRTUAL
	select ARCH_HAS_DEBUG_VM_PGTABLE
	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAS_DMA_PREP_COHERENT
	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
	select ARCH_HAS_FAST_MULTIPLIER
	select ARCH_HAS_FORTIFY_SOURCE
	select ARCH_HAS_GCOV_PROFILE_ALL
	select ARCH_HAS_GIGANTIC_PAGE
	select ARCH_HAS_KCOV
	select ARCH_HAS_KEEPINITRD
	select ARCH_HAS_MEMBARRIER_SYNC_CORE
	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	select ARCH_HAS_PTE_DEVMAP
	select ARCH_HAS_PTE_SPECIAL
	select ARCH_HAS_SETUP_DMA_OPS
	select ARCH_HAS_SET_DIRECT_MAP
	select ARCH_HAS_SET_MEMORY
	select ARCH_STACKWALK
	select ARCH_HAS_STRICT_KERNEL_RWX
	select ARCH_HAS_STRICT_MODULE_RWX
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select ARCH_HAS_SYSCALL_WRAPPER
	select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
	select ARCH_HAVE_ELF_PROT
	select ARCH_HAVE_NMI_SAFE_CMPXCHG
	select ARCH_INLINE_READ_LOCK if !PREEMPTION
	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
	select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
	select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
	select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
	select ARCH_KEEP_MEMBLOCK
	select ARCH_USE_CMPXCHG_LOCKREF
	select ARCH_USE_GNU_PROPERTY
	select ARCH_USE_QUEUED_RWLOCKS
	select ARCH_USE_QUEUED_SPINLOCKS
	select ARCH_USE_SYM_ANNOTATIONS
	select ARCH_SUPPORTS_MEMORY_FAILURE
	select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
	select ARCH_SUPPORTS_LTO_CLANG if CPU_LITTLE_ENDIAN
	select ARCH_SUPPORTS_LTO_CLANG_THIN
	select ARCH_SUPPORTS_ATOMIC_RMW
	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
	select ARCH_SUPPORTS_NUMA_BALANCING
	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
	select ARCH_WANT_DEFAULT_BPF_JIT
	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
	select ARCH_WANT_FRAME_POINTERS
	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
	select ARCH_WANT_LD_ORPHAN_WARN
	select ARCH_HAS_UBSAN_SANITIZE_ALL
	select ARM_AMBA
	select ARM_ARCH_TIMER
	select ARM_GIC
	select AUDIT_ARCH_COMPAT_GENERIC
	select ARM_GIC_V2M if PCI
	select ARM_GIC_V3
	select ARM_GIC_V3_ITS if PCI
	select ARM_PSCI_FW
	select BUILDTIME_TABLE_SORT
	select CLONE_BACKWARDS
	select COMMON_CLK
	select CPU_PM if (SUSPEND || CPU_IDLE)
	select CRC32
	select DCACHE_WORD_ACCESS
	select DMA_DIRECT_REMAP
	select EDAC_SUPPORT
	select FRAME_POINTER
	select GENERIC_ALLOCATOR
	select GENERIC_ARCH_TOPOLOGY
	select GENERIC_CLOCKEVENTS
	select GENERIC_CLOCKEVENTS_BROADCAST
	select GENERIC_CPU_AUTOPROBE
	select GENERIC_CPU_VULNERABILITIES
	select GENERIC_EARLY_IOREMAP
	select GENERIC_IDLE_POLL_SETUP
	select GENERIC_IRQ_IPI
	select ARCH_WANTS_IRQ_RAW
	select GENERIC_IRQ_MULTI_HANDLER
	select GENERIC_IRQ_PROBE
	select GENERIC_IRQ_SHOW
	select GENERIC_IRQ_SHOW_LEVEL
	select GENERIC_PCI_IOMAP
	select GENERIC_PTDUMP
	select GENERIC_SCHED_CLOCK
	select GENERIC_SMP_IDLE_THREAD
	select GENERIC_STRNCPY_FROM_USER
	select GENERIC_STRNLEN_USER
	select GENERIC_TIME_VSYSCALL
	select GENERIC_GETTIMEOFDAY
	select GENERIC_VDSO_TIME_NS
	select HANDLE_DOMAIN_IRQ
	select HARDIRQS_SW_RESEND
	select HAVE_MOVE_PMD
	select HAVE_MOVE_PUD
	select HAVE_PCI
	select HAVE_ACPI_APEI if (ACPI && EFI)
	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
	select HAVE_ARCH_AUDITSYSCALL
	select HAVE_ARCH_BITREVERSE
	select HAVE_ARCH_COMPILER_H
	select HAVE_ARCH_HUGE_VMAP
	select HAVE_ARCH_JUMP_LABEL
	select HAVE_ARCH_JUMP_LABEL_RELATIVE
	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
	select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
	select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
	select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
	select HAVE_ARCH_KFENCE
	select HAVE_ARCH_KGDB
	select HAVE_ARCH_MMAP_RND_BITS
	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
	select HAVE_ARCH_PREL32_RELOCATIONS
	select HAVE_ARCH_SECCOMP_FILTER
	select HAVE_ARCH_STACKLEAK
	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
	select HAVE_ARCH_TRACEHOOK
	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select HAVE_ARCH_VMAP_STACK
	select HAVE_ARM_SMCCC
	select HAVE_ASM_MODVERSIONS
	select HAVE_EBPF_JIT
	select HAVE_C_RECORDMCOUNT
	select HAVE_CMPXCHG_DOUBLE
	select HAVE_CMPXCHG_LOCAL
	select HAVE_CONTEXT_TRACKING
	select HAVE_DEBUG_BUGVERBOSE
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DMA_CONTIGUOUS
	select HAVE_DYNAMIC_FTRACE
	select HAVE_DYNAMIC_FTRACE_WITH_REGS \
		if $(cc-option,-fpatchable-function-entry=2)
	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
		if DYNAMIC_FTRACE_WITH_REGS
	select HAVE_EFFICIENT_UNALIGNED_ACCESS
	select HAVE_FAST_GUP
	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_FUNCTION_TRACER
	select HAVE_FUNCTION_ERROR_INJECTION
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_GCC_PLUGINS
	select HAVE_HW_BREAKPOINT if PERF_EVENTS
	select HAVE_IRQ_TIME_ACCOUNTING
	select HAVE_NMI
	select HAVE_PATA_PLATFORM
	select HAVE_PERF_EVENTS
	select HAVE_PERF_REGS
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_FUNCTION_ARG_ACCESS_API
	select HAVE_FUTEX_CMPXCHG if FUTEX
	select MMU_GATHER_RCU_TABLE_FREE
	select HAVE_RSEQ
	select HAVE_STACKPROTECTOR
	select HAVE_SYSCALL_TRACEPOINTS
	select HAVE_KPROBES
	select HAVE_KRETPROBES
	select HAVE_GENERIC_VDSO
	select IOMMU_DMA if IOMMU_SUPPORT
	select IRQ_DOMAIN
	select IRQ_FORCED_THREADING
	select KASAN_VMALLOC if KASAN_GENERIC
	select MODULES_USE_ELF_RELA
	select NEED_DMA_MAP_STATE
	select NEED_SG_DMA_LENGTH
	select OF
	select OF_EARLY_FLATTREE
	select PCI_DOMAINS_GENERIC if PCI
	select PCI_ECAM if (ACPI && PCI)
	select PCI_SYSCALL if PCI
	select POWER_RESET
	select POWER_SUPPLY
	select SET_FS
	select SPARSE_IRQ
	select SWIOTLB
	select SYSCTL_EXCEPTION_TRACE
	select THREAD_INFO_IN_TASK
	select ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
	select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
	help
	  ARM 64-bit (AArch64) Linux support.
218*4882a593Smuzhiyun
config 64BIT
	def_bool y

config MMU
	def_bool y

# PAGE_SHIFT follows the configured base page size: 64K, 16K or 4K.
config ARM64_PAGE_SHIFT
	int
	default 16 if ARM64_64K_PAGES
	default 14 if ARM64_16K_PAGES
	default 12

config ARM64_CONT_PTE_SHIFT
	int
	default 5 if ARM64_64K_PAGES
	default 7 if ARM64_16K_PAGES
	default 4

config ARM64_CONT_PMD_SHIFT
	int
	default 5 if ARM64_64K_PAGES
	default 5 if ARM64_16K_PAGES
	default 4
242*4882a593Smuzhiyun
config ARCH_MMAP_RND_BITS_MIN
       default 14 if ARM64_64K_PAGES
       default 16 if ARM64_16K_PAGES
       default 18

# max bits determined by the following formula:
#  VA_BITS - PAGE_SHIFT - 3
config ARCH_MMAP_RND_BITS_MAX
       default 19 if ARM64_VA_BITS=36
       default 24 if ARM64_VA_BITS=39
       default 27 if ARM64_VA_BITS=42
       default 30 if ARM64_VA_BITS=47
       default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
       default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
       default 33 if ARM64_VA_BITS=48
       default 14 if ARM64_64K_PAGES
       default 16 if ARM64_16K_PAGES
       default 18

config ARCH_MMAP_RND_COMPAT_BITS_MIN
       default 7 if ARM64_64K_PAGES
       default 9 if ARM64_16K_PAGES
       default 11

config ARCH_MMAP_RND_COMPAT_BITS_MAX
       default 16
269*4882a593Smuzhiyun
config NO_IOPORT_MAP
	def_bool y if !PCI

config STACKTRACE_SUPPORT
	def_bool y

config ILLEGAL_POINTER_VALUE
	hex
	default 0xdead000000000000

config LOCKDEP_SUPPORT
	def_bool y

config TRACE_IRQFLAGS_SUPPORT
	def_bool y

config GENERIC_BUG
	def_bool y
	depends on BUG

config GENERIC_BUG_RELATIVE_POINTERS
	def_bool y
	depends on GENERIC_BUG

config GENERIC_HWEIGHT
	def_bool y

config GENERIC_CSUM
        def_bool y

config GENERIC_CALIBRATE_DELAY
	def_bool y
302*4882a593Smuzhiyun
config ZONE_DMA
	bool "Support DMA zone" if EXPERT
	default y

config ZONE_DMA32
	bool "Support DMA32 zone" if EXPERT
	default y

config ARCH_ENABLE_MEMORY_HOTPLUG
	def_bool y

config ARCH_ENABLE_MEMORY_HOTREMOVE
	def_bool y

config SMP
	def_bool y

config KERNEL_MODE_NEON
	def_bool y

config FIX_EARLYCON_MEM
	def_bool y

config PGTABLE_LEVELS
	int
	default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36
	default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
	default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
	default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
	default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
	default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
334*4882a593Smuzhiyun
config ARCH_SUPPORTS_UPROBES
	def_bool y

config ARCH_PROC_KCORE_TEXT
	def_bool y

config BROKEN_GAS_INST
	def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)

# Pre-computed shadow offsets for each VA_BITS / KASAN mode combination.
config KASAN_SHADOW_OFFSET
	hex
	depends on KASAN_GENERIC || KASAN_SW_TAGS
	default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
	default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
	default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
	default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
	default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
	default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
	default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
	default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
	default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
	default 0xeffffff900000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
	default 0xffffffffffffffff
358*4882a593Smuzhiyun
source "arch/arm64/Kconfig.platforms"

menu "Kernel Features"

menu "ARM errata workarounds via the alternatives framework"

config ARM64_WORKAROUND_CLEAN_CACHE
	bool
367*4882a593Smuzhiyun
config ARM64_ERRATUM_826319
	bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted"
	default y
	select ARM64_WORKAROUND_CLEAN_CACHE
	help
	  This option adds an alternative code sequence to work around ARM
	  erratum 826319 on Cortex-A53 parts up to r0p2 with an AMBA 4 ACE or
	  AXI master interface and an L2 cache.

	  If a Cortex-A53 uses an AMBA AXI4 ACE interface to other processors
	  and is unable to accept a certain write via this interface, it will
	  not progress on read data presented on the read data channel and the
	  system can deadlock.

	  The workaround promotes data cache clean instructions to
	  data cache clean-and-invalidate.
	  Please note that this does not necessarily enable the workaround,
	  as it depends on the alternative framework, which will only patch
	  the kernel if an affected CPU is detected.

	  If unsure, say Y.

config ARM64_ERRATUM_827319
	bool "Cortex-A53: 827319: Data cache clean instructions might cause overlapping transactions to the interconnect"
	default y
	select ARM64_WORKAROUND_CLEAN_CACHE
	help
	  This option adds an alternative code sequence to work around ARM
	  erratum 827319 on Cortex-A53 parts up to r0p2 with an AMBA 5 CHI
	  master interface and an L2 cache.

	  Under certain conditions this erratum can cause a clean line eviction
	  to occur at the same time as another transaction to the same address
	  on the AMBA 5 CHI interface, which can cause data corruption if the
	  interconnect reorders the two transactions.

	  The workaround promotes data cache clean instructions to
	  data cache clean-and-invalidate.
	  Please note that this does not necessarily enable the workaround,
	  as it depends on the alternative framework, which will only patch
	  the kernel if an affected CPU is detected.

	  If unsure, say Y.
411*4882a593Smuzhiyun
config ARM64_ERRATUM_824069
	bool "Cortex-A53: 824069: Cache line might not be marked as clean after a CleanShared snoop"
	default y
	select ARM64_WORKAROUND_CLEAN_CACHE
	help
	  This option adds an alternative code sequence to work around ARM
	  erratum 824069 on Cortex-A53 parts up to r0p2 when it is connected
	  to a coherent interconnect.

	  If a Cortex-A53 processor is executing a store or prefetch for
	  write instruction at the same time as a processor in another
	  cluster is executing a cache maintenance operation to the same
	  address, then this erratum might cause a clean cache line to be
	  incorrectly marked as dirty.

	  The workaround promotes data cache clean instructions to
	  data cache clean-and-invalidate.
	  Please note that this option does not necessarily enable the
	  workaround, as it depends on the alternative framework, which will
	  only patch the kernel if an affected CPU is detected.

	  If unsure, say Y.

config ARM64_ERRATUM_819472
	bool "Cortex-A53: 819472: Store exclusive instructions might cause data corruption"
	default y
	select ARM64_WORKAROUND_CLEAN_CACHE
	help
	  This option adds an alternative code sequence to work around ARM
	  erratum 819472 on Cortex-A53 parts up to r0p1 with an L2 cache
	  present when it is connected to a coherent interconnect.

	  If the processor is executing a load and store exclusive sequence at
	  the same time as a processor in another cluster is executing a cache
	  maintenance operation to the same address, then this erratum might
	  cause data corruption.

	  The workaround promotes data cache clean instructions to
	  data cache clean-and-invalidate.
	  Please note that this does not necessarily enable the workaround,
	  as it depends on the alternative framework, which will only patch
	  the kernel if an affected CPU is detected.

	  If unsure, say Y.
456*4882a593Smuzhiyun
config ARM64_ERRATUM_832075
	bool "Cortex-A57: 832075: possible deadlock on mixing exclusive memory accesses with device loads"
	default y
	help
	  This option adds an alternative code sequence to work around ARM
	  erratum 832075 on Cortex-A57 parts up to r1p2.

	  Affected Cortex-A57 parts might deadlock when exclusive load/store
	  instructions to Write-Back memory are mixed with Device loads.

	  The workaround is to promote device loads to use Load-Acquire
	  semantics.
	  Please note that this does not necessarily enable the workaround,
	  as it depends on the alternative framework, which will only patch
	  the kernel if an affected CPU is detected.

	  If unsure, say Y.

config ARM64_ERRATUM_834220
	bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
	depends on KVM
	default y
	help
	  This option adds an alternative code sequence to work around ARM
	  erratum 834220 on Cortex-A57 parts up to r1p2.

	  Affected Cortex-A57 parts might report a Stage 2 translation
	  fault as the result of a Stage 1 fault for load crossing a
	  page boundary when there is a permission or device memory
	  alignment fault at Stage 1 and a translation fault at Stage 2.

	  The workaround is to verify that the Stage 1 translation
	  doesn't generate a fault before handling the Stage 2 fault.
	  Please note that this does not necessarily enable the workaround,
	  as it depends on the alternative framework, which will only patch
	  the kernel if an affected CPU is detected.

	  If unsure, say Y.
495*4882a593Smuzhiyun
config ARM64_ERRATUM_1742098
	bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
	depends on COMPAT
	default y
	help
	  This option removes the AES hwcap for aarch32 user-space to
	  workaround erratum 1742098 on Cortex-A57 and Cortex-A72.

	  Affected parts may corrupt the AES state if an interrupt is
	  taken between a pair of AES instructions. These instructions
	  are only present if the cryptography extensions are present.
	  All software should have a fallback implementation for CPUs
	  that don't implement the cryptography extensions.

	  If unsure, say Y.

config ARM64_ERRATUM_845719
	bool "Cortex-A53: 845719: a load might read incorrect data"
	depends on COMPAT
	default y
	help
	  This option adds an alternative code sequence to work around ARM
	  erratum 845719 on Cortex-A53 parts up to r0p4.

	  When running a compat (AArch32) userspace on an affected Cortex-A53
	  part, a load at EL0 from a virtual address that matches the bottom 32
	  bits of the virtual address used by a recent load at (AArch64) EL1
	  might return incorrect data.

	  The workaround is to write the contextidr_el1 register on exception
	  return to a 32-bit task.
	  Please note that this does not necessarily enable the workaround,
	  as it depends on the alternative framework, which will only patch
	  the kernel if an affected CPU is detected.

	  If unsure, say Y.
532*4882a593Smuzhiyun
config ARM64_ERRATUM_843419
	bool "Cortex-A53: 843419: A load or store might access an incorrect address"
	default y
	select ARM64_MODULE_PLTS if MODULES
	help
	  This option links the kernel with '--fix-cortex-a53-843419' and
	  enables PLT support to replace certain ADRP instructions, which can
	  cause subsequent memory accesses to use an incorrect address on
	  Cortex-A53 parts up to r0p4.

	  If unsure, say Y.

config ARM64_ERRATUM_1024718
	bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
	default y
	help
	  This option adds a workaround for ARM Cortex-A55 Erratum 1024718.

	  Affected Cortex-A55 cores (all revisions) could cause incorrect
	  update of the hardware dirty bit when the DBM/AP bits are updated
	  without a break-before-make. The workaround is to disable the usage
	  of hardware DBM locally on the affected cores. CPUs not affected by
	  this erratum will continue to use the feature.

	  If unsure, say Y.
558*4882a593Smuzhiyun
config ARM64_ERRATUM_1418040
	bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
	default y
	depends on COMPAT
	help
	  This option adds a workaround for ARM Cortex-A76/Neoverse-N1
	  errata 1188873 and 1418040.

	  Affected Cortex-A76/Neoverse-N1 cores (r0p0 to r3p1) could
	  cause register corruption when accessing the timer registers
	  from AArch32 userspace.

	  If unsure, say Y.

config ARM64_WORKAROUND_SPECULATIVE_AT
	bool

config ARM64_ERRATUM_1165522
	bool "Cortex-A76: 1165522: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
	default y
	select ARM64_WORKAROUND_SPECULATIVE_AT
	help
	  This option adds a workaround for ARM Cortex-A76 erratum 1165522.

	  Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could end-up with
	  corrupted TLBs by speculating an AT instruction during a guest
	  context switch.

	  If unsure, say Y.
588*4882a593Smuzhiyun
589*4882a593Smuzhiyunconfig ARM64_ERRATUM_1319367
590*4882a593Smuzhiyun	bool "Cortex-A57/A72: 1319537: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
591*4882a593Smuzhiyun	default y
592*4882a593Smuzhiyun	select ARM64_WORKAROUND_SPECULATIVE_AT
593*4882a593Smuzhiyun	help
594*4882a593Smuzhiyun	  This option adds workarounds for ARM Cortex-A57 erratum 1319537
595*4882a593Smuzhiyun	  and A72 erratum 1319367.
596*4882a593Smuzhiyun
597*4882a593Smuzhiyun	  Cortex-A57 and A72 cores could end-up with corrupted TLBs by
598*4882a593Smuzhiyun	  speculating an AT instruction during a guest context switch.
599*4882a593Smuzhiyun
600*4882a593Smuzhiyun	  If unsure, say Y.
601*4882a593Smuzhiyun
602*4882a593Smuzhiyunconfig ARM64_ERRATUM_1530923
603*4882a593Smuzhiyun	bool "Cortex-A55: 1530923: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
604*4882a593Smuzhiyun	default y
605*4882a593Smuzhiyun	select ARM64_WORKAROUND_SPECULATIVE_AT
606*4882a593Smuzhiyun	help
607*4882a593Smuzhiyun	  This option adds a workaround for ARM Cortex-A55 erratum 1530923.
608*4882a593Smuzhiyun
609*4882a593Smuzhiyun	  Affected Cortex-A55 cores (r0p0, r0p1, r1p0, r2p0) could end-up with
610*4882a593Smuzhiyun	  corrupted TLBs by speculating an AT instruction during a guest
611*4882a593Smuzhiyun	  context switch.
612*4882a593Smuzhiyun
613*4882a593Smuzhiyun	  If unsure, say Y.
614*4882a593Smuzhiyun
615*4882a593Smuzhiyunconfig ARM64_WORKAROUND_REPEAT_TLBI
616*4882a593Smuzhiyun	bool
617*4882a593Smuzhiyun
618*4882a593Smuzhiyunconfig ARM64_ERRATUM_1286807
619*4882a593Smuzhiyun	bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
620*4882a593Smuzhiyun	default y
621*4882a593Smuzhiyun	select ARM64_WORKAROUND_REPEAT_TLBI
622*4882a593Smuzhiyun	help
623*4882a593Smuzhiyun	  This option adds a workaround for ARM Cortex-A76 erratum 1286807.
624*4882a593Smuzhiyun
625*4882a593Smuzhiyun	  On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual
626*4882a593Smuzhiyun	  address for a cacheable mapping of a location is being
627*4882a593Smuzhiyun	  accessed by a core while another core is remapping the virtual
628*4882a593Smuzhiyun	  address to a new physical page using the recommended
629*4882a593Smuzhiyun	  break-before-make sequence, then under very rare circumstances
630*4882a593Smuzhiyun	  TLBI+DSB completes before a read using the translation being
631*4882a593Smuzhiyun	  invalidated has been observed by other observers. The
632*4882a593Smuzhiyun	  workaround repeats the TLBI+DSB operation.
633*4882a593Smuzhiyun
634*4882a593Smuzhiyunconfig ARM64_ERRATUM_1463225
635*4882a593Smuzhiyun	bool "Cortex-A76: Software Step might prevent interrupt recognition"
636*4882a593Smuzhiyun	default y
637*4882a593Smuzhiyun	help
638*4882a593Smuzhiyun	  This option adds a workaround for Arm Cortex-A76 erratum 1463225.
639*4882a593Smuzhiyun
640*4882a593Smuzhiyun	  On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
641*4882a593Smuzhiyun	  of a system call instruction (SVC) can prevent recognition of
642*4882a593Smuzhiyun	  subsequent interrupts when software stepping is disabled in the
643*4882a593Smuzhiyun	  exception handler of the system call and either kernel debugging
644*4882a593Smuzhiyun	  is enabled or VHE is in use.
645*4882a593Smuzhiyun
646*4882a593Smuzhiyun	  Work around the erratum by triggering a dummy step exception
647*4882a593Smuzhiyun	  when handling a system call from a task that is being stepped
648*4882a593Smuzhiyun	  in a VHE configuration of the kernel.
649*4882a593Smuzhiyun
650*4882a593Smuzhiyun	  If unsure, say Y.
651*4882a593Smuzhiyun
652*4882a593Smuzhiyunconfig ARM64_ERRATUM_1542419
653*4882a593Smuzhiyun	bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
654*4882a593Smuzhiyun	default y
655*4882a593Smuzhiyun	help
656*4882a593Smuzhiyun	  This option adds a workaround for ARM Neoverse-N1 erratum
657*4882a593Smuzhiyun	  1542419.
658*4882a593Smuzhiyun
659*4882a593Smuzhiyun	  Affected Neoverse-N1 cores could execute a stale instruction when
660*4882a593Smuzhiyun	  modified by another CPU. The workaround depends on a firmware
661*4882a593Smuzhiyun	  counterpart.
662*4882a593Smuzhiyun
663*4882a593Smuzhiyun	  Workaround the issue by hiding the DIC feature from EL0. This
664*4882a593Smuzhiyun	  forces user-space to perform cache maintenance.
665*4882a593Smuzhiyun
666*4882a593Smuzhiyun	  If unsure, say Y.
667*4882a593Smuzhiyun
668*4882a593Smuzhiyunconfig ARM64_ERRATUM_1508412
669*4882a593Smuzhiyun	bool "Cortex-A77: 1508412: workaround deadlock on sequence of NC/Device load and store exclusive or PAR read"
670*4882a593Smuzhiyun	default y
671*4882a593Smuzhiyun	help
672*4882a593Smuzhiyun	  This option adds a workaround for Arm Cortex-A77 erratum 1508412.
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun	  Affected Cortex-A77 cores (r0p0, r1p0) could deadlock on a sequence
675*4882a593Smuzhiyun	  of a store-exclusive or read of PAR_EL1 and a load with device or
676*4882a593Smuzhiyun	  non-cacheable memory attributes. The workaround depends on a firmware
677*4882a593Smuzhiyun	  counterpart.
678*4882a593Smuzhiyun
679*4882a593Smuzhiyun	  KVM guests must also have the workaround implemented or they can
680*4882a593Smuzhiyun	  deadlock the system.
681*4882a593Smuzhiyun
682*4882a593Smuzhiyun	  Work around the issue by inserting DMB SY barriers around PAR_EL1
683*4882a593Smuzhiyun	  register reads and warning KVM users. The DMB barrier is sufficient
684*4882a593Smuzhiyun	  to prevent a speculative PAR_EL1 read.
685*4882a593Smuzhiyun
686*4882a593Smuzhiyun	  If unsure, say Y.
687*4882a593Smuzhiyun
688*4882a593Smuzhiyunconfig ARM64_ERRATUM_2051678
689*4882a593Smuzhiyun	bool "Cortex-A510: 2051678: disable Hardware Update of the page table's dirty bit"
690*4882a593Smuzhiyun	default y
691*4882a593Smuzhiyun	help
692*4882a593Smuzhiyun	  This option adds the workaround for ARM Cortex-A510 erratum 2051678.
693*4882a593Smuzhiyun	  Affected Cortex-A510 might not respect the ordering rules for
694*4882a593Smuzhiyun	  hardware update of the page table's dirty bit. The workaround
695*4882a593Smuzhiyun	  is to not enable the feature on affected CPUs.
696*4882a593Smuzhiyun
697*4882a593Smuzhiyun	  If unsure, say Y.
698*4882a593Smuzhiyun
699*4882a593Smuzhiyunconfig ARM64_WORKAROUND_TSB_FLUSH_FAILURE
700*4882a593Smuzhiyun	bool
701*4882a593Smuzhiyun
702*4882a593Smuzhiyunconfig ARM64_ERRATUM_2054223
703*4882a593Smuzhiyun	bool "Cortex-A710: 2054223: workaround TSB instruction failing to flush trace"
704*4882a593Smuzhiyun	default y
705*4882a593Smuzhiyun	select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
706*4882a593Smuzhiyun	help
707*4882a593Smuzhiyun	  Enable workaround for ARM Cortex-A710 erratum 2054223
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun	  Affected cores may fail to flush the trace data on a TSB instruction, when
710*4882a593Smuzhiyun	  the PE is in trace prohibited state. This will cause losing a few bytes
711*4882a593Smuzhiyun	  of the trace cached.
712*4882a593Smuzhiyun
713*4882a593Smuzhiyun	  Workaround is to issue two TSB consecutively on affected cores.
714*4882a593Smuzhiyun
715*4882a593Smuzhiyun	  If unsure, say Y.
716*4882a593Smuzhiyun
717*4882a593Smuzhiyunconfig ARM64_ERRATUM_2067961
718*4882a593Smuzhiyun	bool "Neoverse-N2: 2067961: workaround TSB instruction failing to flush trace"
719*4882a593Smuzhiyun	default y
720*4882a593Smuzhiyun	select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
721*4882a593Smuzhiyun	help
722*4882a593Smuzhiyun	  Enable workaround for ARM Neoverse-N2 erratum 2067961
723*4882a593Smuzhiyun
724*4882a593Smuzhiyun	  Affected cores may fail to flush the trace data on a TSB instruction, when
725*4882a593Smuzhiyun	  the PE is in trace prohibited state. This will cause losing a few bytes
726*4882a593Smuzhiyun	  of the trace cached.
727*4882a593Smuzhiyun
728*4882a593Smuzhiyun	  Workaround is to issue two TSB consecutively on affected cores.
729*4882a593Smuzhiyun
730*4882a593Smuzhiyun	  If unsure, say Y.
731*4882a593Smuzhiyun
732*4882a593Smuzhiyunconfig ARM64_ERRATUM_2454944
733*4882a593Smuzhiyun	bool "Cortex-A510: 2454944: Unmodified cache line might be written back to memory"
734*4882a593Smuzhiyun	select ARCH_HAS_TEARDOWN_DMA_OPS
735*4882a593Smuzhiyun	select RODATA_FULL_DEFAULT_ENABLED
736*4882a593Smuzhiyun	help
737*4882a593Smuzhiyun	  This option adds the workaround for ARM Cortex-A510 erratum 2454944.
738*4882a593Smuzhiyun
739*4882a593Smuzhiyun	  Affected Cortex-A510 core might write unmodified cache lines back to
740*4882a593Smuzhiyun	  memory, which breaks the assumptions upon which software coherency
741*4882a593Smuzhiyun	  management for non-coherent DMA relies. If a cache line is
742*4882a593Smuzhiyun	  speculatively fetched while a non-coherent device is writing directly
743*4882a593Smuzhiyun	  to DRAM, and subsequently written back by natural eviction, data
744*4882a593Smuzhiyun	  written by the device in the intervening period can be lost.
745*4882a593Smuzhiyun
746*4882a593Smuzhiyun	  The workaround is to enforce as far as reasonably possible that all
747*4882a593Smuzhiyun	  non-coherent DMA transfers are bounced and/or remapped to minimise
748*4882a593Smuzhiyun	  the chance that any Cacheable alias exists through which speculative
749*4882a593Smuzhiyun	  cache fills could occur. To further improve effectiveness of
750*4882a593Smuzhiyun	  the workaround, lazy TLB flushing should be disabled.
751*4882a593Smuzhiyun
752*4882a593Smuzhiyun	  This is quite involved and has unavoidable performance impact on
753*4882a593Smuzhiyun	  affected systems.
754*4882a593Smuzhiyun
755*4882a593Smuzhiyunconfig ARM64_ERRATUM_2457168
756*4882a593Smuzhiyun	bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
757*4882a593Smuzhiyun	depends on ARM64_AMU_EXTN
758*4882a593Smuzhiyun	default y
759*4882a593Smuzhiyun	help
760*4882a593Smuzhiyun	  This option adds the workaround for ARM Cortex-A510 erratum 2457168.
761*4882a593Smuzhiyun
762*4882a593Smuzhiyun	  The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate
763*4882a593Smuzhiyun	  as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments
764*4882a593Smuzhiyun	  incorrectly giving a significantly higher output value.
765*4882a593Smuzhiyun
766*4882a593Smuzhiyun	  Work around this problem by keeping the reference values of affected counters
767*4882a593Smuzhiyun	  to 0 thus signaling an error case. This effect is the same as firmware disabling
768*4882a593Smuzhiyun	  affected counters, in which case 0 will be returned when reading the disabled
769*4882a593Smuzhiyun	  counters.
770*4882a593Smuzhiyun
771*4882a593Smuzhiyun	  If unsure, say Y.
772*4882a593Smuzhiyun
773*4882a593Smuzhiyunconfig CAVIUM_ERRATUM_22375
774*4882a593Smuzhiyun	bool "Cavium erratum 22375, 24313"
775*4882a593Smuzhiyun	default y
776*4882a593Smuzhiyun	help
777*4882a593Smuzhiyun	  Enable workaround for errata 22375 and 24313.
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun	  This implements two gicv3-its errata workarounds for ThunderX. Both
780*4882a593Smuzhiyun	  with a small impact affecting only ITS table allocation.
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun	    erratum 22375: only alloc 8MB table size
783*4882a593Smuzhiyun	    erratum 24313: ignore memory access type
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun	  The fixes are in ITS initialization and basically ignore memory access
786*4882a593Smuzhiyun	  type and table size provided by the TYPER and BASER registers.
787*4882a593Smuzhiyun
788*4882a593Smuzhiyun	  If unsure, say Y.
789*4882a593Smuzhiyun
790*4882a593Smuzhiyunconfig CAVIUM_ERRATUM_23144
791*4882a593Smuzhiyun	bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
792*4882a593Smuzhiyun	depends on NUMA
793*4882a593Smuzhiyun	default y
794*4882a593Smuzhiyun	help
795*4882a593Smuzhiyun	  ITS SYNC command hang for cross node io and collections/cpu mapping.
796*4882a593Smuzhiyun
797*4882a593Smuzhiyun	  If unsure, say Y.
798*4882a593Smuzhiyun
799*4882a593Smuzhiyunconfig CAVIUM_ERRATUM_23154
800*4882a593Smuzhiyun	bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
801*4882a593Smuzhiyun	default y
802*4882a593Smuzhiyun	help
803*4882a593Smuzhiyun	  The gicv3 of ThunderX requires a modified version for
804*4882a593Smuzhiyun	  reading the IAR status to ensure data synchronization
805*4882a593Smuzhiyun	  (access to icc_iar1_el1 is not sync'ed before and after).
806*4882a593Smuzhiyun
807*4882a593Smuzhiyun	  If unsure, say Y.
808*4882a593Smuzhiyun
809*4882a593Smuzhiyunconfig CAVIUM_ERRATUM_27456
810*4882a593Smuzhiyun	bool "Cavium erratum 27456: Broadcast TLBI instructions may cause icache corruption"
811*4882a593Smuzhiyun	default y
812*4882a593Smuzhiyun	help
813*4882a593Smuzhiyun	  On ThunderX T88 pass 1.x through 2.1 parts, broadcast TLBI
814*4882a593Smuzhiyun	  instructions may cause the icache to become corrupted if it
815*4882a593Smuzhiyun	  contains data for a non-current ASID.  The fix is to
816*4882a593Smuzhiyun	  invalidate the icache when changing the mm context.
817*4882a593Smuzhiyun
818*4882a593Smuzhiyun	  If unsure, say Y.
819*4882a593Smuzhiyun
820*4882a593Smuzhiyunconfig CAVIUM_ERRATUM_30115
821*4882a593Smuzhiyun	bool "Cavium erratum 30115: Guest may disable interrupts in host"
822*4882a593Smuzhiyun	default y
823*4882a593Smuzhiyun	help
824*4882a593Smuzhiyun	  On ThunderX T88 pass 1.x through 2.2, T81 pass 1.0 through
825*4882a593Smuzhiyun	  1.2, and T83 Pass 1.0, KVM guest execution may disable
826*4882a593Smuzhiyun	  interrupts in host. Trapping both GICv3 group-0 and group-1
827*4882a593Smuzhiyun	  accesses sidesteps the issue.
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun	  If unsure, say Y.
830*4882a593Smuzhiyun
831*4882a593Smuzhiyunconfig CAVIUM_TX2_ERRATUM_219
832*4882a593Smuzhiyun	bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
833*4882a593Smuzhiyun	default y
834*4882a593Smuzhiyun	help
835*4882a593Smuzhiyun	  On Cavium ThunderX2, a load, store or prefetch instruction between a
836*4882a593Smuzhiyun	  TTBR update and the corresponding context synchronizing operation can
837*4882a593Smuzhiyun	  cause a spurious Data Abort to be delivered to any hardware thread in
838*4882a593Smuzhiyun	  the CPU core.
839*4882a593Smuzhiyun
840*4882a593Smuzhiyun	  Work around the issue by avoiding the problematic code sequence and
841*4882a593Smuzhiyun	  trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
842*4882a593Smuzhiyun	  trap handler performs the corresponding register access, skips the
843*4882a593Smuzhiyun	  instruction and ensures context synchronization by virtue of the
844*4882a593Smuzhiyun	  exception return.
845*4882a593Smuzhiyun
846*4882a593Smuzhiyun	  If unsure, say Y.
847*4882a593Smuzhiyun
848*4882a593Smuzhiyunconfig FUJITSU_ERRATUM_010001
849*4882a593Smuzhiyun	bool "Fujitsu-A64FX erratum E#010001: Undefined fault may occur wrongly"
850*4882a593Smuzhiyun	default y
851*4882a593Smuzhiyun	help
852*4882a593Smuzhiyun	  This option adds a workaround for Fujitsu-A64FX erratum E#010001.
853*4882a593Smuzhiyun	  On some variants of the Fujitsu-A64FX cores ver(1.0, 1.1), memory
854*4882a593Smuzhiyun	  accesses may cause undefined fault (Data abort, DFSC=0b111111).
855*4882a593Smuzhiyun	  This fault occurs under a specific hardware condition when a
856*4882a593Smuzhiyun	  load/store instruction performs an address translation using:
857*4882a593Smuzhiyun	  case-1  TTBR0_EL1 with TCR_EL1.NFD0 == 1.
858*4882a593Smuzhiyun	  case-2  TTBR0_EL2 with TCR_EL2.NFD0 == 1.
859*4882a593Smuzhiyun	  case-3  TTBR1_EL1 with TCR_EL1.NFD1 == 1.
860*4882a593Smuzhiyun	  case-4  TTBR1_EL2 with TCR_EL2.NFD1 == 1.
861*4882a593Smuzhiyun
862*4882a593Smuzhiyun	  The workaround is to ensure these bits are clear in TCR_ELx.
863*4882a593Smuzhiyun	  The workaround only affects the Fujitsu-A64FX.
864*4882a593Smuzhiyun
865*4882a593Smuzhiyun	  If unsure, say Y.
866*4882a593Smuzhiyun
867*4882a593Smuzhiyunconfig HISILICON_ERRATUM_161600802
868*4882a593Smuzhiyun	bool "Hip07 161600802: Erroneous redistributor VLPI base"
869*4882a593Smuzhiyun	default y
870*4882a593Smuzhiyun	help
871*4882a593Smuzhiyun	  The HiSilicon Hip07 SoC uses the wrong redistributor base
872*4882a593Smuzhiyun	  when issued ITS commands such as VMOVP and VMAPP, and requires
873*4882a593Smuzhiyun	  a 128kB offset to be applied to the target address in these commands.
874*4882a593Smuzhiyun
875*4882a593Smuzhiyun	  If unsure, say Y.
876*4882a593Smuzhiyun
877*4882a593Smuzhiyunconfig QCOM_FALKOR_ERRATUM_1003
878*4882a593Smuzhiyun	bool "Falkor E1003: Incorrect translation due to ASID change"
879*4882a593Smuzhiyun	default y
880*4882a593Smuzhiyun	help
881*4882a593Smuzhiyun	  On Falkor v1, an incorrect ASID may be cached in the TLB when ASID
882*4882a593Smuzhiyun	  and BADDR are changed together in TTBRx_EL1. Since we keep the ASID
883*4882a593Smuzhiyun	  in TTBR1_EL1, this situation only occurs in the entry trampoline and
884*4882a593Smuzhiyun	  then only for entries in the walk cache, since the leaf translation
885*4882a593Smuzhiyun	  is unchanged. Work around the erratum by invalidating the walk cache
886*4882a593Smuzhiyun	  entries for the trampoline before entering the kernel proper.
887*4882a593Smuzhiyun
888*4882a593Smuzhiyunconfig QCOM_FALKOR_ERRATUM_1009
889*4882a593Smuzhiyun	bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
890*4882a593Smuzhiyun	default y
891*4882a593Smuzhiyun	select ARM64_WORKAROUND_REPEAT_TLBI
892*4882a593Smuzhiyun	help
893*4882a593Smuzhiyun	  On Falkor v1, the CPU may prematurely complete a DSB following a
894*4882a593Smuzhiyun	  TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation
895*4882a593Smuzhiyun	  one more time to fix the issue.
896*4882a593Smuzhiyun
897*4882a593Smuzhiyun	  If unsure, say Y.
898*4882a593Smuzhiyun
899*4882a593Smuzhiyunconfig QCOM_QDF2400_ERRATUM_0065
900*4882a593Smuzhiyun	bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
901*4882a593Smuzhiyun	default y
902*4882a593Smuzhiyun	help
903*4882a593Smuzhiyun	  On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
904*4882a593Smuzhiyun	  ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
905*4882a593Smuzhiyun	  been indicated as 16Bytes (0xf), not 8Bytes (0x7).
906*4882a593Smuzhiyun
907*4882a593Smuzhiyun	  If unsure, say Y.
908*4882a593Smuzhiyun
909*4882a593Smuzhiyunconfig QCOM_FALKOR_ERRATUM_E1041
910*4882a593Smuzhiyun	bool "Falkor E1041: Speculative instruction fetches might cause errant memory access"
911*4882a593Smuzhiyun	default y
912*4882a593Smuzhiyun	help
913*4882a593Smuzhiyun	  Falkor CPU may speculatively fetch instructions from an improper
914*4882a593Smuzhiyun	  memory location when MMU translation is changed from SCTLR_ELn[M]=1
915*4882a593Smuzhiyun	  to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem.
916*4882a593Smuzhiyun
917*4882a593Smuzhiyun	  If unsure, say Y.
918*4882a593Smuzhiyun
919*4882a593Smuzhiyunconfig SOCIONEXT_SYNQUACER_PREITS
920*4882a593Smuzhiyun	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
921*4882a593Smuzhiyun	default y
922*4882a593Smuzhiyun	help
923*4882a593Smuzhiyun	  Socionext Synquacer SoCs implement a separate h/w block to generate
924*4882a593Smuzhiyun	  MSI doorbell writes with non-zero values for the device ID.
925*4882a593Smuzhiyun
926*4882a593Smuzhiyun	  If unsure, say Y.
927*4882a593Smuzhiyun
928*4882a593Smuzhiyunendmenu
929*4882a593Smuzhiyun
930*4882a593Smuzhiyun
931*4882a593Smuzhiyunchoice
932*4882a593Smuzhiyun	prompt "Page size"
933*4882a593Smuzhiyun	default ARM64_4K_PAGES
934*4882a593Smuzhiyun	help
935*4882a593Smuzhiyun	  Page size (translation granule) configuration.
936*4882a593Smuzhiyun
937*4882a593Smuzhiyunconfig ARM64_4K_PAGES
938*4882a593Smuzhiyun	bool "4KB"
939*4882a593Smuzhiyun	help
940*4882a593Smuzhiyun	  This feature enables 4KB pages support.
941*4882a593Smuzhiyun
942*4882a593Smuzhiyunconfig ARM64_16K_PAGES
943*4882a593Smuzhiyun	bool "16KB"
944*4882a593Smuzhiyun	help
945*4882a593Smuzhiyun	  The system will use 16KB pages support. AArch32 emulation
946*4882a593Smuzhiyun	  requires applications compiled with 16K (or a multiple of 16K)
947*4882a593Smuzhiyun	  aligned segments.
948*4882a593Smuzhiyun
949*4882a593Smuzhiyunconfig ARM64_64K_PAGES
950*4882a593Smuzhiyun	bool "64KB"
951*4882a593Smuzhiyun	help
952*4882a593Smuzhiyun	  This feature enables 64KB pages support (4KB by default)
953*4882a593Smuzhiyun	  allowing only two levels of page tables and faster TLB
954*4882a593Smuzhiyun	  look-up. AArch32 emulation requires applications compiled
955*4882a593Smuzhiyun	  with 64K aligned segments.
956*4882a593Smuzhiyun
957*4882a593Smuzhiyunendchoice
958*4882a593Smuzhiyun
959*4882a593Smuzhiyunchoice
960*4882a593Smuzhiyun	prompt "Virtual address space size"
961*4882a593Smuzhiyun	default ARM64_VA_BITS_39 if ARM64_4K_PAGES
962*4882a593Smuzhiyun	default ARM64_VA_BITS_47 if ARM64_16K_PAGES
963*4882a593Smuzhiyun	default ARM64_VA_BITS_42 if ARM64_64K_PAGES
964*4882a593Smuzhiyun	help
965*4882a593Smuzhiyun	  Allows choosing one of multiple possible virtual address
966*4882a593Smuzhiyun	  space sizes. The level of translation table is determined by
967*4882a593Smuzhiyun	  a combination of page size and virtual address space size.
968*4882a593Smuzhiyun
969*4882a593Smuzhiyunconfig ARM64_VA_BITS_36
970*4882a593Smuzhiyun	bool "36-bit" if EXPERT
971*4882a593Smuzhiyun	depends on ARM64_16K_PAGES
972*4882a593Smuzhiyun
973*4882a593Smuzhiyunconfig ARM64_VA_BITS_39
974*4882a593Smuzhiyun	bool "39-bit"
975*4882a593Smuzhiyun	depends on ARM64_4K_PAGES
976*4882a593Smuzhiyun
977*4882a593Smuzhiyunconfig ARM64_VA_BITS_42
978*4882a593Smuzhiyun	bool "42-bit"
979*4882a593Smuzhiyun	depends on ARM64_64K_PAGES
980*4882a593Smuzhiyun
981*4882a593Smuzhiyunconfig ARM64_VA_BITS_47
982*4882a593Smuzhiyun	bool "47-bit"
983*4882a593Smuzhiyun	depends on ARM64_16K_PAGES
984*4882a593Smuzhiyun
985*4882a593Smuzhiyunconfig ARM64_VA_BITS_48
986*4882a593Smuzhiyun	bool "48-bit"
987*4882a593Smuzhiyun
988*4882a593Smuzhiyunconfig ARM64_VA_BITS_52
989*4882a593Smuzhiyun	bool "52-bit"
990*4882a593Smuzhiyun	depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
991*4882a593Smuzhiyun	help
992*4882a593Smuzhiyun	  Enable 52-bit virtual addressing for userspace when explicitly
993*4882a593Smuzhiyun	  requested via a hint to mmap(). The kernel will also use 52-bit
994*4882a593Smuzhiyun	  virtual addresses for its own mappings (provided HW support for
995*4882a593Smuzhiyun	  this feature is available, otherwise it reverts to 48-bit).
996*4882a593Smuzhiyun
997*4882a593Smuzhiyun	  NOTE: Enabling 52-bit virtual addressing in conjunction with
998*4882a593Smuzhiyun	  ARMv8.3 Pointer Authentication will result in the PAC being
999*4882a593Smuzhiyun	  reduced from 7 bits to 3 bits, which may have a significant
1000*4882a593Smuzhiyun	  impact on its susceptibility to brute-force attacks.
1001*4882a593Smuzhiyun
1002*4882a593Smuzhiyun	  If unsure, select 48-bit virtual addressing instead.
1003*4882a593Smuzhiyun
1004*4882a593Smuzhiyunendchoice
1005*4882a593Smuzhiyun
1006*4882a593Smuzhiyunconfig ARM64_FORCE_52BIT
1007*4882a593Smuzhiyun	bool "Force 52-bit virtual addresses for userspace"
1008*4882a593Smuzhiyun	depends on ARM64_VA_BITS_52 && EXPERT
1009*4882a593Smuzhiyun	help
1010*4882a593Smuzhiyun	  For systems with 52-bit userspace VAs enabled, the kernel will attempt
1011*4882a593Smuzhiyun	  to maintain compatibility with older software by providing 48-bit VAs
1012*4882a593Smuzhiyun	  unless a hint is supplied to mmap.
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun	  This configuration option disables the 48-bit compatibility logic, and
1015*4882a593Smuzhiyun	  forces all userspace addresses to be 52-bit on HW that supports it. One
1016*4882a593Smuzhiyun	  should only enable this configuration option for stress testing userspace
1017*4882a593Smuzhiyun	  memory management code. If unsure say N here.
1018*4882a593Smuzhiyun
1019*4882a593Smuzhiyunconfig ARM64_VA_BITS
1020*4882a593Smuzhiyun	int
1021*4882a593Smuzhiyun	default 36 if ARM64_VA_BITS_36
1022*4882a593Smuzhiyun	default 39 if ARM64_VA_BITS_39
1023*4882a593Smuzhiyun	default 42 if ARM64_VA_BITS_42
1024*4882a593Smuzhiyun	default 47 if ARM64_VA_BITS_47
1025*4882a593Smuzhiyun	default 48 if ARM64_VA_BITS_48
1026*4882a593Smuzhiyun	default 52 if ARM64_VA_BITS_52
1027*4882a593Smuzhiyun
1028*4882a593Smuzhiyunchoice
1029*4882a593Smuzhiyun	prompt "Physical address space size"
1030*4882a593Smuzhiyun	default ARM64_PA_BITS_48
1031*4882a593Smuzhiyun	help
1032*4882a593Smuzhiyun	  Choose the maximum physical address range that the kernel will
1033*4882a593Smuzhiyun	  support.
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyunconfig ARM64_PA_BITS_48
1036*4882a593Smuzhiyun	bool "48-bit"
1037*4882a593Smuzhiyun
1038*4882a593Smuzhiyunconfig ARM64_PA_BITS_52
1039*4882a593Smuzhiyun	bool "52-bit (ARMv8.2)"
1040*4882a593Smuzhiyun	depends on ARM64_64K_PAGES
1041*4882a593Smuzhiyun	depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
1042*4882a593Smuzhiyun	help
1043*4882a593Smuzhiyun	  Enable support for a 52-bit physical address space, introduced as
1044*4882a593Smuzhiyun	  part of the ARMv8.2-LPA extension.
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun	  With this enabled, the kernel will also continue to work on CPUs that
1047*4882a593Smuzhiyun	  do not support ARMv8.2-LPA, but with some added memory overhead (and
1048*4882a593Smuzhiyun	  minor performance overhead).
1049*4882a593Smuzhiyun
1050*4882a593Smuzhiyunendchoice
1051*4882a593Smuzhiyun
1052*4882a593Smuzhiyunconfig ARM64_PA_BITS
1053*4882a593Smuzhiyun	int
1054*4882a593Smuzhiyun	default 48 if ARM64_PA_BITS_48
1055*4882a593Smuzhiyun	default 52 if ARM64_PA_BITS_52
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyunchoice
1058*4882a593Smuzhiyun	prompt "Endianness"
1059*4882a593Smuzhiyun	default CPU_LITTLE_ENDIAN
1060*4882a593Smuzhiyun	help
1061*4882a593Smuzhiyun	  Select the endianness of data accesses performed by the CPU. Userspace
1062*4882a593Smuzhiyun	  applications will need to be compiled and linked for the endianness
1063*4882a593Smuzhiyun	  that is selected here.
1064*4882a593Smuzhiyun
1065*4882a593Smuzhiyunconfig CPU_BIG_ENDIAN
1066*4882a593Smuzhiyun	bool "Build big-endian kernel"
1067*4882a593Smuzhiyun	depends on !LD_IS_LLD || LLD_VERSION >= 130000
1068*4882a593Smuzhiyun	help
1069*4882a593Smuzhiyun	  Say Y if you plan on running a kernel with a big-endian userspace.
1070*4882a593Smuzhiyun
1071*4882a593Smuzhiyunconfig CPU_LITTLE_ENDIAN
1072*4882a593Smuzhiyun	bool "Build little-endian kernel"
1073*4882a593Smuzhiyun	help
1074*4882a593Smuzhiyun	  Say Y if you plan on running a kernel with a little-endian userspace.
1075*4882a593Smuzhiyun	  This is usually the case for distributions targeting arm64.
1076*4882a593Smuzhiyun
1077*4882a593Smuzhiyunendchoice
1078*4882a593Smuzhiyun
1079*4882a593Smuzhiyunconfig SCHED_MC
1080*4882a593Smuzhiyun	bool "Multi-core scheduler support"
1081*4882a593Smuzhiyun	help
1082*4882a593Smuzhiyun	  Multi-core scheduler support improves the CPU scheduler's decision
1083*4882a593Smuzhiyun	  making when dealing with multi-core CPU chips at a cost of slightly
1084*4882a593Smuzhiyun	  increased overhead in some places. If unsure say N here.
1085*4882a593Smuzhiyun
1086*4882a593Smuzhiyunconfig SCHED_SMT
1087*4882a593Smuzhiyun	bool "SMT scheduler support"
1088*4882a593Smuzhiyun	help
1089*4882a593Smuzhiyun	  Improves the CPU scheduler's decision making when dealing with
1090*4882a593Smuzhiyun	  MultiThreading at a cost of slightly increased overhead in some
1091*4882a593Smuzhiyun	  places. If unsure say N here.
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyunconfig NR_CPUS
1094*4882a593Smuzhiyun	int "Maximum number of CPUs (2-4096)"
1095*4882a593Smuzhiyun	range 2 4096
1096*4882a593Smuzhiyun	default "256"
1097*4882a593Smuzhiyun
1098*4882a593Smuzhiyunconfig HOTPLUG_CPU
1099*4882a593Smuzhiyun	bool "Support for hot-pluggable CPUs"
1100*4882a593Smuzhiyun	select GENERIC_IRQ_MIGRATION
1101*4882a593Smuzhiyun	help
1102*4882a593Smuzhiyun	  Say Y here to experiment with turning CPUs off and on.  CPUs
1103*4882a593Smuzhiyun	  can be controlled through /sys/devices/system/cpu.
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun# Common NUMA Features
1106*4882a593Smuzhiyunconfig NUMA
1107*4882a593Smuzhiyun	bool "NUMA Memory Allocation and Scheduler Support"
1108*4882a593Smuzhiyun	select ACPI_NUMA if ACPI
1109*4882a593Smuzhiyun	select OF_NUMA
1110*4882a593Smuzhiyun	help
1111*4882a593Smuzhiyun	  Enable NUMA (Non-Uniform Memory Access) support.
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun	  The kernel will try to allocate memory used by a CPU on the
1114*4882a593Smuzhiyun	  local memory of the CPU and add some more
1115*4882a593Smuzhiyun	  NUMA awareness to the kernel.
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyunconfig NODES_SHIFT
1118*4882a593Smuzhiyun	int "Maximum NUMA Nodes (as a power of 2)"
1119*4882a593Smuzhiyun	range 1 10
1120*4882a593Smuzhiyun	default "4"
1121*4882a593Smuzhiyun	depends on NEED_MULTIPLE_NODES
1122*4882a593Smuzhiyun	help
1123*4882a593Smuzhiyun	  Specify the maximum number of NUMA Nodes available on the target
1124*4882a593Smuzhiyun	  system.  Increases memory reserved to accommodate various tables.
1125*4882a593Smuzhiyun
1126*4882a593Smuzhiyunconfig USE_PERCPU_NUMA_NODE_ID
1127*4882a593Smuzhiyun	def_bool y
1128*4882a593Smuzhiyun	depends on NUMA
1129*4882a593Smuzhiyun
1130*4882a593Smuzhiyunconfig HAVE_SETUP_PER_CPU_AREA
1131*4882a593Smuzhiyun	def_bool y
1132*4882a593Smuzhiyun	depends on NUMA
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyunconfig NEED_PER_CPU_EMBED_FIRST_CHUNK
1135*4882a593Smuzhiyun	def_bool y
1136*4882a593Smuzhiyun	depends on NUMA
1137*4882a593Smuzhiyun
1138*4882a593Smuzhiyunconfig HOLES_IN_ZONE
1139*4882a593Smuzhiyun	def_bool y
1140*4882a593Smuzhiyun
1141*4882a593Smuzhiyunsource "kernel/Kconfig.hz"
1142*4882a593Smuzhiyun
1143*4882a593Smuzhiyunconfig ARCH_SUPPORTS_DEBUG_PAGEALLOC
1144*4882a593Smuzhiyun	def_bool y
1145*4882a593Smuzhiyun
1146*4882a593Smuzhiyunconfig ARCH_SPARSEMEM_ENABLE
1147*4882a593Smuzhiyun	def_bool y
1148*4882a593Smuzhiyun	select SPARSEMEM_VMEMMAP_ENABLE
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyunconfig ARCH_SPARSEMEM_DEFAULT
1151*4882a593Smuzhiyun	def_bool ARCH_SPARSEMEM_ENABLE
1152*4882a593Smuzhiyun
1153*4882a593Smuzhiyunconfig ARCH_SELECT_MEMORY_MODEL
1154*4882a593Smuzhiyun	def_bool ARCH_SPARSEMEM_ENABLE
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyunconfig ARCH_FLATMEM_ENABLE
1157*4882a593Smuzhiyun	def_bool !NUMA
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyunconfig HAVE_ARCH_PFN_VALID
1160*4882a593Smuzhiyun	def_bool y
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyunconfig HW_PERF_EVENTS
1163*4882a593Smuzhiyun	def_bool y
1164*4882a593Smuzhiyun	depends on ARM_PMU
1165*4882a593Smuzhiyun
1166*4882a593Smuzhiyunconfig SYS_SUPPORTS_HUGETLBFS
1167*4882a593Smuzhiyun	def_bool y
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyunconfig ARCH_WANT_HUGE_PMD_SHARE
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyunconfig ARCH_HAS_CACHE_LINE_SIZE
1172*4882a593Smuzhiyun	def_bool y
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyunconfig ARCH_ENABLE_SPLIT_PMD_PTLOCK
1175*4882a593Smuzhiyun	def_bool y if PGTABLE_LEVELS > 2
1176*4882a593Smuzhiyun
1177*4882a593Smuzhiyun# Supported by clang >= 7.0
1178*4882a593Smuzhiyunconfig CC_HAVE_SHADOW_CALL_STACK
1179*4882a593Smuzhiyun	def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
1180*4882a593Smuzhiyun
1181*4882a593Smuzhiyunconfig PARAVIRT
1182*4882a593Smuzhiyun	bool "Enable paravirtualization code"
1183*4882a593Smuzhiyun	help
1184*4882a593Smuzhiyun	  This changes the kernel so it can modify itself when it is run
1185*4882a593Smuzhiyun	  under a hypervisor, potentially improving performance significantly
1186*4882a593Smuzhiyun	  over full virtualization.
1187*4882a593Smuzhiyun
# Fine-grained vCPU steal-time accounting; pulls in the paravirt hooks.
1188*4882a593Smuzhiyunconfig PARAVIRT_TIME_ACCOUNTING
1189*4882a593Smuzhiyun	bool "Paravirtual steal time accounting"
1190*4882a593Smuzhiyun	select PARAVIRT
1191*4882a593Smuzhiyun	help
1192*4882a593Smuzhiyun	  Select this option to enable fine granularity task steal time
1193*4882a593Smuzhiyun	  accounting. Time spent executing other tasks in parallel with
1194*4882a593Smuzhiyun	  the current vCPU is discounted from the vCPU power. To account for
1195*4882a593Smuzhiyun	  that, there can be a small performance impact.
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun	  If in doubt, say N here.
1198*4882a593Smuzhiyun
# NOTE(review): PM_SLEEP_SMP is required, presumably so that secondary
# CPUs can be offlined before switching kernels — confirm against
# kernel/kexec_core.c.
1199*4882a593Smuzhiyunconfig KEXEC
1200*4882a593Smuzhiyun	depends on PM_SLEEP_SMP
1201*4882a593Smuzhiyun	select KEXEC_CORE
1202*4882a593Smuzhiyun	bool "kexec system call"
1203*4882a593Smuzhiyun	help
1204*4882a593Smuzhiyun	  kexec is a system call that implements the ability to shutdown your
1205*4882a593Smuzhiyun	  current kernel, and to start another kernel.  It is like a reboot
1206*4882a593Smuzhiyun	  but it is independent of the system firmware.   And like a reboot
1207*4882a593Smuzhiyun	  you can start any kernel with it, not just Linux.
1208*4882a593Smuzhiyun
# File-descriptor-based variant of the kexec syscall (kexec_file_load()).
1209*4882a593Smuzhiyunconfig KEXEC_FILE
1210*4882a593Smuzhiyun	bool "kexec file based system call"
1211*4882a593Smuzhiyun	select KEXEC_CORE
1212*4882a593Smuzhiyun	help
1213*4882a593Smuzhiyun	  This is a new version of the kexec system call. This system call is
1214*4882a593Smuzhiyun	  file based and takes file descriptors as system call argument
1215*4882a593Smuzhiyun	  for kernel and initramfs as opposed to list of segments as
1216*4882a593Smuzhiyun	  accepted by previous system call.
1217*4882a593Smuzhiyun
# Signature enforcement for kexec_file_load(); the per-image-format
# verification (e.g. KEXEC_IMAGE_VERIFY_SIG) must be enabled separately.
1218*4882a593Smuzhiyunconfig KEXEC_SIG
1219*4882a593Smuzhiyun	bool "Verify kernel signature during kexec_file_load() syscall"
1220*4882a593Smuzhiyun	depends on KEXEC_FILE
1221*4882a593Smuzhiyun	help
1222*4882a593Smuzhiyun	  Select this option to verify a signature with loaded kernel
1223*4882a593Smuzhiyun	  image. If configured, any attempt of loading an image without
1224*4882a593Smuzhiyun	  valid signature will fail.
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun	  In addition to that option, you need to enable signature
1227*4882a593Smuzhiyun	  verification for the corresponding kernel image type being
1228*4882a593Smuzhiyun	  loaded in order for this to work.
1229*4882a593Smuzhiyun
# Signature verification for the arm64 Image format; relies on the EFI
# PE/COFF signature machinery, hence the EFI dependency below.
1230*4882a593Smuzhiyunconfig KEXEC_IMAGE_VERIFY_SIG
1231*4882a593Smuzhiyun	bool "Enable Image signature verification support"
1232*4882a593Smuzhiyun	default y
1233*4882a593Smuzhiyun	depends on KEXEC_SIG
1234*4882a593Smuzhiyun	depends on EFI && SIGNED_PE_FILE_VERIFICATION
1235*4882a593Smuzhiyun	help
1236*4882a593Smuzhiyun	  Enable Image signature verification support.
1237*4882a593Smuzhiyun
# UI-only notice shown in place of KEXEC_IMAGE_VERIFY_SIG when the PE
# signature machinery is unavailable.
1238*4882a593Smuzhiyuncomment "Support for PE file signature verification disabled"
1239*4882a593Smuzhiyun	depends on KEXEC_SIG
1240*4882a593Smuzhiyun	depends on !EFI || !SIGNED_PE_FILE_VERIFICATION
1241*4882a593Smuzhiyun
# Build a kernel usable as the kdump capture (crash dump) kernel.
1242*4882a593Smuzhiyunconfig CRASH_DUMP
1243*4882a593Smuzhiyun	bool "Build kdump crash kernel"
1244*4882a593Smuzhiyun	help
1245*4882a593Smuzhiyun	  Generate crash dump after being started by kexec. This should
1246*4882a593Smuzhiyun	  be normally only set in special crash dump kernels which are
1247*4882a593Smuzhiyun	  loaded in the main kernel with kexec-tools into a specially
1248*4882a593Smuzhiyun	  reserved region and then later executed after a crash by
1249*4882a593Smuzhiyun	  kdump/kexec.
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun	  For more details see Documentation/admin-guide/kdump/kdump.rst
1252*4882a593Smuzhiyun
# Initial-domain (dom0) support is always on when Xen guest support is.
1253*4882a593Smuzhiyunconfig XEN_DOM0
1254*4882a593Smuzhiyun	def_bool y
1255*4882a593Smuzhiyun	depends on XEN
1256*4882a593Smuzhiyun
# Core Xen guest support; pulls in the Xen swiotlb DMA backend and the
# generic paravirt hooks. Requires device tree (OF) for Xen discovery.
1257*4882a593Smuzhiyunconfig XEN
1258*4882a593Smuzhiyun	bool "Xen guest support on ARM64"
1259*4882a593Smuzhiyun	depends on ARM64 && OF
1260*4882a593Smuzhiyun	select SWIOTLB_XEN
1261*4882a593Smuzhiyun	select PARAVIRT
1262*4882a593Smuzhiyun	help
1263*4882a593Smuzhiyun	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
1264*4882a593Smuzhiyun
# Buddy-allocator maximum order; the larger page sizes need a bigger
# order so a PMD-sized huge page can still be allocated (see help).
1265*4882a593Smuzhiyunconfig FORCE_MAX_ZONEORDER
1266*4882a593Smuzhiyun	int
1267*4882a593Smuzhiyun	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
1268*4882a593Smuzhiyun	default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
1269*4882a593Smuzhiyun	default "11"
1270*4882a593Smuzhiyun	help
1271*4882a593Smuzhiyun	  The kernel memory allocator divides physically contiguous memory
1272*4882a593Smuzhiyun	  blocks into "zones", where each zone is a power of two number of
1273*4882a593Smuzhiyun	  pages.  This option selects the largest power of two that the kernel
1274*4882a593Smuzhiyun	  keeps in the memory allocator.  If you need to allocate very large
1275*4882a593Smuzhiyun	  blocks of physically contiguous memory, then you may need to
1276*4882a593Smuzhiyun	  increase this value.
1277*4882a593Smuzhiyun
1278*4882a593Smuzhiyun	  This config option is actually maximum order plus one. For example,
1279*4882a593Smuzhiyun	  a value of 11 means that the largest free memory block is 2^10 pages.
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun	  We make sure that we can allocate up to a HugePage size for each configuration.
1282*4882a593Smuzhiyun	  Hence we have :
1283*4882a593Smuzhiyun		MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun	  However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
1286*4882a593Smuzhiyun	  4M allocations matching the default size used by generic code.
1287*4882a593Smuzhiyun
# Kernel page-table isolation (the prompt's "KAISER"): unmap the kernel
# while running in userspace to defeat Meltdown-style speculation leaks.
1288*4882a593Smuzhiyunconfig UNMAP_KERNEL_AT_EL0
1289*4882a593Smuzhiyun	bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
1290*4882a593Smuzhiyun	default y
1291*4882a593Smuzhiyun	help
1292*4882a593Smuzhiyun	  Speculation attacks against some high-performance processors can
1293*4882a593Smuzhiyun	  be used to bypass MMU permission checks and leak kernel data to
1294*4882a593Smuzhiyun	  userspace. This can be defended against by unmapping the kernel
1295*4882a593Smuzhiyun	  when running in userspace, mapping it back in on exception entry
1296*4882a593Smuzhiyun	  via a trampoline page in the vector table.
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun	  If unsure, say Y.
1299*4882a593Smuzhiyun
# Spectre-BHB mitigation: scrub branch history on exception entry from
# user space.
1300*4882a593Smuzhiyunconfig MITIGATE_SPECTRE_BRANCH_HISTORY
1301*4882a593Smuzhiyun	bool "Mitigate Spectre style attacks against branch history" if EXPERT
1302*4882a593Smuzhiyun	default y
1303*4882a593Smuzhiyun	help
1304*4882a593Smuzhiyun	  Speculation attacks against some high-performance processors can
1305*4882a593Smuzhiyun	  make use of branch history to influence future speculation.
1306*4882a593Smuzhiyun	  When taking an exception from user-space, a sequence of branches
1307*4882a593Smuzhiyun	  or a firmware call overwrites the branch history.

	  If unsure, say Y.
1308*4882a593Smuzhiyun
# Controls the default of the rodata= boot parameter (see help).
1309*4882a593Smuzhiyunconfig RODATA_FULL_DEFAULT_ENABLED
1310*4882a593Smuzhiyun	bool "Apply r/o permissions of VM areas also to their linear aliases"
1311*4882a593Smuzhiyun	default y
1312*4882a593Smuzhiyun	help
1313*4882a593Smuzhiyun	  Apply read-only attributes of VM areas to the linear alias of
1314*4882a593Smuzhiyun	  the backing pages as well. This prevents code or read-only data
1315*4882a593Smuzhiyun	  from being modified (inadvertently or intentionally) via another
1316*4882a593Smuzhiyun	  mapping of the same memory page. This additional enhancement can
1317*4882a593Smuzhiyun	  be turned off at runtime by passing rodata=[off|on] (and turned on
1318*4882a593Smuzhiyun	  with rodata=full if this option is set to 'n')
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun	  This requires the linear region to be mapped down to pages,
1321*4882a593Smuzhiyun	  which may adversely affect performance in some cases.
1322*4882a593Smuzhiyun
# Software emulation of PAN via TTBR0_EL1 switching; the hardware
# equivalent is ARM64_PAN in the ARMv8.1 menu below.
1323*4882a593Smuzhiyunconfig ARM64_SW_TTBR0_PAN
1324*4882a593Smuzhiyun	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
1325*4882a593Smuzhiyun	help
1326*4882a593Smuzhiyun	  Enabling this option prevents the kernel from accessing
1327*4882a593Smuzhiyun	  user-space memory directly by pointing TTBR0_EL1 to a reserved
1328*4882a593Smuzhiyun	  zeroed area and reserved ASID. The user access routines
1329*4882a593Smuzhiyun	  restore the valid TTBR0_EL1 temporarily.
1330*4882a593Smuzhiyun
# Per-process opt-in (via prctl()) relaxed tagged-pointer syscall ABI.
1331*4882a593Smuzhiyunconfig ARM64_TAGGED_ADDR_ABI
1332*4882a593Smuzhiyun	bool "Enable the tagged user addresses syscall ABI"
1333*4882a593Smuzhiyun	default y
1334*4882a593Smuzhiyun	help
1335*4882a593Smuzhiyun	  When this option is enabled, user applications can opt in to a
1336*4882a593Smuzhiyun	  relaxed ABI via prctl() allowing tagged addresses to be passed
1337*4882a593Smuzhiyun	  to system calls as pointer arguments. For details, see
1338*4882a593Smuzhiyun	  Documentation/arm64/tagged-address-abi.rst.
1339*4882a593Smuzhiyun
# Umbrella for 32-bit (AArch32) EL0 support; everything in the
# "if COMPAT" section below is conditional on this.
1340*4882a593Smuzhiyunmenuconfig COMPAT
1341*4882a593Smuzhiyun	bool "Kernel support for 32-bit EL0"
1342*4882a593Smuzhiyun	depends on ARM64_4K_PAGES || EXPERT
1343*4882a593Smuzhiyun	select COMPAT_BINFMT_ELF if BINFMT_ELF
1344*4882a593Smuzhiyun	select HAVE_UID16
1345*4882a593Smuzhiyun	select OLD_SIGSUSPEND3
1346*4882a593Smuzhiyun	select COMPAT_OLD_SIGACTION
1347*4882a593Smuzhiyun	help
1348*4882a593Smuzhiyun	  This option enables support for a 32-bit EL0 running under a 64-bit
1349*4882a593Smuzhiyun	  kernel at EL1. AArch32-specific components such as system calls,
1350*4882a593Smuzhiyun	  the user helper functions, VFP support and the ptrace interface are
1351*4882a593Smuzhiyun	  handled appropriately by the kernel.
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun	  If you use a page size other than 4KB (i.e. 16KB or 64KB), please be aware
1354*4882a593Smuzhiyun	  that you will only be able to execute AArch32 binaries that were compiled
1355*4882a593Smuzhiyun	  with page size aligned segments.
1356*4882a593Smuzhiyun
1357*4882a593Smuzhiyun	  If you want to execute 32-bit userspace applications, say Y.
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyunif COMPAT
1360*4882a593Smuzhiyun
# Fixed-address "kuser helpers" code page exposed to AArch32 processes.
1361*4882a593Smuzhiyunconfig KUSER_HELPERS
1362*4882a593Smuzhiyun	bool "Enable kuser helpers page for 32-bit applications"
1363*4882a593Smuzhiyun	default y
1364*4882a593Smuzhiyun	help
1365*4882a593Smuzhiyun	  Warning: disabling this option may break 32-bit user programs.
1366*4882a593Smuzhiyun
1367*4882a593Smuzhiyun	  Provide kuser helpers to compat tasks. The kernel provides
1368*4882a593Smuzhiyun	  helper code to userspace in read only form at a fixed location
1369*4882a593Smuzhiyun	  to allow userspace to be independent of the CPU type fitted to
1370*4882a593Smuzhiyun	  the system. This permits binaries to be run on ARMv4 through
1371*4882a593Smuzhiyun	  to ARMv8 without modification.
1372*4882a593Smuzhiyun
1373*4882a593Smuzhiyun	  See Documentation/arm/kernel_user_helpers.rst for details.
1374*4882a593Smuzhiyun
1375*4882a593Smuzhiyun	  However, the fixed address nature of these helpers can be used
1376*4882a593Smuzhiyun	  by ROP (return oriented programming) authors when creating
1377*4882a593Smuzhiyun	  exploits.
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun	  If all of the binaries and libraries which run on your platform
1380*4882a593Smuzhiyun	  are built specifically for your platform, and make no use of
1381*4882a593Smuzhiyun	  these helpers, then you can turn this option off to hinder
1382*4882a593Smuzhiyun	  such exploits. However, in that case, if a binary or library
1383*4882a593Smuzhiyun	  relying on those helpers is run, it will not function correctly.
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun	  Say N here only if you are absolutely certain that you do not
1386*4882a593Smuzhiyun	  need these helpers; otherwise, the safe option is to say Y.
1387*4882a593Smuzhiyun
# 32-bit vDSO; requires either an integrated clang+lld toolchain or a
# separate 32-bit cross compiler given via CROSS_COMPILE_COMPAT.
1388*4882a593Smuzhiyunconfig COMPAT_VDSO
1389*4882a593Smuzhiyun	bool "Enable vDSO for 32-bit applications"
1390*4882a593Smuzhiyun	depends on !CPU_BIG_ENDIAN
1391*4882a593Smuzhiyun	depends on (CC_IS_CLANG && LD_IS_LLD) || "$(CROSS_COMPILE_COMPAT)" != ""
1392*4882a593Smuzhiyun	select GENERIC_COMPAT_VDSO
1393*4882a593Smuzhiyun	default y
1394*4882a593Smuzhiyun	help
1395*4882a593Smuzhiyun	  Place in the process address space of 32-bit applications an
1396*4882a593Smuzhiyun	  ELF shared object providing fast implementations of gettimeofday
1397*4882a593Smuzhiyun	  and clock_gettime.
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun	  You must have a 32-bit build of glibc 2.22 or later for programs
1400*4882a593Smuzhiyun	  to seamlessly take advantage of this.
1401*4882a593Smuzhiyun
# Instruction-set choice for building the compat vDSO only.
1402*4882a593Smuzhiyunconfig THUMB2_COMPAT_VDSO
1403*4882a593Smuzhiyun	bool "Compile the 32-bit vDSO for Thumb-2 mode" if EXPERT
1404*4882a593Smuzhiyun	depends on COMPAT_VDSO
1405*4882a593Smuzhiyun	default y
1406*4882a593Smuzhiyun	help
1407*4882a593Smuzhiyun	  Compile the compat vDSO with '-mthumb -fomit-frame-pointer' if y,
1408*4882a593Smuzhiyun	  otherwise with '-marm'.
1409*4882a593Smuzhiyun
# Umbrella for selective emulation of instructions deprecated/obsoleted
# by ARMv8; each emulation below has a runtime abi.* sysctl switch.
1410*4882a593Smuzhiyunmenuconfig ARMV8_DEPRECATED
1411*4882a593Smuzhiyun	bool "Emulate deprecated/obsolete ARMv8 instructions"
1412*4882a593Smuzhiyun	depends on SYSCTL
1413*4882a593Smuzhiyun	help
1414*4882a593Smuzhiyun	  Legacy software support may require certain instructions
1415*4882a593Smuzhiyun	  that have been deprecated or obsoleted in the architecture.
1416*4882a593Smuzhiyun
1417*4882a593Smuzhiyun	  Enable this config to enable selective emulation of these
1418*4882a593Smuzhiyun	  features.
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun	  If unsure, say Y
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyunif ARMV8_DEPRECATED
1423*4882a593Smuzhiyun
# SWP/SWPB emulation using LDXR/STXR; runtime switch: abi.swp sysctl.
1424*4882a593Smuzhiyunconfig SWP_EMULATION
1425*4882a593Smuzhiyun	bool "Emulate SWP/SWPB instructions"
1426*4882a593Smuzhiyun	help
1427*4882a593Smuzhiyun	  ARMv8 obsoletes the use of A32 SWP/SWPB instructions such that
1428*4882a593Smuzhiyun	  they are always undefined. Say Y here to enable software
1429*4882a593Smuzhiyun	  emulation of these instructions for userspace using LDXR/STXR.
1430*4882a593Smuzhiyun	  This feature can be controlled at runtime with the abi.swp
1431*4882a593Smuzhiyun	  sysctl which is disabled by default.
1432*4882a593Smuzhiyun
1433*4882a593Smuzhiyun	  In some older versions of glibc [<=2.8] SWP is used during futex
1434*4882a593Smuzhiyun	  trylock() operations with the assumption that the code will not
1435*4882a593Smuzhiyun	  be preempted. This invalid assumption may be more likely to fail
1436*4882a593Smuzhiyun	  with SWP emulation enabled, leading to deadlock of the user
1437*4882a593Smuzhiyun	  application.
1438*4882a593Smuzhiyun
1439*4882a593Smuzhiyun	  NOTE: when accessing uncached shared regions, LDXR/STXR rely
1440*4882a593Smuzhiyun	  on an external transaction monitoring block called a global
1441*4882a593Smuzhiyun	  monitor to maintain update atomicity. If your system does not
1442*4882a593Smuzhiyun	  implement a global monitor, this option can cause programs that
1443*4882a593Smuzhiyun	  perform SWP operations to uncached memory to deadlock.
1444*4882a593Smuzhiyun
1445*4882a593Smuzhiyun	  If unsure, say Y
1446*4882a593Smuzhiyun
# CP15ISB/CP15DSB/CP15DMB emulation; runtime switch: abi.cp15_barrier.
1447*4882a593Smuzhiyunconfig CP15_BARRIER_EMULATION
1448*4882a593Smuzhiyun	bool "Emulate CP15 Barrier instructions"
1449*4882a593Smuzhiyun	help
1450*4882a593Smuzhiyun	  The CP15 barrier instructions - CP15ISB, CP15DSB, and
1451*4882a593Smuzhiyun	  CP15DMB - are deprecated in ARMv8 (and ARMv7). It is
1452*4882a593Smuzhiyun	  strongly recommended to use the ISB, DSB, and DMB
1453*4882a593Smuzhiyun	  instructions instead.
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun	  Say Y here to enable software emulation of these
1456*4882a593Smuzhiyun	  instructions for AArch32 userspace code. When this option is
1457*4882a593Smuzhiyun	  enabled, CP15 barrier usage is traced which can help
1458*4882a593Smuzhiyun	  identify software that needs updating. This feature can be
1459*4882a593Smuzhiyun	  controlled at runtime with the abi.cp15_barrier sysctl.
1460*4882a593Smuzhiyun
1461*4882a593Smuzhiyun	  If unsure, say Y
1462*4882a593Smuzhiyun
# SETEND emulation; runtime switch: abi.setend sysctl. Needs mixed-endian
# EL0 support on every CPU (see the hotplug caveat in the help text).
1463*4882a593Smuzhiyunconfig SETEND_EMULATION
1464*4882a593Smuzhiyun	bool "Emulate SETEND instruction"
1465*4882a593Smuzhiyun	help
1466*4882a593Smuzhiyun	  The SETEND instruction alters the data-endianness of the
1467*4882a593Smuzhiyun	  AArch32 EL0, and is deprecated in ARMv8.
1468*4882a593Smuzhiyun
1469*4882a593Smuzhiyun	  Say Y here to enable software emulation of the instruction
1470*4882a593Smuzhiyun	  for AArch32 userspace code. This feature can be controlled
1471*4882a593Smuzhiyun	  at runtime with the abi.setend sysctl.
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun	  Note: All the cpus on the system must have mixed endian support at EL0
1474*4882a593Smuzhiyun	  for this feature to be enabled. If a new CPU - which doesn't support mixed
1475*4882a593Smuzhiyun	  endian - is hotplugged in after this feature has been enabled, there could
1476*4882a593Smuzhiyun	  be unexpected results in the applications.
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun	  If unsure, say Y
1479*4882a593Smuzhiyunendif
1480*4882a593Smuzhiyun
1481*4882a593Smuzhiyunendif
1482*4882a593Smuzhiyun
1483*4882a593Smuzhiyunmenu "ARMv8.1 architectural features"
1484*4882a593Smuzhiyun
# Hardware management of the Access flag and dirty state (TCR_EL1 HA/HD).
1485*4882a593Smuzhiyunconfig ARM64_HW_AFDBM
1486*4882a593Smuzhiyun	bool "Support for hardware updates of the Access and Dirty page flags"
1487*4882a593Smuzhiyun	default y
1488*4882a593Smuzhiyun	help
1489*4882a593Smuzhiyun	  The ARMv8.1 architecture extensions introduce support for
1490*4882a593Smuzhiyun	  hardware updates of the access and dirty information in page
1491*4882a593Smuzhiyun	  table entries. When enabled in TCR_EL1 (HA and HD bits) on
1492*4882a593Smuzhiyun	  capable processors, accesses to pages with PTE_AF cleared will
1493*4882a593Smuzhiyun	  set this bit instead of raising an access flag fault.
1494*4882a593Smuzhiyun	  Similarly, writes to read-only pages with the DBM bit set will
1495*4882a593Smuzhiyun	  clear the read-only bit (AP[2]) instead of raising a
1496*4882a593Smuzhiyun	  permission fault.
1497*4882a593Smuzhiyun
1498*4882a593Smuzhiyun	  Kernels built with this configuration option enabled continue
1499*4882a593Smuzhiyun	  to work on pre-ARMv8.1 hardware and the performance impact is
1500*4882a593Smuzhiyun	  minimal. If unsure, say Y.
1501*4882a593Smuzhiyun
# Hardware Privileged Access Never; detected at runtime, nop otherwise.
1502*4882a593Smuzhiyunconfig ARM64_PAN
1503*4882a593Smuzhiyun	bool "Enable support for Privileged Access Never (PAN)"
1504*4882a593Smuzhiyun	default y
1505*4882a593Smuzhiyun	help
1506*4882a593Smuzhiyun	 Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
1507*4882a593Smuzhiyun	 prevents the kernel or hypervisor from accessing user-space (EL0)
1508*4882a593Smuzhiyun	 memory directly.
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun	 Choosing this option will cause any unprotected (not using
1511*4882a593Smuzhiyun	 copy_to_user et al) memory access to fail with a permission fault.
1512*4882a593Smuzhiyun
1513*4882a593Smuzhiyun	 The feature is detected at runtime, and will remain as a 'nop'
1514*4882a593Smuzhiyun	 instruction if the cpu does not implement the feature.
1515*4882a593Smuzhiyun
# Assembler capability probes for the RCpc and LSE extensions.
1516*4882a593Smuzhiyunconfig AS_HAS_LDAPR
1517*4882a593Smuzhiyun	def_bool $(as-instr,.arch_extension rcpc)
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyunconfig AS_HAS_LSE_ATOMICS
1520*4882a593Smuzhiyun	def_bool $(as-instr,.arch_extension lse)
1521*4882a593Smuzhiyun
# Internal symbol: LSE atomics are built only when the user opted in via
# ARM64_USE_LSE_ATOMICS and the assembler can emit them.
1522*4882a593Smuzhiyunconfig ARM64_LSE_ATOMICS
1523*4882a593Smuzhiyun	bool
1524*4882a593Smuzhiyun	default ARM64_USE_LSE_ATOMICS
1525*4882a593Smuzhiyun	depends on AS_HAS_LSE_ATOMICS
1526*4882a593Smuzhiyun
1527*4882a593Smuzhiyunconfig ARM64_USE_LSE_ATOMICS
1528*4882a593Smuzhiyun	bool "Atomic instructions"
1529*4882a593Smuzhiyun	depends on JUMP_LABEL
1530*4882a593Smuzhiyun	default y
1531*4882a593Smuzhiyun	help
1532*4882a593Smuzhiyun	  As part of the Large System Extensions, ARMv8.1 introduces new
1533*4882a593Smuzhiyun	  atomic instructions that are designed specifically to scale in
1534*4882a593Smuzhiyun	  very large systems.
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun	  Say Y here to make use of these instructions for the in-kernel
1537*4882a593Smuzhiyun	  atomic routines. This incurs a small overhead on CPUs that do
1538*4882a593Smuzhiyun	  not support these instructions and requires the kernel to be
1539*4882a593Smuzhiyun	  built with binutils >= 2.25 in order for the new instructions
1540*4882a593Smuzhiyun	  to be used.
1541*4882a593Smuzhiyun
# Virtualization Host Extensions: run the kernel directly at EL2.
1542*4882a593Smuzhiyunconfig ARM64_VHE
1543*4882a593Smuzhiyun	bool "Enable support for Virtualization Host Extensions (VHE)"
1544*4882a593Smuzhiyun	default y
1545*4882a593Smuzhiyun	help
1546*4882a593Smuzhiyun	  Virtualization Host Extensions (VHE) allow the kernel to run
1547*4882a593Smuzhiyun	  directly at EL2 (instead of EL1) on processors that support
1548*4882a593Smuzhiyun	  it. This leads to better performance for KVM, as they reduce
1549*4882a593Smuzhiyun	  the cost of the world switch.
1550*4882a593Smuzhiyun
1551*4882a593Smuzhiyun	  Selecting this option allows the VHE feature to be detected
1552*4882a593Smuzhiyun	  at runtime, and does not affect processors that do not
1553*4882a593Smuzhiyun	  implement this feature.
1554*4882a593Smuzhiyun
1555*4882a593Smuzhiyunendmenu
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyunmenu "ARMv8.2 architectural features"
1558*4882a593Smuzhiyun
# User Access Override: run the "unprivileged" load/store variants with
# privileged permissions for get_user()/put_user() and friends.
1559*4882a593Smuzhiyunconfig ARM64_UAO
1560*4882a593Smuzhiyun	bool "Enable support for User Access Override (UAO)"
1561*4882a593Smuzhiyun	default y
1562*4882a593Smuzhiyun	help
1563*4882a593Smuzhiyun	  User Access Override (UAO; part of the ARMv8.2 Extensions)
1564*4882a593Smuzhiyun	  causes the 'unprivileged' variant of the load/store instructions to
1565*4882a593Smuzhiyun	  be overridden to be privileged.
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun	  This option changes get_user() and friends to use the 'unprivileged'
1568*4882a593Smuzhiyun	  variant of the load/store instructions. This ensures that user-space
1569*4882a593Smuzhiyun	  really did have access to the supplied memory. When addr_limit is
1570*4882a593Smuzhiyun	  set to kernel memory the UAO bit will be set, allowing privileged
1571*4882a593Smuzhiyun	  access to kernel memory.
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun	  Choosing this option will cause copy_to_user() et al to use user-space
1574*4882a593Smuzhiyun	  memory permissions.
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun	  The feature is detected at runtime, the kernel will use the
1577*4882a593Smuzhiyun	  regular load/store instructions if the cpu does not implement the
1578*4882a593Smuzhiyun	  feature.
1579*4882a593Smuzhiyun
# Persistent-memory API support built on the ARMv8.2 DC CVAP operation.
1580*4882a593Smuzhiyunconfig ARM64_PMEM
1581*4882a593Smuzhiyun	bool "Enable support for persistent memory"
1582*4882a593Smuzhiyun	select ARCH_HAS_PMEM_API
1583*4882a593Smuzhiyun	select ARCH_HAS_UACCESS_FLUSHCACHE
1584*4882a593Smuzhiyun	help
1585*4882a593Smuzhiyun	  Say Y to enable support for the persistent memory API based on the
1586*4882a593Smuzhiyun	  ARMv8.2 DCPoP feature.
1587*4882a593Smuzhiyun
1588*4882a593Smuzhiyun	  The feature is detected at runtime, and the kernel will use DC CVAC
1589*4882a593Smuzhiyun	  operations if DC CVAP is not supported (following the behaviour of
1590*4882a593Smuzhiyun	  DC CVAP itself if the system does not define a point of persistence).
1591*4882a593Smuzhiyun
# RAS extension barriers and error-record registers; platform RAS may
# additionally require firmware support (see help).
1592*4882a593Smuzhiyunconfig ARM64_RAS_EXTN
1593*4882a593Smuzhiyun	bool "Enable support for RAS CPU Extensions"
1594*4882a593Smuzhiyun	default y
1595*4882a593Smuzhiyun	help
1596*4882a593Smuzhiyun	  CPUs that support the Reliability, Availability and Serviceability
1597*4882a593Smuzhiyun	  (RAS) Extensions, part of ARMv8.2 are able to track faults and
1598*4882a593Smuzhiyun	  errors, classify them and report them to software.
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun	  On CPUs with these extensions system software can use additional
1601*4882a593Smuzhiyun	  barriers to determine if faults are pending and read the
1602*4882a593Smuzhiyun	  classification from a new set of registers.
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun	  Selecting this feature will allow the kernel to use these barriers
1605*4882a593Smuzhiyun	  and access the new registers if the system supports the extension.
1606*4882a593Smuzhiyun	  Platform RAS features may additionally depend on firmware support.
1607*4882a593Smuzhiyun
# Common-not-Private translations; incompatible with SW PAN's TTBR0
# switching, hence the dependency expression below.
1608*4882a593Smuzhiyunconfig ARM64_CNP
1609*4882a593Smuzhiyun	bool "Enable support for Common Not Private (CNP) translations"
1610*4882a593Smuzhiyun	default y
1611*4882a593Smuzhiyun	depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
1612*4882a593Smuzhiyun	help
1613*4882a593Smuzhiyun	  Common Not Private (CNP) allows translation table entries to
1614*4882a593Smuzhiyun	  be shared between different PEs in the same inner shareable
1615*4882a593Smuzhiyun	  domain, so the hardware can use this fact to optimise the
1616*4882a593Smuzhiyun	  caching of such entries in the TLB.
1617*4882a593Smuzhiyun
1618*4882a593Smuzhiyun	  Selecting this option allows the CNP feature to be detected
1619*4882a593Smuzhiyun	  at runtime, and does not affect PEs that do not implement
1620*4882a593Smuzhiyun	  this feature.
1621*4882a593Smuzhiyun
1622*4882a593Smuzhiyunendmenu
1623*4882a593Smuzhiyun
1624*4882a593Smuzhiyunmenu "ARMv8.3 architectural features"
1625*4882a593Smuzhiyun
# Pointer authentication (PAC) for userspace, and for the kernel itself
# when the toolchain supports the relevant flags (see help).
1626*4882a593Smuzhiyunconfig ARM64_PTR_AUTH
1627*4882a593Smuzhiyun	bool "Enable support for pointer authentication"
1628*4882a593Smuzhiyun	default y
1629*4882a593Smuzhiyun	depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
1630*4882a593Smuzhiyun	# Modern compilers insert a .note.gnu.property section note for PAC
1631*4882a593Smuzhiyun	# which is only understood by binutils starting with version 2.33.1.
1632*4882a593Smuzhiyun	depends on LD_IS_LLD || LD_VERSION >= 233010000 || (CC_IS_GCC && GCC_VERSION < 90100)
1633*4882a593Smuzhiyun	depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
1634*4882a593Smuzhiyun	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
1635*4882a593Smuzhiyun	help
1636*4882a593Smuzhiyun	  Pointer authentication (part of the ARMv8.3 Extensions) provides
1637*4882a593Smuzhiyun	  instructions for signing and authenticating pointers against secret
1638*4882a593Smuzhiyun	  keys, which can be used to mitigate Return Oriented Programming (ROP)
1639*4882a593Smuzhiyun	  and other attacks.
1640*4882a593Smuzhiyun
1641*4882a593Smuzhiyun	  This option enables these instructions at EL0 (i.e. for userspace).
1642*4882a593Smuzhiyun	  Choosing this option will cause the kernel to initialise secret keys
1643*4882a593Smuzhiyun	  for each process at exec() time, with these keys being
1644*4882a593Smuzhiyun	  context-switched along with the process.
1645*4882a593Smuzhiyun
1646*4882a593Smuzhiyun	  If the compiler supports the -mbranch-protection or
1647*4882a593Smuzhiyun	  -msign-return-address flag (e.g. GCC 7 or later), then this option
1648*4882a593Smuzhiyun	  will also cause the kernel itself to be compiled with return address
1649*4882a593Smuzhiyun	  protection. In this case, and if the target hardware is known to
1650*4882a593Smuzhiyun	  support pointer authentication, then CONFIG_STACKPROTECTOR can be
1651*4882a593Smuzhiyun	  disabled with minimal loss of protection.
1652*4882a593Smuzhiyun
1653*4882a593Smuzhiyun	  The feature is detected at runtime. If the feature is not present in
1654*4882a593Smuzhiyun	  hardware it will not be advertised to userspace/KVM guest nor will it
1655*4882a593Smuzhiyun	  be enabled.
1656*4882a593Smuzhiyun
1657*4882a593Smuzhiyun	  If the feature is present on the boot CPU but not on a late CPU, then
1658*4882a593Smuzhiyun	  the late CPU will be parked. Also, if the boot CPU does not have
1659*4882a593Smuzhiyun	  address auth and the late CPU has then the late CPU will still boot
1660*4882a593Smuzhiyun	  but with the feature disabled. On such a system, this option should
1661*4882a593Smuzhiyun	  not be selected.
1662*4882a593Smuzhiyun
1663*4882a593Smuzhiyun	  This feature works with FUNCTION_GRAPH_TRACER option only if
1664*4882a593Smuzhiyun	  DYNAMIC_FTRACE_WITH_REGS is enabled.
1665*4882a593Smuzhiyun
# Toolchain capability probes used by ARM64_PTR_AUTH above.
1666*4882a593Smuzhiyunconfig CC_HAS_BRANCH_PROT_PAC_RET
1667*4882a593Smuzhiyun	# GCC 9 or later, clang 8 or later
1668*4882a593Smuzhiyun	def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)
1669*4882a593Smuzhiyun
1670*4882a593Smuzhiyunconfig CC_HAS_SIGN_RETURN_ADDRESS
1671*4882a593Smuzhiyun	# GCC 7, 8
1672*4882a593Smuzhiyun	def_bool $(cc-option,-msign-return-address=all)
1673*4882a593Smuzhiyun
1674*4882a593Smuzhiyunconfig AS_HAS_PAC
1675*4882a593Smuzhiyun	def_bool $(cc-option,-Wa$(comma)-march=armv8.3-a)
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyunconfig AS_HAS_CFI_NEGATE_RA_STATE
1678*4882a593Smuzhiyun	def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyunendmenu
1681*4882a593Smuzhiyun
1682*4882a593Smuzhiyunmenu "ARMv8.4 architectural features"
1683*4882a593Smuzhiyun
# Activity Monitors Unit (AMUv1); requires working firmware support,
# see the warning in the help text.
1684*4882a593Smuzhiyunconfig ARM64_AMU_EXTN
1685*4882a593Smuzhiyun	bool "Enable support for the Activity Monitors Unit CPU extension"
1686*4882a593Smuzhiyun	default y
1687*4882a593Smuzhiyun	help
1688*4882a593Smuzhiyun	  The activity monitors extension is an optional extension introduced
1689*4882a593Smuzhiyun	  by the ARMv8.4 CPU architecture. This enables support for version 1
1690*4882a593Smuzhiyun	  of the activity monitors architecture, AMUv1.
1691*4882a593Smuzhiyun
1692*4882a593Smuzhiyun	  To enable the use of this extension on CPUs that implement it, say Y.
1693*4882a593Smuzhiyun
1694*4882a593Smuzhiyun	  Note that for architectural reasons, firmware _must_ implement AMU
1695*4882a593Smuzhiyun	  support when running on CPUs that present the activity monitors
1696*4882a593Smuzhiyun	  extension. The required support is present in:
1697*4882a593Smuzhiyun	    * Version 1.5 and later of the ARM Trusted Firmware
1698*4882a593Smuzhiyun
1699*4882a593Smuzhiyun	  For kernels that have this configuration enabled but boot with broken
1700*4882a593Smuzhiyun	  firmware, you may need to say N here until the firmware is fixed.
1701*4882a593Smuzhiyun	  Otherwise you may experience firmware panics or lockups when
1702*4882a593Smuzhiyun	  accessing the counter registers. Even if you are not observing these
1703*4882a593Smuzhiyun	  symptoms, the values returned by the register reads might not
1704*4882a593Smuzhiyun	  correctly reflect reality. Most commonly, the value read will be 0,
1705*4882a593Smuzhiyun	  indicating that the counter is not enabled.
1706*4882a593Smuzhiyun
# Assembler accepts -march=armv8.4-a (probed via the compiler driver).
1707*4882a593Smuzhiyunconfig AS_HAS_ARMV8_4
1708*4882a593Smuzhiyun	def_bool $(cc-option,-Wa$(comma)-march=armv8.4-a)
1709*4882a593Smuzhiyun
# Range-based TLB invalidation (TLBI ..RANGE) instructions.
1710*4882a593Smuzhiyunconfig ARM64_TLB_RANGE
1711*4882a593Smuzhiyun	bool "Enable support for tlbi range feature"
1712*4882a593Smuzhiyun	default y
1713*4882a593Smuzhiyun	depends on AS_HAS_ARMV8_4
1714*4882a593Smuzhiyun	help
1715*4882a593Smuzhiyun	  ARMv8.4-TLBI provides TLBI invalidation instruction that apply to a
1716*4882a593Smuzhiyun	  range of input addresses.
1717*4882a593Smuzhiyun
1718*4882a593Smuzhiyun	  The feature introduces new assembly instructions, which are only
1719*4882a593Smuzhiyun	  supported by binutils >= 2.30.
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyunendmenu
1722*4882a593Smuzhiyun
1723*4882a593Smuzhiyunmenu "ARMv8.5 architectural features"
1724*4882a593Smuzhiyun
# Assembler accepts -march=armv8.5-a (probed via the compiler driver).
1725*4882a593Smuzhiyunconfig AS_HAS_ARMV8_5
1726*4882a593Smuzhiyun	def_bool $(cc-option,-Wa$(comma)-march=armv8.5-a)
1727*4882a593Smuzhiyun
# Branch Target Identification for userspace; kernel-side enforcement is
# the separate ARM64_BTI_KERNEL option below.
1728*4882a593Smuzhiyunconfig ARM64_BTI
1729*4882a593Smuzhiyun	bool "Branch Target Identification support"
1730*4882a593Smuzhiyun	default y
1731*4882a593Smuzhiyun	help
1732*4882a593Smuzhiyun	  Branch Target Identification (part of the ARMv8.5 Extensions)
1733*4882a593Smuzhiyun	  provides a mechanism to limit the set of locations to which computed
1734*4882a593Smuzhiyun	  branch instructions such as BR or BLR can jump.
1735*4882a593Smuzhiyun
1736*4882a593Smuzhiyun	  To make use of BTI on CPUs that support it, say Y.
1737*4882a593Smuzhiyun
1738*4882a593Smuzhiyun	  BTI is intended to provide complementary protection to other control
1739*4882a593Smuzhiyun	  flow integrity protection mechanisms, such as the Pointer
1740*4882a593Smuzhiyun	  authentication mechanism provided as part of the ARMv8.3 Extensions.
1741*4882a593Smuzhiyun	  For this reason, it does not make sense to enable this option without
1742*4882a593Smuzhiyun	  also enabling support for pointer authentication.  Thus, when
1743*4882a593Smuzhiyun	  enabling this option you should also select ARM64_PTR_AUTH=y.
1744*4882a593Smuzhiyun
1745*4882a593Smuzhiyun	  Userspace binaries must also be specifically compiled to make use of
1746*4882a593Smuzhiyun	  this mechanism.  If you say N here or the hardware does not support
1747*4882a593Smuzhiyun	  BTI, such binaries can still run, but you get no additional
1748*4882a593Smuzhiyun	  enforcement of branch destinations.
1749*4882a593Smuzhiyun
1750*4882a593Smuzhiyunconfig ARM64_BTI_KERNEL
1751*4882a593Smuzhiyun	bool "Use Branch Target Identification for kernel"
1752*4882a593Smuzhiyun	default y
1753*4882a593Smuzhiyun	depends on ARM64_BTI
1754*4882a593Smuzhiyun	depends on ARM64_PTR_AUTH
1755*4882a593Smuzhiyun	depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
1756*4882a593Smuzhiyun	# GCC is excluded entirely because of
1756*4882a593Smuzhiyun	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671; this also
1756*4882a593Smuzhiyun	# subsumes the earlier GCC >= 10.1 requirement from
1757*4882a593Smuzhiyun	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
1759*4882a593Smuzhiyun	depends on !CC_IS_GCC
1760*4882a593Smuzhiyun	# https://bugs.llvm.org/show_bug.cgi?id=46258
1761*4882a593Smuzhiyun	depends on !CFI_CLANG || CLANG_VERSION >= 120000
1762*4882a593Smuzhiyun	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
1763*4882a593Smuzhiyun	help
1764*4882a593Smuzhiyun	  Build the kernel with Branch Target Identification annotations
1765*4882a593Smuzhiyun	  and enable enforcement of this for kernel code. When this option
1766*4882a593Smuzhiyun	  is enabled and the system supports BTI all kernel code including
1767*4882a593Smuzhiyun	  modular code must have BTI enabled.
1768*4882a593Smuzhiyun
1769*4882a593Smuzhiyunconfig CC_HAS_BRANCH_PROT_PAC_RET_BTI
1770*4882a593Smuzhiyun	# GCC 9 or later, clang 8 or later
1771*4882a593Smuzhiyun	def_bool $(cc-option,-mbranch-protection=pac-ret+leaf+bti)
1772*4882a593Smuzhiyun
1773*4882a593Smuzhiyunconfig ARM64_E0PD
1774*4882a593Smuzhiyun	bool "Enable support for E0PD"
1775*4882a593Smuzhiyun	default y
1776*4882a593Smuzhiyun	help
1777*4882a593Smuzhiyun	  E0PD (part of the ARMv8.5 extensions) allows us to ensure
1778*4882a593Smuzhiyun	  that EL0 accesses made via TTBR1 always fault in constant time,
1779*4882a593Smuzhiyun	  providing similar benefits to KASLR as those provided by KPTI, but
1780*4882a593Smuzhiyun	  with lower overhead and without disrupting legitimate access to
1781*4882a593Smuzhiyun	  kernel memory such as SPE.
1782*4882a593Smuzhiyun
1783*4882a593Smuzhiyun	  This option enables E0PD for TTBR1 where available.
1784*4882a593Smuzhiyun
1785*4882a593Smuzhiyunconfig ARCH_RANDOM
1786*4882a593Smuzhiyun	bool "Enable support for random number generation"
1787*4882a593Smuzhiyun	default y
1788*4882a593Smuzhiyun	help
1789*4882a593Smuzhiyun	  Random number generation (part of the ARMv8.5 Extensions)
1790*4882a593Smuzhiyun	  provides a high bandwidth, cryptographically secure
1791*4882a593Smuzhiyun	  hardware random number generator.
1792*4882a593Smuzhiyun
1793*4882a593Smuzhiyunconfig ARM64_AS_HAS_MTE
1794*4882a593Smuzhiyun	# Initial support for MTE went in binutils 2.32.0, checked with
1795*4882a593Smuzhiyun	# ".arch armv8.5-a+memtag" below. However, this was incomplete
1796*4882a593Smuzhiyun	# as a late addition to the final architecture spec (LDGM/STGM)
1797*4882a593Smuzhiyun	# is only supported in the newer 2.32.x and 2.33 binutils
1798*4882a593Smuzhiyun	# versions, hence the extra "stgm" instruction check below.
1799*4882a593Smuzhiyun	def_bool $(as-instr,.arch armv8.5-a+memtag\nstgm xzr$(comma)[x0])
1800*4882a593Smuzhiyun
1801*4882a593Smuzhiyunconfig ARM64_MTE
1802*4882a593Smuzhiyun	bool "Memory Tagging Extension support"
1803*4882a593Smuzhiyun	default y
1804*4882a593Smuzhiyun	depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
1805*4882a593Smuzhiyun	depends on AS_HAS_ARMV8_5
1806*4882a593Smuzhiyun	# Required for tag checking in the uaccess routines
1807*4882a593Smuzhiyun	depends on ARM64_PAN
1808*4882a593Smuzhiyun	depends on AS_HAS_LSE_ATOMICS
1809*4882a593Smuzhiyun	select ARCH_USES_HIGH_VMA_FLAGS
1810*4882a593Smuzhiyun	help
1811*4882a593Smuzhiyun	  Memory Tagging (part of the ARMv8.5 Extensions) provides
1812*4882a593Smuzhiyun	  architectural support for run-time, always-on detection of
1813*4882a593Smuzhiyun	  various classes of memory error to aid with software debugging
1814*4882a593Smuzhiyun	  to eliminate vulnerabilities arising from memory-unsafe
1815*4882a593Smuzhiyun	  languages.
1816*4882a593Smuzhiyun
1817*4882a593Smuzhiyun	  This option enables the support for the Memory Tagging
1818*4882a593Smuzhiyun	  Extension at EL0 (i.e. for userspace).
1819*4882a593Smuzhiyun
1820*4882a593Smuzhiyun	  Selecting this option allows the feature to be detected at
1821*4882a593Smuzhiyun	  runtime. Any secondary CPU not implementing this feature will
1822*4882a593Smuzhiyun	  not be allowed a late bring-up.
1823*4882a593Smuzhiyun
1824*4882a593Smuzhiyun	  Userspace binaries that want to use this feature must
1825*4882a593Smuzhiyun	  explicitly opt in. The mechanism for the userspace is
1826*4882a593Smuzhiyun	  described in:
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun	  Documentation/arm64/memory-tagging-extension.rst.
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyunendmenu
1831*4882a593Smuzhiyun
1832*4882a593Smuzhiyunconfig ARM64_SVE
1833*4882a593Smuzhiyun	bool "ARM Scalable Vector Extension support"
1834*4882a593Smuzhiyun	default y
1835*4882a593Smuzhiyun	help
1836*4882a593Smuzhiyun	  The Scalable Vector Extension (SVE) is an extension to the AArch64
1837*4882a593Smuzhiyun	  execution state which complements and extends the SIMD functionality
1838*4882a593Smuzhiyun	  of the base architecture to support much larger vectors and to enable
1839*4882a593Smuzhiyun	  additional vectorisation opportunities.
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun	  To enable use of this extension on CPUs that implement it, say Y.
1842*4882a593Smuzhiyun
1843*4882a593Smuzhiyun	  On CPUs that support the SVE2 extensions, this option will enable
1844*4882a593Smuzhiyun	  those too.
1845*4882a593Smuzhiyun
1846*4882a593Smuzhiyun	  Note that for architectural reasons, firmware _must_ implement SVE
1847*4882a593Smuzhiyun	  support when running on SVE capable hardware.  The required support
1848*4882a593Smuzhiyun	  is present in:
1849*4882a593Smuzhiyun
1850*4882a593Smuzhiyun	    * version 1.5 and later of the ARM Trusted Firmware
1851*4882a593Smuzhiyun	    * the AArch64 boot wrapper since commit 5e1261e08abf
1852*4882a593Smuzhiyun	      ("bootwrapper: SVE: Enable SVE for EL2 and below").
1853*4882a593Smuzhiyun
1854*4882a593Smuzhiyun	  For other firmware implementations, consult the firmware documentation
1855*4882a593Smuzhiyun	  or vendor.
1856*4882a593Smuzhiyun
1857*4882a593Smuzhiyun	  If you need the kernel to boot on SVE-capable hardware with broken
1858*4882a593Smuzhiyun	  firmware, you may need to say N here until you get your firmware
1859*4882a593Smuzhiyun	  fixed.  Otherwise, you may experience firmware panics or lockups when
1860*4882a593Smuzhiyun	  booting the kernel.  If unsure and you are not observing these
1861*4882a593Smuzhiyun	  symptoms, you should assume that it is safe to say Y.
1862*4882a593Smuzhiyun
1863*4882a593Smuzhiyunconfig ARM64_MODULE_PLTS
1864*4882a593Smuzhiyun	bool "Use PLTs to allow module memory to spill over into vmalloc area"
1865*4882a593Smuzhiyun	depends on MODULES
1866*4882a593Smuzhiyun	select HAVE_MOD_ARCH_SPECIFIC
1867*4882a593Smuzhiyun	help
1868*4882a593Smuzhiyun	  Allocate PLTs when loading modules so that jumps and calls whose
1869*4882a593Smuzhiyun	  targets are too far away for their relative offsets to be encoded
1870*4882a593Smuzhiyun	  in the instructions themselves can be bounced via veneers in the
1871*4882a593Smuzhiyun	  module's PLT. This allows modules to be allocated in the generic
1872*4882a593Smuzhiyun	  vmalloc area after the dedicated module memory area has been
1873*4882a593Smuzhiyun	  exhausted.
1874*4882a593Smuzhiyun
1875*4882a593Smuzhiyun	  When running with address space randomization (KASLR), the module
1876*4882a593Smuzhiyun	  region itself may be too far away for ordinary relative jumps and
1877*4882a593Smuzhiyun	  calls, and so in that case, module PLTs are required and cannot be
1878*4882a593Smuzhiyun	  disabled.
1879*4882a593Smuzhiyun
1880*4882a593Smuzhiyun	  Specific errata workaround(s) might also force module PLTs to be
1881*4882a593Smuzhiyun	  enabled (ARM64_ERRATUM_843419).
1882*4882a593Smuzhiyun
1883*4882a593Smuzhiyunconfig ARM64_PSEUDO_NMI
1884*4882a593Smuzhiyun	bool "Support for NMI-like interrupts"
1885*4882a593Smuzhiyun	select ARM_GIC_V3
1886*4882a593Smuzhiyun	help
1887*4882a593Smuzhiyun	  Adds support for mimicking Non-Maskable Interrupts through the use of
1888*4882a593Smuzhiyun	  GIC interrupt priority. This support requires version 3 or later of
1889*4882a593Smuzhiyun	  ARM GIC.
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun	  This high priority configuration for interrupts needs to be
1892*4882a593Smuzhiyun	  explicitly enabled by setting the kernel parameter
1893*4882a593Smuzhiyun	  "irqchip.gicv3_pseudo_nmi" to 1.
1894*4882a593Smuzhiyun
1895*4882a593Smuzhiyun	  If unsure, say N
1896*4882a593Smuzhiyun
1897*4882a593Smuzhiyunif ARM64_PSEUDO_NMI
1898*4882a593Smuzhiyunconfig ARM64_DEBUG_PRIORITY_MASKING
1899*4882a593Smuzhiyun	bool "Debug interrupt priority masking"
1900*4882a593Smuzhiyun	help
1901*4882a593Smuzhiyun	  This adds runtime checks to functions enabling/disabling
1902*4882a593Smuzhiyun	  interrupts when using priority masking. The additional checks verify
1903*4882a593Smuzhiyun	  the validity of ICC_PMR_EL1 when calling concerned functions.
1904*4882a593Smuzhiyun
1905*4882a593Smuzhiyun	  If unsure, say N
1906*4882a593Smuzhiyunendif
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyunconfig RELOCATABLE
1909*4882a593Smuzhiyun	bool "Build a relocatable kernel image" if EXPERT
1910*4882a593Smuzhiyun	select ARCH_HAS_RELR
1911*4882a593Smuzhiyun	default y
1912*4882a593Smuzhiyun	help
1913*4882a593Smuzhiyun	  This builds the kernel as a Position Independent Executable (PIE),
1914*4882a593Smuzhiyun	  which retains all relocation metadata required to relocate the
1915*4882a593Smuzhiyun	  kernel binary at runtime to a different virtual address than the
1916*4882a593Smuzhiyun	  address it was linked at.
1917*4882a593Smuzhiyun	  Since AArch64 uses the RELA relocation format, this requires a
1918*4882a593Smuzhiyun	  relocation pass at runtime even if the kernel is loaded at the
1919*4882a593Smuzhiyun	  same address it was linked at.
1920*4882a593Smuzhiyun
1921*4882a593Smuzhiyunconfig RANDOMIZE_BASE
1922*4882a593Smuzhiyun	bool "Randomize the address of the kernel image"
1923*4882a593Smuzhiyun	select ARM64_MODULE_PLTS if MODULES
1924*4882a593Smuzhiyun	select RELOCATABLE
1925*4882a593Smuzhiyun	help
1926*4882a593Smuzhiyun	  Randomizes the virtual address at which the kernel image is
1927*4882a593Smuzhiyun	  loaded, as a security feature that deters exploit attempts
1928*4882a593Smuzhiyun	  relying on knowledge of the location of kernel internals.
1929*4882a593Smuzhiyun
1930*4882a593Smuzhiyun	  It is the bootloader's job to provide entropy, by passing a
1931*4882a593Smuzhiyun	  random u64 value in /chosen/kaslr-seed at kernel entry.
1932*4882a593Smuzhiyun
1933*4882a593Smuzhiyun	  When booting via the UEFI stub, it will invoke the firmware's
1934*4882a593Smuzhiyun	  EFI_RNG_PROTOCOL implementation (if available) to supply entropy
1935*4882a593Smuzhiyun	  to the kernel proper. In addition, it will randomise the physical
1936*4882a593Smuzhiyun	  location of the kernel Image as well.
1937*4882a593Smuzhiyun
1938*4882a593Smuzhiyun	  If unsure, say N.
1939*4882a593Smuzhiyun
1940*4882a593Smuzhiyunconfig RANDOMIZE_MODULE_REGION_FULL
1941*4882a593Smuzhiyun	bool "Randomize the module region over a 4 GB range"
1942*4882a593Smuzhiyun	depends on RANDOMIZE_BASE
1943*4882a593Smuzhiyun	default y
1944*4882a593Smuzhiyun	help
1945*4882a593Smuzhiyun	  Randomizes the location of the module region inside a 4 GB window
1946*4882a593Smuzhiyun	  covering the core kernel. This way, it is less likely for modules
1947*4882a593Smuzhiyun	  to leak information about the location of core kernel data structures
1948*4882a593Smuzhiyun	  but it does imply that function calls between modules and the core
1949*4882a593Smuzhiyun	  kernel will need to be resolved via veneers in the module PLT.
1950*4882a593Smuzhiyun
1951*4882a593Smuzhiyun	  When this option is not set, the module region will be randomized over
1952*4882a593Smuzhiyun	  a limited range that contains the [_stext, _etext] interval of the
1953*4882a593Smuzhiyun	  core kernel, so branch relocations are always in range.
1954*4882a593Smuzhiyun
1955*4882a593Smuzhiyun# True when the compiler can locate the stack-protector guard via a
1955*4882a593Smuzhiyun# system register (sp_el0 at offset 0) instead of a fixed global symbol;
1955*4882a593Smuzhiyun# required by STACKPROTECTOR_PER_TASK below.
1955*4882a593Smuzhiyunconfig CC_HAVE_STACKPROTECTOR_SYSREG
1956*4882a593Smuzhiyun	def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
1957*4882a593Smuzhiyun
1958*4882a593Smuzhiyun# Use a per-task stack-protector canary (read relative to sp_el0) rather
1958*4882a593Smuzhiyun# than a single global canary; enabled automatically whenever the stack
1958*4882a593Smuzhiyun# protector is on and the compiler supports the sysreg guard probe above.
1958*4882a593Smuzhiyunconfig STACKPROTECTOR_PER_TASK
1959*4882a593Smuzhiyun	def_bool y
1960*4882a593Smuzhiyun	depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_SYSREG
1961*4882a593Smuzhiyun
1962*4882a593Smuzhiyunendmenu
1963*4882a593Smuzhiyun
1964*4882a593Smuzhiyunmenu "Boot options"
1965*4882a593Smuzhiyun
1966*4882a593Smuzhiyunconfig ARM64_ACPI_PARKING_PROTOCOL
1967*4882a593Smuzhiyun	bool "Enable support for the ARM64 ACPI parking protocol"
1968*4882a593Smuzhiyun	depends on ACPI
1969*4882a593Smuzhiyun	help
1970*4882a593Smuzhiyun	  Enable support for the ARM64 ACPI parking protocol. If disabled
1971*4882a593Smuzhiyun	  the kernel will not allow booting through the ARM64 ACPI parking
1972*4882a593Smuzhiyun	  protocol even if the corresponding data is present in the ACPI
1973*4882a593Smuzhiyun	  MADT table.
1974*4882a593Smuzhiyun
1975*4882a593Smuzhiyunconfig CMDLINE
1976*4882a593Smuzhiyun	string "Default kernel command string"
1977*4882a593Smuzhiyun	default ""
1978*4882a593Smuzhiyun	help
1979*4882a593Smuzhiyun	  Provide a set of default command-line options at build time by
1980*4882a593Smuzhiyun	  entering them here. As a minimum, you should specify the
1981*4882a593Smuzhiyun	  root device (e.g. root=/dev/nfs).
1982*4882a593Smuzhiyun
1983*4882a593Smuzhiyunchoice
1984*4882a593Smuzhiyun	prompt "Kernel command line type" if CMDLINE != ""
1985*4882a593Smuzhiyun	default CMDLINE_FROM_BOOTLOADER
1986*4882a593Smuzhiyun	help
1987*4882a593Smuzhiyun	  Choose how the kernel will handle the provided default kernel
1988*4882a593Smuzhiyun	  command line string.
1989*4882a593Smuzhiyun
1990*4882a593Smuzhiyunconfig CMDLINE_FROM_BOOTLOADER
1991*4882a593Smuzhiyun	bool "Use bootloader kernel arguments if available"
1992*4882a593Smuzhiyun	help
1993*4882a593Smuzhiyun	  Uses the command-line options passed by the boot loader. If
1994*4882a593Smuzhiyun	  the boot loader doesn't provide any, the default kernel command
1995*4882a593Smuzhiyun	  string provided in CMDLINE will be used.
1996*4882a593Smuzhiyun
1997*4882a593Smuzhiyunconfig CMDLINE_EXTEND
1998*4882a593Smuzhiyun	bool "Extend bootloader kernel arguments"
1999*4882a593Smuzhiyun	help
2000*4882a593Smuzhiyun	  The command-line arguments provided by the boot loader will be
2001*4882a593Smuzhiyun	  appended to the default kernel command string.
2002*4882a593Smuzhiyun
2003*4882a593Smuzhiyunconfig CMDLINE_FORCE
2004*4882a593Smuzhiyun	bool "Always use the default kernel command string"
2005*4882a593Smuzhiyun	help
2006*4882a593Smuzhiyun	  Always use the default kernel command string, even if the boot
2007*4882a593Smuzhiyun	  loader passes other arguments to the kernel.
2008*4882a593Smuzhiyun	  This is useful if you cannot or don't want to change the
2009*4882a593Smuzhiyun	  command-line options your boot loader passes to the kernel.
2010*4882a593Smuzhiyun
2011*4882a593Smuzhiyunendchoice
2012*4882a593Smuzhiyun
2013*4882a593Smuzhiyunconfig EFI_STUB
2014*4882a593Smuzhiyun	bool
2015*4882a593Smuzhiyun
2016*4882a593Smuzhiyunconfig EFI
2017*4882a593Smuzhiyun	bool "UEFI runtime support"
2018*4882a593Smuzhiyun	depends on OF && !CPU_BIG_ENDIAN
2019*4882a593Smuzhiyun	depends on KERNEL_MODE_NEON
2020*4882a593Smuzhiyun	select ARCH_SUPPORTS_ACPI
2021*4882a593Smuzhiyun	select LIBFDT
2022*4882a593Smuzhiyun	select UCS2_STRING
2023*4882a593Smuzhiyun	select EFI_PARAMS_FROM_FDT
2024*4882a593Smuzhiyun	select EFI_RUNTIME_WRAPPERS
2025*4882a593Smuzhiyun	select EFI_STUB
2026*4882a593Smuzhiyun	select EFI_GENERIC_STUB
2027*4882a593Smuzhiyun	default y
2028*4882a593Smuzhiyun	help
2029*4882a593Smuzhiyun	  This option provides support for runtime services provided
2030*4882a593Smuzhiyun	  by UEFI firmware (such as non-volatile variables, realtime
2031*4882a593Smuzhiyun	  clock, and platform reset). A UEFI stub is also provided to
2032*4882a593Smuzhiyun	  allow the kernel to be booted as an EFI application. This
2033*4882a593Smuzhiyun	  is only useful on systems that have UEFI firmware.
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyunconfig DMI
2036*4882a593Smuzhiyun	bool "Enable support for SMBIOS (DMI) tables"
2037*4882a593Smuzhiyun	depends on EFI
2038*4882a593Smuzhiyun	default y
2039*4882a593Smuzhiyun	help
2040*4882a593Smuzhiyun	  This enables SMBIOS/DMI feature for systems.
2041*4882a593Smuzhiyun
2042*4882a593Smuzhiyun	  This option is only useful on systems that have UEFI firmware.
2043*4882a593Smuzhiyun	  However, even with this option, the resultant kernel should
2044*4882a593Smuzhiyun	  continue to boot on existing non-UEFI platforms.
2045*4882a593Smuzhiyun
2046*4882a593Smuzhiyunendmenu
2047*4882a593Smuzhiyun
2048*4882a593Smuzhiyunconfig SYSVIPC_COMPAT
2049*4882a593Smuzhiyun	def_bool y
2050*4882a593Smuzhiyun	depends on COMPAT && SYSVIPC
2051*4882a593Smuzhiyun
2052*4882a593Smuzhiyunconfig ARCH_ENABLE_HUGEPAGE_MIGRATION
2053*4882a593Smuzhiyun	def_bool y
2054*4882a593Smuzhiyun	depends on HUGETLB_PAGE && MIGRATION
2055*4882a593Smuzhiyun
2056*4882a593Smuzhiyunconfig ARCH_ENABLE_THP_MIGRATION
2057*4882a593Smuzhiyun	def_bool y
2058*4882a593Smuzhiyun	depends on TRANSPARENT_HUGEPAGE
2059*4882a593Smuzhiyun
2060*4882a593Smuzhiyunmenu "Power management options"
2061*4882a593Smuzhiyun
2062*4882a593Smuzhiyunsource "kernel/power/Kconfig"
2063*4882a593Smuzhiyun
2064*4882a593Smuzhiyunconfig ARCH_HIBERNATION_POSSIBLE
2065*4882a593Smuzhiyun	def_bool y
2066*4882a593Smuzhiyun	depends on CPU_PM
2067*4882a593Smuzhiyun
2068*4882a593Smuzhiyunconfig ARCH_HIBERNATION_HEADER
2069*4882a593Smuzhiyun	def_bool y
2070*4882a593Smuzhiyun	depends on HIBERNATION
2071*4882a593Smuzhiyun
2072*4882a593Smuzhiyunconfig ARCH_SUSPEND_POSSIBLE
2073*4882a593Smuzhiyun	def_bool y
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyunendmenu
2076*4882a593Smuzhiyun
2077*4882a593Smuzhiyunmenu "CPU Power Management"
2078*4882a593Smuzhiyun
2079*4882a593Smuzhiyunsource "drivers/cpuidle/Kconfig"
2080*4882a593Smuzhiyun
2081*4882a593Smuzhiyunsource "drivers/cpufreq/Kconfig"
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyunendmenu
2084*4882a593Smuzhiyun
2085*4882a593Smuzhiyunsource "drivers/firmware/Kconfig"
2086*4882a593Smuzhiyun
2087*4882a593Smuzhiyunsource "drivers/acpi/Kconfig"
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyunsource "arch/arm64/kvm/Kconfig"
2090*4882a593Smuzhiyun
2091*4882a593Smuzhiyunif CRYPTO
2092*4882a593Smuzhiyunsource "arch/arm64/crypto/Kconfig"
2093*4882a593Smuzhiyunendif
2094