xref: /OK3568_Linux_fs/kernel/mm/Kconfig (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun# SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun
3*4882a593Smuzhiyunmenu "Memory Management options"
4*4882a593Smuzhiyun
5*4882a593Smuzhiyunconfig SELECT_MEMORY_MODEL
6*4882a593Smuzhiyun	def_bool y
7*4882a593Smuzhiyun	depends on ARCH_SELECT_MEMORY_MODEL
8*4882a593Smuzhiyun
9*4882a593Smuzhiyunchoice
10*4882a593Smuzhiyun	prompt "Memory model"
11*4882a593Smuzhiyun	depends on SELECT_MEMORY_MODEL
12*4882a593Smuzhiyun	default DISCONTIGMEM_MANUAL if ARCH_DISCONTIGMEM_DEFAULT
13*4882a593Smuzhiyun	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
14*4882a593Smuzhiyun	default FLATMEM_MANUAL
15*4882a593Smuzhiyun	help
16*4882a593Smuzhiyun	  This option allows you to change some of the ways that
17*4882a593Smuzhiyun	  Linux manages its memory internally. Most users will
18*4882a593Smuzhiyun	  only have one option here selected by the architecture
19*4882a593Smuzhiyun	  configuration. This is normal.
20*4882a593Smuzhiyun
21*4882a593Smuzhiyunconfig FLATMEM_MANUAL
22*4882a593Smuzhiyun	bool "Flat Memory"
23*4882a593Smuzhiyun	depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
24*4882a593Smuzhiyun	help
25*4882a593Smuzhiyun	  This option is best suited for non-NUMA systems with
26*4882a593Smuzhiyun	  flat address space. The FLATMEM is the most efficient
27*4882a593Smuzhiyun	  system in terms of performance and resource consumption
28*4882a593Smuzhiyun	  and it is the best option for smaller systems.
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun	  For systems that have holes in their physical address
31*4882a593Smuzhiyun	  spaces and for features like NUMA and memory hotplug,
32*4882a593Smuzhiyun	  choose "Sparse Memory".
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun	  If unsure, choose this option (Flat Memory) over any other.
35*4882a593Smuzhiyun
36*4882a593Smuzhiyunconfig DISCONTIGMEM_MANUAL
37*4882a593Smuzhiyun	bool "Discontiguous Memory"
38*4882a593Smuzhiyun	depends on ARCH_DISCONTIGMEM_ENABLE
39*4882a593Smuzhiyun	help
40*4882a593Smuzhiyun	  This option provides enhanced support for discontiguous
41*4882a593Smuzhiyun	  memory systems, over FLATMEM.  These systems have holes
42*4882a593Smuzhiyun	  in their physical address spaces, and this option provides
43*4882a593Smuzhiyun	  more efficient handling of these holes.
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun	  Although "Discontiguous Memory" is still used by several
46*4882a593Smuzhiyun	  architectures, it is considered deprecated in favor of
47*4882a593Smuzhiyun	  "Sparse Memory".
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun	  If unsure, choose "Sparse Memory" over this option.
50*4882a593Smuzhiyun
51*4882a593Smuzhiyunconfig SPARSEMEM_MANUAL
52*4882a593Smuzhiyun	bool "Sparse Memory"
53*4882a593Smuzhiyun	depends on ARCH_SPARSEMEM_ENABLE
54*4882a593Smuzhiyun	help
55*4882a593Smuzhiyun	  This will be the only option for some systems, including
56*4882a593Smuzhiyun	  memory hot-plug systems.  This is normal.
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun	  This option provides efficient support for systems with
59*4882a593Smuzhiyun	  holes in their physical address space and allows memory
60*4882a593Smuzhiyun	  hot-plug and hot-remove.
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun	  If unsure, choose "Flat Memory" over this option.
63*4882a593Smuzhiyun
64*4882a593Smuzhiyunendchoice
65*4882a593Smuzhiyun
66*4882a593Smuzhiyunconfig DISCONTIGMEM
67*4882a593Smuzhiyun	def_bool y
68*4882a593Smuzhiyun	depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL
69*4882a593Smuzhiyun
70*4882a593Smuzhiyunconfig SPARSEMEM
71*4882a593Smuzhiyun	def_bool y
72*4882a593Smuzhiyun	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
73*4882a593Smuzhiyun
74*4882a593Smuzhiyunconfig FLATMEM
75*4882a593Smuzhiyun	def_bool y
76*4882a593Smuzhiyun	depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL
77*4882a593Smuzhiyun
78*4882a593Smuzhiyunconfig FLAT_NODE_MEM_MAP
79*4882a593Smuzhiyun	def_bool y
80*4882a593Smuzhiyun	depends on !SPARSEMEM
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun#
83*4882a593Smuzhiyun# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
84*4882a593Smuzhiyun# to represent different areas of memory.  This variable allows
85*4882a593Smuzhiyun# those dependencies to exist individually.
86*4882a593Smuzhiyun#
87*4882a593Smuzhiyunconfig NEED_MULTIPLE_NODES
88*4882a593Smuzhiyun	def_bool y
89*4882a593Smuzhiyun	depends on DISCONTIGMEM || NUMA
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun#
92*4882a593Smuzhiyun# SPARSEMEM_EXTREME (which is the default) does some bootmem
93*4882a593Smuzhiyun# allocations when sparse_init() is called.  If this cannot
94*4882a593Smuzhiyun# be done on your architecture, select this option.  However,
95*4882a593Smuzhiyun# statically allocating the mem_section[] array can potentially
96*4882a593Smuzhiyun# consume vast quantities of .bss, so be careful.
97*4882a593Smuzhiyun#
98*4882a593Smuzhiyun# This option will also potentially produce smaller runtime code
99*4882a593Smuzhiyun# with gcc 3.4 and later.
100*4882a593Smuzhiyun#
101*4882a593Smuzhiyunconfig SPARSEMEM_STATIC
102*4882a593Smuzhiyun	bool
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun#
105*4882a593Smuzhiyun# Architecture platforms which require a two level mem_section in SPARSEMEM
106*4882a593Smuzhiyun# must select this option. This is usually for architecture platforms with
107*4882a593Smuzhiyun# an extremely sparse physical address space.
108*4882a593Smuzhiyun#
109*4882a593Smuzhiyunconfig SPARSEMEM_EXTREME
110*4882a593Smuzhiyun	def_bool y
111*4882a593Smuzhiyun	depends on SPARSEMEM && !SPARSEMEM_STATIC
112*4882a593Smuzhiyun
113*4882a593Smuzhiyunconfig SPARSEMEM_VMEMMAP_ENABLE
114*4882a593Smuzhiyun	bool
115*4882a593Smuzhiyun
116*4882a593Smuzhiyunconfig SPARSEMEM_VMEMMAP
117*4882a593Smuzhiyun	bool "Sparse Memory virtual memmap"
118*4882a593Smuzhiyun	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
119*4882a593Smuzhiyun	default y
120*4882a593Smuzhiyun	help
121*4882a593Smuzhiyun	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
122*4882a593Smuzhiyun	  pfn_to_page and page_to_pfn operations.  This is the most
123*4882a593Smuzhiyun	  efficient option when sufficient kernel resources are available.
124*4882a593Smuzhiyun
125*4882a593Smuzhiyunconfig HAVE_MEMBLOCK_PHYS_MAP
126*4882a593Smuzhiyun	bool
127*4882a593Smuzhiyun
128*4882a593Smuzhiyunconfig HAVE_FAST_GUP
129*4882a593Smuzhiyun	depends on MMU
130*4882a593Smuzhiyun	bool
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun# Don't discard allocated memory used to track "memory" and "reserved" memblocks
133*4882a593Smuzhiyun# after early boot, so it can still be used to test for validity of memory.
134*4882a593Smuzhiyun# Also, memblocks are updated with memory hot(un)plug.
135*4882a593Smuzhiyunconfig ARCH_KEEP_MEMBLOCK
136*4882a593Smuzhiyun	bool
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun# Keep arch NUMA mapping infrastructure post-init.
139*4882a593Smuzhiyunconfig NUMA_KEEP_MEMINFO
140*4882a593Smuzhiyun	bool
141*4882a593Smuzhiyun
142*4882a593Smuzhiyunconfig MEMORY_ISOLATION
143*4882a593Smuzhiyun	bool
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun#
146*4882a593Smuzhiyun# Only be set on architectures that have completely implemented memory hotplug
147*4882a593Smuzhiyun# feature. If you are not sure, don't touch it.
148*4882a593Smuzhiyun#
149*4882a593Smuzhiyunconfig HAVE_BOOTMEM_INFO_NODE
150*4882a593Smuzhiyun	def_bool n
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun# eventually, we can have this option just 'select SPARSEMEM'
153*4882a593Smuzhiyunconfig MEMORY_HOTPLUG
154*4882a593Smuzhiyun	bool "Allow for memory hot-add"
155*4882a593Smuzhiyun	select MEMORY_ISOLATION
156*4882a593Smuzhiyun	depends on SPARSEMEM || X86_64_ACPI_NUMA
157*4882a593Smuzhiyun	depends on ARCH_ENABLE_MEMORY_HOTPLUG
158*4882a593Smuzhiyun	depends on 64BIT || BROKEN
159*4882a593Smuzhiyun	select NUMA_KEEP_MEMINFO if NUMA
160*4882a593Smuzhiyun
161*4882a593Smuzhiyunconfig MEMORY_HOTPLUG_SPARSE
162*4882a593Smuzhiyun	def_bool y
163*4882a593Smuzhiyun	depends on SPARSEMEM && MEMORY_HOTPLUG
164*4882a593Smuzhiyun
165*4882a593Smuzhiyunconfig MEMORY_HOTPLUG_DEFAULT_ONLINE
166*4882a593Smuzhiyun	bool "Online the newly added memory blocks by default"
167*4882a593Smuzhiyun	depends on MEMORY_HOTPLUG
168*4882a593Smuzhiyun	help
169*4882a593Smuzhiyun	  This option sets the default policy setting for memory hotplug
170*4882a593Smuzhiyun	  onlining policy (/sys/devices/system/memory/auto_online_blocks) which
171*4882a593Smuzhiyun	  determines what happens to newly added memory regions. Policy setting
172*4882a593Smuzhiyun	  can always be changed at runtime.
173*4882a593Smuzhiyun	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun	  Say Y here if you want all hot-plugged memory blocks to appear in
176*4882a593Smuzhiyun	  'online' state by default.
177*4882a593Smuzhiyun	  Say N here if you want the default policy to keep all hot-plugged
178*4882a593Smuzhiyun	  memory blocks in 'offline' state.
179*4882a593Smuzhiyun
180*4882a593Smuzhiyunconfig MEMORY_HOTREMOVE
181*4882a593Smuzhiyun	bool "Allow for memory hot remove"
182*4882a593Smuzhiyun	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
183*4882a593Smuzhiyun	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
184*4882a593Smuzhiyun	depends on MIGRATION
185*4882a593Smuzhiyun
186*4882a593Smuzhiyun# Heavily threaded applications may benefit from splitting the mm-wide
187*4882a593Smuzhiyun# page_table_lock, so that faults on different parts of the user address
188*4882a593Smuzhiyun# space can be handled with less contention: split it at this NR_CPUS.
189*4882a593Smuzhiyun# Default to 4 for wider testing, though 8 might be more appropriate.
190*4882a593Smuzhiyun# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
191*4882a593Smuzhiyun# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
192*4882a593Smuzhiyun# SPARC32 allocates multiple pte tables within a single page, and therefore
193*4882a593Smuzhiyun# a per-page lock leads to problems when multiple tables need to be locked
194*4882a593Smuzhiyun# at the same time (e.g. copy_page_range()).
195*4882a593Smuzhiyun# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
196*4882a593Smuzhiyun#
197*4882a593Smuzhiyunconfig SPLIT_PTLOCK_CPUS
198*4882a593Smuzhiyun	int
199*4882a593Smuzhiyun	default "999999" if !MMU
200*4882a593Smuzhiyun	default "999999" if ARM && !CPU_CACHE_VIPT
201*4882a593Smuzhiyun	default "999999" if PARISC && !PA20
202*4882a593Smuzhiyun	default "999999" if SPARC32
203*4882a593Smuzhiyun	default "4"
204*4882a593Smuzhiyun
205*4882a593Smuzhiyunconfig ARCH_ENABLE_SPLIT_PMD_PTLOCK
206*4882a593Smuzhiyun	bool
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun#
209*4882a593Smuzhiyun# support for memory balloon
210*4882a593Smuzhiyunconfig MEMORY_BALLOON
211*4882a593Smuzhiyun	bool
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun#
214*4882a593Smuzhiyun# support for memory balloon compaction
215*4882a593Smuzhiyunconfig BALLOON_COMPACTION
216*4882a593Smuzhiyun	bool "Allow for balloon memory compaction/migration"
217*4882a593Smuzhiyun	def_bool y
218*4882a593Smuzhiyun	depends on COMPACTION && MEMORY_BALLOON
219*4882a593Smuzhiyun	help
220*4882a593Smuzhiyun	  Memory fragmentation introduced by ballooning might reduce
221*4882a593Smuzhiyun	  significantly the number of 2MB contiguous memory blocks that can be
222*4882a593Smuzhiyun	  used within a guest, thus imposing performance penalties associated
223*4882a593Smuzhiyun	  with the reduced number of transparent huge pages that could be used
224*4882a593Smuzhiyun	  by the guest workload. Allowing the compaction & migration for memory
225*4882a593Smuzhiyun	  pages enlisted as being part of memory balloon devices avoids the
226*4882a593Smuzhiyun	  aforementioned scenario and helps improve memory defragmentation.
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun#
229*4882a593Smuzhiyun# support for memory compaction
230*4882a593Smuzhiyunconfig COMPACTION
231*4882a593Smuzhiyun	bool "Allow for memory compaction"
232*4882a593Smuzhiyun	def_bool y
233*4882a593Smuzhiyun	select MIGRATION
234*4882a593Smuzhiyun	depends on MMU
235*4882a593Smuzhiyun	help
236*4882a593Smuzhiyun	  Compaction is the only memory management component to form
237*4882a593Smuzhiyun	  high order (larger physically contiguous) memory blocks
238*4882a593Smuzhiyun	  reliably. The page allocator relies on compaction heavily and
239*4882a593Smuzhiyun	  the lack of the feature can lead to unexpected OOM killer
240*4882a593Smuzhiyun	  invocations for high order memory requests. You shouldn't
241*4882a593Smuzhiyun	  disable this option unless there really is a strong reason for
242*4882a593Smuzhiyun	  it and then we would be really interested to hear about that at
243*4882a593Smuzhiyun	  linux-mm@kvack.org.
244*4882a593Smuzhiyun
245*4882a593Smuzhiyun#
246*4882a593Smuzhiyun# support for free page reporting
247*4882a593Smuzhiyunconfig PAGE_REPORTING
248*4882a593Smuzhiyun	bool "Free page reporting"
249*4882a593Smuzhiyun	def_bool n
250*4882a593Smuzhiyun	help
251*4882a593Smuzhiyun	  Free page reporting allows for the incremental acquisition of
252*4882a593Smuzhiyun	  free pages from the buddy allocator for the purpose of reporting
253*4882a593Smuzhiyun	  those pages to another entity, such as a hypervisor, so that the
254*4882a593Smuzhiyun	  memory can be freed within the host for other uses.
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun#
257*4882a593Smuzhiyun# support for page migration
258*4882a593Smuzhiyun#
259*4882a593Smuzhiyunconfig MIGRATION
260*4882a593Smuzhiyun	bool "Page migration"
261*4882a593Smuzhiyun	def_bool y
262*4882a593Smuzhiyun	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
263*4882a593Smuzhiyun	help
264*4882a593Smuzhiyun	  Allows the migration of the physical location of pages of processes
265*4882a593Smuzhiyun	  while the virtual addresses are not changed. This is useful in
266*4882a593Smuzhiyun	  two situations. The first is on NUMA systems to put pages nearer
267*4882a593Smuzhiyun	  to the processors accessing. The second is when allocating huge
268*4882a593Smuzhiyun	  pages as migration can relocate pages to satisfy a huge page
269*4882a593Smuzhiyun	  allocation instead of reclaiming.
270*4882a593Smuzhiyun
271*4882a593Smuzhiyunconfig ARCH_ENABLE_HUGEPAGE_MIGRATION
272*4882a593Smuzhiyun	bool
273*4882a593Smuzhiyun
274*4882a593Smuzhiyunconfig ARCH_ENABLE_THP_MIGRATION
275*4882a593Smuzhiyun	bool
276*4882a593Smuzhiyun
277*4882a593Smuzhiyunconfig CONTIG_ALLOC
278*4882a593Smuzhiyun	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
279*4882a593Smuzhiyun
280*4882a593Smuzhiyunconfig PHYS_ADDR_T_64BIT
281*4882a593Smuzhiyun	def_bool 64BIT
282*4882a593Smuzhiyun
283*4882a593Smuzhiyunconfig BOUNCE
284*4882a593Smuzhiyun	bool "Enable bounce buffers"
285*4882a593Smuzhiyun	default y
286*4882a593Smuzhiyun	depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
287*4882a593Smuzhiyun	help
288*4882a593Smuzhiyun	  Enable bounce buffers for devices that cannot access
289*4882a593Smuzhiyun	  the full range of memory available to the CPU. Enabled
290*4882a593Smuzhiyun	  by default when ZONE_DMA or HIGHMEM is selected, but you
291*4882a593Smuzhiyun	  may say n to override this.
292*4882a593Smuzhiyun
293*4882a593Smuzhiyunconfig VIRT_TO_BUS
294*4882a593Smuzhiyun	bool
295*4882a593Smuzhiyun	help
296*4882a593Smuzhiyun	  An architecture should select this if it implements the
297*4882a593Smuzhiyun	  deprecated interface virt_to_bus().  All new architectures
298*4882a593Smuzhiyun	  should probably not select this.
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun
301*4882a593Smuzhiyunconfig MMU_NOTIFIER
302*4882a593Smuzhiyun	bool
303*4882a593Smuzhiyun	select SRCU
304*4882a593Smuzhiyun	select INTERVAL_TREE
305*4882a593Smuzhiyun
306*4882a593Smuzhiyunconfig KSM
307*4882a593Smuzhiyun	bool "Enable KSM for page merging"
308*4882a593Smuzhiyun	depends on MMU
309*4882a593Smuzhiyun	select XXHASH
310*4882a593Smuzhiyun	help
311*4882a593Smuzhiyun	  Enable Kernel Samepage Merging: KSM periodically scans those areas
312*4882a593Smuzhiyun	  of an application's address space that an app has advised may be
313*4882a593Smuzhiyun	  mergeable.  When it finds pages of identical content, it replaces
314*4882a593Smuzhiyun	  the many instances by a single page with that content, so
315*4882a593Smuzhiyun	  saving memory until one or another app needs to modify the content.
316*4882a593Smuzhiyun	  Recommended for use with KVM, or with other duplicative applications.
317*4882a593Smuzhiyun	  See Documentation/vm/ksm.rst for more information: KSM is inactive
318*4882a593Smuzhiyun	  until a program has madvised that an area is MADV_MERGEABLE, and
319*4882a593Smuzhiyun	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
320*4882a593Smuzhiyun
321*4882a593Smuzhiyunconfig DEFAULT_MMAP_MIN_ADDR
322*4882a593Smuzhiyun	int "Low address space to protect from user allocation"
323*4882a593Smuzhiyun	depends on MMU
324*4882a593Smuzhiyun	default 4096
325*4882a593Smuzhiyun	help
326*4882a593Smuzhiyun	  This is the portion of low virtual memory which should be protected
327*4882a593Smuzhiyun	  from userspace allocation.  Keeping a user from writing to low pages
328*4882a593Smuzhiyun	  can help reduce the impact of kernel NULL pointer bugs.
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun	  For most ia64, ppc64 and x86 users with lots of address space
331*4882a593Smuzhiyun	  a value of 65536 is reasonable and should cause no problems.
332*4882a593Smuzhiyun	  On arm and other archs it should not be higher than 32768.
333*4882a593Smuzhiyun	  Programs which use vm86 functionality or have some need to map
334*4882a593Smuzhiyun	  this low address space will need CAP_SYS_RAWIO or disable this
335*4882a593Smuzhiyun	  protection by setting the value to 0.
336*4882a593Smuzhiyun
337*4882a593Smuzhiyun	  This value can be changed after boot using the
338*4882a593Smuzhiyun	  /proc/sys/vm/mmap_min_addr tunable.
339*4882a593Smuzhiyun
340*4882a593Smuzhiyunconfig ARCH_SUPPORTS_MEMORY_FAILURE
341*4882a593Smuzhiyun	bool
342*4882a593Smuzhiyun
343*4882a593Smuzhiyunconfig MEMORY_FAILURE
344*4882a593Smuzhiyun	depends on MMU
345*4882a593Smuzhiyun	depends on ARCH_SUPPORTS_MEMORY_FAILURE
346*4882a593Smuzhiyun	bool "Enable recovery from hardware memory errors"
347*4882a593Smuzhiyun	select MEMORY_ISOLATION
348*4882a593Smuzhiyun	select RAS
349*4882a593Smuzhiyun	help
350*4882a593Smuzhiyun	  Enables code to recover from some memory failures on systems
351*4882a593Smuzhiyun	  with MCA recovery. This allows a system to continue running
352*4882a593Smuzhiyun	  even when some of its memory has uncorrected errors. This requires
353*4882a593Smuzhiyun	  special hardware support and typically ECC memory.
354*4882a593Smuzhiyun
355*4882a593Smuzhiyunconfig HWPOISON_INJECT
356*4882a593Smuzhiyun	tristate "HWPoison pages injector"
357*4882a593Smuzhiyun	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
358*4882a593Smuzhiyun	select PROC_PAGE_MONITOR
359*4882a593Smuzhiyun
360*4882a593Smuzhiyunconfig NOMMU_INITIAL_TRIM_EXCESS
361*4882a593Smuzhiyun	int "Turn on mmap() excess space trimming before booting"
362*4882a593Smuzhiyun	depends on !MMU
363*4882a593Smuzhiyun	default 1
364*4882a593Smuzhiyun	help
365*4882a593Smuzhiyun	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
366*4882a593Smuzhiyun	  of memory on which to store mappings, but it can only ask the system
367*4882a593Smuzhiyun	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
368*4882a593Smuzhiyun	  more than it requires.  To deal with this, mmap() is able to trim off
369*4882a593Smuzhiyun	  the excess and return it to the allocator.
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun	  If trimming is enabled, the excess is trimmed off and returned to the
372*4882a593Smuzhiyun	  system allocator, which can cause extra fragmentation, particularly
373*4882a593Smuzhiyun	  if there are a lot of transient processes.
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun	  If trimming is disabled, the excess is kept, but not used, which for
376*4882a593Smuzhiyun	  long-term mappings means that the space is wasted.
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun	  Trimming can be dynamically controlled through a sysctl option
379*4882a593Smuzhiyun	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
380*4882a593Smuzhiyun	  excess pages there must be before trimming should occur, or zero if
381*4882a593Smuzhiyun	  no trimming is to occur.
382*4882a593Smuzhiyun
383*4882a593Smuzhiyun	  This option specifies the initial value of this option.  The default
384*4882a593Smuzhiyun	  of 1 says that all excess pages should be trimmed.
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
387*4882a593Smuzhiyun
388*4882a593Smuzhiyunconfig TRANSPARENT_HUGEPAGE
389*4882a593Smuzhiyun	bool "Transparent Hugepage Support"
390*4882a593Smuzhiyun	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
391*4882a593Smuzhiyun	select COMPACTION
392*4882a593Smuzhiyun	select XARRAY_MULTI
393*4882a593Smuzhiyun	help
394*4882a593Smuzhiyun	  Transparent Hugepages allows the kernel to use huge pages and
395*4882a593Smuzhiyun	  huge tlb transparently to the applications whenever possible.
396*4882a593Smuzhiyun	  This feature can improve computing performance to certain
397*4882a593Smuzhiyun	  applications by speeding up page faults during memory
398*4882a593Smuzhiyun	  allocation, by reducing the number of tlb misses and by speeding
399*4882a593Smuzhiyun	  up the pagetable walking.
400*4882a593Smuzhiyun
401*4882a593Smuzhiyun	  If memory constrained on embedded, you may want to say N.
402*4882a593Smuzhiyun
403*4882a593Smuzhiyunchoice
404*4882a593Smuzhiyun	prompt "Transparent Hugepage Support sysfs defaults"
405*4882a593Smuzhiyun	depends on TRANSPARENT_HUGEPAGE
406*4882a593Smuzhiyun	default TRANSPARENT_HUGEPAGE_ALWAYS
407*4882a593Smuzhiyun	help
408*4882a593Smuzhiyun	  Selects the sysfs defaults for Transparent Hugepage Support.
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun	config TRANSPARENT_HUGEPAGE_ALWAYS
411*4882a593Smuzhiyun		bool "always"
412*4882a593Smuzhiyun	help
413*4882a593Smuzhiyun	  Enabling Transparent Hugepage always, can increase the
414*4882a593Smuzhiyun	  memory footprint of applications without a guaranteed
415*4882a593Smuzhiyun	  benefit but it will work automatically for all applications.
416*4882a593Smuzhiyun
417*4882a593Smuzhiyun	config TRANSPARENT_HUGEPAGE_MADVISE
418*4882a593Smuzhiyun		bool "madvise"
419*4882a593Smuzhiyun	help
420*4882a593Smuzhiyun	  Enabling Transparent Hugepage madvise, will only provide a
421*4882a593Smuzhiyun	  performance improvement benefit to the applications using
422*4882a593Smuzhiyun	  madvise(MADV_HUGEPAGE) but it won't risk to increase the
423*4882a593Smuzhiyun	  memory footprint of applications without a guaranteed
424*4882a593Smuzhiyun	  benefit.
425*4882a593Smuzhiyunendchoice
426*4882a593Smuzhiyun
427*4882a593Smuzhiyunconfig ARCH_WANTS_THP_SWAP
428*4882a593Smuzhiyun	def_bool n
429*4882a593Smuzhiyun
430*4882a593Smuzhiyunconfig THP_SWAP
431*4882a593Smuzhiyun	def_bool y
432*4882a593Smuzhiyun	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP
433*4882a593Smuzhiyun	help
434*4882a593Smuzhiyun	  Swap transparent huge pages in one piece, without splitting.
435*4882a593Smuzhiyun	  XXX: For now, swap cluster backing transparent huge page
436*4882a593Smuzhiyun	  will be split after swapout.
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun	  For selection by architectures with reasonable THP sizes.
439*4882a593Smuzhiyun
440*4882a593Smuzhiyun#
441*4882a593Smuzhiyun# UP and nommu archs use km based percpu allocator
442*4882a593Smuzhiyun#
443*4882a593Smuzhiyunconfig NEED_PER_CPU_KM
444*4882a593Smuzhiyun	depends on !SMP
445*4882a593Smuzhiyun	bool
446*4882a593Smuzhiyun	default y
447*4882a593Smuzhiyun
448*4882a593Smuzhiyunconfig CLEANCACHE
449*4882a593Smuzhiyun	bool "Enable cleancache driver to cache clean pages if tmem is present"
450*4882a593Smuzhiyun	help
451*4882a593Smuzhiyun	  Cleancache can be thought of as a page-granularity victim cache
452*4882a593Smuzhiyun	  for clean pages that the kernel's pageframe replacement algorithm
453*4882a593Smuzhiyun	  (PFRA) would like to keep around, but can't since there isn't enough
454*4882a593Smuzhiyun	  memory.  So when the PFRA "evicts" a page, it first attempts to use
455*4882a593Smuzhiyun	  cleancache code to put the data contained in that page into
456*4882a593Smuzhiyun	  "transcendent memory", memory that is not directly accessible or
457*4882a593Smuzhiyun	  addressable by the kernel and is of unknown and possibly
458*4882a593Smuzhiyun	  time-varying size.  And when a cleancache-enabled
459*4882a593Smuzhiyun	  filesystem wishes to access a page in a file on disk, it first
460*4882a593Smuzhiyun	  checks cleancache to see if it already contains it; if it does,
461*4882a593Smuzhiyun	  the page is copied into the kernel and a disk access is avoided.
462*4882a593Smuzhiyun	  When a transcendent memory driver is available (such as zcache or
463*4882a593Smuzhiyun	  Xen transcendent memory), a significant I/O reduction
464*4882a593Smuzhiyun	  may be achieved.  When none is available, all cleancache calls
465*4882a593Smuzhiyun	  are reduced to a single pointer-compare-against-NULL resulting
466*4882a593Smuzhiyun	  in a negligible performance hit.
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun	  If unsure, say Y to enable cleancache
469*4882a593Smuzhiyun
470*4882a593Smuzhiyunconfig FRONTSWAP
471*4882a593Smuzhiyun	bool "Enable frontswap to cache swap pages if tmem is present"
472*4882a593Smuzhiyun	depends on SWAP
473*4882a593Smuzhiyun	help
474*4882a593Smuzhiyun	  Frontswap is so named because it can be thought of as the opposite
475*4882a593Smuzhiyun	  of a "backing" store for a swap device.  The data is stored into
476*4882a593Smuzhiyun	  "transcendent memory", memory that is not directly accessible or
477*4882a593Smuzhiyun	  addressable by the kernel and is of unknown and possibly
478*4882a593Smuzhiyun	  time-varying size.  When space in transcendent memory is available,
479*4882a593Smuzhiyun	  a significant swap I/O reduction may be achieved.  When none is
480*4882a593Smuzhiyun	  available, all frontswap calls are reduced to a single pointer-
481*4882a593Smuzhiyun	  compare-against-NULL resulting in a negligible performance hit
482*4882a593Smuzhiyun	  and swap data is stored as normal on the matching swap device.
483*4882a593Smuzhiyun
484*4882a593Smuzhiyun	  If unsure, say Y to enable frontswap.
485*4882a593Smuzhiyun
486*4882a593Smuzhiyunconfig CMA
487*4882a593Smuzhiyun	bool "Contiguous Memory Allocator"
488*4882a593Smuzhiyun	depends on MMU
489*4882a593Smuzhiyun	select MIGRATION
490*4882a593Smuzhiyun	select MEMORY_ISOLATION
491*4882a593Smuzhiyun	help
492*4882a593Smuzhiyun	  This enables the Contiguous Memory Allocator which allows other
493*4882a593Smuzhiyun	  subsystems to allocate big physically-contiguous blocks of memory.
494*4882a593Smuzhiyun	  CMA reserves a region of memory and allows only movable pages to
495*4882a593Smuzhiyun	  be allocated from it. This way, the kernel can use the memory for
496*4882a593Smuzhiyun	  pagecache and when a subsystem requests for contiguous area, the
497*4882a593Smuzhiyun	  allocated pages are migrated away to serve the contiguous request.
498*4882a593Smuzhiyun
499*4882a593Smuzhiyun	  If unsure, say "n".
500*4882a593Smuzhiyun
501*4882a593Smuzhiyunconfig CMA_INACTIVE
502*4882a593Smuzhiyun	bool "CMA not active to system"
503*4882a593Smuzhiyun	depends on CMA
504*4882a593Smuzhiyun	help
505*4882a593Smuzhiyun	  This forbids CMA from activating its pages into system memory, so
506*4882a593Smuzhiyun	  that pages from CMA are never borrowed by the system.
507*4882a593Smuzhiyun
508*4882a593Smuzhiyun	  If unsure, say "n".
509*4882a593Smuzhiyun
510*4882a593Smuzhiyunconfig CMA_DEBUG
511*4882a593Smuzhiyun	bool "CMA debug messages (DEVELOPMENT)"
512*4882a593Smuzhiyun	depends on DEBUG_KERNEL && CMA
513*4882a593Smuzhiyun	help
514*4882a593Smuzhiyun	  Turns on debug messages in CMA.  This produces KERN_DEBUG
515*4882a593Smuzhiyun	  messages for every CMA call as well as various messages while
516*4882a593Smuzhiyun	  processing calls such as dma_alloc_from_contiguous().
517*4882a593Smuzhiyun	  This option does not affect warning and error messages.
518*4882a593Smuzhiyun
519*4882a593Smuzhiyunconfig CMA_DEBUGFS
520*4882a593Smuzhiyun	bool "CMA debugfs interface"
521*4882a593Smuzhiyun	depends on CMA && DEBUG_FS
522*4882a593Smuzhiyun	help
523*4882a593Smuzhiyun	  Turns on the DebugFS interface for CMA.
524*4882a593Smuzhiyun
525*4882a593Smuzhiyunconfig CMA_DEBUGFS_BITMAP_HEX
526*4882a593Smuzhiyun	bool "CMA debugfs add bitmap_hex node"
527*4882a593Smuzhiyun	depends on CMA_DEBUGFS
528*4882a593Smuzhiyun	help
529*4882a593Smuzhiyun	  Turns on the bitmap_hex node under DEBUGFS, shows the bitmap in hex
530*4882a593Smuzhiyun	  format.
531*4882a593Smuzhiyun
532*4882a593Smuzhiyunconfig CMA_SYSFS
533*4882a593Smuzhiyun	bool "CMA information through sysfs interface"
534*4882a593Smuzhiyun	depends on CMA && SYSFS
535*4882a593Smuzhiyun	help
536*4882a593Smuzhiyun	  This option exposes some sysfs attributes to get information
537*4882a593Smuzhiyun	  from CMA.
538*4882a593Smuzhiyun
539*4882a593Smuzhiyunconfig CMA_AREAS
540*4882a593Smuzhiyun	int "Maximum count of the CMA areas"
541*4882a593Smuzhiyun	depends on CMA
542*4882a593Smuzhiyun	default 19 if NUMA
543*4882a593Smuzhiyun	default 7
544*4882a593Smuzhiyun	help
545*4882a593Smuzhiyun	  CMA allows to create CMA areas for particular purpose, mainly,
546*4882a593Smuzhiyun	  used as device private area. This parameter sets the maximum
547*4882a593Smuzhiyun	  number of CMA areas in the system.
548*4882a593Smuzhiyun
549*4882a593Smuzhiyun	  If unsure, leave the default value "7" in UMA and "19" in NUMA.
550*4882a593Smuzhiyun
551*4882a593Smuzhiyunconfig MEM_SOFT_DIRTY
552*4882a593Smuzhiyun	bool "Track memory changes"
553*4882a593Smuzhiyun	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
554*4882a593Smuzhiyun	select PROC_PAGE_MONITOR
555*4882a593Smuzhiyun	help
556*4882a593Smuzhiyun	  This option enables memory changes tracking by introducing a
557*4882a593Smuzhiyun	  soft-dirty bit on pte-s. This bit is set when someone writes
558*4882a593Smuzhiyun	  into a page just as regular dirty bit, but unlike the latter
559*4882a593Smuzhiyun	  it can be cleared by hands.
560*4882a593Smuzhiyun
561*4882a593Smuzhiyun	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.
562*4882a593Smuzhiyun
563*4882a593Smuzhiyunconfig ZSWAP
564*4882a593Smuzhiyun	bool "Compressed cache for swap pages (EXPERIMENTAL)"
565*4882a593Smuzhiyun	depends on FRONTSWAP && CRYPTO=y
566*4882a593Smuzhiyun	select ZPOOL
567*4882a593Smuzhiyun	help
568*4882a593Smuzhiyun	  A lightweight compressed cache for swap pages.  It takes
569*4882a593Smuzhiyun	  pages that are in the process of being swapped out and attempts to
570*4882a593Smuzhiyun	  compress them into a dynamically allocated RAM-based memory pool.
571*4882a593Smuzhiyun	  This can result in a significant I/O reduction on swap device and,
572*4882a593Smuzhiyun	  in the case where decompressing from RAM is faster than swap device
573*4882a593Smuzhiyun	  reads, can also improve workload performance.
574*4882a593Smuzhiyun
575*4882a593Smuzhiyun	  This is marked experimental because it is a new feature (as of
576*4882a593Smuzhiyun	  v3.11) that interacts heavily with memory reclaim.  While these
577*4882a593Smuzhiyun	  interactions don't cause any known issues on simple memory setups,
578*4882a593Smuzhiyun	  they have not been fully explored on the large set of potential
579*4882a593Smuzhiyun	  configurations and workloads that exist.
580*4882a593Smuzhiyun
581*4882a593Smuzhiyunchoice
582*4882a593Smuzhiyun	prompt "Compressed cache for swap pages default compressor"
583*4882a593Smuzhiyun	depends on ZSWAP
584*4882a593Smuzhiyun	default ZSWAP_COMPRESSOR_DEFAULT_LZO
585*4882a593Smuzhiyun	help
586*4882a593Smuzhiyun	  Selects the default compression algorithm for the compressed cache
587*4882a593Smuzhiyun	  for swap pages.
588*4882a593Smuzhiyun
589*4882a593Smuzhiyun	  For an overview of what kind of performance can be expected from
590*4882a593Smuzhiyun	  a particular compression algorithm please refer to the benchmarks
591*4882a593Smuzhiyun	  available at the following LWN page:
592*4882a593Smuzhiyun	  https://lwn.net/Articles/751795/
593*4882a593Smuzhiyun
594*4882a593Smuzhiyun	  If in doubt, select 'LZO'.
595*4882a593Smuzhiyun
596*4882a593Smuzhiyun	  The selection made here can be overridden by using the kernel
597*4882a593Smuzhiyun	  command line 'zswap.compressor=' option.
598*4882a593Smuzhiyun
599*4882a593Smuzhiyunconfig ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
600*4882a593Smuzhiyun	bool "Deflate"
601*4882a593Smuzhiyun	select CRYPTO_DEFLATE
602*4882a593Smuzhiyun	help
603*4882a593Smuzhiyun	  Use the Deflate algorithm as the default compression algorithm.
604*4882a593Smuzhiyun
605*4882a593Smuzhiyunconfig ZSWAP_COMPRESSOR_DEFAULT_LZO
606*4882a593Smuzhiyun	bool "LZO"
607*4882a593Smuzhiyun	select CRYPTO_LZO
608*4882a593Smuzhiyun	help
609*4882a593Smuzhiyun	  Use the LZO algorithm as the default compression algorithm.
610*4882a593Smuzhiyun
611*4882a593Smuzhiyunconfig ZSWAP_COMPRESSOR_DEFAULT_842
612*4882a593Smuzhiyun	bool "842"
613*4882a593Smuzhiyun	select CRYPTO_842
614*4882a593Smuzhiyun	help
615*4882a593Smuzhiyun	  Use the 842 algorithm as the default compression algorithm.
616*4882a593Smuzhiyun
617*4882a593Smuzhiyunconfig ZSWAP_COMPRESSOR_DEFAULT_LZ4
618*4882a593Smuzhiyun	bool "LZ4"
619*4882a593Smuzhiyun	select CRYPTO_LZ4
620*4882a593Smuzhiyun	help
621*4882a593Smuzhiyun	  Use the LZ4 algorithm as the default compression algorithm.
622*4882a593Smuzhiyun
623*4882a593Smuzhiyunconfig ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
624*4882a593Smuzhiyun	bool "LZ4HC"
625*4882a593Smuzhiyun	select CRYPTO_LZ4HC
626*4882a593Smuzhiyun	help
627*4882a593Smuzhiyun	  Use the LZ4HC algorithm as the default compression algorithm.
628*4882a593Smuzhiyun
629*4882a593Smuzhiyunconfig ZSWAP_COMPRESSOR_DEFAULT_ZSTD
630*4882a593Smuzhiyun	bool "zstd"
631*4882a593Smuzhiyun	select CRYPTO_ZSTD
632*4882a593Smuzhiyun	help
633*4882a593Smuzhiyun	  Use the zstd algorithm as the default compression algorithm.
634*4882a593Smuzhiyunendchoice
635*4882a593Smuzhiyun
# String name of the default compressor picked in the choice above; the
# 'zswap.compressor=' kernel command line option overrides it at runtime.
636*4882a593Smuzhiyunconfig ZSWAP_COMPRESSOR_DEFAULT
637*4882a593Smuzhiyun       string
638*4882a593Smuzhiyun       depends on ZSWAP
639*4882a593Smuzhiyun       default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
640*4882a593Smuzhiyun       default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
641*4882a593Smuzhiyun       default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
642*4882a593Smuzhiyun       default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
643*4882a593Smuzhiyun       default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
644*4882a593Smuzhiyun       default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
645*4882a593Smuzhiyun       default ""
646*4882a593Smuzhiyun
647*4882a593Smuzhiyunchoice
648*4882a593Smuzhiyun	prompt "Compressed cache for swap pages default allocator"
649*4882a593Smuzhiyun	depends on ZSWAP
650*4882a593Smuzhiyun	default ZSWAP_ZPOOL_DEFAULT_ZBUD
651*4882a593Smuzhiyun	help
652*4882a593Smuzhiyun	  Selects the default allocator for the compressed cache for
653*4882a593Smuzhiyun	  swap pages.
654*4882a593Smuzhiyun	  The default is 'zbud' for compatibility, however please do
655*4882a593Smuzhiyun	  read the description of each of the allocators below before
656*4882a593Smuzhiyun	  making the right choice.
657*4882a593Smuzhiyun
658*4882a593Smuzhiyun	  The selection made here can be overridden by using the kernel
659*4882a593Smuzhiyun	  command line 'zswap.zpool=' option.
660*4882a593Smuzhiyun
661*4882a593Smuzhiyunconfig ZSWAP_ZPOOL_DEFAULT_ZBUD
662*4882a593Smuzhiyun	bool "zbud"
663*4882a593Smuzhiyun	select ZBUD
664*4882a593Smuzhiyun	help
665*4882a593Smuzhiyun	  Use the zbud allocator as the default allocator.
666*4882a593Smuzhiyun
667*4882a593Smuzhiyunconfig ZSWAP_ZPOOL_DEFAULT_Z3FOLD
668*4882a593Smuzhiyun	bool "z3fold"
669*4882a593Smuzhiyun	select Z3FOLD
670*4882a593Smuzhiyun	help
671*4882a593Smuzhiyun	  Use the z3fold allocator as the default allocator.
672*4882a593Smuzhiyun
673*4882a593Smuzhiyunconfig ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
674*4882a593Smuzhiyun	bool "zsmalloc"
675*4882a593Smuzhiyun	select ZSMALLOC
676*4882a593Smuzhiyun	help
677*4882a593Smuzhiyun	  Use the zsmalloc allocator as the default allocator.
678*4882a593Smuzhiyunendchoice
679*4882a593Smuzhiyun
# String name of the default zpool allocator picked in the choice above; the
# 'zswap.zpool=' kernel command line option overrides it at runtime.
680*4882a593Smuzhiyunconfig ZSWAP_ZPOOL_DEFAULT
681*4882a593Smuzhiyun       string
682*4882a593Smuzhiyun       depends on ZSWAP
683*4882a593Smuzhiyun       default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
684*4882a593Smuzhiyun       default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
685*4882a593Smuzhiyun       default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
686*4882a593Smuzhiyun       default ""
687*4882a593Smuzhiyun
# Boot-time default of the 'zswap.enabled' parameter.
688*4882a593Smuzhiyunconfig ZSWAP_DEFAULT_ON
689*4882a593Smuzhiyun	bool "Enable the compressed cache for swap pages by default"
690*4882a593Smuzhiyun	depends on ZSWAP
691*4882a593Smuzhiyun	help
692*4882a593Smuzhiyun	  If selected, the compressed cache for swap pages will be enabled
693*4882a593Smuzhiyun	  at boot, otherwise it will be disabled.
694*4882a593Smuzhiyun
695*4882a593Smuzhiyun	  The selection made here can be overridden by using the kernel
696*4882a593Smuzhiyun	  command line 'zswap.enabled=' option.
697*4882a593Smuzhiyun
698*4882a593Smuzhiyunconfig ZPOOL
699*4882a593Smuzhiyun	tristate "Common API for compressed memory storage"
700*4882a593Smuzhiyun	help
701*4882a593Smuzhiyun	  Compressed memory storage API.  This allows using either zbud,
702*4882a593Smuzhiyun	  z3fold or zsmalloc.
703*4882a593Smuzhiyun
# zpool backend; ZSWAP_ZPOOL_DEFAULT_ZBUD selects it as the zswap default.
704*4882a593Smuzhiyunconfig ZBUD
705*4882a593Smuzhiyun	tristate "Low (Up to 2x) density storage for compressed pages"
706*4882a593Smuzhiyun	help
707*4882a593Smuzhiyun	  A special purpose allocator for storing compressed pages.
708*4882a593Smuzhiyun	  It is designed to store up to two compressed pages per physical
709*4882a593Smuzhiyun	  page.  While this design limits storage density, it has simple and
710*4882a593Smuzhiyun	  deterministic reclaim properties that make it preferable to a higher
711*4882a593Smuzhiyun	  density approach when reclaim will be used.
712*4882a593Smuzhiyun
# zpool backend; ZSWAP_ZPOOL_DEFAULT_Z3FOLD selects it as the zswap default.
713*4882a593Smuzhiyunconfig Z3FOLD
714*4882a593Smuzhiyun	tristate "Up to 3x density storage for compressed pages"
715*4882a593Smuzhiyun	depends on ZPOOL
716*4882a593Smuzhiyun	help
717*4882a593Smuzhiyun	  A special purpose allocator for storing compressed pages.
718*4882a593Smuzhiyun	  It is designed to store up to three compressed pages per physical
719*4882a593Smuzhiyun	  page. It is a ZBUD derivative so the simplicity and determinism are
720*4882a593Smuzhiyun	  still there.
721*4882a593Smuzhiyun
# Allocator also selectable as the zswap zpool backend
# (via ZSWAP_ZPOOL_DEFAULT_ZSMALLOC).
722*4882a593Smuzhiyunconfig ZSMALLOC
723*4882a593Smuzhiyun	tristate "Memory allocator for compressed pages"
724*4882a593Smuzhiyun	depends on MMU
725*4882a593Smuzhiyun	help
726*4882a593Smuzhiyun	  zsmalloc is a slab-based memory allocator designed to store
727*4882a593Smuzhiyun	  compressed RAM pages.  zsmalloc uses virtual memory mapping
728*4882a593Smuzhiyun	  in order to reduce fragmentation.  However, this results in a
729*4882a593Smuzhiyun	  non-standard allocator interface where a handle, not a pointer, is
730*4882a593Smuzhiyun	  returned by an alloc().  This handle must be mapped in order to
731*4882a593Smuzhiyun	  access the allocated space.
732*4882a593Smuzhiyun
733*4882a593Smuzhiyunconfig ZSMALLOC_STAT
734*4882a593Smuzhiyun	bool "Export zsmalloc statistics"
735*4882a593Smuzhiyun	depends on ZSMALLOC
736*4882a593Smuzhiyun	select DEBUG_FS
737*4882a593Smuzhiyun	help
738*4882a593Smuzhiyun	  This option enables code in zsmalloc to collect various
739*4882a593Smuzhiyun	  statistics about what's happening in zsmalloc and exports that
740*4882a593Smuzhiyun	  information to userspace via debugfs.
741*4882a593Smuzhiyun	  If unsure, say N.
742*4882a593Smuzhiyun
# Internal symbol (no prompt); presumably enabled via 'select' by
# architecture code that needs it — not settable by the user.
743*4882a593Smuzhiyunconfig GENERIC_EARLY_IOREMAP
744*4882a593Smuzhiyun	bool
745*4882a593Smuzhiyun
# VM-layout limit for upward-growing 32-bit user stacks (see 'depends on').
746*4882a593Smuzhiyunconfig MAX_STACK_SIZE_MB
747*4882a593Smuzhiyun	int "Maximum user stack size for 32-bit processes (MB)"
748*4882a593Smuzhiyun	default 80
749*4882a593Smuzhiyun	range 8 2048
750*4882a593Smuzhiyun	depends on STACK_GROWSUP && (!64BIT || COMPAT)
751*4882a593Smuzhiyun	help
752*4882a593Smuzhiyun	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
753*4882a593Smuzhiyun	  user processes when the stack grows upwards (currently only on parisc
754*4882a593Smuzhiyun	  arch). The stack will be located at the highest memory address minus
755*4882a593Smuzhiyun	  the given value, unless the RLIMIT_STACK hard limit is changed to a
756*4882a593Smuzhiyun	  smaller value in which case that is used.
757*4882a593Smuzhiyun
758*4882a593Smuzhiyun	  A sane initial value is 80 MB.
759*4882a593Smuzhiyun
# Defers most struct page initialisation from single-threaded early boot to
# parallel kthreads (hence the 'select PADATA').
760*4882a593Smuzhiyunconfig DEFERRED_STRUCT_PAGE_INIT
761*4882a593Smuzhiyun	bool "Defer initialisation of struct pages to kthreads"
762*4882a593Smuzhiyun	depends on SPARSEMEM
763*4882a593Smuzhiyun	depends on !NEED_PER_CPU_KM
764*4882a593Smuzhiyun	depends on 64BIT
765*4882a593Smuzhiyun	select PADATA
766*4882a593Smuzhiyun	help
767*4882a593Smuzhiyun	  Ordinarily all struct pages are initialised during early boot in a
768*4882a593Smuzhiyun	  single thread. On very large machines this can take a considerable
769*4882a593Smuzhiyun	  amount of time. If this option is set, large machines will bring up
770*4882a593Smuzhiyun	  a subset of memmap at boot and then initialise the rest in parallel.
771*4882a593Smuzhiyun	  This has a potential performance impact on tasks running early in the
772*4882a593Smuzhiyun	  lifetime of the system until these kthreads finish the
773*4882a593Smuzhiyun	  initialisation.
774*4882a593Smuzhiyun
# Internal symbol (no prompt); selected by IDLE_PAGE_TRACKING below.
775*4882a593Smuzhiyunconfig PAGE_IDLE_FLAG
776*4882a593Smuzhiyun	bool
777*4882a593Smuzhiyun	select PAGE_EXTENSION if !64BIT
778*4882a593Smuzhiyun	help
779*4882a593Smuzhiyun	  This adds PG_idle and PG_young flags to 'struct page'.  PTE Accessed
780*4882a593Smuzhiyun	  bit writers can set the state of the bit in the flags so that PTE
781*4882a593Smuzhiyun	  Accessed bit readers may avoid disturbance.
782*4882a593Smuzhiyun
783*4882a593Smuzhiyunconfig IDLE_PAGE_TRACKING
784*4882a593Smuzhiyun	bool "Enable idle page tracking"
785*4882a593Smuzhiyun	depends on SYSFS && MMU
786*4882a593Smuzhiyun	select PAGE_IDLE_FLAG
787*4882a593Smuzhiyun	help
788*4882a593Smuzhiyun	  This feature allows estimating the amount of user pages that have
789*4882a593Smuzhiyun	  not been touched during a given period of time. This information can
790*4882a593Smuzhiyun	  be useful to tune memory cgroup limits and/or for job placement
791*4882a593Smuzhiyun	  within a compute cluster.
792*4882a593Smuzhiyun
793*4882a593Smuzhiyun	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
794*4882a593Smuzhiyun	  more details.
795*4882a593Smuzhiyun
# Architecture capability flag (no prompt); ZONE_DEVICE below depends on it.
796*4882a593Smuzhiyunconfig ARCH_HAS_PTE_DEVMAP
797*4882a593Smuzhiyun	bool
798*4882a593Smuzhiyun
# Establishes struct pages (memmap entries) for driver-discovered device
# memory via the memory hotplug machinery.
799*4882a593Smuzhiyunconfig ZONE_DEVICE
800*4882a593Smuzhiyun	bool "Device memory (pmem, HMM, etc...) hotplug support"
801*4882a593Smuzhiyun	depends on MEMORY_HOTPLUG
802*4882a593Smuzhiyun	depends on MEMORY_HOTREMOVE
803*4882a593Smuzhiyun	depends on SPARSEMEM_VMEMMAP
804*4882a593Smuzhiyun	depends on ARCH_HAS_PTE_DEVMAP
805*4882a593Smuzhiyun	select XARRAY_MULTI
806*4882a593Smuzhiyun
807*4882a593Smuzhiyun	help
808*4882a593Smuzhiyun	  Device memory hotplug support allows for establishing pmem,
809*4882a593Smuzhiyun	  or other device driver discovered memory regions, in the
810*4882a593Smuzhiyun	  memmap. This allows pfn_to_page() lookups of otherwise
811*4882a593Smuzhiyun	  "device-physical" addresses which is needed for using a DAX
812*4882a593Smuzhiyun	  mapping in an O_DIRECT operation, among other things.
813*4882a593Smuzhiyun
814*4882a593Smuzhiyun	  If FS_DAX is enabled, then say Y.
815*4882a593Smuzhiyun
# Internal symbol (no prompt); selected by options such as DEVICE_PRIVATE.
816*4882a593Smuzhiyunconfig DEV_PAGEMAP_OPS
817*4882a593Smuzhiyun	bool
818*4882a593Smuzhiyun
819*4882a593Smuzhiyun#
820*4882a593Smuzhiyun# Helpers to mirror a range of the CPU page tables of a process into device page
821*4882a593Smuzhiyun# tables.
822*4882a593Smuzhiyun#
823*4882a593Smuzhiyunconfig HMM_MIRROR
824*4882a593Smuzhiyun	bool
825*4882a593Smuzhiyun	depends on MMU
826*4882a593Smuzhiyun
# Support for memory only the device itself can address; built on ZONE_DEVICE.
827*4882a593Smuzhiyunconfig DEVICE_PRIVATE
828*4882a593Smuzhiyun	bool "Unaddressable device memory (GPU memory, ...)"
829*4882a593Smuzhiyun	depends on ZONE_DEVICE
830*4882a593Smuzhiyun	select DEV_PAGEMAP_OPS
831*4882a593Smuzhiyun
832*4882a593Smuzhiyun	help
833*4882a593Smuzhiyun	  Allows creation of struct pages to represent unaddressable device
834*4882a593Smuzhiyun	  memory; i.e., memory that is only accessible from the device (or
835*4882a593Smuzhiyun	  group of devices). You likely also want to select HMM_MIRROR.
836*4882a593Smuzhiyun
# The following are internal, promptless symbols; presumably enabled via
# 'select' by other options or architecture Kconfig — not user-visible.
837*4882a593Smuzhiyunconfig VMAP_PFN
838*4882a593Smuzhiyun	bool
839*4882a593Smuzhiyun
840*4882a593Smuzhiyunconfig FRAME_VECTOR
841*4882a593Smuzhiyun	bool
842*4882a593Smuzhiyun
843*4882a593Smuzhiyunconfig ARCH_USES_HIGH_VMA_FLAGS
844*4882a593Smuzhiyun	bool
845*4882a593Smuzhiyunconfig ARCH_HAS_PKEYS
846*4882a593Smuzhiyun	bool
847*4882a593Smuzhiyun
848*4882a593Smuzhiyunconfig PERCPU_STATS
849*4882a593Smuzhiyun	bool "Collect percpu memory statistics"
850*4882a593Smuzhiyun	help
851*4882a593Smuzhiyun	  This feature collects and exposes statistics via debugfs. The
852*4882a593Smuzhiyun	  information includes global and per-chunk statistics, which can
853*4882a593Smuzhiyun	  be used to help understand percpu memory usage.
854*4882a593Smuzhiyun
# Opt-in gate for SPECULATIVE_PAGE_FAULT below; defaults to n.
855*4882a593Smuzhiyunconfig ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
856*4882a593Smuzhiyun       def_bool n
857*4882a593Smuzhiyun
858*4882a593Smuzhiyunconfig SPECULATIVE_PAGE_FAULT
859*4882a593Smuzhiyun       bool "Speculative page faults"
860*4882a593Smuzhiyun       default y
861*4882a593Smuzhiyun       depends on ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
862*4882a593Smuzhiyun       depends on MMU && SMP && !NUMA
863*4882a593Smuzhiyun       help
864*4882a593Smuzhiyun         Try to handle user space page faults without holding the mmap_sem.
865*4882a593Smuzhiyun
866*4882a593Smuzhiyun	 This should allow better concurrency for massively threaded processes
867*4882a593Smuzhiyun	 since the page fault handler will not wait for other threads' memory
868*4882a593Smuzhiyun	 layout change to be done, assuming that this change is done in another
869*4882a593Smuzhiyun	 part of the process's memory space. This type of page fault is named
870*4882a593Smuzhiyun	 speculative page fault.
871*4882a593Smuzhiyun
872*4882a593Smuzhiyun	 If the speculative page fault fails because concurrency is
873*4882a593Smuzhiyun	 detected or because the underlying PMD or PTE tables are not yet
874*4882a593Smuzhiyun	 allocated, the speculative handling fails and a classic page fault
875*4882a593Smuzhiyun	 is then tried.
876*4882a593Smuzhiyun
# Debug/benchmark aid; driven by the selftest referenced in the help text.
877*4882a593Smuzhiyunconfig GUP_BENCHMARK
878*4882a593Smuzhiyun	bool "Enable infrastructure for get_user_pages() and related calls benchmarking"
879*4882a593Smuzhiyun	help
880*4882a593Smuzhiyun	  Provides /sys/kernel/debug/gup_benchmark that helps with testing
881*4882a593Smuzhiyun	  performance of get_user_pages() and related calls.
882*4882a593Smuzhiyun
883*4882a593Smuzhiyun	  See tools/testing/selftests/vm/gup_benchmark.c
884*4882a593Smuzhiyun
# Internal architecture flag (no prompt; not user-visible).
885*4882a593Smuzhiyunconfig GUP_GET_PTE_LOW_HIGH
886*4882a593Smuzhiyun	bool
887*4882a593Smuzhiyun
888*4882a593Smuzhiyunconfig READ_ONLY_THP_FOR_FS
889*4882a593Smuzhiyun	bool "Read-only THP for filesystems (EXPERIMENTAL)"
890*4882a593Smuzhiyun	depends on TRANSPARENT_HUGEPAGE && SHMEM
891*4882a593Smuzhiyun
892*4882a593Smuzhiyun	help
893*4882a593Smuzhiyun	  Allow khugepaged to put read-only file-backed pages in THP.
894*4882a593Smuzhiyun
895*4882a593Smuzhiyun	  This is marked experimental because it is a new feature. Write
896*4882a593Smuzhiyun	  support for file THPs will be developed in the next few release
897*4882a593Smuzhiyun	  cycles.
898*4882a593Smuzhiyun
899*4882a593Smuzhiyunconfig ARCH_HAS_PTE_SPECIAL
900*4882a593Smuzhiyun	bool
901*4882a593Smuzhiyun
902*4882a593Smuzhiyun#
903*4882a593Smuzhiyun# Some architectures require a special hugepage directory format that is
904*4882a593Smuzhiyun# required to support multiple hugepage sizes. For example a4fe3ce76
905*4882a593Smuzhiyun# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
906*4882a593Smuzhiyun# introduced it on powerpc.  This allows for more flexible hugepage
907*4882a593Smuzhiyun# pagetable layouts.
908*4882a593Smuzhiyun#
909*4882a593Smuzhiyunconfig ARCH_HAS_HUGEPD
910*4882a593Smuzhiyun	bool
911*4882a593Smuzhiyun
# Internal helper symbol (no prompt; not user-visible).
912*4882a593Smuzhiyunconfig MAPPING_DIRTY_HELPERS
913*4882a593Smuzhiyun        bool
914*4882a593Smuzhiyun
915*4882a593Smuzhiyunsource "mm/damon/Kconfig"
916*4882a593Smuzhiyun
917*4882a593Smuzhiyunendmenu
918