/* xref: /OK3568_Linux_fs/kernel/include/linux/mman.h (revision 4882a593) */
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _LINUX_MMAN_H
3*4882a593Smuzhiyun #define _LINUX_MMAN_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/mm.h>
6*4882a593Smuzhiyun #include <linux/percpu_counter.h>
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/atomic.h>
9*4882a593Smuzhiyun #include <uapi/linux/mman.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun /*
12*4882a593Smuzhiyun  * Arrange for legacy / undefined architecture specific flags to be
13*4882a593Smuzhiyun  * ignored by mmap handling code.
14*4882a593Smuzhiyun  */
15*4882a593Smuzhiyun #ifndef MAP_32BIT
16*4882a593Smuzhiyun #define MAP_32BIT 0
17*4882a593Smuzhiyun #endif
18*4882a593Smuzhiyun #ifndef MAP_HUGE_2MB
19*4882a593Smuzhiyun #define MAP_HUGE_2MB 0
20*4882a593Smuzhiyun #endif
21*4882a593Smuzhiyun #ifndef MAP_HUGE_1GB
22*4882a593Smuzhiyun #define MAP_HUGE_1GB 0
23*4882a593Smuzhiyun #endif
24*4882a593Smuzhiyun #ifndef MAP_UNINITIALIZED
25*4882a593Smuzhiyun #define MAP_UNINITIALIZED 0
26*4882a593Smuzhiyun #endif
27*4882a593Smuzhiyun #ifndef MAP_SYNC
28*4882a593Smuzhiyun #define MAP_SYNC 0
29*4882a593Smuzhiyun #endif
30*4882a593Smuzhiyun 
/*
 * The historical set of flags that all mmap implementations implicitly
 * support when a ->mmap_validate() op is not provided in file_operations.
 * (Flags the architecture does not define are 0 here, see above.)
 */
#define LEGACY_MAP_MASK (MAP_SHARED \
		| MAP_PRIVATE \
		| MAP_FIXED \
		| MAP_ANONYMOUS \
		| MAP_DENYWRITE \
		| MAP_EXECUTABLE \
		| MAP_UNINITIALIZED \
		| MAP_GROWSDOWN \
		| MAP_LOCKED \
		| MAP_NORESERVE \
		| MAP_POPULATE \
		| MAP_NONBLOCK \
		| MAP_STACK \
		| MAP_HUGETLB \
		| MAP_32BIT \
		| MAP_HUGE_2MB \
		| MAP_HUGE_1GB)

/* Overcommit tunables and the global committed-VM counter (mm/util.c). */
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;

#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
extern void mm_compute_batch(int overcommit_policy);
#else
/* UP: no per-CPU drift to bound, so the batch is 0 and recompute is a no-op. */
#define vm_committed_as_batch 0
static inline void mm_compute_batch(int overcommit_policy)
{
}
#endif

unsigned long vm_memory_committed(void);

vm_acct_memory(long pages)70*4882a593Smuzhiyun static inline void vm_acct_memory(long pages)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun 	percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun 
/* vm_unacct_memory - undo a previous vm_acct_memory() charge of @pages. */
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

/*
 * Allow architectures to handle additional protection and flag bits. The
 * overriding macros must be defined in the arch-specific asm/mman.h file.
 * Each fallback below is a no-op used when the arch does not override it.
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_calc_vm_flag_bits
#define arch_calc_vm_flag_bits(flags) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

97*4882a593Smuzhiyun #ifndef arch_validate_prot
98*4882a593Smuzhiyun /*
99*4882a593Smuzhiyun  * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
100*4882a593Smuzhiyun  * already been masked out.
101*4882a593Smuzhiyun  *
102*4882a593Smuzhiyun  * Returns true if the prot flags are valid
103*4882a593Smuzhiyun  */
arch_validate_prot(unsigned long prot,unsigned long addr)104*4882a593Smuzhiyun static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun 	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
107*4882a593Smuzhiyun }
108*4882a593Smuzhiyun #define arch_validate_prot arch_validate_prot
109*4882a593Smuzhiyun #endif
110*4882a593Smuzhiyun 
#ifndef arch_validate_flags
/*
 * This is called from mmap() and mprotect() with the updated vma->vm_flags.
 *
 * Returns true if the VM_* flags are valid.
 */
static inline bool arch_validate_flags(unsigned long flags)
{
	/* Generic fallback: no architecture-specific restrictions. */
	return true;
}
#define arch_validate_flags arch_validate_flags
#endif

/*
 * Optimisation macro.  It is equivalent to:
 *      (x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 *
 * Works by scaling the isolated bit with a power-of-two multiply or
 * divide; the "!(bit1) || !(bit2)" guard makes it safe (and a constant
 * 0) when either flag is compiled out to 0.
 */
#define _calc_vm_trans(x, bit1, bit2) \
  ((!(bit1) || !(bit2)) ? 0 : \
  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
   : ((x) & (bit1)) / ((bit1) / (bit2))))

135*4882a593Smuzhiyun /*
136*4882a593Smuzhiyun  * Combine the mmap "prot" argument into "vm_flags" used internally.
137*4882a593Smuzhiyun  */
138*4882a593Smuzhiyun static inline unsigned long
calc_vm_prot_bits(unsigned long prot,unsigned long pkey)139*4882a593Smuzhiyun calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun 	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
142*4882a593Smuzhiyun 	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
143*4882a593Smuzhiyun 	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
144*4882a593Smuzhiyun 	       arch_calc_vm_prot_bits(prot, pkey);
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun /*
148*4882a593Smuzhiyun  * Combine the mmap "flags" argument into "vm_flags" used internally.
149*4882a593Smuzhiyun  */
150*4882a593Smuzhiyun static inline unsigned long
calc_vm_flag_bits(unsigned long flags)151*4882a593Smuzhiyun calc_vm_flag_bits(unsigned long flags)
152*4882a593Smuzhiyun {
153*4882a593Smuzhiyun 	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
154*4882a593Smuzhiyun 	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
155*4882a593Smuzhiyun 	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
156*4882a593Smuzhiyun 	       _calc_vm_trans(flags, MAP_SYNC,	     VM_SYNC      ) |
157*4882a593Smuzhiyun 	       arch_calc_vm_flag_bits(flags);
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun unsigned long vm_commit_limit(void);
161*4882a593Smuzhiyun #endif /* _LINUX_MMAN_H */