xref: /OK3568_Linux_fs/kernel/arch/arm64/include/asm/mman.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__

#include <linux/compiler.h>
#include <linux/types.h>
#include <uapi/asm/mman.h>

static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
	unsigned long pkey __always_unused)
{
	unsigned long ret = 0;

	if (system_supports_bti() && (prot & PROT_BTI))
		ret |= VM_ARM64_BTI;

	if (system_supports_mte() && (prot & PROT_MTE))
		ret |= VM_MTE;

	return ret;
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)

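/*
 * Illustrative sketch, not part of this header: the generic mmap path (see
 * calc_vm_prot_bits()/calc_vm_flag_bits() in include/linux/mman.h; exact
 * helpers may differ between kernel versions) folds these arch bits into the
 * new vma roughly as
 *
 *	vm_flags = calc_vm_prot_bits(prot, pkey) |	// includes arch_calc_vm_prot_bits()
 *		   calc_vm_flag_bits(flags) |		// includes arch_calc_vm_flag_bits()
 *		   mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 *
 * so a PROT_BTI/PROT_MTE request from userspace only becomes VM_ARM64_BTI/
 * VM_MTE when the CPU actually implements the feature.
 */
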
static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
{
	/*
	 * Only allow MTE on anonymous mappings as these are guaranteed to be
	 * backed by tags-capable memory. The vm_flags may be overridden by a
	 * filesystem supporting MTE (RAM-based).
	 */
	if (system_supports_mte() && (flags & MAP_ANONYMOUS))
		return VM_MTE_ALLOWED;

	return 0;
}
#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)

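/*
 * Hedged userspace sketch (assumes the standard mmap(2) interface; not part
 * of this header): the rule above means tagged memory is normally requested
 * on an anonymous mapping, e.g.
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_MTE,
 *		       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 *
 * whereas PROT_MTE on an ordinary file-backed mapping is refused unless the
 * filesystem itself (e.g. a RAM-based one) marks the vma as VM_MTE_ALLOWED.
 */
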
static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot = 0;

	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

	return __pgprot(prot);
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)

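/*
 * Hedged example (assumes standard mmap(2)/mprotect(2) semantics): since
 * VM_MTE_ALLOWED is only recorded at mmap() time, tagging can still be
 * enabled later on an anonymous mapping,
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 *	mprotect(p, len, PROT_READ | PROT_WRITE | PROT_MTE);	// sets VM_MTE
 *
 * but mprotect(PROT_MTE) on a vma that never had VM_MTE_ALLOWED set is
 * rejected by arch_validate_flags() below.
 */
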
static inline bool arch_validate_prot(unsigned long prot,
	unsigned long addr __always_unused)
{
	unsigned long supported = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;

	if (system_supports_bti())
		supported |= PROT_BTI;

	if (system_supports_mte())
		supported |= PROT_MTE;

	return (prot & ~supported) == 0;
}
#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)

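/*
 * Illustrative note (assumes the standard mprotect(2) entry path): on a CPU
 * without BTI/MTE the "supported" mask stays at the base R/W/X/SEM set, so
 * a request such as
 *
 *	mprotect(addr, len, PROT_READ | PROT_MTE);
 *
 * fails this check and returns -EINVAL before any vma flags are computed.
 */
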
static inline bool arch_validate_flags(unsigned long vm_flags)
{
	if (!system_supports_mte())
		return true;

	/* only allow VM_MTE if VM_MTE_ALLOWED has been set previously */
	return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);
}
#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)

#endif /* ! __ASM_MMAN_H__ */