xref: /OK3568_Linux_fs/kernel/arch/arm/mm/mmap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
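/*
 * Illustrative only: a worked example of COLOUR_ALIGN, assuming
 * SHMLBA == 4 * PAGE_SIZE (16 KiB with 4 KiB pages), a typical value
 * for aliasing ARM VIPT caches:
 *
 *   COLOUR_ALIGN(0x12345, 5)
 *     = ((0x12345 + 0x3fff) & ~0x3fff) + ((5 << 12) & 0x3fff)
 *     = 0x14000 + 0x1000
 *     = 0x15000
 *
 * The address is rounded up to an SHMLBA boundary and then offset so
 * that its position within the SHMLBA window matches the colour of
 * file page 5 (5 mod 4 pages == 0x1000), keeping all virtual aliases
 * of a given file page in the same cache colour.
 */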

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a given page of an object
 * must always be mapped at the same offset within an SHMLBA-sized
 * window, i.e. with the same cache colour.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
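	/*
	 * Note that file-backed mappings are colour-aligned even when
	 * MAP_PRIVATE: their pages come from the shared page cache, so
	 * keeping every user mapping of a given page at the same colour
	 * avoids D-cache aliasing between those mappings.
	 */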
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case: the address is used as-is,
	 * but a shared mapping on an aliasing cache must already have
	 * the correct colour for pgoff, otherwise it is rejected.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

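	/*
	 * vm_unmapped_area() returns an address whose offset within
	 * SHMLBA matches align_offset (i.e. pgoff's colour); align_mask
	 * keeps only the SHMLBA-relative bits above page granularity,
	 * so no alignment stricter than SHMLBA is imposed.
	 */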
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
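	/*
	 * vm_unmapped_area() returns a page-aligned address on success,
	 * so any value with low bits set is a negative errno (only
	 * -ENOMEM is expected here).
	 */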
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
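	/*
	 * (PHYS_MASK >> PAGE_SHIFT) is the highest page frame number
	 * the physical address mask can describe; the last page of the
	 * requested mapping must not lie beyond it.
	 */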
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif