// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/io.h>

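/*
 * COLOUR_ALIGN() rounds "addr" up to the next SHMLBA boundary and then
 * adds the cache-colour offset implied by "pgoff", so the page at this
 * file offset ends up at the same cache colour in every mapping.
 */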
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at a virtual address that is a
 * multiple of SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	do_align = filp || (flags & MAP_SHARED);

	/*
	 * Honour MAP_FIXED requests as-is, but reject a fixed shared
	 * mapping whose address does not match the cache colour
	 * implied by pgoff.
	 */
	if (flags & MAP_FIXED) {
		if (flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

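	/*
	 * If the caller supplied a hint, align it (colouring it when
	 * required) and use it as long as it fits below TASK_SIZE and
	 * does not collide with an existing mapping.
	 */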
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

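	/*
	 * No usable hint: do a bottom-up search of the whole mmap range.
	 * When colouring is required, align_mask keeps the SHMLBA colour
	 * bits above the page offset and align_offset supplies the colour
	 * of the first page being mapped, so vm_unmapped_area() returns a
	 * correctly coloured address.
	 */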
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}