// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/shm.h>

#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

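/*
 * Worked example of COLOUR_ALIGN (illustrative values only; the real
 * SHMLBA is platform-defined).  Assume SHMLBA = 0x4000 and
 * PAGE_SHIFT = 12.  Then COLOUR_ALIGN(0x10010000, 3) rounds the
 * address up to an SHMLBA boundary (0x10010000 already is one) and
 * adds the colour of page offset 3, (3 << 12) & 0x3fff = 0x3000,
 * yielding 0x10013000.  Any two mappings of the same pgoff therefore
 * share the address bits below SHMLBA, i.e. the same cache colour.
 */
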
/*
 * Shared mappings must be correctly aligned to avoid aliasing in
 * VIPT caches: a given page of an object must always be mapped at
 * a multiple of SHMLBA bytes.
 *
 * We provide this function unconditionally; in the VIVT case the
 * alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	struct vm_unmapped_area_info info;
	int aliasing = IS_ENABLED(CONFIG_CPU_CACHE_ALIASING);

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case: a fixed shared mapping on an
	 * aliasing cache is only acceptable if its address already has
	 * the right colour, i.e. addr and pgoff agree modulo SHMLBA.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

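	/*
	 * Honour the caller's hint where we can: colour-align it (or
	 * merely page-align it when no colouring is needed) and accept
	 * it if the resulting range is unused and fits below TASK_SIZE.
	 */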
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

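	/*
	 * Fall back to a full search of the address space.  align_mask
	 * and align_offset ask vm_unmapped_area() to hand back an
	 * address whose bits below SHMLBA match pgoff's colour, the
	 * same constraint COLOUR_ALIGN applies above.
	 */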
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
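
/*
 * Usage sketch (illustrative, user-space view): two MAP_SHARED
 * mappings of the same file offset come back with the same colour
 * on an aliasing cache, e.g.
 *
 *	void *a = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	void *b = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Both then satisfy ((unsigned long)a & (SHMLBA - 1)) ==
 * ((unsigned long)b & (SHMLBA - 1)), so the VIPT D-cache never sees
 * two differently-coloured views of the same data.
 */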