xref: /OK3568_Linux_fs/kernel/arch/alpha/include/asm/cacheflush.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_CACHEFLUSH_H
#define _ALPHA_CACHEFLUSH_H

#include <linux/mm.h>

/* Note that the following two definitions are _highly_ dependent
   on the contexts in which they are used in the kernel.  I personally
   think it is criminal how loosely defined these macros are.  */

/* We need to flush the kernel's icache after loading modules.  The
   only other use of this macro is in load_aout_interp which is not
   used on Alpha.

   Note that this definition should *not* be used for userspace
   icache flushing.  While functional, it is _way_ overkill.  The
   icache is tagged with ASNs and it suffices to allocate a new ASN
   for the process.  */
#ifndef CONFIG_SMP
#define flush_icache_range(start, end)		imb()
#else
#define flush_icache_range(start, end)		smp_imb()
extern void smp_imb(void);
#endif
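
/*
 * Illustrative sketch (not part of the original header): a caller that
 * rewrites kernel text, e.g. the module loader, stores the new instructions
 * through normal data accesses and then flushes the icache over the written
 * range.  The helper name and arguments below are hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_patch_kernel_text(u32 *dst, const u32 *src, size_t ninsns)
{
	size_t i;

	/* Store the new instructions via ordinary (D-side) writes. */
	for (i = 0; i < ninsns; i++)
		dst[i] = src[i];

	/* Make the I-stream coherent again: imb(), or smp_imb() on SMP. */
	flush_icache_range((unsigned long)dst,
			   (unsigned long)(dst + ninsns));
}
#endif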

/* We need to flush the userspace icache after setting breakpoints in
   ptrace.

   Instead of indiscriminately using imb, take advantage of the fact
   that icache entries are tagged with the ASN and load a new mm context.  */
/* ??? Ought to use this in arch/alpha/kernel/signal.c too.  */

#ifndef CONFIG_SMP
#include <linux/sched.h>

extern void __load_new_mm_context(struct mm_struct *);
static inline void
flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	if (vma->vm_flags & VM_EXEC) {
		struct mm_struct *mm = vma->vm_mm;
		if (current->active_mm == mm)
			__load_new_mm_context(mm);
		else
			mm->context[smp_processor_id()] = 0;
	}
}
#define flush_icache_user_page flush_icache_user_page
#else /* CONFIG_SMP */
extern void flush_icache_user_page(struct vm_area_struct *vma,
		struct page *page, unsigned long addr, int len);
#define flush_icache_user_page flush_icache_user_page
#endif /* CONFIG_SMP */
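
/*
 * Illustrative sketch (not part of the original header): ptrace breakpoint
 * insertion normally reaches this through copy_to_user_page(), which (with
 * the asm-generic definition pulled in below) copies the data and then calls
 * flush_icache_user_page().  The function and variable names below are
 * hypothetical; this only shows the intended calling pattern.
 */
#if 0	/* example only, never compiled */
static void example_poke_breakpoint(struct vm_area_struct *vma,
				    struct page *page, unsigned long uaddr,
				    void *kaddr, u32 bpt_insn)
{
	/* Write the breakpoint through the kernel mapping of the page... */
	memcpy(kaddr, &bpt_insn, sizeof(bpt_insn));

	/* ...then drop the user's stale icache view: on UP this just forces
	   a new mm context (fresh ASN) rather than a full imb.  */
	flush_icache_user_page(vma, page, uaddr, sizeof(bpt_insn));
}
#endif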

/* This is used only in __do_fault and do_swap_page.  */
#define flush_icache_page(vma, page) \
	flush_icache_user_page((vma), (page), 0, 0)
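/* Note: addr = 0 and len = 0 are fine here; at least in the UP version
   above, flush_icache_user_page() only looks at VM_EXEC and the mm's
   context (ASN), not at the address range.  */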

#include <asm-generic/cacheflush.h>

#endif /* _ALPHA_CACHEFLUSH_H */