/OK3568_Linux_fs/kernel/arch/um/include/asm/mmu_context.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

#include <asm/mmu.h>

extern void uml_setup_stubs(struct mm_struct *mm);
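/*
 * uml_setup_stubs() lives with the rest of the context machinery in
 * arch/um/kernel/skas/mmu.c in this tree. It maps the stub code and
 * stub data pages into the top of the process address space; the SKAS
 * host side runs those stubs inside the target host process to perform
 * mmap/munmap/mprotect on the kernel's behalf.
 */
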
/*
 * Needed since we do not use the asm-generic/mm_hooks.h:
 */
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	uml_setup_stubs(mm);
	return 0;
}
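
/*
 * arch_dup_mmap() above is the fork-time hook: dup_mmap() in
 * kernel/fork.c invokes it once the child's VMAs have been copied, so
 * the stubs are mapped before the child ever runs.
 */
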
extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
}
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}
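
/*
 * UML has no protection-key style hardware access filtering, so the
 * hook above can unconditionally permit the access.
 */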

/*
 * end asm-generic/mm_hooks.h functions
 */

#define deactivate_mm(tsk, mm)	do { } while (0)

extern void force_flush_all(void);
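/*
 * force_flush_all() (arch/um/kernel/tlb.c) walks every VMA of the
 * current mm and forcibly resyncs its mappings with the host process,
 * for paths where the host's view may have gone stale.
 */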

static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
	 * This is called by fs/exec.c and sys_unshare()
	 * when the new ->mm is used for the first time.
	 */
	__switch_mm(&new->context.id);
	mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
	uml_setup_stubs(new);
	mmap_write_unlock(new);
}
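
/*
 * The SINGLE_DEPTH_NESTING above matters: exec_mmap() in fs/exec.c can
 * reach activate_mm() while it still holds the old mm's mmap lock, and
 * all mmap locks share one lockdep class, so a plain mmap_write_lock()
 * on the new mm would trigger a false-positive deadlock report.
 */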

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (prev != next) {
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));
		if (next != &init_mm)
			__switch_mm(&next->context.id);
	}
}
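
/*
 * switch_mm() is reached from the scheduler's context_switch() when the
 * incoming task owns a different mm. Roughly (kernel/sched/core.c,
 * heavily simplified):
 *
 *	if (!next->mm)				// kernel thread
 *		enter_lazy_tlb(prev->active_mm, next);
 *	else
 *		switch_mm_irqs_off(prev->active_mm, next->mm, next);
 *
 * UML does not define switch_mm_irqs_off(), so the generic fallback in
 * include/linux/mmu_context.h maps it straight onto switch_mm(), and
 * __switch_mm() then tells the host side which address space (mm_id)
 * to run next.
 */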

static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}
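
/*
 * Lazy TLB is a no-op here: UML kernel threads execute inside the UML
 * kernel's own host process and never enter a userspace host address
 * space, so there is nothing to defer or flush when switching to one.
 */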

extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

extern void destroy_context(struct mm_struct *mm);
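
/*
 * Both of these live in arch/um/kernel/skas/mmu.c: init_new_context()
 * creates the host-side address space backing the mm and records it in
 * mm->context.id; destroy_context() tears that host process down again.
 */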

#endif