/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation

#ifndef __ASM_NDS32_MMU_CONTEXT_H
#define __ASM_NDS32_MMU_CONTEXT_H

#include <linux/spinlock.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>

12*4882a593Smuzhiyun static inline int
init_new_context(struct task_struct * tsk,struct mm_struct * mm)13*4882a593Smuzhiyun init_new_context(struct task_struct *tsk, struct mm_struct *mm)
14*4882a593Smuzhiyun {
15*4882a593Smuzhiyun 	mm->context.id = 0;
16*4882a593Smuzhiyun 	return 0;
17*4882a593Smuzhiyun }
18*4882a593Smuzhiyun 
/* No per-mm teardown required: context ids are reclaimed implicitly
 * when the CID generation rolls over (see __new_context()). */
#define destroy_context(mm)	do { } while(0)

/* Width of the hardware context-id (CID) field; bits above this act
 * as a software "generation" counter in cpu_last_cid. */
#define CID_BITS	9
/* Serializes CID allocation in __new_context(). */
extern spinlock_t cid_lock;
/* Last context id handed out (CID field plus generation bits). */
extern unsigned int cpu_last_cid;

/*
 * Allocate a fresh context id for @mm.
 *
 * cpu_last_cid advances in steps of (1 << TLB_MISC_offCID), so the id
 * lands in the hardware CID field of TLB_MISC; the bits above CID_BITS
 * serve as a generation counter used by check_context() to detect
 * stale ids.  All allocator state is protected by cid_lock.
 */
static inline void __new_context(struct mm_struct *mm)
{
	unsigned int cid;
	unsigned long flags;

	spin_lock_irqsave(&cid_lock, flags);
	cid = cpu_last_cid;
	cpu_last_cid += 1 << TLB_MISC_offCID;
	if (cpu_last_cid == 0)
		/* 32-bit counter wrapped: restart at generation 1. */
		cpu_last_cid = 1 << TLB_MISC_offCID << CID_BITS;

	if ((cid & TLB_MISC_mskCID) == 0)
		/* Hardware CID field rolled over: entries tagged with ids
		 * from the previous generation may still be in the TLB,
		 * so flush everything before ids are reused. */
		flush_tlb_all();
	spin_unlock_irqrestore(&cid_lock, flags);

	mm->context.id = cid;
}

check_context(struct mm_struct * mm)43*4882a593Smuzhiyun static inline void check_context(struct mm_struct *mm)
44*4882a593Smuzhiyun {
45*4882a593Smuzhiyun 	if (unlikely
46*4882a593Smuzhiyun 	    ((mm->context.id ^ cpu_last_cid) >> TLB_MISC_offCID >> CID_BITS))
47*4882a593Smuzhiyun 		__new_context(mm);
48*4882a593Smuzhiyun }
49*4882a593Smuzhiyun 
/*
 * Hook invoked when the kernel starts running without a user address
 * space ("lazy TLB" mode).  Nothing to do on nds32.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * Switch the MMU over to @next's address space.
 *
 * This CPU's bit in @next's cpumask is always set (the test-and-set
 * below runs unconditionally); the actual context switch is skipped
 * only when this CPU was already running @next.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	int was_already_set;

	/* Record that this CPU now uses @next, noting prior state. */
	was_already_set = cpumask_test_and_set_cpu(cpu, mm_cpumask(next));
	if (was_already_set && prev == next)
		return;

	/* Refresh a stale CID if needed, then load the new context. */
	check_context(next);
	cpu_switch_mm(next);
}

/* No work needed when an mm is deactivated. */
#define deactivate_mm(tsk,mm)	do { } while (0)
/* Activating an mm is just a switch from @prev with no task context. */
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

#endif