/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 */

#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/bitops.h>
#include <asm/mmu.h>
#include <asm-generic/mm_hooks.h>

# ifdef __KERNEL__
/*
 * This function defines the mapping from contexts to VSIDs (virtual
 * segment IDs).  We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.
 */
# define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
				 & 0xffffff)
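
/*
 * Illustrative example (values invented for this comment): with
 * ctx = 3 and va = 0x40001000,
 *   3 * (897 * 16) + (0x4 * 0x111) = 0xA830 + 0x444,
 * so CTX_TO_VSID() yields VSID 0x00AC74.
 */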

/*
 * MicroBlaze has 256 contexts, so we can just rotate through these
 * as a way of "switching" contexts.  If the TID of the TLB is zero,
 * the PID/TID comparison is disabled, so we can use a TID of zero
 * to represent all kernel pages as shared among all contexts.
 */

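/*
 * Nothing needs to happen when a kernel thread temporarily borrows an
 * mm: the previously loaded context simply stays active, so this hook
 * is empty on MicroBlaze.
 */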
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

# define NO_CONTEXT	256
# define LAST_CONTEXT	255
# define FIRST_CONTEXT	1
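
/*
 * NO_CONTEXT sits just outside the valid 0..LAST_CONTEXT range and
 * marks an mm that has no hardware context assigned yet.  FIRST_CONTEXT
 * starts at 1 because a TID of zero disables the PID/TID comparison and
 * is reserved for kernel mappings (see above); context 0 is presumably
 * marked busy in context_map at init time so get_mmu_context() below
 * never hands it out.
 */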

/*
 * Set the current MMU context.
 * This is done by loading up the segment registers for the user part of the
 * address space.
 *
 * Since the PGD is immediately available, it is much faster to simply
 * pass this along as a second parameter, which is required for 8xx and
 * can be used for debugging on all processors (if you happen to have
 * an Abatron).
 */
extern void set_context(mm_context_t context, pgd_t *pgd);

/*
 * Bitmap of contexts in use.
 * The size of this bitmap is LAST_CONTEXT + 1 bits.
 */
extern unsigned long context_map[];

/*
 * This caches the next context number that we expect to be free.
 * Its use is an optimization only, we can't rely on this context
 * number to be free, but it usually will be.
 */
extern mm_context_t next_mmu_context;

/*
 * Since we don't have sufficient contexts to give one to every task
 * that could be in the system, we need to be able to steal contexts.
 * These variables support that.
 */
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);
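
/*
 * steal_context() is implemented in the C side of this port; judging
 * by its use below, it picks a victim mm via context_mm[], takes its
 * context away and frees it so that the allocation loop in
 * get_mmu_context() can make progress once nr_free_contexts reaches
 * zero.
 */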

/*
 * Get a new mmu context for the address space described by `mm'.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	mm_context_t ctx;

	if (mm->context != NO_CONTEXT)
		return;
	/* Grab a free context, stealing one from another mm if none are free. */
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();
	/*
	 * Scan for a free slot starting at the cached hint, wrapping to 0
	 * if the search runs off the end of the bitmap.
	 */
	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context = ctx;
	context_mm[ctx] = mm;
}

/*
 * Set up the context for a new address space.
 */
# define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)
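
/*
 * Note that this only marks the fresh mm as having no context and
 * evaluates to 0 (success); a real context is allocated lazily, on
 * the first switch_mm()/activate_mm() for this mm.
 */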

/*
 * We're finished using the context for an address space.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		clear_bit(mm->context, context_map);
		mm->context = NO_CONTEXT;
		atomic_inc(&nr_free_contexts);
	}
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	tsk->thread.pgdir = next->pgd;
	/* Make sure next has a context, then program the MMU with it. */
	get_mmu_context(next);
	set_context(next->context, next->pgd);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *active_mm,
			struct mm_struct *mm)
{
	current->thread.pgdir = mm->pgd;
	get_mmu_context(mm);
	set_context(mm->context, mm->pgd);
}
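
/*
 * Taken together, a context's lifecycle is: init_new_context() marks a
 * fresh mm as NO_CONTEXT, the first switch_mm()/activate_mm() allocates
 * a real context via get_mmu_context() and programs the hardware with
 * set_context(), and destroy_context() returns the number to the pool
 * when the address space goes away.
 */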

extern void mmu_context_init(void);

# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */