/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#ifndef CONFIG_MMU
#include <asm/nommu_context.h>
#else

#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/vectors.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
#include <asm-generic/percpu.h>

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

DECLARE_PER_CPU(unsigned long, asid_cache);
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)

/*
 * NO_CONTEXT is the invalid ASID value that we never assign to
 * any user or kernel context.  We use the reserved values in the
 * ASID_INSERT macro below.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT	0
#define ASID_USER_FIRST	4
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))
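
/*
 * Worked example (illustrative only, assuming 8-bit ASIDs): for an ASID
 * whose low byte is 0x2a, ASID_INSERT(0x2a) expands to
 * 0x03020001 | (0x2a << 8) = 0x03022a01, i.e. the RASID fields become
 * ring 0 = 1 (kernel), ring 1 = 0x2a (user), ring 2 = 2 and ring 3 = 3,
 * matching the reserved values listed above.
 */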

void init_mmu(void);
void init_kio(void);

static inline void set_rasid_register(unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, rasid\n\t"
			      " isync\n" : : "a" (val));
}

static inline unsigned long get_rasid_register(void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
	return tmp;
}

static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = cpu_asid_cache(cpu);
	if ((++asid & ASID_MASK) == 0) {
		/*
		 * Start new asid cycle; continue counting with next
		 * incarnation bits; skipping over 0, 1, 2, 3.
		 */
		local_flush_tlb_all();
		asid += ASID_USER_FIRST;
	}
	cpu_asid_cache(cpu) = asid;
	mm->context.asid[cpu] = asid;
	mm->context.cpu = cpu;
}
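
/*
 * Illustrative walk-through (assuming 8-bit ASIDs): the bits above
 * ASID_MASK in asid_cache act as a generation counter.  If the cache
 * holds 0x1ff, the increment gives 0x200 and the low byte wraps to 0,
 * so the local TLB is flushed and the ASID becomes
 * 0x200 + ASID_USER_FIRST = 0x204, i.e. generation 2, first user ASID.
 * That value is then recorded in both the per-cpu cache and
 * mm->context.asid[cpu].
 */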

static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	/*
	 * Check if our ASID is of an older version and thus invalid.
	 */

	if (mm) {
		unsigned long asid = mm->context.asid[cpu];

		if (asid == NO_CONTEXT ||
		    ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
			get_new_mmu_context(mm, cpu);
	}
}
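
/*
 * Example of the staleness test above (values are illustrative): with
 * mm->context.asid[cpu] == 0x1a7 and cpu_asid_cache(cpu) == 0x2b3,
 * (0x1a7 ^ 0x2b3) & ~ASID_MASK == 0x300, so the generation bits differ
 * and get_new_mmu_context() allocates a fresh ASID for this cpu.
 */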

static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
	get_mmu_context(mm, cpu);
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
	invalidate_page_directory();
}
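
/*
 * Ordering note: the ASID is (re)validated first, the RASID register is
 * then loaded so ring-1 (user) accesses use the new ASID, and finally
 * the TLB entries caching the page-directory mapping are invalidated so
 * the next TLB miss walks the new mm's page table.
 */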

/*
 * Initialize the context related info for a new mm_struct
 * instance.  Valid cpu values are 0..(NR_CPUS-1), so initializing
 * to -1 says the process has never run on any core.
 */

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		mm->context.asid[cpu] = NO_CONTEXT;
	}
	mm->context.cpu = -1;
	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	int migrated = next->context.cpu != cpu;
	/* Flush the icache if we migrated to a new core. */
	if (migrated) {
		__invalidate_icache_all();
		next->context.cpu = cpu;
	}
	if (migrated || prev != next)
		activate_context(next, cpu);
}
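
/*
 * Note: activate_context() is skipped only when the task stays on the
 * same cpu and prev == next (e.g. switching between threads that share
 * one mm); in every other case the RASID register is reloaded and the
 * cached page-directory mapping is invalidated.
 */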

#define activate_mm(prev, next)	switch_mm((prev), (next), NULL)
#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}


static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */

}

#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */