/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <asm-generic/mm_hooks.h>

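/*
 * Called by the generic lazy-TLB code when a kernel thread starts
 * borrowing this mm.  Nothing is needed on parisc: the kernel runs
 * with its own space registers, so the previous user context can
 * simply stay loaded.
 */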
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* on PA-RISC, we actually have enough contexts to justify an allocator
 * for them.  prumpf */

extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);

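/*
 * Space ID lifecycle, as used in this file: alloc_sid() hands out a
 * free space ID and free_sid() returns it to the pool.  A context of 0
 * means "no space ID allocated" -- destroy_context() resets
 * mm->context to 0, and activate_mm() below only allocates when it
 * sees 0.
 */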
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	/* A freshly created mm has exactly one user at this point. */
	BUG_ON(atomic_read(&mm->mm_users) != 1);

	mm->context = alloc_sid();
	return 0;
}

static inline void
destroy_context(struct mm_struct *mm)
{
	free_sid(mm->context);
	mm->context = 0;
}

static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
	return context << 1;
#else
	return context >> (SPACEID_SHIFT - 1);
#endif
}
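
/*
 * Illustrative sketch of the shift above: a PA-RISC protection ID
 * register keeps its write-disable bit in the low bit, so the ID
 * itself must start at bit 1.  With SPACEID_SHIFT == 0 the context
 * *is* the raw space ID (e.g. context 5 -> protection value 10, i.e.
 * 5 << 1).  Otherwise the space ID is stored pre-shifted left by
 * SPACEID_SHIFT, and shifting right by SPACEID_SHIFT - 1 likewise
 * leaves it starting at bit 1.
 */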

static inline void load_context(mm_context_t context)
{
	mtsp(context, 3);			/* %sr3: space id for user accesses */
	mtctl(__space_to_prot(context), 8);	/* %cr8: protection id (PID1) */
}

static inline void switch_mm_irqs_off(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
#ifdef CONFIG_TLB_PTLOCK
		/* put physical address of page_table_lock in cr28 (tr4)
		   for TLB faults; __ldcw_align() finds the 16-byte
		   aligned lock word that ldcw actually operates on */
		spinlock_t *pgd_lock = &next->page_table_lock;
		mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
#endif
		/* %cr25: physical address of the new pgd, for the
		   TLB miss handlers */
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}
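
/*
 * A sketch of the calling convention (generic kernel behaviour, not
 * parisc-specific): the scheduler's context_switch() already runs with
 * interrupts disabled and calls switch_mm_irqs_off() directly; other
 * callers go through switch_mm() below, which wraps it in
 * local_irq_save()/local_irq_restore().
 */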

static inline void switch_mm(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	unsigned long flags;

	if (prev == next)
		return;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
/* Tell the generic code that we provide our own irqs-off variant. */
#define switch_mm_irqs_off switch_mm_irqs_off

/* No per-mm teardown needed here; the space id is freed in destroy_context(). */
#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * activate_mm() is our one chance to allocate a space id
	 * for a new mm created in the exec path.  There is also
	 * some lazy-TLB work (currently dead code), but since we
	 * only allocate a space id if one has not been allocated
	 * already, we should be OK.
	 */

	BUG_ON(next == &init_mm); /* Should never happen */

	if (next->context == 0)
		next->context = alloc_sid();

	switch_mm(prev, next, current);
}
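
/*
 * For reference (a sketch of the call path, not defined here):
 * exec_mmap() in fs/exec.c installs the new mm via activate_mm(); the
 * context == 0 check above keeps the allocation idempotent when
 * init_new_context() has already assigned a space id.
 */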
#endif /* __PARISC_MMU_CONTEXT_H */