xref: /OK3568_Linux_fs/kernel/arch/hexagon/include/asm/mmu_context.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * MM context support for the Hexagon architecture
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #ifndef _ASM_MMU_CONTEXT_H
9*4882a593Smuzhiyun #define _ASM_MMU_CONTEXT_H
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <linux/mm_types.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <asm/setup.h>
14*4882a593Smuzhiyun #include <asm/page.h>
15*4882a593Smuzhiyun #include <asm/pgalloc.h>
16*4882a593Smuzhiyun #include <asm/mem-layout.h>
17*4882a593Smuzhiyun 
/*
 * destroy_context - release arch-specific state held in mm->context
 * @mm: mm being torn down
 *
 * Intentionally empty on Hexagon: mm->context is populated by
 * pgd_alloc (see init_new_context below), so there is nothing extra
 * to free here.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun /*
23*4882a593Smuzhiyun  * VM port hides all TLB management, so "lazy TLB" isn't very
24*4882a593Smuzhiyun  * meaningful.  Even for ports to architectures with visible TLBs,
25*4882a593Smuzhiyun  * this is almost invariably a null function.
26*4882a593Smuzhiyun  */
static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *tsk)
{
	/*
	 * Intentionally empty: the VM port hides TLB management, so
	 * there is no lazy-TLB bookkeeping to do (see comment above).
	 */
}
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun  * Architecture-specific actions, if any, for memory map deactivation.
34*4882a593Smuzhiyun  */
/*
 * deactivate_mm - arch hook invoked when @tsk drops @mm.
 *
 * No arch-specific action is needed on Hexagon; the real page-table
 * switch happens in switch_mm()/activate_mm().
 */
static inline void deactivate_mm(struct task_struct *tsk,
	struct mm_struct *mm)
{
}
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun /**
41*4882a593Smuzhiyun  * init_new_context - initialize context related info for new mm_struct instance
42*4882a593Smuzhiyun  * @tsk: pointer to a task struct
43*4882a593Smuzhiyun  * @mm: pointer to a new mm struct
44*4882a593Smuzhiyun  */
/**
 * init_new_context - initialize context related info for new mm_struct instance
 * @tsk: pointer to a task struct
 * @mm: pointer to a new mm struct
 *
 * Returns 0 (success) unconditionally; there is nothing that can fail
 * here because the context fields are filled in elsewhere.
 */
static inline int init_new_context(struct task_struct *tsk,
					struct mm_struct *mm)
{
	/* mm->context is set up by pgd_alloc */
	return 0;
}
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun /*
53*4882a593Smuzhiyun  *  Switch active mm context
54*4882a593Smuzhiyun  */
switch_mm(struct mm_struct * prev,struct mm_struct * next,struct task_struct * tsk)55*4882a593Smuzhiyun static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
56*4882a593Smuzhiyun 				struct task_struct *tsk)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun 	int l1;
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 	/*
61*4882a593Smuzhiyun 	 * For virtual machine, we have to update system map if it's been
62*4882a593Smuzhiyun 	 * touched.
63*4882a593Smuzhiyun 	 */
64*4882a593Smuzhiyun 	if (next->context.generation < prev->context.generation) {
65*4882a593Smuzhiyun 		for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
66*4882a593Smuzhiyun 			next->pgd[l1] = init_mm.pgd[l1];
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 		next->context.generation = prev->context.generation;
69*4882a593Smuzhiyun 	}
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	__vmnewmap((void *)next->context.ptbase);
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun /*
75*4882a593Smuzhiyun  *  Activate new memory map for task
76*4882a593Smuzhiyun  */
activate_mm(struct mm_struct * prev,struct mm_struct * next)77*4882a593Smuzhiyun static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	unsigned long flags;
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	local_irq_save(flags);
82*4882a593Smuzhiyun 	switch_mm(prev, next, current_thread_info()->task);
83*4882a593Smuzhiyun 	local_irq_restore(flags);
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun /*  Generic hooks for arch_dup_mmap and arch_exit_mmap  */
87*4882a593Smuzhiyun #include <asm-generic/mm_hooks.h>
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun #endif
90