/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR      192

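/*
 * One per-CPU batch of pending hash-PTE invalidations. @active is set
 * while the CPU is in lazy MMU mode; @index counts the (vpn, pte)
 * pairs queued so far. All entries in a batch share the same page
 * size (@psize) and segment size (@ssize); a change in either forces
 * the pending entries to be flushed first.
 */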
struct ppc64_tlb_batch {
        int                     active;
        unsigned long           index;
        struct mm_struct        *mm;
        real_pte_t              pte[PPC64_TLB_BATCH_NR];
        unsigned long           vpn[PPC64_TLB_BATCH_NR];
        unsigned int            psize;
        int                     ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
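
/*
 * Illustrative sketch only, not an API defined by this header: the
 * hash PTE-update paths fill the current CPU's batch and flush it once
 * it is full, roughly as follows. Assumes the caller has already
 * computed @vpn and @rpte for the page being invalidated.
 *
 *      struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 *      unsigned long i = batch->index;
 *
 *      batch->pte[i] = rpte;
 *      batch->vpn[i] = vpn;
 *      batch->index = ++i;
 *      if (i >= PPC64_TLB_BATCH_NR)
 *              __flush_tlb_pending(batch);
 */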

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
        struct ppc64_tlb_batch *batch;

        if (radix_enabled())
                return;
        batch = this_cpu_ptr(&ppc64_tlb_batch);
        batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        struct ppc64_tlb_batch *batch;

        if (radix_enabled())
                return;
        batch = this_cpu_ptr(&ppc64_tlb_batch);

        if (batch->index)
                __flush_tlb_pending(batch);
        batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()      do {} while (0)
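
/*
 * Illustrative usage sketch (the real callers live in generic mm
 * code): between enter and leave, hash PTE invalidations may be queued
 * in the per-CPU batch instead of being flushed one at a time, and
 * leaving the mode flushes whatever is still pending.
 *
 *      arch_enter_lazy_mmu_mode();
 *      ... batched set_pte_at()/pte_clear() updates over a range ...
 *      arch_leave_lazy_mmu_mode();     // flushes any pending batch
 */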

extern void hash__tlbiel_all(unsigned int action);

extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
                            int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
                                pmd_t *pmdp, unsigned int psize, int ssize,
                                unsigned long flags);
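
/*
 * Descriptive note, based on the hash_tlb implementation rather than
 * anything defined in this header: __flush_tlb_pending() invalidates a
 * single queued entry via flush_hash_page() and falls back to
 * flush_hash_range() when more than one entry is pending; a "local"
 * (tlbiel) flush is chosen when the mm has only run on the current CPU.
 */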
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__local_flush_all_mm(struct mm_struct *mm)
{
        /*
         * There's no Page Walk Cache for hash, so what is needed is
         * the same as flush_tlb_mm(), which doesn't really make sense
         * with hash. So the only thing we could do is flush the
         * entire LPID! Punt for now, as it's not being used.
         */
        WARN_ON_ONCE(1);
}

static inline void hash__flush_all_mm(struct mm_struct *mm)
{
        /*
         * There's no Page Walk Cache for hash, so what is needed is
         * the same as flush_tlb_mm(), which doesn't really make sense
         * with hash. So the only thing we could do is flush the
         * entire LPID! Punt for now, as it's not being used.
         */
        WARN_ON_ONCE(1);
}

static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
                                              unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
                                         unsigned long start, unsigned long end)
{
}

static inline void hash__flush_tlb_kernel_range(unsigned long start,
                                                unsigned long end)
{
}

struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(unsigned long start, unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long addr);
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */