/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
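
/*
 * Illustrative sketch only (not part of this header): the two-step
 * update that the rule above forces for a pte which may be attached
 * to a running CPU. ipte_invalidate() is a hypothetical stand-in for
 * the IPTE-based invalidation done by the real helpers in
 * <asm/pgtable.h>.
 */
#if 0	/* example only, never compiled */
static void example_update_live_pte(unsigned long addr, pte_t *ptep,
				    pte_t new)
{
	ipte_invalidate(addr, ptep);	/* i) invalidate pte + purge TLB */
	*ptep = new;			/* ii) only then store the new pte */
}
#endif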

void __tlb_remove_table(void *_table);
static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size);

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
#define pmd_free_tlb pmd_free_tlb
#define p4d_free_tlb p4d_free_tlb
#define pud_free_tlb pud_free_tlb

#include <asm/tlbflush.h>
#include <asm-generic/tlb.h>

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache
 * page has already been flushed, so just do free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	free_page_and_swap_cache(page);
	return false;
}
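
/*
 * Illustrative sketch only: how a generic unmap path drives the hooks
 * in this file. tlb_gather_mmu(), tlb_remove_page() and
 * tlb_finish_mmu() come from <asm-generic/tlb.h>; their exact
 * signatures vary between kernel versions, so treat this as a rough
 * sketch rather than a reference.
 */
#if 0	/* example only, never compiled */
static void example_zap_one_page(struct mm_struct *mm, struct page *page)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);	/* start a flush cycle */
	tlb_remove_page(&tlb, page);	/* ends up in __tlb_remove_page_size() */
	tlb_finish_mmu(&tlb);		/* tlb_flush() + release batched pages */
}
#endif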
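/*
 * tlb_flush is invoked at the end of a tlb_gather_mmu cycle (from
 * tlb_finish_mmu). __tlb_flush_mm_lazy, defined in <asm/tlbflush.h>,
 * only issues the actual flush if context.flush_mm has been set, e.g.
 * by the *_free_tlb helpers below, and clears the flag again.
 */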
static inline void tlb_flush(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_ptes = 1;
	/*
	 * page_table_free_rcu takes care of the allocation bit masks
	 * of the 2K table fragments in the 4K page table page,
	 * then calls tlb_remove_table.
	 */
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}
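
/*
 * Layout sketch, assuming the usual s390 scheme: a pte table holds 256
 * eight-byte entries and is thus only 2K, so two table fragments share
 * one 4K page, and the page can only go back to the allocator once
 * both halves are free:
 *
 *	4K page: +-------------+-------------+
 *		 | 2K fragment | 2K fragment |
 *		 | (pte table) | (pte table) |
 *		 +-------------+-------------+
 */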

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (mm_pmd_folded(tlb->mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_puds = 1;
	tlb_remove_table(tlb, pmd);
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (mm_p4d_folded(tlb->mm))
		return;
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_p4ds = 1;
	tlb_remove_table(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (mm_pud_folded(tlb->mm))
		return;
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_puds = 1;
	tlb_remove_table(tlb, pud);
}
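
/*
 * For reference: the Linux page table levels map onto the s390 DAT
 * tables as pgd = region first table, p4d = region second table,
 * pud = region third table, pmd = segment table and pte = page table,
 * with the upper levels folded away when the asce_limit allows it.
 */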

#endif /* _S390_TLB_H */