/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h */
/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()). In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have
 *    no additional constraints. See also MMU_GATHER_TABLE_FREE and
 *    MMU_GATHER_RCU_TABLE_FREE below.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page
 *    for freeing; __tlb_remove_page() assumes PAGE_SIZE. Both return a
 *    boolean indicating whether the queue is (now) full, in which case a
 *    call to tlb_flush_mmu() is required.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations:
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force-flush
 *    the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * An architecture that provides its own tlb_flush() may, in addition to the
 * fields above, make use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provide the range that needs to be flushed to cover the pages
 *    to be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page-table pages.
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    which return the smallest TLB entry size unmapped in this range.
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *    Provides tlb_remove_table() as a real primitive rather than an alias
 *    for tlb_remove_page(). Useful if your architecture has non-page page
 *    directories.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *    Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *    the comment below). Useful if your architecture doesn't use IPIs for
 *    remote TLB invalidates and therefore doesn't naturally serialize with
 *    software page-table walkers.
 *
 * A rough usage sketch follows below.
 */
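/*
 * Illustrative call pattern (a sketch, not taken from this file): a typical
 * per-VMA unmap drives the API roughly like this. The loop is pseudo-code
 * and the exact tlb_gather_mmu() signature varies between kernel versions:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	tlb_start_vma(&tlb, vma);
 *	for each pte unmapped in [start, end) {
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, page);	// may imply tlb_flush_mmu()
 *	}
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb);	// final TLB invalidate + free queued pages
 */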
struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
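/*
 * Worked example (illustrative): on a 64-bit machine with 4096-byte pages,
 * the header is roughly 24 bytes with the RCU head (16-byte rcu_head +
 * 4-byte nr, padded for pointer alignment), so one batch page holds
 * (4096 - 24) / 8 = 509 page-table pointers; nearly the whole page is the
 * pointer array.
 */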
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
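/*
 * Worked example (illustrative): with 4096-byte pages and a 16-byte
 * struct mmu_gather_batch header, MAX_GATHER_BATCH is
 * (4096 - 16) / 8 = 510, so MAX_GATHER_BATCH_COUNT is 10000 / 510 = 19
 * batches, i.e. at most roughly 19 * 510 = 9690 pages freed between
 * preemption points.
 */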
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
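/*
 * Example (illustrative, assuming 4K pages): starting from the "empty"
 * range set up by __tlb_reset_range() below, the two calls
 *
 *	__tlb_adjust_range(tlb, 0x1000, PAGE_SIZE);
 *	__tlb_adjust_range(tlb, 0x5000, PAGE_SIZE);
 *
 * leave tlb->start = 0x1000 and tlb->end = 0x6000: the gather tracks one
 * convex range covering everything touched, not a list of individual pages.
 */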
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
}
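/*
 * Note that the !fullmm reset values are deliberately inverted
 * (start = TASK_SIZE, end = 0): the min()/max() in __tlb_adjust_range()
 * then make the first adjustment initialize the range rather than extend
 * it, and a zero tlb->end is what tlb_flush() below checks to detect an
 * empty range.
 */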
#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif
/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep
 * the range small; just flush the whole mm when a flush is due.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}
#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range(), use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
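/*
 * Design note: the on-stack vm_area_struct above exists only to satisfy
 * flush_tlb_range()'s interface; tlb->vma_exec and tlb->vma_huge (captured
 * by tlb_update_vma_flags() below) preserve the only two vm_flags bits
 * that flush_tlb_range() implementations are known to inspect.
 */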
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#endif /* tlb_flush */

#endif /* CONFIG_MMU_GATHER_NO_RANGE */
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}
static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
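/*
 * Usage sketch (illustrative, loosely following what a hugepage zap path
 * does): announce the page size before queueing, so that with
 * CONFIG_MMU_GATHER_PAGE_SIZE a batch never mixes page sizes:
 *
 *	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);	// may flush first
 *	tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);
 *	tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
 */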
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
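/*
 * Illustrative architecture override (hypothetical): an arch with
 * size-aware invalidate instructions could provide its own tlb_flush()
 * built on the state gathered above. my_arch_flush_asid() and
 * my_arch_flush_range() are placeholders, not real kernel APIs:
 */
#if 0	/* example only */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		my_arch_flush_asid(tlb->mm);		/* placeholder */
	} else if (tlb->end) {
		/* stride = smallest entry size unmapped in this range */
		unsigned int shift = tlb_get_unmap_shift(tlb);

		my_arch_flush_range(tlb->mm, tlb->start, tlb->end, shift);
	}
}
#define tlb_flush tlb_flush
#endif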
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb,
				 struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif
#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb,
			       struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif
/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust tlb->start and tlb->end
 * and set the corresponding cleared_* bit.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
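/*
 * Illustrative override (hypothetical): an architecture that wants the
 * address of every unmapped pte, e.g. to batch its own invalidate
 * instructions, can supply __tlb_remove_tlb_entry() before including this
 * header. my_arch_note_unmap() is a placeholder, not a real API:
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address)	\
 *		my_arch_note_unmap(tlb, ptep, address)
 */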
/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
/*
 * For things like page table caches (ie caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE.
 */
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
#endif
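/*
 * The __p*_free_tlb() hooks above are supplied by the architecture and do
 * the actual freeing. An illustrative sketch, loosely modelled on
 * architectures whose pte tables are plain pages (real implementations may
 * also need paravirt, accounting, or other arch-specific calls):
 */
#if 0	/* example only */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_pte_page_dtor(pte);	/* undo pgtable_pte_page_ctor() */
	tlb_remove_table(tlb, pte);	/* queue the table page for freeing */
}
#endif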