/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/tlbv4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARM architecture version 4 TLB handling functions.
 *  These assume split I/D TLBs with no per-entry I-TLB invalidate,
 *  plus a write buffer.
 *
 *  Processors: SA110 SA1100 SA1110
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

	.align	5
/*
 *	v4wb_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - range start address
 *	- end	- range end address
 *	- vma	- vm_area_struct describing the address range
 */
	.align	5
ENTRY(v4wb_flush_user_tlb_range)
	vma_vm_mm ip, r2
	act_mm	r3				@ get current->active_mm
	eors	r3, ip, r3			@ == mm ?
	retne	lr				@ no, we don't do anything
	vma_vm_flags r2, r2
	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r3, c8, c5, 0		@ invalidate I TLB
	bic	r0, r0, #0x0ff			@ round start down to a
	bic	r0, r0, #0xf00			@ page boundary
1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 *	v4wb_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of TLB entries in the specified kernel
 *	address range.
 *
 *	- start - virtual address (may not be aligned)
 *	- end	- virtual address (may not be aligned)
 */
ENTRY(v4wb_flush_kern_tlb_range)
	mov	r3, #0
	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
	bic	r0, r0, #0x0ff			@ round start down to a
	bic	r0, r0, #0xf00			@ page boundary
	mcr	p15, 0, r3, c8, c5, 0		@ invalidate I TLB
1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	ret	lr

	__INITDATA

	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions v4wb, v4wb_tlb_flags
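
@ For reference, a minimal C sketch of the control flow implemented by
@ v4wb_flush_user_tlb_range() above; it is illustrative only, not kernel
@ code.  drain_write_buffer(), invalidate_itlb_all() and
@ invalidate_dtlb_entry() are hypothetical stand-ins for the three CP15
@ operations used here, and PAGE_SIZE is assumed to be 4096 (PAGE_SZ):
@
@	static void flush_user_tlb_range(unsigned long start, unsigned long end,
@					 int vma_exec, int mm_active)
@	{
@		if (!mm_active)			/* eors/retne: not our mm */
@			return;
@		drain_write_buffer();		/* p15, c7, c10, 4 */
@		if (vma_exec)
@			invalidate_itlb_all();	/* p15, c8, c5, 0 */
@		start &= ~(PAGE_SIZE - 1);	/* bic #0x0ff / bic #0xf00 */
@		while (start < end) {		/* cmp/blo loop */
@			invalidate_dtlb_entry(start);	/* p15, c8, c6, 1 */
@			start += PAGE_SIZE;
@		}
@	}
@
@ v4wb_flush_kern_tlb_range() uses the same per-page loop, but drains the
@ write buffer and invalidates the whole I TLB unconditionally, since a
@ kernel range may contain code and these cores have no per-entry I-TLB
@ invalidate operation.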