/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * TLB Exception Handling for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * Vineetg: April 2011 :
 *  -MMU v1: moved out legacy code into a separate file
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *	     helps avoid a shift when preparing PD0 from PTE
 *
 * Vineetg: July 2009
 *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
 *   entry, so that it doesn't knock out its I-TLB entry
 *  -Some more fine tuning:
 *   bmsk instead of add, asl.cc instead of branch, delay slot utilisation etc.
 *
 * Vineetg: July 2009
 *  -Practically rewrote the I/D TLB Miss handlers
 *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
 *   Hence leaner by 1.5K
 *   Used conditional arithmetic to replace excessive branching
 *   Also used short instructions wherever possible
 *
 * Vineetg: Aug 13th 2008
 *  -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
 *   more information in case of a fatality
 *
 * Vineetg: March 25th Bug #92690
 *  -Added debug code to check if sw-ASID == hw-ASID
 *
 * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/entry.h>
#include <asm/mmu.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>
#include <asm/tlb-mmu1.h>

#ifdef CONFIG_ISA_ARCOMPACT
;-----------------------------------------------------------------
; ARC700 Exception Handling doesn't auto-switch stack and it only provides
; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
;
; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
; "global" is used to free up the FIRST core reg to be able to code the rest
; of the exception prologue (IRQs are auto-disabled on exceptions, so it is
; IRQ-safe). Since the fast path TLB Miss handler is coded with 4 regs, the
; remaining 3 need to be saved as well, by extending the "global" to 4 words.
; Hence ".size ex_saved_reg1, 16"
; [All of this dance is to avoid stack switching for each TLB Miss, since we
; only need to save a handful of regs, as opposed to the complete reg file]
;
; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
; core reg, as that would not be SMP-safe.
; Thus the scratch AUX reg is used (and no longer used to cache task PGD).
; To save the remaining 3 regs per cpu, the global is made "per-cpu".
; The epilogue thus has to locate the "per-cpu" storage for the regs.
; To avoid cache line bouncing, the per-cpu global is aligned/sized per
; L1_CACHE_SHIFT, despite fundamentally needing to be only 12 bytes. Hence
; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
;
; As simple as that....
;--------------------------------------------------------------------------
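
; To make the sizing concrete (illustrative numbers only): with a 64-byte
; cache line (L1_CACHE_SHIFT == 6) and CONFIG_NR_CPUS == 4, the area below
; is 256 bytes, and CPU n's save slot starts at
;	ex_saved_reg1 + (n << L1_CACHE_SHIFT)
; with each CPU actually using only the first few words of its line.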

; scratch memory to save [r0-r3] used to code TLB refill Handler
ARCFP_DATA ex_saved_reg1
	.align 1 << L1_CACHE_SHIFT
	.type   ex_saved_reg1, @object
#ifdef CONFIG_SMP
	.size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
ex_saved_reg1:
	.zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
#else
	.size   ex_saved_reg1, 16
ex_saved_reg1:
	.zero 16
#endif

.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_SMP
	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
	GET_CPU_ID  r0			; get to per cpu scratch mem,
	asl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
	add r0, @ex_saved_reg1, r0
#else
	st    r0, [@ex_saved_reg1]
	mov_s r0, @ex_saved_reg1
#endif
	st_s  r1, [r0, 4]
	st_s  r2, [r0, 8]
	st_s  r3, [r0, 12]

	; VERIFY if the ASID in MMU-PID Reg is same as
	; one in Linux data structures

	tlb_paranoid_check_asm
.endm

.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
	GET_CPU_ID  r0			; get to per cpu scratch mem
	asl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
	add r0, @ex_saved_reg1, r0
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	lr    r0, [ARC_REG_SCRATCH_DATA0]
#else
	mov_s r0, @ex_saved_reg1
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	ld_s  r0, [r0]
#endif
.endm

#else	/* ARCv2 */

.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_ARC_HAS_LL64
	std   r0, [sp, -16]
	std   r2, [sp, -8]
#else
	PUSH  r0
	PUSH  r1
	PUSH  r2
	PUSH  r3
#endif
.endm

.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_ARC_HAS_LL64
	ldd   r0, [sp, -16]
	ldd   r2, [sp, -8]
#else
	POP   r3
	POP   r2
	POP   r1
	POP   r0
#endif
.endm

#endif
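
; A note on the LL64 variant above (rationale as understood, not
; authoritative): the std/ldd pairs park r0-r3 just below the stack
; pointer without moving sp; this is presumed safe here because the
; miss handler runs with interrupts disabled, so nothing can push onto
; this stack between the save and the restore.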

;============================================================================
; Troubleshooting Stuff
;============================================================================

; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
; When creating TLB entries, instead of doing 3 dependent loads from memory,
; we use the MMU PID Reg to get the current ASID.
; In bizarre scenarios SW and HW ASID can get out of sync, which is trouble.
; So we try to detect this in the TLB Miss handler.

.macro tlb_paranoid_check_asm

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

	GET_CURR_TASK_ON_CPU  r3
	ld r0, [r3, TASK_ACT_MM]
	ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
	breq r0, 0, 55f	; Error if no ASID allocated

	lr r1, [ARC_REG_PID]
	and r1, r1, 0xFF

	and r2, r0, 0xFF	; MMU PID bits only for comparison
	breq r1, r2, 5f

55:
	; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
	lr  r2, [erstatus]
	bbit0 r2, STATUS_U_BIT, 5f

	; We sure are in troubled waters, Flag the error, but to do so
	; need to switch to kernel mode stack to call error routine
	GET_TSK_STACK_BASE   r3, sp

	; Call printk to shout out loud
	mov r2, 1
	j print_asid_mismatch

5:	; ASIDs match so proceed normally
	nop

#endif

.endm
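
; Rough C equivalent of the paranoid check above (comment-only sketch;
; the exact C-side print_asid_mismatch() signature may differ):
;
;	unsigned int mm_asid = task->active_mm->context.asid;
;	unsigned int mmu_pid = read_aux_reg(ARC_REG_PID) & 0xFF;
;
;	if ((!mm_asid || (mm_asid & 0xFF) != mmu_pid) && user_mode)
;		print_asid_mismatch(mm_asid, mmu_pid, 1);  /* 1: fast path */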

;============================================================================
; TLB Miss handling Code
;============================================================================

;-----------------------------------------------------------------------------
; This macro does the page-table lookup for the faulting address.
; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
.macro LOAD_FAULT_PTE

	lr  r2, [efa]

#ifdef ARC_USE_SCRATCH_REG
	lr  r1, [ARC_REG_SCRATCH_DATA0]	; current pgd
#else
	GET_CURR_TASK_ON_CPU  r1
	ld  r1, [r1, TASK_ACT_MM]
	ld  r1, [r1, MM_PGD]
#endif

	lsr     r0, r2, PGDIR_SHIFT	; Bits for indexing into PGD
	ld.as   r3, [r1, r0]		; PGD entry corresp to faulting addr
	tst	r3, r3
	bz	do_slow_path_pf		; if no Page Table, do page fault

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	and.f	0, r3, _PAGE_HW_SZ	; Is this Huge PMD (thp)
	add2.nz	r1, r1, r0
	bnz.d	2f		; YES: PGD == PMD has THP PTE: stop pgd walk
	mov.nz	r0, r3

#endif
	and	r1, r3, PAGE_MASK

	; Get the PTE entry: The idea is
	; (1) x = addr >> PAGE_SHIFT	-> masks page-off bits from @fault-addr
	; (2) y = x & (PTRS_PER_PTE - 1) -> to get the index
	; (3) z = pgtbl + y * 4

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_SIZE_LOG	3	/* 8 == 2 ^ 3 */
#else
#define PTE_SIZE_LOG	2	/* 4 == 2 ^ 2 */
#endif

	; multiply in step (3) above avoided by shifting lesser in step (1)
	lsr     r0, r2, (PAGE_SHIFT - PTE_SIZE_LOG)
	and     r0, r0, ((PTRS_PER_PTE - 1) << PTE_SIZE_LOG)
	ld.aw   r0, [r1, r0]		; r0: PTE (lower word only for PAE40)
					; r1: PTE ptr

2:

.endm
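
; Comment-only C sketch of the walk above for the common (non-THP) case;
; names mirror the asm and this is a sketch, not a definitive rendering:
;
;	pgd_entry = pgd[efa >> PGDIR_SHIFT];		/* ld.as */
;	pte_ptr   = (pte_t *)(pgd_entry & PAGE_MASK)
;		    + ((efa >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
;	pte	  = *pte_ptr;	/* ld.aw also leaves r1 = pte_ptr */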

;-----------------------------------------------------------------
; Convert Linux PTE entry into TLB entry
; A one-word PTE entry is programmed as a two-word TLB Entry [PD0:PD1] in mmu
; (for PAE40: a two-word PTE, and a three-word TLB Entry [PD0:PD1:PD1HI])
; IN: r0 = PTE, r1 = ptr to PTE

.macro CONV_PTE_TO_TLB
	and    r3, r0, PTE_BITS_RWX	;          r  w  x
	asl    r2, r3, 3		; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
	and.f  0,  r0, _PAGE_GLOBAL
	or.z   r2, r2, r3		; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)

	and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
	or  r3, r3, r2

	sr  r3, [ARC_REG_TLBPD1]	; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
#ifdef CONFIG_ARC_HAS_PAE40
	ld	r3, [r1, 4]		; paddr[39..32]
	sr	r3, [ARC_REG_TLBPD1HI]
#endif

	and r2, r0, PTE_BITS_IN_PD0	; Extract other PTE flags: (V)alid, (G)lb

	lr  r3, [ARC_REG_TLBPD0]	; MMU prepares PD0 with vaddr and asid

	or  r3, r3, r2			; S | vaddr | {sasid|asid}
	sr  r3, [ARC_REG_TLBPD0]	; rewrite PD0
.endm

;-----------------------------------------------------------------
; Commit the TLB entry into MMU

.macro COMMIT_ENTRY_TO_MMU
#if (CONFIG_ARC_MMU_VER < 4)

	/* Get free TLB slot: Set = computed from vaddr, way = random */
	sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]

	/* Commit the Write */
	sr  TLBWriteNI, [ARC_REG_TLBCOMMAND]

#else
	sr  TLBInsertEntry, [ARC_REG_TLBCOMMAND]
#endif

88:
.endm
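
; Recap of what the two macros above leave in the MMU (field layout per the
; inline comments above; exact bit positions are MMU-version specific):
;	PD1 = paddr[31..13] | Kr Kw Kx [Ur Uw Ux] | C	(PD1HI added for PAE40)
;	PD0 = vaddr | ASID (pre-filled by MMU) | V/G flags copied from PTE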

ARCFP_CODE	;Fast Path Code, candidate for ICCM

;-----------------------------------------------------------------------------
; I-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissI)

	TLBMISS_FREEUP_REGS

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Check if PTE permissions approp for executing code
	cmp_s   r2, VMALLOC_START
	mov_s   r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
	or.hs   r2, r2, _PAGE_GLOBAL

	and     r3, r0, r2	; Mask out NON Flag bits from PTE
	xor.f   r3, r3, r2	; check ( ( pte & flags_test ) == flags_test )
	bnz     do_slow_path_pf

	; Let Linux VM know that the page was accessed
	or      r0, r0, _PAGE_ACCESSED	; set Accessed Bit
	st_s    r0, [r1]		; Write back PTE

	CONV_PTE_TO_TLB
	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
EV_TLBMissI_fast_ret:	; additional label for VDK OS-kit instrumentation
	rtie

END(EV_TLBMissI)

;-----------------------------------------------------------------------------
; D-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissD)

	TLBMISS_FREEUP_REGS

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed
	; If the PTE exists, it sets up r0 = PTE, r1 = ptr to PTE, r2 = EFA

	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)

	cmp_s	r2, VMALLOC_START
	mov_s	r2, _PAGE_PRESENT	; common bit for K/U PTE
	or.hs	r2, r2, _PAGE_GLOBAL	; kernel PTE only

	; Linux PTE [RWX] bits are semantically overloaded:
	; -If PAGE_GLOBAL is set, they refer to kernel-only flags (vmalloc)
	; -Otherwise they are user-mode permissions, and those are exactly
	;  the same for kernel mode as well (e.g. copy_(to|from)_user)

	lr	r3, [ecr]
	btst_s	r3, ECR_C_BIT_DTLB_LD_MISS	; Read Access
	or.nz	r2, r2, _PAGE_READ		; chk for Read flag in PTE
	btst_s	r3, ECR_C_BIT_DTLB_ST_MISS	; Write Access
	or.nz	r2, r2, _PAGE_WRITE		; chk for Write flag in PTE
	; Above laddering takes care of XCHG access (both R and W)
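	; The mask build above as a comment-only C sketch (the ecr bit
	; tests are abbreviated; a sketch, not a definitive rendering):
	;
	;	mask = _PAGE_PRESENT;
	;	if (efa >= VMALLOC_START)  mask |= _PAGE_GLOBAL;
	;	if (ecr has DTLB_LD_MISS)  mask |= _PAGE_READ;
	;	if (ecr has DTLB_ST_MISS)  mask |= _PAGE_WRITE;	/* xchg: both */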

	; By now, r2 is setup with all the flags we need to check in the PTE
	and     r3, r0, r2		; Mask out NON Flag bits from PTE
	brne.d  r3, r2, do_slow_path_pf	; is ((pte & flags_test) == flags_test)

	;----------------------------------------------------------------
	; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
	or      r0, r0, _PAGE_ACCESSED	; Accessed bit always
	or.nz   r0, r0, _PAGE_DIRTY	; if Write, set Dirty bit as well
	st_s    r0, [r1]		; Write back PTE

	CONV_PTE_TO_TLB

#if (CONFIG_ARC_MMU_VER == 1)
	; MMU with 2-way set-assoc J-TLB needs some help in the pathetic case
	; of a memcpy where 3 parties contend for 2 ways, resulting in a
	; livelock. But only for the old MMU or one with the Metal Fix
	TLB_WRITE_HEURISTICS
#endif

	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
EV_TLBMissD_fast_ret:	; additional label for VDK OS-kit instrumentation
	rtie

;-------- Common routine to call Linux Page Fault Handler -----------
do_slow_path_pf:

#ifdef CONFIG_ISA_ARCV2
	; Set Z flag if exception in U mode. Hardware micro-ops do this on any
	; taken interrupt/exception, so it is already the case at the entry
	; above, but the ensuing code would have clobbered it by now.
	; EXCEPTION_PROLOGUE, called in the slow path, relies on the Z flag
	; being set correctly.

	lr	r2, [erstatus]
	and	r2, r2, STATUS_U_MASK
	bxor.f	0, r2, STATUS_U_BIT
#endif

	; Restore the 4 scratch regs saved by the fast path miss handler
	TLBMISS_RESTORE_REGS

	; Slow path TLB Miss handled as a regular ARC Exception
	; (stack switching / save the complete reg-file).
	b  call_do_page_fault
END(EV_TLBMissD)