/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 */

#include <linux/pgtable.h>
#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>

#ifdef CONFIG_SMP
	.section .bss
	.align	2
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r6, r8, r10, ctr, lr.
 */
	.text
_GLOBAL(hash_page)
#ifdef CONFIG_SMP
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h
	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
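	/*
	 * The loop above is the classic lwarx/stwcx. test-and-set lock:
	 * spin reading the lock word until it reads zero, then attempt
	 * the reservation-protected store; the isync keeps accesses made
	 * under the lock from being performed before it is visibly held.
	 */
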
	/* Get PTE (linux-style) and check access */
	lis	r0, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r4,r0
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT	/* test low addresses as user */
	mfspr	r5, SPRN_SPRG_PGDIR	/* phys page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r5, r5, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#else
	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
#endif
#ifdef CONFIG_SMP
	beq-	.Lhash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
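	/*
	 * C-like sketch of the two-level walk above, using physical
	 * addresses:
	 *   pmd = pgdir[ea >> 22];	(ea >> 21 with CONFIG_PTE_64BIT)
	 *   pte = (pte_t *)(pmd & PAGE_MASK) + ((ea >> 12) & 0x3ff);
	 * with 512 eight-byte PTEs per page (index mask 0x1ff) in the
	 * 64-bit PTE case.
	 */
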
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
	 *
	 * If PTE_64BIT is set, the low word is the flags word; use that
	 * word for locking since it contains all the interesting bits.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
.Lretry:
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	.Lhash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	.Lretry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
#ifndef CONFIG_VMAP_STACK
	mfctr	r0
	stw	r0,_CTR(r11)
#endif
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif

#ifdef CONFIG_VMAP_STACK
	b	fast_hash_page_return
#else
	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return
#endif

#ifdef CONFIG_SMP
.Lhash_page_out:
	eieio
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
	blr
#endif /* CONFIG_SMP */
_ASM_NOKPROBE_SYMBOL(hash_page)

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
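	/*
	 * VSID sketch: vsid = context * (897 * 16) + esid * 0x111, where
	 * esid is the top 4 bits of the EA; create_hpte below masks the
	 * result down to 24 bits when it builds the hashed PTE.
	 */
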
#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r9
	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	isync
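	/*
	 * Note the IBM (big-endian) bit numbering in the rlwinm masks:
	 * keeping bits 17..15 (wrapping) clears only bit 16, i.e. MSR_EE
	 * (0x8000), and keeping bits 28..26 clears bit 27, i.e. MSR_DR
	 * (0x10).
	 */
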
#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r6
	beq+	12f
11:	lwz	r0,0(r6)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29
#else
	rlwimi	r8,r4,23,20,28
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

9:
#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
	eieio
	li	r0,0
	stw	r0,0(r6)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r9
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr
_ASM_NOKPROBE_SYMBOL(add_hash_page)

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE).  r10 contains the
 * upper half of the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are for the early hash table.
 */
Hash_base = early_hash
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
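/*
 * Worked example: with Hash_bits = 12 the early table has 4096 PTEGs
 * of 64 bytes each (256kB total), so Hash_msk = 4095 * 64 = 0x3ffc0,
 * the PTEG-aligned offset mask within the table.
 */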
/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define LDPTE		lwz
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
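/*
 * For the early table (Hash_bits = 12, LG_PTEG_SIZE = 6) the
 * HASH_LEFT..HASH_RIGHT range selects bits 14..25, so the rlwimi/
 * rlwinm below deposit the hash as a 64-byte-aligned PTEG offset.
 */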
__REF
_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe04		/* clear out reserved bits */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
	/* Put the XPN bits into the PTE */
	rlwimi	r8,r10,8,20,22
	rlwimi	r8,r10,2,29,29
#endif

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	patch_site	0f, patch__hash_page_A0
	patch_site	1f, patch__hash_page_A1
	patch_site	2f, patch__hash_page_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r0, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */
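	/*
	 * Hash sketch: primary = (VSID ^ page_index) & Hash_msk, scaled
	 * to a PTEG address within the table; the secondary hash used
	 * below is its ones-complement within the same mask (the
	 * xoris/xori pair).
	 */
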
	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	lis	r4, (htab_hash_searches - PAGE_OFFSET)@ha
	lwz	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-HPTE_SIZE
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	.Lfound_slot

	patch_site	0f, patch__hash_page_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	.Lfound_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	.Lfound_empty

	/* update counter of times that the primary PTEG is full */
	lis	r4, (primary_pteg_full - PAGE_OFFSET)@ha
	lwz	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1
	stw	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)

	patch_site	0f, patch__hash_page_C
	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	.Lfound_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 *
	 * In addition, we skip any slot that is mapping kernel text in
	 * order to avoid a deadlock when not using BAT mappings if
	 * trying to hash in the kernel hash code itself after it has
	 * already taken the hash table lock.  This works in conjunction
	 * with pre-faulting of the kernel text.
	 *
	 * If the hash table bucket is full of kernel text entries, we'll
	 * lock up here, but that shouldn't happen.
	 */

1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
	addi	r6,r6,HPTE_SIZE		/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
	LDPTE	r0,HPTE_SIZE/2(r4)	/* get PTE second word */
	clrrwi	r0,r0,12
	lis	r6,etext@h
	ori	r6,r6,etext@l		/* get etext */
	tophys(r6,r6)
	cmpl	cr0,r0,r6		/* compare and try again */
	blt	1b
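	/*
	 * next_slot is a simple round-robin victim pointer: the andi.
	 * with 7*HPTE_SIZE wraps it over the 8 slots of the PTEG, and
	 * the loop re-rolls whenever the candidate's RPN falls inside
	 * kernel text.
	 */
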
#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
.Lfound_empty:
	STPTE	r5,0(r4)
.Lfound_slot:
	STPTE	r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
.Lfound_empty:
.Lfound_slot:
	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,HPTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)	/* finally set V bit in PTE */
#endif /* CONFIG_SMP */
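	/*
	 * On SMP the store sequence above is the usual HPTE update
	 * protocol: write the first word with V clear, sync, fill in
	 * the RPN/WIMG/PP word, sync again, and only then set V, so
	 * that no CPU can ever load a half-written entry.
	 */
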
	sync			/* make sure pte updates get to memory */
	blr
	.previous
_ASM_NOKPROBE_SYMBOL(create_hpte)

	.section .bss
	.align	2
next_slot:
	.space	4
primary_pteg_full:
	.space	4
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
__REF
_GLOBAL(flush_hash_pages)
	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,22,20,29
#else
	rlwimi	r5,r4,23,20,28
#endif
1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,PTE_SIZE
	addi	r6,r6,-1
	b	1b
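	/*
	 * The loop above skips leading pages whose _PAGE_HASHPTE is
	 * clear; cr1 tracks the remaining count (r6) so we exit via
	 * 19f once all count pages have been examined without finding
	 * anything to flush.
	 */
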
	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	lis	r9, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
	tophys	(r8, r2)
	lwz	r8, TASK_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r5,r5,PTE_FLAGS_OFFSET
#endif
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	patch_site	0f, patch__flush_hash_A0
	patch_site	1f, patch__flush_hash_A1
	patch_site	2f, patch__flush_hash_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r8, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r11 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-HPTE_SIZE
1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	patch_site	0f, patch__flush_hash_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
0:	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,HPTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync
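	/*
	 * Whether or not a matching HPTE was found, the tlbie above
	 * evicts any stale translation for this page from the hardware
	 * TLBs; the surrounding syncs (and the TLBSYNC below on SMP)
	 * order it with respect to the hash table update.
	 */
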
8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,PTE_SIZE
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	isync
	blr
	.previous
EXPORT_SYMBOL(flush_hash_pages)
_ASM_NOKPROBE_SYMBOL(flush_hash_pages)

/*
 * Flush an entry from the TLB
 */
_GLOBAL(_tlbie)
#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,11
	mfmsr	r10
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
	blr
_ASM_NOKPROBE_SYMBOL(_tlbie)

/*
 * Flush the entire TLB. 603/603e only
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,10
	mfmsr	r10
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
#endif /* CONFIG_SMP */
	li	r5, 32
	lis	r4, KERNELBASE@h
	mtctr	r5
	sync
0:	tlbie	r4
	addi	r4, r4, 0x1000
	bdnz	0b
	sync
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	isync
#endif /* CONFIG_SMP */
	blr
_ASM_NOKPROBE_SYMBOL(_tlbia)