/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of data cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size) - 4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- maximum way index (#ways - 1) */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- maximum set index (#sets - 1) */
	/* x12 <- cache level << 1 */
	/* x2  <- line length offset */
	/* x3  <- number of cache ways - 1 */
	/* x4  <- number of cache sets - 1 */
	/* x5  <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9			/* invalidate by set/way */
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection
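/*
 * A worked example of the set/way operand built above (the cache
 * geometry is assumed for illustration, not mandated by this code):
 * for a 4-way, 256-set data cache with 64-byte lines, CCSIDR_EL1
 * reports LineSize = 2, so x2 = 2 + 4 = 6, x3 = 3 and x5 = clz(3) = 30,
 * and the operand passed to "dc cisw" is
 *
 *	(way << 30) | (set << 6) | (level << 1)
 *
 * i.e. the way index sits in the top bits, the set index starts at the
 * line-size offset and the cache level occupies bits [3:1], matching
 * the DC CISW operand layout described in the ARMv8 ARM.
 */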

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data caches by SET/WAY.
 */
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc (level of coherence) */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache only */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	isb
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf		/* x3 <- ctr_el0.dminline */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start to a cache line */
1:	dc	civac, x0		/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection

/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfm	x3, x3, #16, #19	/* x3 <- ctr_el0.dminline */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start to a cache line */
1:	dc	ivac, x0		/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	isb
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection
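/*
 * Both range helpers above read their stride from CTR_EL0.DminLine
 * (bits [19:16]), the log2 of the smallest data cache line in words,
 * so "4 << DminLine" is the line size in bytes; e.g. DminLine = 4
 * gives 4 << 4 = 64 bytes (a common value, assumed here only for
 * illustration). The start address is rounded down to that line size,
 * and since "dc ivac" discards dirty data on any line it touches,
 * callers of the invalidate variant should pass line-aligned buffers.
 */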

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis			/* invalidate icache, inner shareable */
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I in SCTLR to disable the MMU and caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection
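/*
 * A minimal sketch of a typical C-side caller of __asm_switch_ttbr
 * (the prototypes follow the comments in this file; new_tables and
 * tables_size are illustrative names, not symbols defined here):
 *
 *	extern void __asm_flush_dcache_range(ulong start, ulong end);
 *	extern void __asm_switch_ttbr(ulong new_ttbr);
 *
 *	// Push the freshly written page tables out to memory, then
 *	// switch to them. __asm_switch_ttbr disables the MMU and
 *	// caches, invalidates the TLBs, writes TTBR0 and restores the
 *	// original SCTLR, so the caller only has to make sure the new
 *	// tables have been written back to memory first.
 *	__asm_flush_dcache_range((ulong)new_tables,
 *				 (ulong)new_tables + tables_size);
 *	__asm_switch_ttbr((ulong)new_tables);
 */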