// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH	1
#endif

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int op, const int full_page);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->line_len)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->line_len)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));

	return buf;
}

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;


	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c) {
		ioc_exists = 1;

		/*
		 * As of today we don't support both IOC and ZONE_HIGHMEM enabled
		 * simultaneously. This is because the IOC aperture covers
		 * only ZONE_NORMAL (low mem) and any dma transactions outside this
		 * region won't be HW coherent.
		 * If we want to use both IOC and ZONE_HIGHMEM we can use
		 * bounce buffers to handle dma transactions to HIGHMEM.
		 * Also it is possible to modify dma_direct cache ops or increase IOC
		 * aperture size if we are planning to use HIGHMEM without PAE.
		 */
		if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
			ioc_enable = 0;
	} else {
		ioc_enable = 0;
	}

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->vipt = 1;
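	/*
	 * A VIPT cache can alias when its way-size (sz / assoc) exceeds the
	 * page size: e.g. a 32K 2-way cache with 8K pages has a 16K way-size,
	 * giving 2 possible aliases per line (cf. the MMU v1/v2 note below).
	 */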
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future-safe range, and moreover these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of using this "clumsy"
 * interface for the additional info was that no new reg was needed in the CDU
 * programming model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from a hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers, IC_PTAG and DC_PTAG, were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
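
/*
 * Illustration (8K pages, so PAGE_SHIFT == 13): a 32K 2-way I-cache has a
 * 16K way-size, i.e. 2 possible aliases, so only vaddr bit 13 is needed.
 * __cache_line_loop_v2() below hands it (and up to bits [17:13]) to the CDU
 * by stuffing "(vaddr >> PAGE_SHIFT) & 0x1F" into paddr bits [4:0].
 */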

static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr aligned to cache line and an integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

/*
 * For ARC700 MMUv3 I-cache and D-cache flushes
 *  - ARC700 programming model requires paddr and vaddr be passed in separate
 *    AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
 *    caches actually alias or not.
 *  - For HS38, only the aliasing I-cache configuration uses the PTAG reg
 *    (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
 */
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr aligned to cache line and an integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

#ifndef USE_RGN_FLSH

/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
 * Here's how cache ops are implemented
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher
 * bits need to be written into PTAG_HI
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr aligned to cache line and an integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (op == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#else

/*
 * optimized flush operation which takes a region as opposed to iterating per line
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int s, e;

	/* Only for Non aliasing I-cache in HS38 */
	if (op == OP_INV_IC) {
		s = ARC_REG_IC_IVIR;
		e = ARC_REG_IC_ENDR;
	} else {
		s = ARC_REG_DC_STARTR;
		e = ARC_REG_DC_ENDR;
	}

	if (!full_page) {
		/* for any leading gap between @paddr and start of cache line */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;

		/*
		 * account for any trailing gap to end of cache line
		 * this is equivalent to DIV_ROUND_UP() in line ops above
		 */
		sz += L1_CACHE_BYTES - 1;
	}

	if (is_pae40_enabled()) {
		/* TBD: check if crossing 4TB boundary */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	/* ENDR needs to be set ahead of START */
	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
	write_aux_reg(s, paddr);

	/* caller waits on DC_CTRL.FS */
}

#endif

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

#ifndef USE_RGN_FLSH
/*
 * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
 * in the non-region-flush regime (such as for ARCompact)
 */
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

#else

static inline void __before_dc_op(const int op)
{
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int val = read_aux_reg(ctl);

	if (op == OP_FLUSH_N_INV) {
		val |= DC_CTRL_INV_MODE_FLUSH;
	}

	if (op != OP_INV_IC) {
		/*
		 * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
		 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
		 */
		val &= ~DC_CTRL_RGN_OP_MSK;
		if (op & OP_INV)
			val |= DC_CTRL_RGN_OP_INV;
	}
	write_aux_reg(ctl, val);
}

#endif


static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

static inline void __dc_disable(void)
{
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}

static void __dc_enable(void)
{
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op, full_page);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;
	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	end = paddr + sz + l2_line_sz - 1;
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);

	const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
	unsigned int ctrl, cmd;
	unsigned long flags;
	int num_lines;

	spin_lock_irqsave(&lock, flags);

	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;

	sz += paddr & ~SLC_LINE_MASK;
	paddr &= SLC_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l2_line_sz);

	while (num_lines-- > 0) {
		write_aux_reg(cmd, paddr);
		paddr += l2_line_sz;
	}

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

#define slc_op(paddr, sz, op)	slc_op_rgn(paddr, sz, op)

noinline static void slc_entire_op(const int op)
{
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(r);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY);
}

static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}

static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping_file(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address, thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence the need for a loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 *    use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	phys_addr_t paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
	__flush_dcache_page((phys_addr_t)page_address(page),
			    (phys_addr_t)page_address(page));
}

#endif
1070*4882a593Smuzhiyun
copy_user_highpage(struct page * to,struct page * from,unsigned long u_vaddr,struct vm_area_struct * vma)1071*4882a593Smuzhiyun void copy_user_highpage(struct page *to, struct page *from,
1072*4882a593Smuzhiyun unsigned long u_vaddr, struct vm_area_struct *vma)
1073*4882a593Smuzhiyun {
1074*4882a593Smuzhiyun void *kfrom = kmap_atomic(from);
1075*4882a593Smuzhiyun void *kto = kmap_atomic(to);
1076*4882a593Smuzhiyun int clean_src_k_mappings = 0;
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun /*
1079*4882a593Smuzhiyun * If SRC page was already mapped in userspace AND it's U-mapping is
1080*4882a593Smuzhiyun * not congruent with K-mapping, sync former to physical page so that
1081*4882a593Smuzhiyun * K-mapping in memcpy below, sees the right data
1082*4882a593Smuzhiyun *
1083*4882a593Smuzhiyun * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
1084*4882a593Smuzhiyun * equally valid for SRC page as well
1085*4882a593Smuzhiyun *
1086*4882a593Smuzhiyun * For !VIPT cache, all of this gets compiled out as
1087*4882a593Smuzhiyun * addr_not_cache_congruent() is 0
1088*4882a593Smuzhiyun */
1089*4882a593Smuzhiyun if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
1090*4882a593Smuzhiyun __flush_dcache_page((unsigned long)kfrom, u_vaddr);
1091*4882a593Smuzhiyun clean_src_k_mappings = 1;
1092*4882a593Smuzhiyun }
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun copy_page(kto, kfrom);
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun /*
1097*4882a593Smuzhiyun * Mark DST page K-mapping as dirty for a later finalization by
1098*4882a593Smuzhiyun * update_mmu_cache(). Although the finalization could have been done
1099*4882a593Smuzhiyun * here as well (given that both vaddr/paddr are available).
1100*4882a593Smuzhiyun * But update_mmu_cache() already has code to do that for other
1101*4882a593Smuzhiyun * non copied user pages (e.g. read faults which wire in pagecache page
1102*4882a593Smuzhiyun * directly).
1103*4882a593Smuzhiyun */
1104*4882a593Smuzhiyun clear_bit(PG_dc_clean, &to->flags);
1105*4882a593Smuzhiyun
1106*4882a593Smuzhiyun /*
1107*4882a593Smuzhiyun * if SRC was already usermapped and non-congruent to kernel mapping
1108*4882a593Smuzhiyun * sync the kernel mapping back to physical page
1109*4882a593Smuzhiyun */
1110*4882a593Smuzhiyun if (clean_src_k_mappings) {
1111*4882a593Smuzhiyun __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
1112*4882a593Smuzhiyun set_bit(PG_dc_clean, &from->flags);
1113*4882a593Smuzhiyun } else {
1114*4882a593Smuzhiyun clear_bit(PG_dc_clean, &from->flags);
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun kunmap_atomic(kto);
1118*4882a593Smuzhiyun kunmap_atomic(kfrom);
1119*4882a593Smuzhiyun }
1120*4882a593Smuzhiyun
clear_user_page(void * to,unsigned long u_vaddr,struct page * page)1121*4882a593Smuzhiyun void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
1122*4882a593Smuzhiyun {
1123*4882a593Smuzhiyun clear_page(to);
1124*4882a593Smuzhiyun clear_bit(PG_dc_clean, &page->flags);
1125*4882a593Smuzhiyun }
1126*4882a593Smuzhiyun EXPORT_SYMBOL(clear_user_page);
1127*4882a593Smuzhiyun
/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
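
/*
 * Rough userspace usage sketch (illustrative only, not part of this file):
 * a JIT that just emitted code into a buffer would invoke this via the raw
 * syscall interface, assuming the arch exports __NR_cacheflush and with
 * 'buf'/'len' as placeholder names:
 *
 *	syscall(__NR_cacheflush, (uint32_t)(uintptr_t)buf, len, 0);
 *
 * The 'flags' argument is currently ignored, since the implementation above
 * simply flushes all caches.
 */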

/*
 * IO-Coherency (IOC) setup rules:
 *
 * 1. Needs to be done at system level, so only once by the Master core.
 *    Non-Masters need not be accessing caches at that time:
 *    - they are either HALT_ON_RESET and kick-started much later, or
 *    - if run on reset, arc_platform_smp_wait_to_boot() must not perturb
 *      caches or the coherency unit.
 *
 * 2. Caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
 *    otherwise any straggler data might behave strangely once IOC is enabled.
 *
 * 3. All caches need to be disabled when setting up IOC to avoid any
 *    in-flight coherency transactions.
 */
noinline void __init arc_ioc_setup(void)
{
	unsigned int ioc_base, mem_sz;

	/*
	 * If IOC was already enabled (e.g. by the bootloader), it technically
	 * needs to be reconfigured with an aperture base/size corresponding to
	 * the Linux memory map, which will certainly differ from U-Boot's.
	 * But disabling and re-enabling IOC while DMA might potentially be
	 * active is tricky business. To avoid random memory issues later,
	 * just panic here and ask the user to upgrade to a bootloader which
	 * doesn't enable IOC.
	 */
	if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
		panic("IOC already enabled, please upgrade bootloader!\n");

	if (!ioc_enable)
		return;

	/* Flush + invalidate + disable L1 dcache */
	__dc_disable();

	/* Flush + invalidate SLC */
	if (read_aux_reg(ARC_REG_SLC_BCR))
		slc_entire_op(OP_FLUSH_N_INV);

	/*
	 * currently IOC Aperture covers entire DDR
	 * TBD: fix for PGU + 1GB of low mem
	 * TBD: fix for PAE
	 */
	mem_sz = arc_get_mem_sz();

	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
		panic("IOC Aperture size must be power of 2 larger than 4KB");

	/*
	 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512MB, 0x12 implies 1GB...
	 */
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);
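
	/*
	 * Worked example of the encoding above: for mem_sz = 512MB,
	 * mem_sz >> 10 = 512K = 2^19, so order_base_2() yields 19 and the
	 * register is programmed with 19 - 2 = 17 = 0x11, i.e.
	 * 2^(17+2) KB = 512MB, matching the decode rule.
	 */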

	/* for now assume kernel base is start of IOC aperture */
	ioc_base = CONFIG_LINUX_RAM_BASE;

	if (ioc_base % mem_sz != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

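	/*
	 * Program the aperture base, which the register takes in units of
	 * 4KB (hence the >> 12 below), then set the PARTIAL and ENABLE
	 * control bits.
	 */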
	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
	write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);

	/* Re-enable L1 dcache */
	__dc_enable();
}

/*
 * Cache related boot time checks/setups only needed on master CPU:
 * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
 *   Assume SMP only, so all cores will have the same cache config; a check
 *   on one core suffices for all.
 * - IOC setup / dma callbacks only need to be done once
 */
void __init arc_cache_init_master(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
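		/*
		 * Number of cache colors = (bytes per way) / PAGE_SIZE:
		 * sz_k / assoc gives the per-way size in KB, which is then
		 * divided by the page size in KB below.
		 */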
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	/*
	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
	 * than or equal to any cache line length.
	 */
	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
			 "SMP_CACHE_BYTES must be >= any cache line length");
	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
		panic("L2 Cache line [%d] > kernel Config [%d]\n",
		      l2_line_sz, SMP_CACHE_BYTES);

	/* Note that SLC disable not formally supported till HS 3.0 */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_exists)
		arc_ioc_setup();

	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
	/*
	 * In case of IOC (say the IOC+SLC case), the pointers above could
	 * still be set but end up not being relevant, as the first function
	 * in the chain is not called at all for devices using coherent DMA:
	 *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
	 */
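
	/*
	 * For reference, a rough sketch (not code from this file): on the
	 * streaming-DMA path the arch sync hooks (see arch/arc/mm/dma.c)
	 * dispatch by direction to the helpers wired up above, roughly:
	 *
	 *	arch_sync_dma_for_device(paddr, size, DMA_TO_DEVICE)
	 *		-> dma_cache_wback()  -> __dma_cache_wback()
	 *	arch_sync_dma_for_cpu(paddr, size, DMA_FROM_DEVICE)
	 *		-> dma_cache_inv()    -> __dma_cache_inv()
	 */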
}

void __ref arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!cpu)
		arc_cache_init_master();

	/*
	 * In the PAE regime, TLB and cache maintenance ops take wider
	 * addresses, and even if PAE is not enabled in the kernel, the upper
	 * 32 bits still need to be zeroed to keep the ops sane.
	 * As an optimization for the more common !PAE case, zero them out
	 * once at init rather than checking/setting them to 0 for every
	 * runtime op.
	 */
	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {

		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);

		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);

		if (l2_line_sz) {
			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
		}
	}
}