xref: /OK3568_Linux_fs/kernel/arch/csky/mm/cachev2.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #include <linux/spinlock.h>
5*4882a593Smuzhiyun #include <linux/smp.h>
6*4882a593Smuzhiyun #include <linux/mm.h>
7*4882a593Smuzhiyun #include <asm/cache.h>
8*4882a593Smuzhiyun #include <asm/barrier.h>
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun /* for L1-cache */
11*4882a593Smuzhiyun #define INS_CACHE		(1 << 0)
12*4882a593Smuzhiyun #define DATA_CACHE		(1 << 1)
13*4882a593Smuzhiyun #define CACHE_INV		(1 << 4)
14*4882a593Smuzhiyun #define CACHE_CLR		(1 << 5)
15*4882a593Smuzhiyun #define CACHE_OMS		(1 << 6)
16*4882a593Smuzhiyun 
/*
 * Invalidate the entire L1 instruction cache of the calling CPU.
 *
 * @priv: unused; the (void *) signature allows this function to be
 *        passed directly to on_each_cpu()-style cross-call helpers.
 */
void local_icache_inv_all(void *priv)
{
	/* cr17 is the cache-operation control register (see flag defines above). */
	mtcr("cr17", INS_CACHE|CACHE_INV);
	sync_is();	/* barrier: make sure the invalidate has taken effect */
}
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #ifdef CONFIG_CPU_HAS_ICACHE_INS
icache_inv_range(unsigned long start,unsigned long end)24*4882a593Smuzhiyun void icache_inv_range(unsigned long start, unsigned long end)
25*4882a593Smuzhiyun {
26*4882a593Smuzhiyun 	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun 	for (; i < end; i += L1_CACHE_BYTES)
29*4882a593Smuzhiyun 		asm volatile("icache.iva %0\n"::"r"(i):"memory");
30*4882a593Smuzhiyun 	sync_is();
31*4882a593Smuzhiyun }
32*4882a593Smuzhiyun #else
33*4882a593Smuzhiyun struct cache_range {
34*4882a593Smuzhiyun 	unsigned long start;
35*4882a593Smuzhiyun 	unsigned long end;
36*4882a593Smuzhiyun };
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun static DEFINE_SPINLOCK(cache_lock);
39*4882a593Smuzhiyun 
/*
 * Perform one cache operation by control register: cr22 carries the
 * target address, cr17 the operation flags (presumably the cr17 write
 * is what triggers the operation — hence address first).  Callers hold
 * cache_lock so the two writes are not interleaved with another CPU's.
 */
static inline void cache_op_line(unsigned long i, unsigned int val)
{
	mtcr("cr22", i);
	mtcr("cr17", val);
}
45*4882a593Smuzhiyun 
local_icache_inv_range(void * priv)46*4882a593Smuzhiyun void local_icache_inv_range(void *priv)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun 	struct cache_range *param = priv;
49*4882a593Smuzhiyun 	unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
50*4882a593Smuzhiyun 	unsigned long flags;
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun 	spin_lock_irqsave(&cache_lock, flags);
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	for (; i < param->end; i += L1_CACHE_BYTES)
55*4882a593Smuzhiyun 		cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	spin_unlock_irqrestore(&cache_lock, flags);
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	sync_is();
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun 
icache_inv_range(unsigned long start,unsigned long end)62*4882a593Smuzhiyun void icache_inv_range(unsigned long start, unsigned long end)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun 	struct cache_range param = { start, end };
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 	if (irqs_disabled())
67*4882a593Smuzhiyun 		local_icache_inv_range(&param);
68*4882a593Smuzhiyun 	else
69*4882a593Smuzhiyun 		on_each_cpu(local_icache_inv_range, &param, 1);
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun #endif
72*4882a593Smuzhiyun 
/*
 * Write back (clean) the single L1 data cache line containing @start,
 * by virtual address.
 */
inline void dcache_wb_line(unsigned long start)
{
	asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
	sync_is();	/* barrier: make sure the writeback has taken effect */
}
78*4882a593Smuzhiyun 
dcache_wb_range(unsigned long start,unsigned long end)79*4882a593Smuzhiyun void dcache_wb_range(unsigned long start, unsigned long end)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun 	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	for (; i < end; i += L1_CACHE_BYTES)
84*4882a593Smuzhiyun 		asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
85*4882a593Smuzhiyun 	sync_is();
86*4882a593Smuzhiyun }
87*4882a593Smuzhiyun 
/*
 * Write back the data cache, then invalidate the instruction cache,
 * for [start, end).  The order matters: new instructions must be in
 * memory before the icache is forced to refetch them.
 */
void cache_wbinv_range(unsigned long start, unsigned long end)
{
	dcache_wb_range(start, end);
	icache_inv_range(start, end);
}
EXPORT_SYMBOL(cache_wbinv_range);
94*4882a593Smuzhiyun 
dma_wbinv_range(unsigned long start,unsigned long end)95*4882a593Smuzhiyun void dma_wbinv_range(unsigned long start, unsigned long end)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun 	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	for (; i < end; i += L1_CACHE_BYTES)
100*4882a593Smuzhiyun 		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
101*4882a593Smuzhiyun 	sync_is();
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun 
dma_inv_range(unsigned long start,unsigned long end)104*4882a593Smuzhiyun void dma_inv_range(unsigned long start, unsigned long end)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun 	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	for (; i < end; i += L1_CACHE_BYTES)
109*4882a593Smuzhiyun 		asm volatile("dcache.iva %0\n"::"r"(i):"memory");
110*4882a593Smuzhiyun 	sync_is();
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun 
dma_wb_range(unsigned long start,unsigned long end)113*4882a593Smuzhiyun void dma_wb_range(unsigned long start, unsigned long end)
114*4882a593Smuzhiyun {
115*4882a593Smuzhiyun 	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	for (; i < end; i += L1_CACHE_BYTES)
118*4882a593Smuzhiyun 		asm volatile("dcache.cva %0\n"::"r"(i):"memory");
119*4882a593Smuzhiyun 	sync_is();
120*4882a593Smuzhiyun }
121