// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#define pr_fmt(fmt)		"uniphier: " fmt

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC		0x0	/* Control Register */
#define    UNIPHIER_SSCC_BST			BIT(20)	/* UCWG burst read */
#define    UNIPHIER_SSCC_ACT			BIT(19)	/* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG			BIT(18)	/* WT gathering on */
#define    UNIPHIER_SSCC_PRD			BIT(17)	/* enable pre-fetch */
#define    UNIPHIER_SSCC_ON			BIT(0)	/* enable cache */
#define UNIPHIER_SSCLPDAWCR	0x30	/* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR	0x34	/* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID		0x0	/* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE		0x244	/* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH		0x2	/* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC		0x8	/* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
#define UNIPHIER_SSCOQM		0x248	/* Cache Operation Queue Mode */
#define    UNIPHIER_SSCOQM_S_MASK		(0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE		(0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL		(0x1 << 17)
#define    UNIPHIER_SSCOQM_CE			BIT(15)	/* notify completion */
#define    UNIPHIER_SSCOQM_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH		0x2	/* flush */
#define UNIPHIER_SSCOQAD	0x24c	/* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ	0x250	/* Cache Operation Queue Size */
#define UNIPHIER_SSCOPPQSEF	0x25c	/* Cache Operation Queue Set Complete */
#define    UNIPHIER_SSCOPPQSEF_FE		BIT(1)
#define    UNIPHIER_SSCOPPQSEF_OE		BIT(0)
#define UNIPHIER_SSCOLPQS	0x260	/* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF			BIT(2)
#define    UNIPHIER_SSCOLPQS_EST		BIT(1)
#define    UNIPHIER_SSCOLPQS_QST		BIT(0)

/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)

/**
 * uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_ctrl_base: virtual base address of active way control registers
 * @way_mask: each bit specifies if the way is present
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
	void __iomem *ctrl_base;
	void __iomem *rev_base;
	void __iomem *op_base;
	void __iomem *way_ctrl_base;
	u32 way_mask;
	u32 nsets;
	u32 line_size;
	u32 range_op_max_size;
	struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is taken for the access to the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
	/* This sequence need not be atomic.  Do not disable IRQ. */
	writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
		       data->op_base + UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
					  unsigned long start,
					  unsigned long size,
					  u32 operation)
{
	unsigned long flags;

	/*
	 * No spin lock is necessary here because:
	 *
	 * [1] This outer cache controller is able to accept maintenance
	 * operations from multiple CPUs at a time in an SMP system; if a
	 * maintenance operation is under way and another operation is issued,
	 * the new one is stored in the queue.  The controller performs one
	 * operation after another.  If the queue is full, the status register,
	 * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
	 * failed.  The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
	 * different instances for each CPU, i.e. each CPU can track the status
	 * of the maintenance operations triggered by itself.
	 *
	 * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
	 * SSCOQWN}, are shared between multiple CPUs, but the hardware still
	 * guarantees the registration sequence is atomic; the write accesses
	 * to them are arbitrated by the hardware.  The first accessor to the
	 * register, UNIPHIER_SSCOQM, holds the access right and it is released
	 * by reading the status register, UNIPHIER_SSCOPPQSEF.  While one CPU
	 * is holding the access right, other CPUs fail to register operations.
	 * One CPU should not hold the access right for a long time, so local
	 * IRQs should be disabled during the following sequence.
	 */
	local_irq_save(flags);

	/* clear the complete notification flag */
	writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

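	/*
	 * Retry while the queue registration fails, i.e. while the queue is
	 * full or another CPU is holding the access right to UNIPHIER_SSCOQM.
	 */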
	do {
		/* set cache operation */
		writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
			       data->op_base + UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
			writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
			writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
		}
	} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
		      UNIPHIER_SSCOLPQS_EF))
		cpu_relax();

	local_irq_restore(flags);
}

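/**
 * __uniphier_cache_maint_all - run a maintenance operation for the whole cache
 *
 * @data: cache controller specific data
 * @operation: flags to specify the desired cache operation
 */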
static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
				       u32 operation)
{
	__uniphier_cache_maint_common(data, 0, 0,
				      UNIPHIER_SSCOQM_S_ALL | operation);

	__uniphier_cache_sync(data);
}

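/**
 * __uniphier_cache_maint_range - run a maintenance operation for an address
 * range, split into chunks the operation queue can accept
 *
 * @data: cache controller specific data
 * @start: start address of the range (rounded down to a cache line boundary)
 * @end: end address of the range (rounded up to a cache line boundary)
 * @operation: flags to specify the desired cache operation
 */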
static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
					 unsigned long start, unsigned long end,
					 u32 operation)
{
	unsigned long size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line
	 */
	start = start & ~(data->line_size - 1);

	size = end - start;

	if (unlikely(size >= (unsigned long)(-data->line_size))) {
		/*
		 * The size is so large that the ALIGN() below would wrap
		 * around; fall back to an operation on the entire cache.
		 */
		__uniphier_cache_maint_all(data, operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line
	 */
	size = ALIGN(size, data->line_size);

	while (size) {
		unsigned long chunk_size = min_t(unsigned long, size,
						 data->range_op_max_size);

		__uniphier_cache_maint_common(data, start, chunk_size,
					UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	__uniphier_cache_sync(data);
}

static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
	u32 val = 0;

	if (on)
		val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

	writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

static void __init __uniphier_cache_set_active_ways(
					struct uniphier_cache_data *data)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
}

static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
				       u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

static void uniphier_cache_disable(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry_reverse(data, &uniphier_cache_list, list)
		__uniphier_cache_enable(data, false);

	uniphier_cache_flush_all();
}

static void __init uniphier_cache_enable(void)
{
	struct uniphier_cache_data *data;

	uniphier_cache_inv_all();

	list_for_each_entry(data, &uniphier_cache_list, list) {
		__uniphier_cache_enable(data, true);
		__uniphier_cache_set_active_ways(data);
	}
}

static void uniphier_cache_sync(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_sync(data);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
	{ .compatible = "socionext,uniphier-system-cache" },
	{ /* sentinel */ }
};

static int __init __uniphier_cache_init(struct device_node *np,
					unsigned int *cache_level)
{
	struct uniphier_cache_data *data;
	u32 level, cache_size;
	struct device_node *next_np;
	int ret = 0;

	if (!of_match_node(uniphier_cache_match, np)) {
		pr_err("L%d: not compatible with uniphier cache\n",
		       *cache_level);
		return -EINVAL;
	}

	if (of_property_read_u32(np, "cache-level", &level)) {
		pr_err("L%d: cache-level is not specified\n", *cache_level);
		return -EINVAL;
	}

	if (level != *cache_level) {
		pr_err("L%d: cache-level is unexpected value %d\n",
		       *cache_level, level);
		return -EINVAL;
	}

	if (!of_property_read_bool(np, "cache-unified")) {
		pr_err("L%d: cache-unified is not specified\n", *cache_level);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
	    !is_power_of_2(data->line_size)) {
		pr_err("L%d: cache-line-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
	    !is_power_of_2(data->nsets)) {
		pr_err("L%d: cache-sets is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-size", &cache_size) ||
	    cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
		pr_err("L%d: cache-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
				 0);

	data->ctrl_base = of_iomap(np, 0);
	if (!data->ctrl_base) {
		pr_err("L%d: failed to map control register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->rev_base = of_iomap(np, 1);
	if (!data->rev_base) {
		pr_err("L%d: failed to map revision register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->op_base = of_iomap(np, 2);
	if (!data->op_base) {
		pr_err("L%d: failed to map operation register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

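	/* default active way control offset; SoC-specific cases follow below */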
	data->way_ctrl_base = data->ctrl_base + 0xc00;

	if (*cache_level == 2) {
		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
		/*
		 * The size of range operation is limited to (1 << 22) or less
		 * for PH-sLD8 or older SoCs.
		 */
		if (revision <= 0x16)
			data->range_op_max_size = (u32)1 << 22;

		/*
		 * Unfortunately, the offset address of active way control base
		 * varies from SoC to SoC.
		 */
		switch (revision) {
		case 0x11:	/* sLD3 */
			data->way_ctrl_base = data->ctrl_base + 0x870;
			break;
		case 0x12:	/* LD4 */
		case 0x16:	/* sld8 */
			data->way_ctrl_base = data->ctrl_base + 0x840;
			break;
		default:
			break;
		}
	}

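	/*
	 * If no limit was set above, this u32 subtraction wraps around to a
	 * huge line-aligned value, i.e. effectively no chunk size limit.
	 */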
	data->range_op_max_size -= data->line_size;

	INIT_LIST_HEAD(&data->list);
	list_add_tail(&data->list, &uniphier_cache_list); /* no mutex */

	/*
	 * OK, this level has been successfully initialized.  Look for the next
	 * level cache.  Do not roll back even if the initialization of the
	 * next level cache fails because we want to continue with available
	 * cache levels.
	 */
	next_np = of_find_next_cache_node(np);
	if (next_np) {
		(*cache_level)++;
		ret = __uniphier_cache_init(next_np, cache_level);
	}
	of_node_put(next_np);

	return ret;
err:
	iounmap(data->op_base);
	iounmap(data->rev_base);
	iounmap(data->ctrl_base);
	kfree(data);

	return ret;
}

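/**
 * uniphier_cache_init - initialize the outer cache hierarchy
 *
 * Looks up the L2 cache node in the device tree, initializes each cache
 * level from there outwards, installs the outer cache callbacks, and
 * enables the caches.
 */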
int __init uniphier_cache_init(void)
{
	struct device_node *np = NULL;
	unsigned int cache_level;
	int ret = 0;

	/* look for level 2 cache */
	while ((np = of_find_matching_node(np, uniphier_cache_match)))
		if (!of_property_read_u32(np, "cache-level", &cache_level) &&
		    cache_level == 2)
			break;

	if (!np)
		return -ENODEV;

	ret = __uniphier_cache_init(np, &cache_level);
	of_node_put(np);

	if (ret) {
		/*
		 * Error out only if the L2 initialization fails.  Continue
		 * despite errors on L3 or outer levels because they are
		 * optional.
		 */
		if (cache_level == 2) {
			pr_err("failed to initialize L2 cache\n");
			return ret;
		}

		cache_level--;
		ret = 0;
	}

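	/* hook the maintenance operations into the ARM outer cache framework */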
	outer_cache.inv_range = uniphier_cache_inv_range;
	outer_cache.clean_range = uniphier_cache_clean_range;
	outer_cache.flush_range = uniphier_cache_flush_range;
	outer_cache.flush_all = uniphier_cache_flush_all;
	outer_cache.disable = uniphier_cache_disable;
	outer_cache.sync = uniphier_cache_sync;

	uniphier_cache_enable();

	pr_info("enabled outer cache (cache level: %d)\n", cache_level);

	return ret;
}