xref: /OK3568_Linux_fs/kernel/drivers/bus/qcom-ebi2.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Qualcomm External Bus Interface 2 (EBI2) driver
4*4882a593Smuzhiyun  * an older version of the Qualcomm Parallel Interface Controller (QPIC)
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (C) 2016 Linaro Ltd.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Author: Linus Walleij <linus.walleij@linaro.org>
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * See the device tree bindings for this block for more details on the
11*4882a593Smuzhiyun  * hardware.
12*4882a593Smuzhiyun  */
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include <linux/module.h>
15*4882a593Smuzhiyun #include <linux/clk.h>
16*4882a593Smuzhiyun #include <linux/err.h>
17*4882a593Smuzhiyun #include <linux/io.h>
18*4882a593Smuzhiyun #include <linux/of.h>
19*4882a593Smuzhiyun #include <linux/of_platform.h>
20*4882a593Smuzhiyun #include <linux/init.h>
21*4882a593Smuzhiyun #include <linux/slab.h>
22*4882a593Smuzhiyun #include <linux/platform_device.h>
23*4882a593Smuzhiyun #include <linux/bitops.h>
24*4882a593Smuzhiyun 
/*
 * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
 * The multi-bit expansions are parenthesized so the masks behave
 * correctly in any expression context (e.g. "val & MASK").
 */
#define EBI2_CS0_ENABLE_MASK (BIT(0)|BIT(1))
#define EBI2_CS1_ENABLE_MASK (BIT(2)|BIT(3))
#define EBI2_CS2_ENABLE_MASK BIT(4)
#define EBI2_CS3_ENABLE_MASK BIT(5)
#define EBI2_CS4_ENABLE_MASK (BIT(6)|BIT(7))
#define EBI2_CS5_ENABLE_MASK (BIT(8)|BIT(9))
#define EBI2_CSN_MASK GENMASK(9, 0)
35*4882a593Smuzhiyun 
#define EBI2_XMEM_CFG 0x0000 /* Power management etc */

/*
 * SLOW CSn CFG
 *
 * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
 *             memory continues to drive the data bus after OE is de-asserted.
 *             Inserted when reading one CS and switching to another CS or read
 *             followed by write on the same CS. Valid values 0 thru 15.
 * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
 *             every write minimum 1. The data out is driven from the time WE is
 *             asserted until CS is asserted. With a hold of 1, the CS stays
 *             active for 1 extra cycle etc. Valid values 0 thru 15.
 * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
 *             write to a page or burst memory
 * Bits 15-8:  RD_DELTA initial latency for read cycles inserted for the first
 *             read to a page or burst memory
 * Bits 7-4:   WR_WAIT number of wait cycles for every write access, 0=1 cycle
 *             so 1 thru 16 cycles.
 * Bits 3-0:   RD_WAIT number of wait cycles for every read access, 0=1 cycle
 *             so 1 thru 16 cycles.
 */
#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
#define EBI2_XMEM_CS5_SLOW_CFG 0x001C

/* Bit-field start positions within the SLOW CSn CFG registers above */
#define EBI2_XMEM_RECOVERY_SHIFT	28
#define EBI2_XMEM_WR_HOLD_SHIFT		24
#define EBI2_XMEM_WR_DELTA_SHIFT	16
#define EBI2_XMEM_RD_DELTA_SHIFT	8
#define EBI2_XMEM_WR_WAIT_SHIFT		4
#define EBI2_XMEM_RD_WAIT_SHIFT		0
71*4882a593Smuzhiyun 
/*
 * FAST CSn CFG
 * Bits 31-28: ?
 * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
 *             transfer. For a single read transfer this will be the time
 *             from CS assertion to OE assertion.
 * Bits 18-24: ?
 * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
 *             assertion, with respect to the cycle where ADV is asserted.
 *             2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
 * Bits 5:     ADDR_HOLD_ENA, The address is held for an extra cycle to meet
 *             hold time requirements with ADV assertion.
 *
 * The manual mentions "write precharge cycles" and "precharge cycles".
 * We have not been able to figure out which bit fields these correspond to
 * in the hardware, or what valid values exist. The current hypothesis is that
 * this is something just used on the FAST chip selects. There is also a "byte
 * device enable" flag somewhere for 8bit memories.
 */
#define EBI2_XMEM_CS0_FAST_CFG 0x0028
#define EBI2_XMEM_CS1_FAST_CFG 0x002C
#define EBI2_XMEM_CS2_FAST_CFG 0x0030
#define EBI2_XMEM_CS3_FAST_CFG 0x0034
#define EBI2_XMEM_CS4_FAST_CFG 0x0038
#define EBI2_XMEM_CS5_FAST_CFG 0x003C

/* Bit-field start positions within the FAST CSn CFG registers above */
#define EBI2_XMEM_RD_HOLD_SHIFT		24
#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT	16
#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT	5
101*4882a593Smuzhiyun 
/**
 * struct cs_data - struct with info on a chipselect setting
 * @enable_mask: mask to enable the chipselect in the EBI2 config
 * @slow_cfg: offset to XMEMC slow CS config
 * @fast_cfg: offset to XMEMC fast CS config
 */
struct cs_data {
	u32 enable_mask;
	u16 slow_cfg;
	u16 fast_cfg;
};
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun static const struct cs_data cs_info[] = {
115*4882a593Smuzhiyun 	{
116*4882a593Smuzhiyun 		/* CS0 */
117*4882a593Smuzhiyun 		.enable_mask = EBI2_CS0_ENABLE_MASK,
118*4882a593Smuzhiyun 		.slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
119*4882a593Smuzhiyun 		.fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
120*4882a593Smuzhiyun 	},
121*4882a593Smuzhiyun 	{
122*4882a593Smuzhiyun 		/* CS1 */
123*4882a593Smuzhiyun 		.enable_mask = EBI2_CS1_ENABLE_MASK,
124*4882a593Smuzhiyun 		.slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
125*4882a593Smuzhiyun 		.fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
126*4882a593Smuzhiyun 	},
127*4882a593Smuzhiyun 	{
128*4882a593Smuzhiyun 		/* CS2 */
129*4882a593Smuzhiyun 		.enable_mask = EBI2_CS2_ENABLE_MASK,
130*4882a593Smuzhiyun 		.slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
131*4882a593Smuzhiyun 		.fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
132*4882a593Smuzhiyun 	},
133*4882a593Smuzhiyun 	{
134*4882a593Smuzhiyun 		/* CS3 */
135*4882a593Smuzhiyun 		.enable_mask = EBI2_CS3_ENABLE_MASK,
136*4882a593Smuzhiyun 		.slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
137*4882a593Smuzhiyun 		.fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
138*4882a593Smuzhiyun 	},
139*4882a593Smuzhiyun 	{
140*4882a593Smuzhiyun 		/* CS4 */
141*4882a593Smuzhiyun 		.enable_mask = EBI2_CS4_ENABLE_MASK,
142*4882a593Smuzhiyun 		.slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
143*4882a593Smuzhiyun 		.fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
144*4882a593Smuzhiyun 	},
145*4882a593Smuzhiyun 	{
146*4882a593Smuzhiyun 		/* CS5 */
147*4882a593Smuzhiyun 		.enable_mask = EBI2_CS5_ENABLE_MASK,
148*4882a593Smuzhiyun 		.slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
149*4882a593Smuzhiyun 		.fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
150*4882a593Smuzhiyun 	},
151*4882a593Smuzhiyun };
152*4882a593Smuzhiyun 
/**
 * struct ebi2_xmem_prop - describes an XMEM config property
 * @prop: the device tree binding name
 * @max: maximum value for the property
 * @slowreg: true if this property is in the SLOW CS config register
 * else it is assumed to be in the FAST config register
 * @shift: the bit field start in the SLOW or FAST register for this
 * property
 */
struct ebi2_xmem_prop {
	const char *prop;
	u32 max;
	bool slowreg;
	u16 shift;
};
168*4882a593Smuzhiyun 
/*
 * All configurable XMEMC timing properties. Each entry maps one optional
 * "qcom,xmem-*" device tree property to a bit field in either the SLOW or
 * the FAST chipselect config register. An entry with .max == 1 is treated
 * as a boolean flag by qcom_ebi2_setup_chipselect().
 */
static const struct ebi2_xmem_prop xmem_props[] = {
	{
		.prop = "qcom,xmem-recovery-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-hold-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_HOLD_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-address-hold-enable",
		.max = 1, /* boolean prop */
		.slowreg = false,
		.shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
	},
	{
		.prop = "qcom,xmem-adv-to-oe-recovery-cycles",
		.max = 3,
		.slowreg = false,
		.shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-hold-cycles",
		.max = 15,
		.slowreg = false,
		.shift = EBI2_XMEM_RD_HOLD_SHIFT,
	},
};
225*4882a593Smuzhiyun 
/**
 * qcom_ebi2_setup_chipselect() - enable and configure one chipselect
 * @np: device tree node of the child device using this chipselect
 * @dev: the EBI2 bus device, used for diagnostics only
 * @ebi2_base: remapped EBI2 config register holding the CS enable bits
 * @ebi2_xmem: remapped base of the XMEMC configuration registers
 * @csindex: chipselect index 0..5 (the caller validates the range)
 *
 * Sets the enable bits for the chipselect in the EBI2 config register,
 * then builds the SLOW and FAST XMEMC configuration words from the
 * optional "qcom,xmem-*" DT properties and writes each word that ended
 * up non-zero. Absent properties are simply skipped; out-of-range
 * values are capped at the property maximum with an error message.
 */
static void qcom_ebi2_setup_chipselect(struct device_node *np,
				       struct device *dev,
				       void __iomem *ebi2_base,
				       void __iomem *ebi2_xmem,
				       u32 csindex)
{
	const struct cs_data *csd;
	u32 slowcfg, fastcfg;
	u32 val;
	int ret;
	int i;

	csd = &cs_info[csindex];
	val = readl(ebi2_base);
	val |= csd->enable_mask;
	writel(val, ebi2_base);
	dev_dbg(dev, "enabled CS%u\n", csindex);

	/* Next set up the XMEMC */
	slowcfg = 0;
	fastcfg = 0;

	for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
		const struct ebi2_xmem_prop *xp = &xmem_props[i];

		/* All are regular u32 values */
		ret = of_property_read_u32(np, xp->prop, &val);
		if (ret) {
			/* All timing properties are optional */
			dev_dbg(dev, "could not read %s for CS%u\n",
				xp->prop, csindex);
			continue;
		}

		/* First check boolean props */
		if (xp->max == 1 && val) {
			if (xp->slowreg)
				slowcfg |= BIT(xp->shift);
			else
				fastcfg |= BIT(xp->shift);
			dev_dbg(dev, "set %s flag\n", xp->prop);
			continue;
		}

		/* We're dealing with an u32 */
		if (val > xp->max) {
			dev_err(dev,
				"too high value for %s: %u, capped at %u\n",
				xp->prop, val, xp->max);
			val = xp->max;
		}
		if (xp->slowreg)
			slowcfg |= (val << xp->shift);
		else
			fastcfg |= (val << xp->shift);
		dev_dbg(dev, "set %s to %u\n", xp->prop, val);
	}

	dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
		 csindex, slowcfg, fastcfg);

	/* Only touch the registers we actually built a config word for */
	if (slowcfg)
		writel(slowcfg, ebi2_xmem + csd->slow_cfg);
	if (fastcfg)
		writel(fastcfg, ebi2_xmem + csd->fast_cfg);
}
291*4882a593Smuzhiyun 
qcom_ebi2_probe(struct platform_device * pdev)292*4882a593Smuzhiyun static int qcom_ebi2_probe(struct platform_device *pdev)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun 	struct device_node *np = pdev->dev.of_node;
295*4882a593Smuzhiyun 	struct device_node *child;
296*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
297*4882a593Smuzhiyun 	struct resource *res;
298*4882a593Smuzhiyun 	void __iomem *ebi2_base;
299*4882a593Smuzhiyun 	void __iomem *ebi2_xmem;
300*4882a593Smuzhiyun 	struct clk *ebi2xclk;
301*4882a593Smuzhiyun 	struct clk *ebi2clk;
302*4882a593Smuzhiyun 	bool have_children = false;
303*4882a593Smuzhiyun 	u32 val;
304*4882a593Smuzhiyun 	int ret;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	ebi2xclk = devm_clk_get(dev, "ebi2x");
307*4882a593Smuzhiyun 	if (IS_ERR(ebi2xclk))
308*4882a593Smuzhiyun 		return PTR_ERR(ebi2xclk);
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	ret = clk_prepare_enable(ebi2xclk);
311*4882a593Smuzhiyun 	if (ret) {
312*4882a593Smuzhiyun 		dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
313*4882a593Smuzhiyun 		return ret;
314*4882a593Smuzhiyun 	}
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	ebi2clk = devm_clk_get(dev, "ebi2");
317*4882a593Smuzhiyun 	if (IS_ERR(ebi2clk)) {
318*4882a593Smuzhiyun 		ret = PTR_ERR(ebi2clk);
319*4882a593Smuzhiyun 		goto err_disable_2x_clk;
320*4882a593Smuzhiyun 	}
321*4882a593Smuzhiyun 
322*4882a593Smuzhiyun 	ret = clk_prepare_enable(ebi2clk);
323*4882a593Smuzhiyun 	if (ret) {
324*4882a593Smuzhiyun 		dev_err(dev, "could not enable EBI2 clk\n");
325*4882a593Smuzhiyun 		goto err_disable_2x_clk;
326*4882a593Smuzhiyun 	}
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
329*4882a593Smuzhiyun 	ebi2_base = devm_ioremap_resource(dev, res);
330*4882a593Smuzhiyun 	if (IS_ERR(ebi2_base)) {
331*4882a593Smuzhiyun 		ret = PTR_ERR(ebi2_base);
332*4882a593Smuzhiyun 		goto err_disable_clk;
333*4882a593Smuzhiyun 	}
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
336*4882a593Smuzhiyun 	ebi2_xmem = devm_ioremap_resource(dev, res);
337*4882a593Smuzhiyun 	if (IS_ERR(ebi2_xmem)) {
338*4882a593Smuzhiyun 		ret = PTR_ERR(ebi2_xmem);
339*4882a593Smuzhiyun 		goto err_disable_clk;
340*4882a593Smuzhiyun 	}
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	/* Allegedly this turns the power save mode off */
343*4882a593Smuzhiyun 	writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	/* Disable all chipselects */
346*4882a593Smuzhiyun 	val = readl(ebi2_base);
347*4882a593Smuzhiyun 	val &= ~EBI2_CSN_MASK;
348*4882a593Smuzhiyun 	writel(val, ebi2_base);
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 	/* Walk over the child nodes and see what chipselects we use */
351*4882a593Smuzhiyun 	for_each_available_child_of_node(np, child) {
352*4882a593Smuzhiyun 		u32 csindex;
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 		/* Figure out the chipselect */
355*4882a593Smuzhiyun 		ret = of_property_read_u32(child, "reg", &csindex);
356*4882a593Smuzhiyun 		if (ret) {
357*4882a593Smuzhiyun 			of_node_put(child);
358*4882a593Smuzhiyun 			return ret;
359*4882a593Smuzhiyun 		}
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 		if (csindex > 5) {
362*4882a593Smuzhiyun 			dev_err(dev,
363*4882a593Smuzhiyun 				"invalid chipselect %u, we only support 0-5\n",
364*4882a593Smuzhiyun 				csindex);
365*4882a593Smuzhiyun 			continue;
366*4882a593Smuzhiyun 		}
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 		qcom_ebi2_setup_chipselect(child,
369*4882a593Smuzhiyun 					   dev,
370*4882a593Smuzhiyun 					   ebi2_base,
371*4882a593Smuzhiyun 					   ebi2_xmem,
372*4882a593Smuzhiyun 					   csindex);
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 		/* We have at least one child */
375*4882a593Smuzhiyun 		have_children = true;
376*4882a593Smuzhiyun 	}
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun 	if (have_children)
379*4882a593Smuzhiyun 		return of_platform_default_populate(np, NULL, dev);
380*4882a593Smuzhiyun 	return 0;
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun err_disable_clk:
383*4882a593Smuzhiyun 	clk_disable_unprepare(ebi2clk);
384*4882a593Smuzhiyun err_disable_2x_clk:
385*4882a593Smuzhiyun 	clk_disable_unprepare(ebi2xclk);
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun 	return ret;
388*4882a593Smuzhiyun }
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun static const struct of_device_id qcom_ebi2_of_match[] = {
391*4882a593Smuzhiyun 	{ .compatible = "qcom,msm8660-ebi2", },
392*4882a593Smuzhiyun 	{ .compatible = "qcom,apq8060-ebi2", },
393*4882a593Smuzhiyun 	{ }
394*4882a593Smuzhiyun };
395*4882a593Smuzhiyun 
/*
 * Note: there is no .remove callback, so the clocks enabled in
 * qcom_ebi2_probe() stay on for the lifetime of a successfully
 * probed device.
 */
static struct platform_driver qcom_ebi2_driver = {
	.probe = qcom_ebi2_probe,
	.driver = {
		.name = "qcom-ebi2",
		.of_match_table = qcom_ebi2_of_match,
	},
};
module_platform_driver(qcom_ebi2_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Qualcomm EBI2 driver");
MODULE_LICENSE("GPL");
407