/* xref: /OK3568_Linux_fs/kernel/arch/arm/mach-mvebu/board-v7.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
/*
 * Device Tree support for Armada 370 and XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/io.h>
#include <linux/clocksource.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/mbus.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/smp_scu.h>
#include "armada-370-xp.h"
#include "common.h"
#include "coherency.h"
#include "mvebu-soc-id.h"

static void __iomem *scu_base;

/*
 * Enables the SCU when available. Obviously, this is only useful on
 * Cortex-A based SOCs, not on PJ4B based ones.
 */
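/*
 * The SCU (Snoop Control Unit) is the Cortex-A9 MPCore block that keeps
 * the per-CPU L1 data caches coherent; it must be up before secondary
 * CPUs are brought online.
 */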
static void __init mvebu_scu_enable(void)
{
	struct device_node *np =
		of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
	if (np) {
		scu_base = of_iomap(np, 0);
		scu_enable(scu_base);
		of_node_put(np);
	}
}

void __iomem *mvebu_get_scu_base(void)
{
	return scu_base;
}

/*
 * When returning from suspend, the platform goes through the
 * bootloader, which executes its DDR3 training code. This code has
 * the unfortunate idea of using the first 10 KB of each DRAM bank to
 * exercise the RAM and calculate the optimal timings. Therefore, this
 * area of RAM is overwritten, and shouldn't be used by the kernel if
 * suspend/resume is supported.
 */

#ifdef CONFIG_SUSPEND
#define MVEBU_DDR_TRAINING_AREA_SZ (10 * SZ_1K)
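/*
 * As an illustration (hypothetical bank layout): with DRAM banks at
 * 0x00000000 and 0x40000000, the scan below reserves the first
 * MVEBU_DDR_TRAINING_AREA_SZ (0x2800) bytes of each bank, i.e.
 * [0x00000000, 0x00002800) and [0x40000000, 0x40002800).
 */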
static int __init mvebu_scan_mem(unsigned long node, const char *uname,
				 int depth, void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *reg, *endp;
	int l;

	if (type == NULL || strcmp(type, "memory"))
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(__be32));
	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base, size;

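		/*
		 * Decode both the base and size cells so that reg
		 * advances to the next (base, size) pair; only the base
		 * is used, since just the start of each bank needs to
		 * be reserved.
		 */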
		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		memblock_reserve(base, MVEBU_DDR_TRAINING_AREA_SZ);
	}

	return 0;
}

static void __init mvebu_memblock_reserve(void)
{
	of_scan_flat_dt(mvebu_scan_mem, NULL);
}
#else
static void __init mvebu_memblock_reserve(void) {}
#endif

static void __init mvebu_init_irq(void)
{
	irqchip_init();
	mvebu_scu_enable();
	coherency_init();
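	/*
	 * The MBus driver is told whether hardware I/O coherency is
	 * available (presumably so it can program its windows
	 * accordingly); failing to set it up this early is fatal.
	 */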
	BUG_ON(mvebu_mbus_dt_init(coherency_available()));
}

static void __init i2c_quirk(void)
{
	struct device_node *np;
	u32 dev, rev;

	/*
	 * Only revisions more recent than A0 support the offload
	 * mechanism. We can exit only if we are sure that we can
	 * get the SoC revision and it is more recent than A0.
	 */
	if (mvebu_get_soc_id(&dev, &rev) == 0 && rev > MV78XX0_A0_REV)
		return;

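	/*
	 * On A0 silicon (or when the revision could not be determined),
	 * downgrade the I2C nodes to the "marvell,mv78230-a0-i2c"
	 * compatible so that the i2c-mv64xxx driver does not try to use
	 * the offload engine.
	 */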
	for_each_compatible_node(np, NULL, "marvell,mv78230-i2c") {
		struct property *new_compat;

		new_compat = kzalloc(sizeof(*new_compat), GFP_KERNEL);

		new_compat->name = kstrdup("compatible", GFP_KERNEL);
		new_compat->length = sizeof("marvell,mv78230-a0-i2c");
		new_compat->value = kstrdup("marvell,mv78230-a0-i2c",
						GFP_KERNEL);

		of_update_property(np, new_compat);
	}
}

static void __init mvebu_dt_init(void)
{
	if (of_machine_is_compatible("marvell,armadaxp"))
		i2c_quirk();
}

static void __init armada_370_xp_dt_fixup(void)
{
#ifdef CONFIG_SMP
	smp_set_ops(smp_ops(armada_xp_smp_ops));
#endif
}

static const char * const armada_370_xp_dt_compat[] __initconst = {
	"marvell,armada-370-xp",
	NULL,
};

DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.init_machine	= mvebu_dt_init,
	.init_irq       = mvebu_init_irq,
	.restart	= mvebu_restart,
	.reserve        = mvebu_memblock_reserve,
	.dt_compat	= armada_370_xp_dt_compat,
	.dt_fixup	= armada_370_xp_dt_fixup,
MACHINE_END

static const char * const armada_375_dt_compat[] __initconst = {
	"marvell,armada375",
	NULL,
};

DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.init_irq       = mvebu_init_irq,
	.init_machine	= mvebu_dt_init,
	.restart	= mvebu_restart,
	.dt_compat	= armada_375_dt_compat,
MACHINE_END

static const char * const armada_38x_dt_compat[] __initconst = {
	"marvell,armada380",
	"marvell,armada385",
	NULL,
};

DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.init_irq       = mvebu_init_irq,
	.restart	= mvebu_restart,
	.dt_compat	= armada_38x_dt_compat,
MACHINE_END

static const char * const armada_39x_dt_compat[] __initconst = {
	"marvell,armada390",
	"marvell,armada398",
	NULL,
};

DT_MACHINE_START(ARMADA_39X_DT, "Marvell Armada 39x (Device Tree)")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.init_irq       = mvebu_init_irq,
	.restart	= mvebu_restart,
	.dt_compat	= armada_39x_dt_compat,
MACHINE_END