xref: /OK3568_Linux_fs/kernel/drivers/soc/bcm/brcmstb/biuctrl.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom STB SoCs Bus Unit Interface controls
 *
 * Copyright (C) 2015, Broadcom Corporation
 */

#define pr_fmt(fmt)	"brcmstb: " KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
#include <linux/soc/brcmstb/brcmstb.h>

#define RACENPREF_MASK			0x3
#define RACPREFINST_SHIFT		0
#define RACENINST_SHIFT			2
#define RACPREFDATA_SHIFT		4
#define RACENDATA_SHIFT			6
#define RAC_CPU_SHIFT			8
#define RACCFG_MASK			0xff
#define DPREF_LINE_2_SHIFT		24
#define DPREF_LINE_2_MASK		0xff

/* Bitmask to enable instruction and data prefetching with a 256-byte stride */
#define RAC_DATA_INST_EN_MASK		(1 << RACPREFINST_SHIFT | \
					 RACENPREF_MASK << RACENINST_SHIFT | \
					 1 << RACPREFDATA_SHIFT | \
					 RACENPREF_MASK << RACENDATA_SHIFT)

#define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK	0x70000000
#define CPU_CREDIT_REG_MCPx_READ_CRED_MASK	0xf
#define CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK	0xf
#define CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(x)	((x) * 8)
#define CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(x)	(((x) * 8) + 4)

#define CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(x)	((x) * 8)
#define CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK		0xff

#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_THRESHOLD_MASK	0xf
#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_MASK		0xf
#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT	4
#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_ENABLE		BIT(8)

static void __iomem *cpubiuctrl_base;
static bool mcp_wr_pairing_en;
static const int *cpubiuctrl_regs;

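/*
 * Indices into the per-CPU register offset tables below.  An offset of -1
 * marks a register that does not exist on a given CPU/SoC.
 */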
enum cpubiuctrl_regs {
	CPU_CREDIT_REG = 0,
	CPU_MCP_FLOW_REG,
	CPU_WRITEBACK_CTRL_REG,
	RAC_CONFIG0_REG,
	RAC_CONFIG1_REG,
	NUM_CPU_BIUCTRL_REGS,
};

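/*
 * Register accessors: absent registers (offset -1) are skipped, and the
 * RAC_CONFIG registers are left untouched when CONFIG_CACHE_B15_RAC is
 * enabled, since the B15/B53 RAC driver handles them in that case.
 */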
static inline u32 cbc_readl(int reg)
{
	int offset = cpubiuctrl_regs[reg];

	if (offset == -1 ||
	    (IS_ENABLED(CONFIG_CACHE_B15_RAC) && reg >= RAC_CONFIG0_REG))
		return (u32)-1;

	return readl_relaxed(cpubiuctrl_base + offset);
}

static inline void cbc_writel(u32 val, int reg)
{
	int offset = cpubiuctrl_regs[reg];

	if (offset == -1 ||
	    (IS_ENABLED(CONFIG_CACHE_B15_RAC) && reg >= RAC_CONFIG0_REG))
		return;

	writel(val, cpubiuctrl_base + offset);
}

static const int b15_cpubiuctrl_regs[] = {
	[CPU_CREDIT_REG] = 0x184,
	[CPU_MCP_FLOW_REG] = -1,
	[CPU_WRITEBACK_CTRL_REG] = -1,
	[RAC_CONFIG0_REG] = -1,
	[RAC_CONFIG1_REG] = -1,
};

/* Odd cases, e.g.: 7260A0 */
static const int b53_cpubiuctrl_no_wb_regs[] = {
	[CPU_CREDIT_REG] = 0x0b0,
	[CPU_MCP_FLOW_REG] = 0x0b4,
	[CPU_WRITEBACK_CTRL_REG] = -1,
	[RAC_CONFIG0_REG] = 0x78,
	[RAC_CONFIG1_REG] = 0x7c,
};

static const int b53_cpubiuctrl_regs[] = {
	[CPU_CREDIT_REG] = 0x0b0,
	[CPU_MCP_FLOW_REG] = 0x0b4,
	[CPU_WRITEBACK_CTRL_REG] = 0x22c,
	[RAC_CONFIG0_REG] = 0x78,
	[RAC_CONFIG1_REG] = 0x7c,
};

static const int a72_cpubiuctrl_regs[] = {
	[CPU_CREDIT_REG] = 0x18,
	[CPU_MCP_FLOW_REG] = 0x1c,
	[CPU_WRITEBACK_CTRL_REG] = 0x20,
	[RAC_CONFIG0_REG] = 0x08,
	[RAC_CONFIG1_REG] = 0x0c,
};

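/*
 * Enable or disable MCP write pairing according to the "brcm,write-pairing"
 * Device Tree property read in setup_hifcpubiuctrl_regs().
 */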
static int __init mcp_write_pairing_set(void)
{
	u32 creds = 0;

	if (!cpubiuctrl_base)
		return -1;

	creds = cbc_readl(CPU_CREDIT_REG);
	if (mcp_wr_pairing_en) {
		pr_info("MCP: Enabling write pairing\n");
		cbc_writel(creds | CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
			   CPU_CREDIT_REG);
	} else if (creds & CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK) {
		pr_info("MCP: Disabling write pairing\n");
		cbc_writel(creds & ~CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
			   CPU_CREDIT_REG);
	} else {
		pr_info("MCP: Write pairing already disabled\n");
	}

	return 0;
}

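/*
 * Chip family IDs (as matched with BRCM_ID()) for which the MCP credit and
 * writeback throttling tuning in mcp_a72_b53_set() applies.
 */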
static const u32 a72_b53_mach_compat[] = {
	0x7211,
	0x7216,
	0x72164,
	0x72165,
	0x7255,
	0x7260,
	0x7268,
	0x7271,
	0x7278,
};

/* The read-ahead cache present in the Brahma-B53 CPU is a special piece of
 * hardware after the integrated L2 cache of the B53 CPU complex whose purpose
 * is to prefetch instruction and/or data with a line size of either 64 bytes
 * or 256 bytes. The rationale is that the data-bus of the CPU interface is
 * optimized for 256-byte transactions, and enabling the read-ahead cache
 * provides a significant performance boost (typically twice the performance
 * for a memcpy benchmark application).
 *
 * The read-ahead cache is transparent for Virtual Address cache maintenance
 * operations: IC IVAU, DC IVAC, DC CVAC, DC CVAU and DC CIVAC.  So no special
 * handling is needed for the DMA API above and beyond what is included in the
 * arm64 implementation.
 *
 * In addition, since the Point of Unification is typically between L1 and L2
 * for the Brahma-B53 processor, no special read-ahead cache handling is
 * needed for the IC IALLU and IC IALLUIS cache maintenance operations.
 *
 * However, it is not possible to specify the cache level (L3) for the cache
 * maintenance instructions operating by set/way to operate on the read-ahead
 * cache.  The read-ahead cache will maintain coherency when inner cache lines
 * are cleaned by set/way, but if it is necessary to invalidate inner cache
 * lines by set/way to maintain coherency with system masters operating on
 * shared memory that does not have hardware support for coherency, then it
 * will also be necessary to explicitly invalidate the read-ahead cache.
 */
static void __init a72_b53_rac_enable_all(struct device_node *np)
{
	unsigned int cpu;
	u32 enable = 0, pref_dist, shift;

	if (IS_ENABLED(CONFIG_CACHE_B15_RAC))
		return;

	if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
		return;

	pref_dist = cbc_readl(RAC_CONFIG1_REG);
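	/*
	 * Build the per-CPU prefetch enable mask.  On Cortex-A72 the data
	 * prefetch field is programmed differently (3 instead of 1) and the
	 * per-CPU DPREF_LINE_2 bit is set in RAC_CONFIG1.
	 */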
	for_each_possible_cpu(cpu) {
		shift = cpu * RAC_CPU_SHIFT + RACPREFDATA_SHIFT;
		enable |= RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT);
		if (cpubiuctrl_regs == a72_cpubiuctrl_regs) {
			enable &= ~(RACENPREF_MASK << shift);
			enable |= 3 << shift;
			pref_dist |= 1 << (cpu + DPREF_LINE_2_SHIFT);
		}
	}

	cbc_writel(enable, RAC_CONFIG0_REG);
	cbc_writel(pref_dist, RAC_CONFIG1_REG);

	pr_info("%pOF: Broadcom %s read-ahead cache\n",
		np, cpubiuctrl_regs == a72_cpubiuctrl_regs ?
		"Cortex-A72" : "Brahma-B53");
}

static void __init mcp_a72_b53_set(void)
{
	unsigned int i;
	u32 reg;

	reg = brcmstb_get_family_id();

	for (i = 0; i < ARRAY_SIZE(a72_b53_mach_compat); i++) {
		if (BRCM_ID(reg) == a72_b53_mach_compat[i])
			break;
	}

	if (i == ARRAY_SIZE(a72_b53_mach_compat))
		return;

	/* Set all 3 MCP interfaces to 8 credits */
	reg = cbc_readl(CPU_CREDIT_REG);
	for (i = 0; i < 3; i++) {
		reg &= ~(CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK <<
			 CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i));
		reg &= ~(CPU_CREDIT_REG_MCPx_READ_CRED_MASK <<
			 CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i));
		reg |= 8 << CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i);
		reg |= 8 << CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i);
	}
	cbc_writel(reg, CPU_CREDIT_REG);

	/* Max out the number of in-flight Jwords reads on the MCP interface */
	reg = cbc_readl(CPU_MCP_FLOW_REG);
	for (i = 0; i < 3; i++)
		reg |= CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK <<
			CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(i);
	cbc_writel(reg, CPU_MCP_FLOW_REG);

	/* Enable writeback throttling, set timeout to 128 cycles, 256 cycles
	 * threshold
	 */
	reg = cbc_readl(CPU_WRITEBACK_CTRL_REG);
	reg |= CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_ENABLE;
	reg &= ~CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_THRESHOLD_MASK;
	reg &= ~(CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_MASK <<
		 CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT);
	reg |= 8;
	reg |= 7 << CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT;
	cbc_writel(reg, CPU_WRITEBACK_CTRL_REG);
}

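/*
 * Map the CPU BIU control block, read the "brcm,write-pairing" property and
 * pick the register offset table that matches the boot CPU.  7260 rev A0
 * parts use the layout without a writeback control register.
 */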
static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
{
	struct device_node *cpu_dn;
	u32 family_id;
	int ret = 0;

	cpubiuctrl_base = of_iomap(np, 0);
	if (!cpubiuctrl_base) {
		pr_err("failed to remap BIU control base\n");
		ret = -ENOMEM;
		goto out;
	}

	mcp_wr_pairing_en = of_property_read_bool(np, "brcm,write-pairing");

	cpu_dn = of_get_cpu_node(0, NULL);
	if (!cpu_dn) {
		pr_err("failed to obtain CPU device node\n");
		ret = -ENODEV;
		goto out;
	}

	if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
		cpubiuctrl_regs = b15_cpubiuctrl_regs;
	else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
		cpubiuctrl_regs = b53_cpubiuctrl_regs;
	else if (of_device_is_compatible(cpu_dn, "arm,cortex-a72"))
		cpubiuctrl_regs = a72_cpubiuctrl_regs;
	else {
		pr_err("unsupported CPU\n");
		ret = -EINVAL;
	}
	of_node_put(cpu_dn);

	family_id = brcmstb_get_family_id();
	if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
		cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
	of_node_put(np);
	return ret;
}

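/*
 * Save and restore the BIU control registers across system suspend/resume
 * via syscore operations.
 */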
#ifdef CONFIG_PM_SLEEP
static u32 cpubiuctrl_reg_save[NUM_CPU_BIUCTRL_REGS];

static int brcmstb_cpu_credit_reg_suspend(void)
{
	unsigned int i;

	if (!cpubiuctrl_base)
		return 0;

	for (i = 0; i < NUM_CPU_BIUCTRL_REGS; i++)
		cpubiuctrl_reg_save[i] = cbc_readl(i);

	return 0;
}

static void brcmstb_cpu_credit_reg_resume(void)
{
	unsigned int i;

	if (!cpubiuctrl_base)
		return;

	for (i = 0; i < NUM_CPU_BIUCTRL_REGS; i++)
		cbc_writel(cpubiuctrl_reg_save[i], i);
}

static struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
	.suspend = brcmstb_cpu_credit_reg_suspend,
	.resume = brcmstb_cpu_credit_reg_resume,
};
#endif

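/*
 * Early init: locate the CPU BIU control node, program write pairing, the
 * read-ahead cache and MCP credits, and register the suspend/resume hooks.
 */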
static int __init brcmstb_biuctrl_init(void)
{
	struct device_node *np;
	int ret;

	/* We might be running on a multi-platform kernel, don't make this a
	 * fatal error, just bail out early
	 */
	np = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
	if (!np)
		return 0;

	ret = setup_hifcpubiuctrl_regs(np);
	if (ret)
		return ret;

	ret = mcp_write_pairing_set();
	if (ret) {
		pr_err("MCP: Unable to disable write pairing!\n");
		return ret;
	}

	a72_b53_rac_enable_all(np);
	mcp_a72_b53_set();
#ifdef CONFIG_PM_SLEEP
	register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
#endif
	return 0;
}
early_initcall(brcmstb_biuctrl_init);