xref: /OK3568_Linux_fs/kernel/arch/arm/mach-hisi/platmcpm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2013-2014 Linaro Ltd.
4*4882a593Smuzhiyun  * Copyright (c) 2013-2014 Hisilicon Limited.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun #include <linux/init.h>
7*4882a593Smuzhiyun #include <linux/smp.h>
8*4882a593Smuzhiyun #include <linux/delay.h>
9*4882a593Smuzhiyun #include <linux/io.h>
10*4882a593Smuzhiyun #include <linux/memblock.h>
11*4882a593Smuzhiyun #include <linux/of_address.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <asm/cputype.h>
14*4882a593Smuzhiyun #include <asm/cp15.h>
15*4882a593Smuzhiyun #include <asm/cacheflush.h>
16*4882a593Smuzhiyun #include <asm/smp.h>
17*4882a593Smuzhiyun #include <asm/smp_plat.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include "core.h"
20*4882a593Smuzhiyun 
/* bits definition in SC_CPU_RESET_REQ[x]/SC_CPU_RESET_DREQ[x]
 * 1 -- unreset; 0 -- reset
 *
 * Arguments are fully parenthesized so that expressions such as
 * CORE_RESET_BIT(a | b) expand correctly (CERT PRE01-C).
 */
#define CORE_RESET_BIT(x)		(1 << (x))
#define NEON_RESET_BIT(x)		(1 << ((x) + 4))
#define CORE_DEBUG_RESET_BIT(x)		(1 << ((x) + 9))
#define CLUSTER_L2_RESET_BIT		(1 << 8)
#define CLUSTER_DEBUG_RESET_BIT		(1 << 13)

/*
 * bits definition in SC_CPU_RESET_STATUS[x]
 * 1 -- reset status; 0 -- unreset status
 */
#define CORE_RESET_STATUS(x)		(1 << (x))
#define NEON_RESET_STATUS(x)		(1 << ((x) + 4))
#define CORE_DEBUG_RESET_STATUS(x)	(1 << ((x) + 9))
#define CLUSTER_L2_RESET_STATUS		(1 << 8)
#define CLUSTER_DEBUG_RESET_STATUS	(1 << 13)
#define CORE_WFI_STATUS(x)		(1 << ((x) + 16))
#define CORE_WFE_STATUS(x)		(1 << ((x) + 20))
#define CORE_DEBUG_ACK(x)		(1 << ((x) + 24))

/* System controller register offsets; x is the cluster index (8 bytes apart). */
#define SC_CPU_RESET_REQ(x)		(0x520 + ((x) << 3))	/* reset */
#define SC_CPU_RESET_DREQ(x)		(0x524 + ((x) << 3))	/* unreset */
#define SC_CPU_RESET_STATUS(x)		(0x1520 + ((x) << 3))

/* Fabric register offsets: snoop-filter mode and invalidate control. */
#define FAB_SF_MODE			0x0c
#define FAB_SF_INVLD			0x10

/* bits definition in FB_SF_INVLD */
#define FB_SF_INVLD_START		(1 << 8)

/* HiP04 topology: up to 4 clusters of 4 Cortex-A15 cores each. */
#define HIP04_MAX_CLUSTERS		4
#define HIP04_MAX_CPUS_PER_CLUSTER	4

/* Polling interval and overall timeout used by hip04_cpu_kill(). */
#define POLL_MSEC	10
#define TIMEOUT_MSEC	1000
58*4882a593Smuzhiyun 
/* MMIO bases mapped once in hip04_smp_init(): system controller and fabric. */
static void __iomem *sysctrl, *fabric;
/*
 * Per-CPU bring-up count, protected by boot_lock.
 * 0 = core is down; 1 = core is up; a transient value > 1 means a
 * power-up request raced with a power-down (see hip04_cpu_die()).
 */
static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
static DEFINE_SPINLOCK(boot_lock);
/*
 * Physical address of the fabric block; explicitly cache-synced in
 * hip04_smp_init() — presumably read by firmware/bootwrapper code running
 * with caches off (TODO confirm against the bootwrapper sources).
 */
static u32 fabric_phys_addr;
/*
 * "boot-method" property from the hisilicon,hip04-bootwrapper DT node:
 * [0]: bootwrapper physical address
 * [1]: bootwrapper size
 * [2]: relocation address
 * [3]: relocation size
 */
static u32 hip04_boot_method[4];
70*4882a593Smuzhiyun 
hip04_cluster_is_down(unsigned int cluster)71*4882a593Smuzhiyun static bool hip04_cluster_is_down(unsigned int cluster)
72*4882a593Smuzhiyun {
73*4882a593Smuzhiyun 	int i;
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	for (i = 0; i < HIP04_MAX_CPUS_PER_CLUSTER; i++)
76*4882a593Smuzhiyun 		if (hip04_cpu_table[cluster][i])
77*4882a593Smuzhiyun 			return false;
78*4882a593Smuzhiyun 	return true;
79*4882a593Smuzhiyun }
80*4882a593Smuzhiyun 
hip04_set_snoop_filter(unsigned int cluster,unsigned int on)81*4882a593Smuzhiyun static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on)
82*4882a593Smuzhiyun {
83*4882a593Smuzhiyun 	unsigned long data;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	if (!fabric)
86*4882a593Smuzhiyun 		BUG();
87*4882a593Smuzhiyun 	data = readl_relaxed(fabric + FAB_SF_MODE);
88*4882a593Smuzhiyun 	if (on)
89*4882a593Smuzhiyun 		data |= 1 << cluster;
90*4882a593Smuzhiyun 	else
91*4882a593Smuzhiyun 		data &= ~(1 << cluster);
92*4882a593Smuzhiyun 	writel_relaxed(data, fabric + FAB_SF_MODE);
93*4882a593Smuzhiyun 	do {
94*4882a593Smuzhiyun 		cpu_relax();
95*4882a593Smuzhiyun 	} while (data != readl_relaxed(fabric + FAB_SF_MODE));
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun 
/*
 * Power up secondary CPU @l_cpu (logical number) by releasing it from
 * reset via the system controller, then kick it with a wakeup IPI.
 *
 * Returns 0 on success (also when the core was already up), -ENODEV if
 * the sysctrl registers were never mapped, -EINVAL if the CPU's MPIDR
 * decodes outside the supported 4x4 cluster/core topology.
 */
static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long data;
	void __iomem *sys_dreq, *sys_status;

	/* Translate logical CPU to the hardware (cluster, core) pair. */
	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (!sysctrl)
		return -ENODEV;
	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
		return -EINVAL;

	spin_lock_irq(&boot_lock);

	/* Already up: just bump the use count under the lock. */
	if (hip04_cpu_table[cluster][cpu])
		goto out;

	sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster);
	sys_status = sysctrl + SC_CPU_RESET_STATUS(cluster);
	if (hip04_cluster_is_down(cluster)) {
		/*
		 * First core in this cluster: deassert the cluster debug
		 * reset, wait for the status bit to clear, then let the
		 * cluster participate in coherency via the snoop filter.
		 */
		data = CLUSTER_DEBUG_RESET_BIT;
		writel_relaxed(data, sys_dreq);
		do {
			cpu_relax();
			data = readl_relaxed(sys_status);
		} while (data & CLUSTER_DEBUG_RESET_STATUS);
		hip04_set_snoop_filter(cluster, 1);
	}

	/* Deassert core, NEON and core-debug resets for this core ... */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sys_dreq);
	/* ... and poll until the status register reflects the change. */
	do {
		cpu_relax();
	} while (data == readl_relaxed(sys_status));

	/*
	 * We may fail to power up core again without this delay.
	 * It's not mentioned in document. It's found by test.
	 */
	udelay(20);

	/* Wake the (now unreset) core out of WFI/WFE. */
	arch_send_wakeup_ipi_mask(cpumask_of(l_cpu));

out:
	hip04_cpu_table[cluster][cpu]++;
	spin_unlock_irq(&boot_lock);

	return 0;
}
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun #ifdef CONFIG_HOTPLUG_CPU
/*
 * Runs on the dying CPU itself: drop the bring-up count, exit cache
 * coherency, and park in WFI until hip04_cpu_kill() resets the core.
 * Never returns on the normal path.
 */
static void hip04_cpu_die(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	spin_lock(&boot_lock);
	hip04_cpu_table[cluster][cpu]--;
	if (hip04_cpu_table[cluster][cpu] == 1) {
		/* A power_up request went ahead of us. */
		spin_unlock(&boot_lock);
		return;
	} else if (hip04_cpu_table[cluster][cpu] > 1) {
		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
		BUG();
	}

	/* Are we the last core alive in this cluster? */
	last_man = hip04_cluster_is_down(cluster);
	spin_unlock(&boot_lock);
	if (last_man) {
		/* Since it's Cortex A15, disable L2 prefetching. */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
		"isb	\n\t"
		"dsb	"
		: : "r" (0x400) );
		/* Last man flushes the whole cache hierarchy ... */
		v7_exit_coherency_flush(all);
	} else {
		/* ... others flush only to the Level of Unification. */
		v7_exit_coherency_flush(louis);
	}

	/* Park here; hip04_cpu_kill() polls CORE_WFI_STATUS before reset. */
	for (;;)
		wfi();
}
190*4882a593Smuzhiyun 
/*
 * Runs on a surviving CPU: confirm that @l_cpu (which executed
 * hip04_cpu_die()) reached WFI, then put it back into reset.
 * Returns 1 on success, 0 on failure (cpu_kill convention).
 */
static int hip04_cpu_kill(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	unsigned int data, tries, count;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	BUG_ON(cluster >= HIP04_MAX_CLUSTERS ||
	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);

	count = TIMEOUT_MSEC / POLL_MSEC;
	spin_lock_irq(&boot_lock);
	for (tries = 0; tries < count; tries++) {
		/* A concurrent power-up won the race: do not reset the core. */
		if (hip04_cpu_table[cluster][cpu])
			goto err;
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		/* Core reached its wfi() parking loop in hip04_cpu_die(). */
		if (data & CORE_WFI_STATUS(cpu))
			break;
		/* Drop the lock while sleeping so power-up is not blocked. */
		spin_unlock_irq(&boot_lock);
		/* Wait for clean L2 when the whole cluster is down. */
		msleep(POLL_MSEC);
		spin_lock_irq(&boot_lock);
	}
	if (tries >= count)
		goto err;
	/* Assert core, NEON and core-debug resets for the dead core. */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sysctrl + SC_CPU_RESET_REQ(cluster));
	/* Poll (bounded) until the reset status confirms the core is held. */
	for (tries = 0; tries < count; tries++) {
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_RESET_STATUS(cpu))
			break;
	}
	if (tries >= count)
		goto err;
	/* Last core gone: remove the cluster from the snoop filter. */
	if (hip04_cluster_is_down(cluster))
		hip04_set_snoop_filter(cluster, 0);
	spin_unlock_irq(&boot_lock);
	return 1;
err:
	spin_unlock_irq(&boot_lock);
	return 0;
}
237*4882a593Smuzhiyun #endif
238*4882a593Smuzhiyun 
/* SMP operations registered by hip04_smp_init() via smp_set_ops(). */
static const struct smp_operations hip04_smp_ops __initconst = {
	.smp_boot_secondary	= hip04_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= hip04_cpu_die,
	.cpu_kill		= hip04_cpu_kill,
#endif
};
246*4882a593Smuzhiyun 
hip04_cpu_table_init(void)247*4882a593Smuzhiyun static bool __init hip04_cpu_table_init(void)
248*4882a593Smuzhiyun {
249*4882a593Smuzhiyun 	unsigned int mpidr, cpu, cluster;
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun 	mpidr = read_cpuid_mpidr();
252*4882a593Smuzhiyun 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
253*4882a593Smuzhiyun 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	if (cluster >= HIP04_MAX_CLUSTERS ||
256*4882a593Smuzhiyun 	    cpu >= HIP04_MAX_CPUS_PER_CLUSTER) {
257*4882a593Smuzhiyun 		pr_err("%s: boot CPU is out of bound!\n", __func__);
258*4882a593Smuzhiyun 		return false;
259*4882a593Smuzhiyun 	}
260*4882a593Smuzhiyun 	hip04_set_snoop_filter(cluster, 1);
261*4882a593Smuzhiyun 	hip04_cpu_table[cluster][cpu] = 1;
262*4882a593Smuzhiyun 	return true;
263*4882a593Smuzhiyun }
264*4882a593Smuzhiyun 
/*
 * Early-init entry point: parse the bootwrapper/sysctrl/fabric DT nodes,
 * map the needed MMIO regions, publish the secondary-startup address via
 * the relocation area, and register hip04_smp_ops.
 *
 * Returns 0 on success; on any failure the goto chain below unwinds
 * exactly the resources acquired so far (labels are in reverse
 * acquisition order).
 */
static int __init hip04_smp_init(void)
{
	struct device_node *np, *np_sctl, *np_fab;
	struct resource fab_res;
	void __iomem *relocation;
	int ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-bootwrapper");
	if (!np)
		goto err;
	/* boot-method layout: see the hip04_boot_method[] comment above. */
	ret = of_property_read_u32_array(np, "boot-method",
					 &hip04_boot_method[0], 4);
	if (ret)
		goto err;

	ret = -ENODEV;
	np_sctl = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
	if (!np_sctl)
		goto err;
	np_fab = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-fabric");
	if (!np_fab)
		goto err;

	/* Keep the bootwrapper image out of the kernel's memory pool. */
	ret = memblock_reserve(hip04_boot_method[0], hip04_boot_method[1]);
	if (ret)
		goto err;

	relocation = ioremap(hip04_boot_method[2], hip04_boot_method[3]);
	if (!relocation) {
		pr_err("failed to map relocation space\n");
		ret = -ENOMEM;
		goto err_reloc;
	}
	sysctrl = of_iomap(np_sctl, 0);
	if (!sysctrl) {
		pr_err("failed to get sysctrl base\n");
		ret = -ENOMEM;
		goto err_sysctrl;
	}
	ret = of_address_to_resource(np_fab, 0, &fab_res);
	if (ret) {
		pr_err("failed to get fabric base phys\n");
		goto err_fabric;
	}
	fabric_phys_addr = fab_res.start;
	/* Push the physical address to RAM for cache-off readers. */
	sync_cache_w(&fabric_phys_addr);
	fabric = of_iomap(np_fab, 0);
	if (!fabric) {
		pr_err("failed to get fabric base\n");
		ret = -ENOMEM;
		goto err_fabric;
	}

	if (!hip04_cpu_table_init()) {
		ret = -EINVAL;
		goto err_table;
	}

	/*
	 * Fill the instruction address that is used after secondary core
	 * out of reset.
	 */
	writel_relaxed(hip04_boot_method[0], relocation);
	writel_relaxed(0xa5a5a5a5, relocation + 4);	/* magic number */
	writel_relaxed(__pa_symbol(secondary_startup), relocation + 8);
	writel_relaxed(0, relocation + 12);
	iounmap(relocation);

	smp_set_ops(&hip04_smp_ops);
	return ret;
err_table:
	iounmap(fabric);
err_fabric:
	iounmap(sysctrl);
err_sysctrl:
	iounmap(relocation);
err_reloc:
	memblock_free(hip04_boot_method[0], hip04_boot_method[1]);
err:
	return ret;
}
early_initcall(hip04_smp_init);
347