xref: /OK3568_Linux_fs/kernel/arch/powerpc/mm/drmem.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Dynamic reconfiguration memory support
 *
 * Copyright 2017 IBM Corporation
 */

#define pr_fmt(fmt) "drmem: " fmt

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/drmem.h>

static int n_root_addr_cells, n_root_size_cells;

static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;

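/*
 * Return the first address beyond the highest LMB, i.e. the upper bound
 * of memory that can be dynamically reconfigured.
 */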
u64 drmem_lmb_memory_max(void)
{
	struct drmem_lmb *last_lmb;

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	return last_lmb->base_addr + drmem_lmb_size();
}

static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
{
	/*
	 * Return the value of the lmb flags field minus the reserved
	 * bit used internally for hotplug processing.
	 */
	return lmb->flags & ~DRMEM_LMB_RESERVED;
}

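/*
 * Allocate a copy of @prop with a zeroed value buffer of @prop_sz bytes;
 * the caller is expected to fill in the new value. Returns NULL if any
 * allocation fails.
 */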
static struct property *clone_property(struct property *prop, u32 prop_sz)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		kfree(new_prop->name);
		kfree(new_prop->value);
		kfree(new_prop);
		return NULL;
	}

	new_prop->length = prop_sz;
#if defined(CONFIG_OF_DYNAMIC)
	of_property_set_flag(new_prop, OF_DYNAMIC);
#endif
	return new_prop;
}

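/*
 * Rebuild the "ibm,dynamic-memory" (v1) property from the in-kernel LMB
 * array: a count followed by one of_drconf_cell_v1 per LMB, with the
 * internal DRMEM_LMB_RESERVED flag masked out.
 */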
static int drmem_update_dt_v1(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v1 *dr_cell;
	struct drmem_lmb *lmb;
	u32 *p;

	new_prop = clone_property(prop, prop->length);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(drmem_info->n_lmbs);

	dr_cell = (struct of_drconf_cell_v1 *)p;

	for_each_drmem_lmb(lmb) {
		dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
		dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
		dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
		dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));

		dr_cell++;
	}

	of_update_property(memory, new_prop);
	return 0;
}

static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				struct drmem_lmb *lmb)
{
	dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
	dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
	dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
	dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
}

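/*
 * Rebuild the "ibm,dynamic-memory-v2" property. The first pass counts the
 * LMB sets (runs of consecutive LMBs sharing an aa_index and flags); the
 * second pass emits one of_drconf_cell_v2 per set along with the number
 * of LMBs it covers.
 */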
static int drmem_update_dt_v2(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v2 *dr_cell;
	struct drmem_lmb *lmb, *prev_lmb;
	u32 lmb_sets, prop_sz, seq_lmbs;
	u32 *p;

	/* First pass, determine how many LMB sets are needed. */
	lmb_sets = 0;
	prev_lmb = NULL;
	for_each_drmem_lmb(lmb) {
		if (!prev_lmb) {
			prev_lmb = lmb;
			lmb_sets++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
			lmb_sets++;

		prev_lmb = lmb;
	}

	prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
	new_prop = clone_property(prop, prop_sz);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(lmb_sets);

	dr_cell = (struct of_drconf_cell_v2 *)p;

	/* Second pass, populate the LMB set data */
	prev_lmb = NULL;
	seq_lmbs = 0;
	for_each_drmem_lmb(lmb) {
		if (prev_lmb == NULL) {
			/* Start of first LMB set */
			prev_lmb = lmb;
			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
			/* end of one set, start of another */
			dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
			dr_cell++;

			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs = 1;
		} else {
			seq_lmbs++;
		}

		prev_lmb = lmb;
	}

	/* close out last LMB set */
	dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
	of_update_property(memory, new_prop);
	return 0;
}

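/*
 * Write the current LMB state back to the device tree, updating whichever
 * of the v1 or v2 dynamic-memory properties the
 * /ibm,dynamic-reconfiguration-memory node provides.
 */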
int drmem_update_dt(void)
{
	struct device_node *memory;
	struct property *prop;
	int rc = -1;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
	if (prop) {
		rc = drmem_update_dt_v1(memory, prop);
	} else {
		prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			rc = drmem_update_dt_v2(memory, prop);
	}

	of_node_put(memory);
	return rc;
}

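/*
 * Parse one v1 drconf cell from *prop into @lmb and advance *prop past it.
 * The reserved cell after the drc_index is skipped.
 */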
static void read_drconf_v1_cell(struct drmem_lmb *lmb,
				const __be32 **prop)
{
	const __be32 *p = *prop;

	lmb->base_addr = of_read_number(p, n_root_addr_cells);
	p += n_root_addr_cells;
	lmb->drc_index = of_read_number(p++, 1);

	p++; /* skip reserved field */

	lmb->aa_index = of_read_number(p++, 1);
	lmb->flags = of_read_number(p++, 1);

	*prop = p;
}

static int
__walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data,
		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	struct drmem_lmb lmb;
	u32 i, n_lmbs;
	int ret = 0;

	n_lmbs = of_read_number(prop++, 1);
	for (i = 0; i < n_lmbs; i++) {
		read_drconf_v1_cell(&lmb, &prop);
		ret = func(&lmb, &usm, data);
		if (ret)
			break;
	}

	return ret;
}

static void read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				const __be32 **prop)
{
	const __be32 *p = *prop;

	dr_cell->seq_lmbs = of_read_number(p++, 1);
	dr_cell->base_addr = of_read_number(p, n_root_addr_cells);
	p += n_root_addr_cells;
	dr_cell->drc_index = of_read_number(p++, 1);
	dr_cell->aa_index = of_read_number(p++, 1);
	dr_cell->flags = of_read_number(p++, 1);

	*prop = p;
}

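/*
 * Walk "ibm,dynamic-memory-v2". Each cell describes seq_lmbs contiguous
 * LMBs, so expand it into individual LMBs by stepping base_addr by the
 * LMB size and drc_index by one, calling @func for each.
 */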
static int
__walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data,
		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	struct of_drconf_cell_v2 dr_cell;
	struct drmem_lmb lmb;
	u32 i, j, lmb_sets;
	int ret = 0;

	lmb_sets = of_read_number(prop++, 1);
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &prop);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb.base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_lmb_size();

			lmb.drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb.aa_index = dr_cell.aa_index;
			lmb.flags = dr_cell.flags;

			ret = func(&lmb, &usm, data);
			if (ret)
				break;
		}
	}

	return ret;
}

#ifdef CONFIG_PPC_PSERIES
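/*
 * Early boot variant: walk the LMBs described by the flattened device
 * tree, before it has been unflattened. Also records the root
 * address/size cell counts and the LMB size for later use.
 */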
int __init walk_drmem_lmbs_early(unsigned long node, void *data,
		int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int len, ret = -ENODEV;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		return ret;

	/* Get the address & size cells */
	n_root_addr_cells = dt_root_addr_cells;
	n_root_size_cells = dt_root_size_cells;

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
					   &len);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	memblock_dump_all();
	return ret;
}

#endif

static int init_drmem_lmb_size(struct device_node *dn)
{
	const __be32 *prop;
	int len;

	if (drmem_info->lmb_size)
		return 0;

	prop = of_get_property(dn, "ibm,lmb-size", &len);
	if (!prop || len < n_root_size_cells * sizeof(__be32)) {
		pr_info("Could not determine LMB size\n");
		return -1;
	}

	drmem_info->lmb_size = of_read_number(prop, n_root_size_cells);
	return 0;
}

/*
 * Return the linux,drconf-usable-memory property if present. The property
 * exists only in kexec/kdump kernels, where it is added by kexec-tools.
 */
static const __be32 *of_get_usable_memory(struct device_node *dn)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;

	return prop;
}

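/*
 * Runtime counterpart of walk_drmem_lmbs_early(): walk the LMBs described
 * by the unflattened device tree node @dn, calling @func for each.
 */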
int walk_drmem_lmbs(struct device_node *dn, void *data,
		    int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int ret = -ENODEV;

	if (!of_root)
		return ret;

	/* Get the address & size cells */
	of_node_get(of_root);
	n_root_addr_cells = of_n_addr_cells(of_root);
	n_root_size_cells = of_n_size_cells(of_root);
	of_node_put(of_root);

	if (init_drmem_lmb_size(dn))
		return ret;

	usm = of_get_usable_memory(dn);

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	return ret;
}

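/*
 * Allocate drmem_info->lmbs from the v1 property and fill in one entry
 * per LMB.
 */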
static void __init init_drmem_v1_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;

	drmem_info->n_lmbs = of_read_number(prop++, 1);
	if (drmem_info->n_lmbs == 0)
		return;

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	for_each_drmem_lmb(lmb)
		read_drconf_v1_cell(lmb, &prop);
}

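/*
 * Build drmem_info->lmbs from the v2 property: the first pass totals the
 * LMB count from each set's seq_lmbs, the second expands every set into
 * individual drmem_lmb entries.
 */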
static void __init init_drmem_v2_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;
	struct of_drconf_cell_v2 dr_cell;
	const __be32 *p;
	u32 i, j, lmb_sets;
	int lmb_index;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	/* first pass, calculate the number of LMBs */
	p = prop;
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);
		drmem_info->n_lmbs += dr_cell.seq_lmbs;
	}

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	/* second pass, read in the LMB information */
	lmb_index = 0;
	p = prop;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb = &drmem_info->lmbs[lmb_index++];

			lmb->base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_info->lmb_size;

			lmb->drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb->aa_index = dr_cell.aa_index;
			lmb->flags = dr_cell.flags;
		}
	}
}

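/*
 * Late initcall that populates drmem_info->lmbs at boot from whichever
 * ibm,dynamic-memory property variant the device tree provides.
 */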
static int __init drmem_init(void)
{
	struct device_node *dn;
	const __be32 *prop;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		pr_info("No dynamic reconfiguration memory found\n");
		return 0;
	}

	if (init_drmem_lmb_size(dn)) {
		of_node_put(dn);
		return 0;
	}

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		init_drmem_v1_lmbs(prop);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			init_drmem_v2_lmbs(prop);
	}

	of_node_put(dn);
	return 0;
}
late_initcall(drmem_init);