/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2000 by Silicon Graphics, Inc.
 * Copyright (C) 2004 by Christoph Hellwig
 *
 * On SGI IP27 the ARC memory configuration data is completely bogus but
 * alternate easier to use mechanisms are available.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

#include <asm/sn/arch.h>
#include <asm/sn/agent.h>
#include <asm/sn/klconfig.h>

#include "ip27-common.h"

#define SLOT_PFNSHIFT	(SLOT_SHIFT - PAGE_SHIFT)
#define PFN_NASIDSHFT	(NASID_SHFT - PAGE_SHIFT)

struct node_data *__node_data[MAX_NUMNODES];

EXPORT_SYMBOL(__node_data);

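/*
 * Build a bitmask with one bit set for every NASID "region" that contains
 * an online node, a region being the group of nodes that share the NASID
 * bits above get_region_shift().  mlreset() later writes this mask (with
 * region 0 always included) into each hub's PI_REGION_PRESENT register.
 */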
static u64 gen_region_mask(void)
{
	int region_shift;
	u64 region_mask;
	nasid_t nasid;

	region_shift = get_region_shift();
	region_mask = 0;
	for_each_online_node(nasid)
		region_mask |= BIT_ULL(nasid >> region_shift);

	return region_mask;
}

#define rou_rflag	rou_flags

static int router_distance;

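/*
 * Depth-first walk of the router graph starting at router_a, looking for
 * router_b.  rou_rflag marks routers already on the current path so that
 * cycles are not followed, and the search is cut off once it gets deeper
 * than the best distance found so far.  On return, router_distance holds
 * the minimal hop count seen between the two routers.
 */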
static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
{
	klrou_t *router;
	lboard_t *brd;
	int port;

	if (router_a->rou_rflag == 1)
		return;

	if (depth >= router_distance)
		return;

	router_a->rou_rflag = 1;

	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
		if (router_a->rou_port[port].port_nasid == INVALID_NASID)
			continue;

		brd = (lboard_t *)NODE_OFFSET_TO_K0(
			router_a->rou_port[port].port_nasid,
			router_a->rou_port[port].port_offset);

		if (brd->brd_type == KLTYPE_ROUTER) {
			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			if (router == router_b) {
				if (depth < router_distance)
					router_distance = depth;
			} else
				router_recurse(router, router_b, depth + 1);
		}
	}

	router_a->rou_rflag = 0;
}

unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);

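/*
 * Compute the value reported by node_distance() for a pair of nodes.
 * Each node is mapped to the router its IP27 board hangs off of: the same
 * node is LOCAL_DISTANCE, two nodes on the same router are
 * LOCAL_DISTANCE + 1, and anything further apart is LOCAL_DISTANCE plus
 * the router hop count found by router_recurse().  255 is returned if a
 * router for either node cannot be found in KLCONFIG.
 */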
static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
	klrou_t *router, *router_a = NULL, *router_b = NULL;
	lboard_t *brd, *dest_brd;
	nasid_t nasid;
	int port;

	/* Figure out which routers the nodes in question are connected to */
	for_each_online_node(nasid) {
		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);

		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			router->rou_rflag = 0;

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				if (dest_brd->brd_type == KLTYPE_IP27) {
					if (dest_brd->brd_nasid == nasid_a)
						router_a = router;
					if (dest_brd->brd_nasid == nasid_b)
						router_b = router;
				}
			}

		} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
	}

	if (nasid_a == nasid_b)
		return LOCAL_DISTANCE;

	if (router_a == router_b)
		return LOCAL_DISTANCE + 1;

	if (router_a == NULL) {
		pr_info("node_distance: router_a NULL\n");
		return 255;
	}
	if (router_b == NULL) {
		pr_info("node_distance: router_b NULL\n");
		return 255;
	}

	router_distance = 100;
	router_recurse(router_a, router_b, 2);

	return LOCAL_DISTANCE + router_distance;
}

static void __init init_topology_matrix(void)
{
	nasid_t row, col;

	for (row = 0; row < MAX_NUMNODES; row++)
		for (col = 0; col < MAX_NUMNODES; col++)
			__node_distances[row][col] = -1;

	for_each_online_node(row) {
		for_each_online_node(col) {
			__node_distances[row][col] =
				compute_node_distance(row, col);
		}
	}
}

static void __init dump_topology(void)
{
	nasid_t nasid;
	lboard_t *brd, *dest_brd;
	int port;
	int router_num = 0;
	klrou_t *router;
	nasid_t row, col;

	pr_info("************** Topology ********************\n");

	pr_info(" ");
	for_each_online_node(col)
		pr_cont("%02d ", col);
	pr_cont("\n");
	for_each_online_node(row) {
		pr_info("%02d ", row);
		for_each_online_node(col)
			pr_cont("%2d ", node_distance(row, col));
		pr_cont("\n");
	}

	for_each_online_node(nasid) {
		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);

		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;
			pr_cont("Router %d:", router_num);
			router_num++;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				if (dest_brd->brd_type == KLTYPE_IP27)
					pr_cont(" %d", dest_brd->brd_nasid);
				if (dest_brd->brd_type == KLTYPE_ROUTER)
					pr_cont(" r");
			}
			pr_cont("\n");

		} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
	}
}

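/*
 * The slot arithmetic below assumes that a node's physical memory starts at
 * (nasid << NASID_SHFT) and is carved into fixed-size slots of
 * 1 << SLOT_SHIFT bytes, so the base PFN of a slot is simply the NASID and
 * slot number shifted into page-frame units.
 */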
static unsigned long __init slot_getbasepfn(nasid_t nasid, int slot)
{
	return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}

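/*
 * Return the size of a memory slot in pages.  The code below treats
 * membnk_bnksz[slot/4] as the size in megabytes of a bank covering four
 * consecutive slots: a bank of 128 MB or less is assumed to live entirely
 * in the bank's first slot, while a larger bank is spread evenly across
 * its four slots.
 */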
static unsigned long __init slot_psize_compute(nasid_t nasid, int slot)
{
	lboard_t *brd;
	klmembnk_t *banks;
	unsigned long size;

	/* Find the node board */
	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
	if (!brd)
		return 0;

	/* Get the memory bank structure */
	banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
	if (!banks)
		return 0;

	/* Size in _Megabytes_ */
	size = (unsigned long)banks->membnk_bnksz[slot/4];

	/* hack for 128 dimm banks */
	if (size <= 128) {
		if (slot % 4 == 0) {
			size <<= 20;		/* size in bytes */
			return size >> PAGE_SHIFT;
		} else
			return 0;
	} else {
		size /= 4;
		size <<= 20;
		return size >> PAGE_SHIFT;
	}
}

static void __init mlreset(void)
{
	u64 region_mask;
	nasid_t nasid;

	master_nasid = get_nasid();

	/*
	 * Probe for all CPUs - this creates the cpumask and sets up the
	 * mapping tables.  We need to do this as early as possible.
	 */
#ifdef CONFIG_SMP
	cpu_node_probe();
#endif

	init_topology_matrix();
	dump_topology();

	region_mask = gen_region_mask();

	setup_replication_mask();

	/*
	 * Set all nodes' calias sizes to 8k
	 */
	for_each_online_node(nasid) {
		/*
		 * Always have node 0 in the region mask, otherwise
		 * CALIAS accesses get exceptions since the hub
		 * thinks it is a node 0 address.
		 */
		REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);

#ifdef LATER
		/*
		 * Set up all hubs to have a big window pointing at
		 * widget 0. Memory mode, widget 0, offset 0
		 */
		REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
			((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
			(0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
	}
}

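/*
 * Register every populated memory slot of every online node with memblock.
 * As a safeguard for the mem_map sizing hack noted below, a node's
 * remaining slots are ignored once the struct page array needed for the
 * node's address span so far would no longer fit into slot 0.
 */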
static void __init szmem(void)
{
	unsigned long slot_psize, slot0sz = 0, nodebytes;	/* Hack to detect problem configs */
	int slot;
	nasid_t node;

	for_each_online_node(node) {
		nodebytes = 0;
		for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
			slot_psize = slot_psize_compute(node, slot);
			if (slot == 0)
				slot0sz = slot_psize;
			/*
			 * We need to refine the hack when we have replicated
			 * kernel text.
			 */
			nodebytes += (1LL << SLOT_SHIFT);

			if (!slot_psize)
				continue;

			if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
			    (slot0sz << PAGE_SHIFT)) {
				pr_info("Ignoring slot %d onwards on node %d\n",
					slot, node);
				slot = MAX_MEM_SLOTS;
				continue;
			}
			memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
					  PFN_PHYS(slot_psize), node);
		}
	}
}

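/*
 * Per-node early memory setup: place the node's pglist_data and hub_data
 * at the first free page of slot 0 on that node, fill in the node's PFN
 * range, and reserve everything from the start of slot 0 up to the first
 * free page with memblock so it is never handed to the page allocator.
 */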
static void __init node_mem_init(nasid_t node)
{
	unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
	unsigned long slot_freepfn = node_getfirstfree(node);
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(node, &start_pfn, &end_pfn);

	/*
	 * Allocate the node data structures on the node first.
	 */
	__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
	memset(__node_data[node], 0, PAGE_SIZE);

	NODE_DATA(node)->node_start_pfn = start_pfn;
	NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;

	cpumask_clear(&hub_data(node)->h_cpus);

	slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
			       sizeof(struct hub_data));

	memblock_reserve(slot_firstpfn << PAGE_SHIFT,
			 ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT));
}

/*
 * A node with nothing.  We use it to avoid any special casing in
 * cpumask_of_node
 */
static struct node_data null_node = {
	.hub = {
		.h_cpus = CPU_MASK_NONE
	}
};

/*
 * Currently, the intranode memory hole support assumes that each slot
 * contains at least 32 MBytes of memory.  We assume all bootmem data
 * fits on the first slot.
 */
void __init prom_meminit(void)
{
	nasid_t node;

	mlreset();
	szmem();
	max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());

	for (node = 0; node < MAX_NUMNODES; node++) {
		if (node_online(node)) {
			node_mem_init(node);
			continue;
		}
		__node_data[node] = &null_node;
	}
}

void __init prom_free_prom_memory(void)
{
	/* We got nothing to free here ... */
}

extern void setup_zero_pages(void);

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };

	pagetable_init();
	zones_size[ZONE_NORMAL] = max_low_pfn;
	free_area_init(zones_size);
}

void __init mem_init(void)
{
	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
	memblock_free_all();
	setup_zero_pages();	/* This comes from node 0 */
	mem_init_print_info(NULL);
}