// SPDX-License-Identifier: GPL-2.0-only
/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;

static int numa_distance_cnt;
static u8 *numa_distance;
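
/*
 * Handle the "numa=" early parameter: "off" disables NUMA, "fake=<config>"
 * sets up NUMA emulation, "noacpi" skips the ACPI SRAT table and "nohmat"
 * skips the ACPI HMAT table.
 */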
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
	if (!strncmp(opt, "fake=", 5))
		return numa_emu_cmdline(opt + 5);
	if (!strncmp(opt, "noacpi", 6))
		disable_srat();
	if (!strncmp(opt, "nohmat", 6))
		disable_hmat();
	return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

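/* Look up the NUMA node for @cpu via its local APIC ID, if one is known. */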
int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

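/*
 * Record @cpu's node in the early map before the per-CPU areas exist,
 * or in the per-CPU map (and the generic numa_node) afterwards.
 */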
void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

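/*
 * Append the range [@start, @end) for node @nid to @mi. Zero-length and
 * invalid ranges are ignored; -EINVAL is returned only when @mi is full.
 */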
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
 * @dst: numa_meminfo to append block to
 * @idx: Index of memblk to remove
 * @src: numa_meminfo to remove memblk from
 */
static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
					 struct numa_meminfo *src)
{
	dst->blk[dst->nr_blks++] = src->blk[idx];
	numa_remove_memblk_from(idx, src);
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data. Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
		       nd_size, nid);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks. Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* move / save reserved memory ranges */
		if (!memblock_overlaps_region(&memblock.memory,
					      bi->start, bi->end - bi->start)) {
			numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
			continue;
		}

		/* make sure all non-reserved blocks are inside the limits */
		bi->start = max(bi->start, low);

		/* preserve info for non-RAM areas above 'max_pfn': */
		if (bi->end > high) {
			numa_add_memblk_to(bi->nid, high, bi->end,
					   &numa_reserved_meminfo);
			bi->end = high;
		}

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks. Whine
			 * about but allow overlaps of the same nid. They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					bi->nid, bi->start, bi->end - 1,
					bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed. The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

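/*
 * Allocate the node distance table for all nodes known so far and fill it
 * with the LOCAL_DISTANCE/REMOTE_DISTANCE defaults.
 */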
static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warn("Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

/**
 * numa_set_distance - Set the NUMA distance from one NUMA node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance. If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation or @distance doesn't make sense, the call
 * is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
			from < 0 || to < 0) {
		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}

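/*
 * Look up the distance between nodes @from and @to, falling back to the
 * LOCAL_DISTANCE/REMOTE_DISTANCE defaults for nodes outside the table.
 */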
int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common). Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}

/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as hot-unpluggable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all memory known to the x86 architecture,
	 * and use those ranges to set the nid in memblock.reserved.
	 * This will split up the memblock regions along node
	 * boundaries and will set the node IDs as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel reserved memory areas.
	 *
	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
	 *   numa_meminfo might not include all memblock.reserved
	 *   memory ranges, because quirks such as trim_snb_memory()
	 *   reserve specific pages for Sandy Bridge graphics. ]
	 */
	for_each_reserved_mem_region(mb_region) {
		int nid = memblock_get_region_node(mb_region);

		if (nid != MAX_NUMNODES)
			node_set(nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.
	 *
	 * Note that this will include memory regions that reside
	 * on nodes that contain kernel memory - entire nodes
	 * become hot-unpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}

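/*
 * Feed the parsed memory blocks into memblock, verify that they cover all
 * of RAM and allocate NODE_DATA() for every node with a sufficient amount
 * of memory.
 */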
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * Very early on, the kernel has to use some memory, e.g. for
	 * loading the kernel image. We cannot prevent this anyway, so
	 * any node the kernel resides in must be un-hotpluggable.
	 *
	 * And by the time we get here, allocating node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid mapping,
	 * check whether its granularity is fine enough.
	 */
	if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
		unsigned long pfn_align = node_map_pfn_alignment();

		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
			pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
				PFN_PHYS(pfn_align) >> 20,
				PFN_PHYS(PAGES_PER_SECTION) >> 20);
			return -EINVAL;
		}
	}
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}

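/*
 * Reset all NUMA state, let @init_func parse the platform's NUMA
 * configuration and register the result with memblock and the VM.
 */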
static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction
	 * here because if we configured ACPI_NUMA, we have
	 * parsed SRAT in init_func(). It is OK to have the
	 * reset here even if we didn't configure ACPI_NUMA
	 * or the ACPI NUMA init fails and falls back to the
	 * dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory. This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds. The
 * last fallback is dummy single node config encompassing whole memory and
 * never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static void __init init_memory_less_node(int nid)
{
	/* Allocate and initialize node data. Memory-less node is now online. */
	alloc_node_data(nid);
	free_area_init_memoryless_node(nid);

	/*
	 * All zonelists will be built later in start_kernel() after per cpu
	 * areas are initialized.
	 */
}

/*
 * A node may exist which has one or more Generic Initiators but no CPUs and no
 * memory.
 *
 * This function must be called after init_cpu_to_node(), to ensure that any
 * memoryless CPU nodes have already been brought online, and before the
 * node_data[nid] is needed for zone list setup in build_all_zonelists().
 *
 * When this function is called, any nodes containing either memory and/or CPUs
 * will already be online and there is no need to do anything extra, even if
 * they also contain one or more Generic Initiators.
 */
void __init init_gi_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_GENERIC_INITIATOR)
		if (!node_online(nid))
			init_memory_less_node(nid);
}

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for the NUMA
 * emulation and fake-node cases (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner in numa_init_array(),
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		if (!node_online(node))
			init_memory_less_node(node);

		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
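/* Non-debug variants: just update the node's cpumask for @cpu. */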
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

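/*
 * Set or clear @cpu in @node's cpumask and log the resulting mask for
 * debugging.
 */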
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!cpumask_available(mask)) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, cpumask_pr_args(mask));
	return;
}

# ifndef CONFIG_NUMA_EMU
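/* Debug variants route through debug_cpumask_set_cpu() for logging. */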
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if ((unsigned)node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (!cpumask_available(node_to_cpumask_map[node])) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_KEEP_MEMINFO
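/* Find the node whose memblk in @mi contains @start, or NUMA_NO_NODE. */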
static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
{
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			return mi->blk[i].nid;
	return NUMA_NO_NODE;
}

int phys_to_target_node(phys_addr_t start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	/*
	 * Prefer online nodes, but if reserved memory might be
	 * hot-added continue the search with reserved ranges.
	 */
	if (nid != NUMA_NO_NODE)
		return nid;

	return meminfo_to_nid(&numa_reserved_meminfo, start);
}
EXPORT_SYMBOL_GPL(phys_to_target_node);

int memory_add_physaddr_to_nid(u64 start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	if (nid == NUMA_NO_NODE)
		nid = numa_meminfo.blk[0].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif