// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
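
/*
 * Sizing note (illustrative; these constants are arch and config
 * dependent): with SPARSEMEM_EXTREME only the array of
 * NR_SECTION_ROOTS root pointers is allocated up front, and each root
 * of SECTIONS_PER_ROOT mem_sections is allocated on demand, so a
 * sparsely populated physical address space only pays for the roots it
 * actually uses. The static two-dimensional variant trades that memory
 * for one fewer pointer dereference per section lookup.
 */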

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

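/*
 * Reverse-map a mem_section pointer to its section number. In the
 * SPARSEMEM_EXTREME case the roots are scanned linearly until the one
 * containing @ms is found; e.g. a pointer into root 3 at offset 5
 * decodes to 3 * SECTIONS_PER_ROOT + 5. The flat-array case reduces to
 * plain pointer arithmetic.
 */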
#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

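/*
 * Worked example (illustrative; MAX_PHYSMEM_BITS is arch specific):
 * with MAX_PHYSMEM_BITS == 46 and PAGE_SHIFT == 12, max_sparsemem_pfn
 * below is 1UL << 34, so any range reaching past 64TiB of physical
 * address space is clamped and warned about once.
 */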
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

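/*
 * Illustrative geometry (arch dependent): on x86_64 a section spans
 * 128MiB and a subsection 2MiB, giving SUBSECTIONS_PER_SECTION == 64.
 * A request crossing a section boundary is split per loop iteration
 * below, e.g. 1.5 sections worth of pfns fills section N's map
 * completely and the first half of section N+1's map.
 */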
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif

/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present().
 * This is a convenience function that is useful to mark all of the
 * system's memory as present during initialization.
 */
static void __init memblocks_present(void)
{
	unsigned long start, end;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
		memory_present(nid, start, end);
}

/*
 * Subtle: we encode the real pfn into the mem_map pointer such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}
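
/*
 * Note the identity with sparse_decode_mem_map() below: decoding adds
 * section_nr_to_pfn(pnum) back via struct page pointer arithmetic, so
 * recovering the memmap (and hence pfn_to_page()) needs no state
 * beyond section_mem_map itself.
 */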

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

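/*
 * Illustrative sizing (config dependent): with 128MiB sections, 2MiB
 * pageblocks and NR_PAGEBLOCK_BITS == 4 flag bits per pageblock, the
 * usemap below is 64 * 4 == 256 bits, i.e. 32 bytes per section.
 */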
static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before removing section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid_raw(size, size, addr,
					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free_early(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
		addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

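/*
 * Hand out a @size-aligned chunk of the pre-allocated sparsemap buffer.
 * Rounding the cursor up to the requested alignment can strand a gap in
 * front of the returned chunk; that gap is handed back to memblock
 * immediately instead of being leaked for the rest of boot.
 */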
void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			sparse_buffer_fini();
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate; mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_end, pnum_begin, map_count = 1;
	int nid_begin;

	memblocks_present();

	pnum_begin = first_present_section_nr();
	nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO: this needs some double checking. Offlining code makes
		 * sure to check pfn_valid, but those checks might be bogus.
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

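/*
 * Subsection bookkeeping for hot-remove: drop the subsection_map bits
 * covering [pfn, pfn + nr_pages) and complain if any of them were not
 * set, e.g. (assuming x86_64 geometry) clearing one 2MiB subsection of
 * a fully populated section leaves 63 of its 64 bits set.
 */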
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed is
		 * in a logically offlined state. This means all of its pages
		 * are isolated from the page allocator. If the memmap of the
		 * section being removed is placed on that same section, it
		 * must not be freed: if it were, the page allocator could
		 * hand it out even though it will be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For 1, when the subsection_map is not empty we will not be freeing
 * the usage map, but still need to free the vmemmap range.
 *
 * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	/*
	 * The memmap of early sections is always fully populated. See
	 * section_activate() and pfn_valid().
	 */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

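/*
 * Set up the usage map and memmap for the (sub)section spanning
 * [pfn, pfn + nr_pages) and return the start of the memmap. A partial
 * hot-add into a fully populated early section short-circuits and
 * reuses the memmap that is already there.
 */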
static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc = 0;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections; it simply assumes that memory will never be
	 * referenced. If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug;
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	section_mark_present(ms);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages. But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
			       nr_pages - map_offset);
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */