// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */

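/**
 * pcpu_chunk_page - get the page backing @chunk's area for @cpu at @page_idx
 * @chunk: chunk of interest
 * @cpu: cpu whose unit the page belongs to
 * @page_idx: page index within the unit
 *
 * Looks up the vmalloc-backed page.  Must not be called on an immutable
 * (pre-mapped) chunk.
 */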
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/**
 * pcpu_get_pages - get temp pages array
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx().  Note that there is only one array and accesses
 * should be serialized by pcpu_alloc_mutex.
 *
 * RETURNS:
 * Pointer to temp pages array on success.
 */
static struct page **pcpu_get_pages(void)
{
	static struct page **pages;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

	lockdep_assert_held(&pcpu_alloc_mutex);

	if (!pages)
		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
	return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 * @gfp: allocation flags passed to the underlying allocator
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end,
			    gfp_t gfp)
{
	unsigned int cpu, tcpu;
	int i;

	gfp |= __GFP_HIGHMEM;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep)
				goto err;
		}
	}
	return 0;

err:
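	/*
	 * Roll back the partial allocation: free the pages already
	 * allocated for the failing cpu, then every page allocated for
	 * the cpus that completed before it.
	 */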
	while (--i >= page_start)
		__free_page(pages[pcpu_page_idx(cpu, i)]);

	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		for (i = page_start; i < page_end; i++)
			__free_page(pages[pcpu_page_idx(tcpu, i)]);
	}
	return -ENOMEM;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flush can be very expensive, the
 * flush is issued on the whole region at once rather than once per
 * cpu.  This may be overkill but is more scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

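/*
 * Remove the kernel page table entries for @nr_pages pages starting at
 * @addr.  No cache or TLB flushing is done here; callers use
 * pcpu_pre_unmap_flush() and pcpu_post_unmap_tlb_flush() as needed.
 */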
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing is also done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

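/*
 * Install @nr_pages pages into the kernel page tables starting at @addr
 * with PAGE_KERNEL protections.  No flushing is done here; callers flush
 * the cache afterwards via pcpu_post_map_flush().
 */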
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting up whatever is necessary for
 * reverse lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;

		for (i = page_start; i < page_end; i++)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
	}
	return 0;
err:
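	/*
	 * Undo the mappings made for cpus that completed before the
	 * failure, then flush the TLB for the whole range once.
	 */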
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
	return err;
}

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), the cache flush is done at once for
 * the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: the start page
 * @page_end: the end page
 * @gfp: allocation flags passed to the underlying memory allocator
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	struct page **pages;

	pages = pcpu_get_pages();
	if (!pages)
		return -ENOMEM;

	if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
		return -ENOMEM;

	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
		pcpu_free_pages(chunk, pages, page_start, page_end);
		return -ENOMEM;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	return 0;
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	struct page **pages;

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages();
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_free_pages(chunk, pages, page_start, page_end);
}

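/**
 * pcpu_create_chunk - create a new chunk backed by vmalloc areas
 * @type: type of the chunk
 * @gfp: allocation flags
 *
 * Allocate a chunk and reserve a matching vmalloc area for each group.
 * The vm areas are stored in the chunk's @data field and the chunk's
 * base address is derived from the first group's area.  Returns the new
 * chunk or NULL on failure.
 */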
static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
					    gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	struct vm_struct **vms;

	chunk = pcpu_alloc_chunk(type, gfp);
	if (!chunk)
		return NULL;

	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size);
	if (!vms) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	chunk->data = vms;
	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}

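/**
 * pcpu_destroy_chunk - destroy a chunk created by pcpu_create_chunk()
 * @chunk: chunk to destroy, may be NULL
 *
 * Release the chunk's vmalloc areas, if any, and free the chunk itself.
 */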
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}

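/*
 * Addresses in vmalloc-backed chunks are translated to their backing
 * struct page with vmalloc_to_page().
 */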
static struct page *pcpu_addr_to_page(void *addr)
{
	return vmalloc_to_page(addr);
}

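/*
 * The vmalloc-based allocator places no extra restrictions on the boot
 * allocation info.
 */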
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	/* no extra restriction */
	return 0;
}