// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009 SUSE Linux Products GmbH
 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017 Facebook Inc.
 * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas. Percpu
 * areas are allocated in chunks which are divided into units. There is
 * a 1-to-1 mapping for units to possible cpus. These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space. I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear
 * and even sparse. Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet. In short, the first chunk is structured like so:
 *
 *	<Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker. The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules. Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness. To make a percpu allocation memcg-aware the __GFP_ACCOUNT
 * flag should be passed. All memcg-aware allocations share one set of
 * chunks, while all unaccounted allocations and allocations performed by
 * processes belonging to the root memory cgroup use the second set.
 *
 * The allocator tries to allocate from the fullest chunk first. Each chunk
 * is managed by a bitmap with metadata blocks. The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation. Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap. The reverse mapping from page to chunk is stored in
 * the page's index. Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
 * tracks the number of pages it is responsible for in nr_pages. Helper
 * functions are used to convert between bytes, bits, and blocks. All
 * hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5
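
/*
 * A worked example of the slot math (see __pcpu_size_to_slot() below): with
 * a base shift of 5, a chunk whose largest free area is 1024 bytes lands in
 * slot fls(1024) - 5 + 2 = 11 - 5 + 2 = 8, so each successive slot covers
 * roughly twice the free size of the one before it.
 */
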
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
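
/*
 * In other words, the default translation above is a constant rebasing: a
 * percpu pointer is the raw address shifted by (__per_cpu_start -
 * pcpu_base_addr), and __pcpu_ptr_to_addr() applies the inverse shift.
 * Per-cpu accessors then add the cpu's unit offset on top of that pointer.
 */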

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists. Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk. This chunk reserves part of the first
 * chunk and serves it for reserved allocations. When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages by chunk type, protected by pcpu_lock.
 * The reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages[PCPU_NR_CHUNK_TYPES];

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock. This number is kept per unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously. We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}
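
/*
 * For example, with the usual PCPU_MIN_ALLOC_SIZE of 4 bytes, a chunk whose
 * contig_hint is 256 bits advertises a 1024 byte contiguous area and is
 * filed in slot __pcpu_size_to_slot(1024) == 8, while a chunk with less
 * than one allocation unit free always sits in slot 0.
 */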

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
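
/*
 * A small example of the split above, assuming 4 KiB pages and 4 byte
 * allocation units (so PCPU_BITMAP_BLOCK_BITS == 1024): chunk offset 2500
 * lives in md_block 2 at block offset 452, i.e. 2500 == 2 * 1024 + 452.
 */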

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit. However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint. First, does the scan hint exist. Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint). Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}
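
/*
 * For example, if a block has scan_hint == 3 at scan_hint_start == 10 and
 * its contig_hint starts at 50, an allocation of 8 bits begins scanning at
 * 10 + 3 = 13 because the 3 bit scan_hint region cannot possibly satisfy
 * it, while a 2 bit allocation would start from first_free instead.
 */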

/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region. It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint. It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things. First, is there a contig_hint to
		 * check. Second, have we checked this hint before by
		 * comparing the block_off. Third, is this the same as the
		 * right contig hint. In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment. This only returns if there is a valid area to be used for this
 * allocation. block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators. These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits. pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				       \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			       \
	     (bit_off) += (bits),					       \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags. The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		struct list_head *pcpu_slot;

		pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
		if (move_front)
			list_move(&chunk->list, &pcpu_slot[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot. Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages based on the premise that
 * an md_block covers a page. The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages[pcpu_chunk_type(chunk)] += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}
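
/*
 * For example, the hint region [4, 8) and the allocated region [6, 12)
 * overlap because 4 < 12 && 6 < 8, while [4, 8) and [8, 12) do not since
 * both ranges are end-exclusive.
 */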

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area. The region [start, end) is
 * expected to be the entirety of the free area within a block. Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint. But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint. So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}

/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used. When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint. We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint. In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint. This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}

/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path. The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken. Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end). e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken. Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}

/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path. This avoids a blind block
 * refresh by making use of the block contig hints. If this fails, it scans
 * forward and backward to determine the extent of the free area. This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks. This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space. If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end). e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to beginning and end of the free area
	 * within each their respective blocks. This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* if the free spans blocks, update e_block and the blocks in between */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks. The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
	 */
	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
		pcpu_chunk_refresh_hint(chunk, true);
	else
		pcpu_block_update(&chunk->chunk_md,
				  pcpu_block_off_to_off(s_index, start),
				  end);
}

/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * Bool if the backing pages are populated.
 * next_off is to skip over unpopulated blocks in pcpu_find_block_fit.
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	unsigned int page_start, page_end, rs, re;

	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	rs = page_start;
	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
	if (rs >= page_end)
		return true;

	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}
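
/*
 * Example, assuming 4 KiB pages and 4 byte allocation units: bit_off == 1024
 * and bits == 512 describe the byte range [4096, 6144), so only page 1
 * ([page_start, page_end) == [1, 2)) needs to be populated for the check
 * above to succeed.
 */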
1038*4882a593Smuzhiyun
1039*4882a593Smuzhiyun /**
1040*4882a593Smuzhiyun * pcpu_find_block_fit - finds the block index to start searching
1041*4882a593Smuzhiyun * @chunk: chunk of interest
1042*4882a593Smuzhiyun * @alloc_bits: size of request in allocation units
1043*4882a593Smuzhiyun * @align: alignment of area (max PAGE_SIZE bytes)
1044*4882a593Smuzhiyun * @pop_only: use populated regions only
1045*4882a593Smuzhiyun *
1046*4882a593Smuzhiyun * Given a chunk and an allocation spec, find the offset to begin searching
1047*4882a593Smuzhiyun * for a free region. This iterates over the bitmap metadata blocks to
1048*4882a593Smuzhiyun * find an offset that is guaranteed to fit the requirements. It is not
1049*4882a593Smuzhiyun * quite first fit: if the allocation does not fit within the contig hint
1050*4882a593Smuzhiyun * of a block or chunk, that block or chunk is skipped. This errs on the
1051*4882a593Smuzhiyun * side of caution to prevent excess iteration. Poor alignment can cause
1052*4882a593Smuzhiyun * the allocator to skip over blocks and chunks that have valid free areas.
1053*4882a593Smuzhiyun *
1054*4882a593Smuzhiyun * RETURNS:
1055*4882a593Smuzhiyun * The offset in the bitmap to begin searching.
1056*4882a593Smuzhiyun * -1 if no offset is found.
1057*4882a593Smuzhiyun */
1058*4882a593Smuzhiyun static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1059*4882a593Smuzhiyun size_t align, bool pop_only)
1060*4882a593Smuzhiyun {
1061*4882a593Smuzhiyun struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1062*4882a593Smuzhiyun int bit_off, bits, next_off;
1063*4882a593Smuzhiyun
1064*4882a593Smuzhiyun /*
1065*4882a593Smuzhiyun * Check to see if the allocation can fit in the chunk's contig hint.
1066*4882a593Smuzhiyun * This is an optimization to prevent scanning by assuming if it
1067*4882a593Smuzhiyun * cannot fit in the global hint, there is memory pressure and creating
1068*4882a593Smuzhiyun * a new chunk would happen soon.
1069*4882a593Smuzhiyun */
1070*4882a593Smuzhiyun bit_off = ALIGN(chunk_md->contig_hint_start, align) -
1071*4882a593Smuzhiyun chunk_md->contig_hint_start;
1072*4882a593Smuzhiyun if (bit_off + alloc_bits > chunk_md->contig_hint)
1073*4882a593Smuzhiyun return -1;
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1076*4882a593Smuzhiyun bits = 0;
1077*4882a593Smuzhiyun pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
1078*4882a593Smuzhiyun if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1079*4882a593Smuzhiyun &next_off))
1080*4882a593Smuzhiyun break;
1081*4882a593Smuzhiyun
1082*4882a593Smuzhiyun bit_off = next_off;
1083*4882a593Smuzhiyun bits = 0;
1084*4882a593Smuzhiyun }
1085*4882a593Smuzhiyun
1086*4882a593Smuzhiyun if (bit_off == pcpu_chunk_map_bits(chunk))
1087*4882a593Smuzhiyun return -1;
1088*4882a593Smuzhiyun
1089*4882a593Smuzhiyun return bit_off;
1090*4882a593Smuzhiyun }
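/*
 * Illustrative example of the early bail-out above (hypothetical numbers):
 * with a chunk contig_hint of 24 bits starting at bit 100, a request for
 * 16 bits with a bit alignment of 16 needs ALIGN(100, 16) = 112, i.e. 12
 * bits of padding, and 12 + 16 > 24, so -1 is returned without scanning
 * even though a suitably aligned hole may exist elsewhere in the chunk.
 */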
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun /*
1093*4882a593Smuzhiyun * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1094*4882a593Smuzhiyun * @map: the address to base the search on
1095*4882a593Smuzhiyun * @size: the bitmap size in bits
1096*4882a593Smuzhiyun * @start: the bitnumber to start searching at
1097*4882a593Smuzhiyun * @nr: the number of zeroed bits we're looking for
1098*4882a593Smuzhiyun * @align_mask: alignment mask for zero area
1099*4882a593Smuzhiyun * @largest_off: offset of the largest area skipped
1100*4882a593Smuzhiyun * @largest_bits: size of the largest area skipped
1101*4882a593Smuzhiyun *
1102*4882a593Smuzhiyun * The @align_mask should be one less than a power of 2.
1103*4882a593Smuzhiyun *
1104*4882a593Smuzhiyun * This is a modified version of bitmap_find_next_zero_area_off() to remember
1105*4882a593Smuzhiyun * the largest area that was skipped. This is imperfect, but in general is
1106*4882a593Smuzhiyun * good enough. The largest remembered region is the largest failed region
1107*4882a593Smuzhiyun * seen. This does not include anything we possibly skipped due to alignment.
1108*4882a593Smuzhiyun * pcpu_block_update_scan() does scan backwards to try and recover what was
1109*4882a593Smuzhiyun * lost to alignment. While this can cause scanning to miss earlier possible
1110*4882a593Smuzhiyun * free areas, smaller allocations will eventually fill those holes.
1111*4882a593Smuzhiyun */
1112*4882a593Smuzhiyun static unsigned long pcpu_find_zero_area(unsigned long *map,
1113*4882a593Smuzhiyun unsigned long size,
1114*4882a593Smuzhiyun unsigned long start,
1115*4882a593Smuzhiyun unsigned long nr,
1116*4882a593Smuzhiyun unsigned long align_mask,
1117*4882a593Smuzhiyun unsigned long *largest_off,
1118*4882a593Smuzhiyun unsigned long *largest_bits)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun unsigned long index, end, i, area_off, area_bits;
1121*4882a593Smuzhiyun again:
1122*4882a593Smuzhiyun index = find_next_zero_bit(map, size, start);
1123*4882a593Smuzhiyun
1124*4882a593Smuzhiyun /* Align allocation */
1125*4882a593Smuzhiyun index = __ALIGN_MASK(index, align_mask);
1126*4882a593Smuzhiyun area_off = index;
1127*4882a593Smuzhiyun
1128*4882a593Smuzhiyun end = index + nr;
1129*4882a593Smuzhiyun if (end > size)
1130*4882a593Smuzhiyun return end;
1131*4882a593Smuzhiyun i = find_next_bit(map, end, index);
1132*4882a593Smuzhiyun if (i < end) {
1133*4882a593Smuzhiyun area_bits = i - area_off;
1134*4882a593Smuzhiyun /* remember largest unused area with best alignment */
1135*4882a593Smuzhiyun if (area_bits > *largest_bits ||
1136*4882a593Smuzhiyun (area_bits == *largest_bits && *largest_off &&
1137*4882a593Smuzhiyun (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1138*4882a593Smuzhiyun *largest_off = area_off;
1139*4882a593Smuzhiyun *largest_bits = area_bits;
1140*4882a593Smuzhiyun }
1141*4882a593Smuzhiyun
1142*4882a593Smuzhiyun start = i + 1;
1143*4882a593Smuzhiyun goto again;
1144*4882a593Smuzhiyun }
1145*4882a593Smuzhiyun return index;
1146*4882a593Smuzhiyun }
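/*
 * Illustrative trace (hypothetical bitmap): with bits 0-3 and bit 10 set,
 * searching for nr = 8 bits at align_mask = 3 starting from 0 first lands
 * on the aligned hole at bit 4, hits the set bit at 10, records {off = 4,
 * bits = 6} as the largest skipped area, and retries from bit 11, which
 * aligns up to 12. If bits 12-19 are clear, 12 is returned and the caller
 * may later feed the remembered {4, 6} area to pcpu_block_update_scan().
 */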
1147*4882a593Smuzhiyun
1148*4882a593Smuzhiyun /**
1149*4882a593Smuzhiyun * pcpu_alloc_area - allocates an area from a pcpu_chunk
1150*4882a593Smuzhiyun * @chunk: chunk of interest
1151*4882a593Smuzhiyun * @alloc_bits: size of request in allocation units
1152*4882a593Smuzhiyun * @align: alignment of area (max PAGE_SIZE)
1153*4882a593Smuzhiyun * @start: bit_off to start searching
1154*4882a593Smuzhiyun *
1155*4882a593Smuzhiyun * This function takes in a @start offset to begin searching to fit an
1156*4882a593Smuzhiyun * allocation of @alloc_bits with alignment @align. It needs to scan
1157*4882a593Smuzhiyun * the allocation map because if it fits within the block's contig hint,
1158*4882a593Smuzhiyun * @start will be block->first_free. This is an attempt to fill the
1159*4882a593Smuzhiyun * allocation prior to breaking the contig hint. The allocation and
1160*4882a593Smuzhiyun * boundary maps are updated accordingly if it confirms a valid
1161*4882a593Smuzhiyun * free area.
1162*4882a593Smuzhiyun *
1163*4882a593Smuzhiyun * RETURNS:
1164*4882a593Smuzhiyun * Allocated addr offset in @chunk on success.
1165*4882a593Smuzhiyun * -1 if no matching area is found.
1166*4882a593Smuzhiyun */
1167*4882a593Smuzhiyun static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1168*4882a593Smuzhiyun size_t align, int start)
1169*4882a593Smuzhiyun {
1170*4882a593Smuzhiyun struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1171*4882a593Smuzhiyun size_t align_mask = (align) ? (align - 1) : 0;
1172*4882a593Smuzhiyun unsigned long area_off = 0, area_bits = 0;
1173*4882a593Smuzhiyun int bit_off, end, oslot;
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun lockdep_assert_held(&pcpu_lock);
1176*4882a593Smuzhiyun
1177*4882a593Smuzhiyun oslot = pcpu_chunk_slot(chunk);
1178*4882a593Smuzhiyun
1179*4882a593Smuzhiyun /*
1180*4882a593Smuzhiyun * Search to find a fit.
1181*4882a593Smuzhiyun */
1182*4882a593Smuzhiyun end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
1183*4882a593Smuzhiyun pcpu_chunk_map_bits(chunk));
1184*4882a593Smuzhiyun bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1185*4882a593Smuzhiyun align_mask, &area_off, &area_bits);
1186*4882a593Smuzhiyun if (bit_off >= end)
1187*4882a593Smuzhiyun return -1;
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun if (area_bits)
1190*4882a593Smuzhiyun pcpu_block_update_scan(chunk, area_off, area_bits);
1191*4882a593Smuzhiyun
1192*4882a593Smuzhiyun /* update alloc map */
1193*4882a593Smuzhiyun bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun /* update boundary map */
1196*4882a593Smuzhiyun set_bit(bit_off, chunk->bound_map);
1197*4882a593Smuzhiyun bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1198*4882a593Smuzhiyun set_bit(bit_off + alloc_bits, chunk->bound_map);
1199*4882a593Smuzhiyun
1200*4882a593Smuzhiyun chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1201*4882a593Smuzhiyun
1202*4882a593Smuzhiyun /* update first free bit */
1203*4882a593Smuzhiyun if (bit_off == chunk_md->first_free)
1204*4882a593Smuzhiyun chunk_md->first_free = find_next_zero_bit(
1205*4882a593Smuzhiyun chunk->alloc_map,
1206*4882a593Smuzhiyun pcpu_chunk_map_bits(chunk),
1207*4882a593Smuzhiyun bit_off + alloc_bits);
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1210*4882a593Smuzhiyun
1211*4882a593Smuzhiyun pcpu_chunk_relocate(chunk, oslot);
1212*4882a593Smuzhiyun
1213*4882a593Smuzhiyun return bit_off * PCPU_MIN_ALLOC_SIZE;
1214*4882a593Smuzhiyun }
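/*
 * Illustrative example (hypothetical offsets, assuming a 4-byte
 * PCPU_MIN_ALLOC_SIZE): a 64-byte allocation placed at bit_off 32 sets
 * alloc_map bits 32-47 and bound_map bits 32 and 48 while clearing bits
 * 33-47 of the boundary map. A later pcpu_free_area(chunk, 128) maps the
 * 128-byte offset back to bit 32, finds the next boundary bit at 48, and
 * therefore frees exactly 64 bytes.
 */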
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun /**
1217*4882a593Smuzhiyun * pcpu_free_area - frees the corresponding offset
1218*4882a593Smuzhiyun * @chunk: chunk of interest
1219*4882a593Smuzhiyun * @off: addr offset into chunk
1220*4882a593Smuzhiyun *
1221*4882a593Smuzhiyun * This function determines the size of an allocation to free using
1222*4882a593Smuzhiyun * the boundary bitmap and clears the allocation map.
1223*4882a593Smuzhiyun *
1224*4882a593Smuzhiyun * RETURNS:
1225*4882a593Smuzhiyun * Number of freed bytes.
1226*4882a593Smuzhiyun */
1227*4882a593Smuzhiyun static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1228*4882a593Smuzhiyun {
1229*4882a593Smuzhiyun struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1230*4882a593Smuzhiyun int bit_off, bits, end, oslot, freed;
1231*4882a593Smuzhiyun
1232*4882a593Smuzhiyun lockdep_assert_held(&pcpu_lock);
1233*4882a593Smuzhiyun pcpu_stats_area_dealloc(chunk);
1234*4882a593Smuzhiyun
1235*4882a593Smuzhiyun oslot = pcpu_chunk_slot(chunk);
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun bit_off = off / PCPU_MIN_ALLOC_SIZE;
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun /* find end index */
1240*4882a593Smuzhiyun end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1241*4882a593Smuzhiyun bit_off + 1);
1242*4882a593Smuzhiyun bits = end - bit_off;
1243*4882a593Smuzhiyun bitmap_clear(chunk->alloc_map, bit_off, bits);
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun freed = bits * PCPU_MIN_ALLOC_SIZE;
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun /* update metadata */
1248*4882a593Smuzhiyun chunk->free_bytes += freed;
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun /* update first free bit */
1251*4882a593Smuzhiyun chunk_md->first_free = min(chunk_md->first_free, bit_off);
1252*4882a593Smuzhiyun
1253*4882a593Smuzhiyun pcpu_block_update_hint_free(chunk, bit_off, bits);
1254*4882a593Smuzhiyun
1255*4882a593Smuzhiyun pcpu_chunk_relocate(chunk, oslot);
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun return freed;
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1261*4882a593Smuzhiyun {
1262*4882a593Smuzhiyun block->scan_hint = 0;
1263*4882a593Smuzhiyun block->contig_hint = nr_bits;
1264*4882a593Smuzhiyun block->left_free = nr_bits;
1265*4882a593Smuzhiyun block->right_free = nr_bits;
1266*4882a593Smuzhiyun block->first_free = 0;
1267*4882a593Smuzhiyun block->nr_bits = nr_bits;
1268*4882a593Smuzhiyun }
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1271*4882a593Smuzhiyun {
1272*4882a593Smuzhiyun struct pcpu_block_md *md_block;
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun /* init the chunk's block */
1275*4882a593Smuzhiyun pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1276*4882a593Smuzhiyun
1277*4882a593Smuzhiyun for (md_block = chunk->md_blocks;
1278*4882a593Smuzhiyun md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1279*4882a593Smuzhiyun md_block++)
1280*4882a593Smuzhiyun pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun /**
1284*4882a593Smuzhiyun * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1285*4882a593Smuzhiyun * @tmp_addr: the start of the region served
1286*4882a593Smuzhiyun * @map_size: size of the region served
1287*4882a593Smuzhiyun *
1288*4882a593Smuzhiyun * This is responsible for creating the chunks that serve the first chunk. The
1289*4882a593Smuzhiyun * base_addr is @tmp_addr aligned down to a page boundary while the region end
1290*4882a593Smuzhiyun * is aligned up. The start and end offsets are tracked to determine the
1291*4882a593Smuzhiyun * region actually served, so the bitmap allocator never sees partial blocks.
1292*4882a593Smuzhiyun *
1293*4882a593Smuzhiyun * RETURNS:
1294*4882a593Smuzhiyun * Chunk serving the region at @tmp_addr of @map_size.
1295*4882a593Smuzhiyun */
1296*4882a593Smuzhiyun static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1297*4882a593Smuzhiyun int map_size)
1298*4882a593Smuzhiyun {
1299*4882a593Smuzhiyun struct pcpu_chunk *chunk;
1300*4882a593Smuzhiyun unsigned long aligned_addr, lcm_align;
1301*4882a593Smuzhiyun int start_offset, offset_bits, region_size, region_bits;
1302*4882a593Smuzhiyun size_t alloc_size;
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun /* region calculations */
1305*4882a593Smuzhiyun aligned_addr = tmp_addr & PAGE_MASK;
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun start_offset = tmp_addr - aligned_addr;
1308*4882a593Smuzhiyun
1309*4882a593Smuzhiyun /*
1310*4882a593Smuzhiyun * Align the end of the region with the LCM of PAGE_SIZE and
1311*4882a593Smuzhiyun * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of
1312*4882a593Smuzhiyun * the other.
1313*4882a593Smuzhiyun */
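/*
 * For example (hypothetical values): with 4K pages and
 * PCPU_BITMAP_BLOCK_SIZE == PAGE_SIZE, lcm_align is 4K; a start_offset of
 * 0x2c0 and a map_size of 0x7000 then yield a region_size of 0x8000 and an
 * end_offset of 0xd40.
 */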
1314*4882a593Smuzhiyun lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1315*4882a593Smuzhiyun region_size = ALIGN(start_offset + map_size, lcm_align);
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun /* allocate chunk */
1318*4882a593Smuzhiyun alloc_size = struct_size(chunk, populated,
1319*4882a593Smuzhiyun BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1320*4882a593Smuzhiyun chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1321*4882a593Smuzhiyun if (!chunk)
1322*4882a593Smuzhiyun panic("%s: Failed to allocate %zu bytes\n", __func__,
1323*4882a593Smuzhiyun alloc_size);
1324*4882a593Smuzhiyun
1325*4882a593Smuzhiyun INIT_LIST_HEAD(&chunk->list);
1326*4882a593Smuzhiyun
1327*4882a593Smuzhiyun chunk->base_addr = (void *)aligned_addr;
1328*4882a593Smuzhiyun chunk->start_offset = start_offset;
1329*4882a593Smuzhiyun chunk->end_offset = region_size - chunk->start_offset - map_size;
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun chunk->nr_pages = region_size >> PAGE_SHIFT;
1332*4882a593Smuzhiyun region_bits = pcpu_chunk_map_bits(chunk);
1333*4882a593Smuzhiyun
1334*4882a593Smuzhiyun alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1335*4882a593Smuzhiyun chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1336*4882a593Smuzhiyun if (!chunk->alloc_map)
1337*4882a593Smuzhiyun panic("%s: Failed to allocate %zu bytes\n", __func__,
1338*4882a593Smuzhiyun alloc_size);
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun alloc_size =
1341*4882a593Smuzhiyun BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1342*4882a593Smuzhiyun chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1343*4882a593Smuzhiyun if (!chunk->bound_map)
1344*4882a593Smuzhiyun panic("%s: Failed to allocate %zu bytes\n", __func__,
1345*4882a593Smuzhiyun alloc_size);
1346*4882a593Smuzhiyun
1347*4882a593Smuzhiyun alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1348*4882a593Smuzhiyun chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1349*4882a593Smuzhiyun if (!chunk->md_blocks)
1350*4882a593Smuzhiyun panic("%s: Failed to allocate %zu bytes\n", __func__,
1351*4882a593Smuzhiyun alloc_size);
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun #ifdef CONFIG_MEMCG_KMEM
1354*4882a593Smuzhiyun /* first chunk isn't memcg-aware */
1355*4882a593Smuzhiyun chunk->obj_cgroups = NULL;
1356*4882a593Smuzhiyun #endif
1357*4882a593Smuzhiyun pcpu_init_md_blocks(chunk);
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun /* manage populated page bitmap */
1360*4882a593Smuzhiyun chunk->immutable = true;
1361*4882a593Smuzhiyun bitmap_fill(chunk->populated, chunk->nr_pages);
1362*4882a593Smuzhiyun chunk->nr_populated = chunk->nr_pages;
1363*4882a593Smuzhiyun chunk->nr_empty_pop_pages = chunk->nr_pages;
1364*4882a593Smuzhiyun
1365*4882a593Smuzhiyun chunk->free_bytes = map_size;
1366*4882a593Smuzhiyun
1367*4882a593Smuzhiyun if (chunk->start_offset) {
1368*4882a593Smuzhiyun /* hide the beginning of the bitmap */
1369*4882a593Smuzhiyun offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1370*4882a593Smuzhiyun bitmap_set(chunk->alloc_map, 0, offset_bits);
1371*4882a593Smuzhiyun set_bit(0, chunk->bound_map);
1372*4882a593Smuzhiyun set_bit(offset_bits, chunk->bound_map);
1373*4882a593Smuzhiyun
1374*4882a593Smuzhiyun chunk->chunk_md.first_free = offset_bits;
1375*4882a593Smuzhiyun
1376*4882a593Smuzhiyun pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1377*4882a593Smuzhiyun }
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun if (chunk->end_offset) {
1380*4882a593Smuzhiyun /* hide the end of the bitmap */
1381*4882a593Smuzhiyun offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1382*4882a593Smuzhiyun bitmap_set(chunk->alloc_map,
1383*4882a593Smuzhiyun pcpu_chunk_map_bits(chunk) - offset_bits,
1384*4882a593Smuzhiyun offset_bits);
1385*4882a593Smuzhiyun set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1386*4882a593Smuzhiyun chunk->bound_map);
1387*4882a593Smuzhiyun set_bit(region_bits, chunk->bound_map);
1388*4882a593Smuzhiyun
1389*4882a593Smuzhiyun pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1390*4882a593Smuzhiyun - offset_bits, offset_bits);
1391*4882a593Smuzhiyun }
1392*4882a593Smuzhiyun
1393*4882a593Smuzhiyun return chunk;
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp)
1397*4882a593Smuzhiyun {
1398*4882a593Smuzhiyun struct pcpu_chunk *chunk;
1399*4882a593Smuzhiyun int region_bits;
1400*4882a593Smuzhiyun
1401*4882a593Smuzhiyun chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1402*4882a593Smuzhiyun if (!chunk)
1403*4882a593Smuzhiyun return NULL;
1404*4882a593Smuzhiyun
1405*4882a593Smuzhiyun INIT_LIST_HEAD(&chunk->list);
1406*4882a593Smuzhiyun chunk->nr_pages = pcpu_unit_pages;
1407*4882a593Smuzhiyun region_bits = pcpu_chunk_map_bits(chunk);
1408*4882a593Smuzhiyun
1409*4882a593Smuzhiyun chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1410*4882a593Smuzhiyun sizeof(chunk->alloc_map[0]), gfp);
1411*4882a593Smuzhiyun if (!chunk->alloc_map)
1412*4882a593Smuzhiyun goto alloc_map_fail;
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1415*4882a593Smuzhiyun sizeof(chunk->bound_map[0]), gfp);
1416*4882a593Smuzhiyun if (!chunk->bound_map)
1417*4882a593Smuzhiyun goto bound_map_fail;
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1420*4882a593Smuzhiyun sizeof(chunk->md_blocks[0]), gfp);
1421*4882a593Smuzhiyun if (!chunk->md_blocks)
1422*4882a593Smuzhiyun goto md_blocks_fail;
1423*4882a593Smuzhiyun
1424*4882a593Smuzhiyun #ifdef CONFIG_MEMCG_KMEM
1425*4882a593Smuzhiyun if (pcpu_is_memcg_chunk(type)) {
1426*4882a593Smuzhiyun chunk->obj_cgroups =
1427*4882a593Smuzhiyun pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1428*4882a593Smuzhiyun sizeof(struct obj_cgroup *), gfp);
1429*4882a593Smuzhiyun if (!chunk->obj_cgroups)
1430*4882a593Smuzhiyun goto objcg_fail;
1431*4882a593Smuzhiyun }
1432*4882a593Smuzhiyun #endif
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun pcpu_init_md_blocks(chunk);
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun /* init metadata */
1437*4882a593Smuzhiyun chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1438*4882a593Smuzhiyun
1439*4882a593Smuzhiyun return chunk;
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun #ifdef CONFIG_MEMCG_KMEM
1442*4882a593Smuzhiyun objcg_fail:
1443*4882a593Smuzhiyun pcpu_mem_free(chunk->md_blocks);
1444*4882a593Smuzhiyun #endif
1445*4882a593Smuzhiyun md_blocks_fail:
1446*4882a593Smuzhiyun pcpu_mem_free(chunk->bound_map);
1447*4882a593Smuzhiyun bound_map_fail:
1448*4882a593Smuzhiyun pcpu_mem_free(chunk->alloc_map);
1449*4882a593Smuzhiyun alloc_map_fail:
1450*4882a593Smuzhiyun pcpu_mem_free(chunk);
1451*4882a593Smuzhiyun
1452*4882a593Smuzhiyun return NULL;
1453*4882a593Smuzhiyun }
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1456*4882a593Smuzhiyun {
1457*4882a593Smuzhiyun if (!chunk)
1458*4882a593Smuzhiyun return;
1459*4882a593Smuzhiyun #ifdef CONFIG_MEMCG_KMEM
1460*4882a593Smuzhiyun pcpu_mem_free(chunk->obj_cgroups);
1461*4882a593Smuzhiyun #endif
1462*4882a593Smuzhiyun pcpu_mem_free(chunk->md_blocks);
1463*4882a593Smuzhiyun pcpu_mem_free(chunk->bound_map);
1464*4882a593Smuzhiyun pcpu_mem_free(chunk->alloc_map);
1465*4882a593Smuzhiyun pcpu_mem_free(chunk);
1466*4882a593Smuzhiyun }
1467*4882a593Smuzhiyun
1468*4882a593Smuzhiyun /**
1469*4882a593Smuzhiyun * pcpu_chunk_populated - post-population bookkeeping
1470*4882a593Smuzhiyun * @chunk: pcpu_chunk which got populated
1471*4882a593Smuzhiyun * @page_start: the start page
1472*4882a593Smuzhiyun * @page_end: the end page
1473*4882a593Smuzhiyun *
1474*4882a593Smuzhiyun * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1475*4882a593Smuzhiyun * the bookkeeping information accordingly. Must be called after each
1476*4882a593Smuzhiyun * successful population.
1480*4882a593Smuzhiyun */
1481*4882a593Smuzhiyun static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1482*4882a593Smuzhiyun int page_end)
1483*4882a593Smuzhiyun {
1484*4882a593Smuzhiyun int nr = page_end - page_start;
1485*4882a593Smuzhiyun
1486*4882a593Smuzhiyun lockdep_assert_held(&pcpu_lock);
1487*4882a593Smuzhiyun
1488*4882a593Smuzhiyun bitmap_set(chunk->populated, page_start, nr);
1489*4882a593Smuzhiyun chunk->nr_populated += nr;
1490*4882a593Smuzhiyun pcpu_nr_populated += nr;
1491*4882a593Smuzhiyun
1492*4882a593Smuzhiyun pcpu_update_empty_pages(chunk, nr);
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun
1495*4882a593Smuzhiyun /**
1496*4882a593Smuzhiyun * pcpu_chunk_depopulated - post-depopulation bookkeeping
1497*4882a593Smuzhiyun * @chunk: pcpu_chunk which got depopulated
1498*4882a593Smuzhiyun * @page_start: the start page
1499*4882a593Smuzhiyun * @page_end: the end page
1500*4882a593Smuzhiyun *
1501*4882a593Smuzhiyun * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1502*4882a593Smuzhiyun * Update the bookkeeping information accordingly. Must be called after
1503*4882a593Smuzhiyun * each successful depopulation.
1504*4882a593Smuzhiyun */
1505*4882a593Smuzhiyun static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1506*4882a593Smuzhiyun int page_start, int page_end)
1507*4882a593Smuzhiyun {
1508*4882a593Smuzhiyun int nr = page_end - page_start;
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun lockdep_assert_held(&pcpu_lock);
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun bitmap_clear(chunk->populated, page_start, nr);
1513*4882a593Smuzhiyun chunk->nr_populated -= nr;
1514*4882a593Smuzhiyun pcpu_nr_populated -= nr;
1515*4882a593Smuzhiyun
1516*4882a593Smuzhiyun pcpu_update_empty_pages(chunk, -nr);
1517*4882a593Smuzhiyun }
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun /*
1520*4882a593Smuzhiyun * Chunk management implementation.
1521*4882a593Smuzhiyun *
1522*4882a593Smuzhiyun * To allow different implementations, chunk alloc/free and
1523*4882a593Smuzhiyun * [de]population are implemented in a separate file which is pulled
1524*4882a593Smuzhiyun * into this file and compiled together. The following functions
1525*4882a593Smuzhiyun * should be implemented.
1526*4882a593Smuzhiyun *
1527*4882a593Smuzhiyun * pcpu_populate_chunk - populate the specified range of a chunk
1528*4882a593Smuzhiyun * pcpu_depopulate_chunk - depopulate the specified range of a chunk
1529*4882a593Smuzhiyun * pcpu_create_chunk - create a new chunk
1530*4882a593Smuzhiyun * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
1531*4882a593Smuzhiyun * pcpu_addr_to_page - translate address to the page it is backed by
1532*4882a593Smuzhiyun * pcpu_verify_alloc_info - check alloc_info is acceptable during init
1533*4882a593Smuzhiyun */
1534*4882a593Smuzhiyun static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1535*4882a593Smuzhiyun int page_start, int page_end, gfp_t gfp);
1536*4882a593Smuzhiyun static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1537*4882a593Smuzhiyun int page_start, int page_end);
1538*4882a593Smuzhiyun static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
1539*4882a593Smuzhiyun gfp_t gfp);
1540*4882a593Smuzhiyun static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1541*4882a593Smuzhiyun static struct page *pcpu_addr_to_page(void *addr);
1542*4882a593Smuzhiyun static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1543*4882a593Smuzhiyun
1544*4882a593Smuzhiyun #ifdef CONFIG_NEED_PER_CPU_KM
1545*4882a593Smuzhiyun #include "percpu-km.c"
1546*4882a593Smuzhiyun #else
1547*4882a593Smuzhiyun #include "percpu-vm.c"
1548*4882a593Smuzhiyun #endif
1549*4882a593Smuzhiyun
1550*4882a593Smuzhiyun /**
1551*4882a593Smuzhiyun * pcpu_chunk_addr_search - determine chunk containing specified address
1552*4882a593Smuzhiyun * @addr: address for which the chunk needs to be determined.
1553*4882a593Smuzhiyun *
1554*4882a593Smuzhiyun * This is an internal function that handles all but static allocations.
1555*4882a593Smuzhiyun * Static percpu address values should never be passed into the allocator.
1556*4882a593Smuzhiyun *
1557*4882a593Smuzhiyun * RETURNS:
1558*4882a593Smuzhiyun * The address of the found chunk.
1559*4882a593Smuzhiyun */
1560*4882a593Smuzhiyun static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1561*4882a593Smuzhiyun {
1562*4882a593Smuzhiyun /* is it in the dynamic region (first chunk)? */
1563*4882a593Smuzhiyun if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1564*4882a593Smuzhiyun return pcpu_first_chunk;
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun /* is it in the reserved region? */
1567*4882a593Smuzhiyun if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1568*4882a593Smuzhiyun return pcpu_reserved_chunk;
1569*4882a593Smuzhiyun
1570*4882a593Smuzhiyun /*
1571*4882a593Smuzhiyun * The address is relative to unit0 which might be unused and
1572*4882a593Smuzhiyun * thus unmapped. Offset the address to the unit space of the
1573*4882a593Smuzhiyun * current processor before looking it up in the vmalloc
1574*4882a593Smuzhiyun * space. Note that any possible cpu id can be used here, so
1575*4882a593Smuzhiyun * there's no need to worry about preemption or cpu hotplug.
1576*4882a593Smuzhiyun */
1577*4882a593Smuzhiyun addr += pcpu_unit_offsets[raw_smp_processor_id()];
1578*4882a593Smuzhiyun return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1579*4882a593Smuzhiyun }
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun #ifdef CONFIG_MEMCG_KMEM
1582*4882a593Smuzhiyun static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1583*4882a593Smuzhiyun struct obj_cgroup **objcgp)
1584*4882a593Smuzhiyun {
1585*4882a593Smuzhiyun struct obj_cgroup *objcg;
1586*4882a593Smuzhiyun
1587*4882a593Smuzhiyun if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
1588*4882a593Smuzhiyun return PCPU_CHUNK_ROOT;
1589*4882a593Smuzhiyun
1590*4882a593Smuzhiyun objcg = get_obj_cgroup_from_current();
1591*4882a593Smuzhiyun if (!objcg)
1592*4882a593Smuzhiyun return PCPU_CHUNK_ROOT;
1593*4882a593Smuzhiyun
1594*4882a593Smuzhiyun if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
1595*4882a593Smuzhiyun obj_cgroup_put(objcg);
1596*4882a593Smuzhiyun return PCPU_FAIL_ALLOC;
1597*4882a593Smuzhiyun }
1598*4882a593Smuzhiyun
1599*4882a593Smuzhiyun *objcgp = objcg;
1600*4882a593Smuzhiyun return PCPU_CHUNK_MEMCG;
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1604*4882a593Smuzhiyun struct pcpu_chunk *chunk, int off,
1605*4882a593Smuzhiyun size_t size)
1606*4882a593Smuzhiyun {
1607*4882a593Smuzhiyun if (!objcg)
1608*4882a593Smuzhiyun return;
1609*4882a593Smuzhiyun
1610*4882a593Smuzhiyun if (chunk) {
1611*4882a593Smuzhiyun chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun rcu_read_lock();
1614*4882a593Smuzhiyun mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1615*4882a593Smuzhiyun size * num_possible_cpus());
1616*4882a593Smuzhiyun rcu_read_unlock();
1617*4882a593Smuzhiyun } else {
1618*4882a593Smuzhiyun obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1619*4882a593Smuzhiyun obj_cgroup_put(objcg);
1620*4882a593Smuzhiyun }
1621*4882a593Smuzhiyun }
1622*4882a593Smuzhiyun
1623*4882a593Smuzhiyun static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1624*4882a593Smuzhiyun {
1625*4882a593Smuzhiyun struct obj_cgroup *objcg;
1626*4882a593Smuzhiyun
1627*4882a593Smuzhiyun if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk)))
1628*4882a593Smuzhiyun return;
1629*4882a593Smuzhiyun
1630*4882a593Smuzhiyun objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
1631*4882a593Smuzhiyun chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
1632*4882a593Smuzhiyun
1633*4882a593Smuzhiyun obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1634*4882a593Smuzhiyun
1635*4882a593Smuzhiyun rcu_read_lock();
1636*4882a593Smuzhiyun mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1637*4882a593Smuzhiyun -(size * num_possible_cpus()));
1638*4882a593Smuzhiyun rcu_read_unlock();
1639*4882a593Smuzhiyun
1640*4882a593Smuzhiyun obj_cgroup_put(objcg);
1641*4882a593Smuzhiyun }
1642*4882a593Smuzhiyun
1643*4882a593Smuzhiyun #else /* CONFIG_MEMCG_KMEM */
1644*4882a593Smuzhiyun static enum pcpu_chunk_type
1645*4882a593Smuzhiyun pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1646*4882a593Smuzhiyun {
1647*4882a593Smuzhiyun return PCPU_CHUNK_ROOT;
1648*4882a593Smuzhiyun }
1649*4882a593Smuzhiyun
1650*4882a593Smuzhiyun static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1651*4882a593Smuzhiyun struct pcpu_chunk *chunk, int off,
1652*4882a593Smuzhiyun size_t size)
1653*4882a593Smuzhiyun {
1654*4882a593Smuzhiyun }
1655*4882a593Smuzhiyun
1656*4882a593Smuzhiyun static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1657*4882a593Smuzhiyun {
1658*4882a593Smuzhiyun }
1659*4882a593Smuzhiyun #endif /* CONFIG_MEMCG_KMEM */
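/*
 * Illustrative only ("pkts" is a hypothetical variable): a caller opts into
 * memcg accounting by passing __GFP_ACCOUNT, which routes the allocation to
 * the memcg-aware chunk set via the hooks above:
 *
 *	u64 __percpu *pkts = __alloc_percpu_gfp(sizeof(u64), __alignof__(u64),
 *						GFP_KERNEL | __GFP_ACCOUNT);
 */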
1660*4882a593Smuzhiyun
1661*4882a593Smuzhiyun /**
1662*4882a593Smuzhiyun * pcpu_alloc - the percpu allocator
1663*4882a593Smuzhiyun * @size: size of area to allocate in bytes
1664*4882a593Smuzhiyun * @align: alignment of area (max PAGE_SIZE)
1665*4882a593Smuzhiyun * @reserved: allocate from the reserved chunk if available
1666*4882a593Smuzhiyun * @gfp: allocation flags
1667*4882a593Smuzhiyun *
1668*4882a593Smuzhiyun * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1669*4882a593Smuzhiyun * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1670*4882a593Smuzhiyun * then no warning will be triggered on invalid or failed allocation
1671*4882a593Smuzhiyun * requests.
1672*4882a593Smuzhiyun *
1673*4882a593Smuzhiyun * RETURNS:
1674*4882a593Smuzhiyun * Percpu pointer to the allocated area on success, NULL on failure.
1675*4882a593Smuzhiyun */
1676*4882a593Smuzhiyun static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1677*4882a593Smuzhiyun gfp_t gfp)
1678*4882a593Smuzhiyun {
1679*4882a593Smuzhiyun gfp_t pcpu_gfp;
1680*4882a593Smuzhiyun bool is_atomic;
1681*4882a593Smuzhiyun bool do_warn;
1682*4882a593Smuzhiyun enum pcpu_chunk_type type;
1683*4882a593Smuzhiyun struct list_head *pcpu_slot;
1684*4882a593Smuzhiyun struct obj_cgroup *objcg = NULL;
1685*4882a593Smuzhiyun static int warn_limit = 10;
1686*4882a593Smuzhiyun struct pcpu_chunk *chunk, *next;
1687*4882a593Smuzhiyun const char *err;
1688*4882a593Smuzhiyun int slot, off, cpu, ret;
1689*4882a593Smuzhiyun unsigned long flags;
1690*4882a593Smuzhiyun void __percpu *ptr;
1691*4882a593Smuzhiyun size_t bits, bit_align;
1692*4882a593Smuzhiyun
1693*4882a593Smuzhiyun gfp = current_gfp_context(gfp);
1694*4882a593Smuzhiyun /* whitelisted flags that can be passed to the backing allocators */
1695*4882a593Smuzhiyun pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1696*4882a593Smuzhiyun is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1697*4882a593Smuzhiyun do_warn = !(gfp & __GFP_NOWARN);
1698*4882a593Smuzhiyun
1699*4882a593Smuzhiyun /*
1700*4882a593Smuzhiyun * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1701*4882a593Smuzhiyun * therefore alignment must be a minimum of that many bytes.
1702*4882a593Smuzhiyun * An allocation may have internal fragmentation of up to
1703*4882a593Smuzhiyun * PCPU_MIN_ALLOC_SIZE - 1 bytes from rounding up.
1704*4882a593Smuzhiyun */
1705*4882a593Smuzhiyun if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1706*4882a593Smuzhiyun align = PCPU_MIN_ALLOC_SIZE;
1707*4882a593Smuzhiyun
1708*4882a593Smuzhiyun size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1709*4882a593Smuzhiyun bits = size >> PCPU_MIN_ALLOC_SHIFT;
1710*4882a593Smuzhiyun bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
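/*
 * For example (assuming a 4-byte PCPU_MIN_ALLOC_SIZE): a request of
 * size = 10, align = 1 is rounded to size = 12, align = 4, giving
 * bits = 3 and bit_align = 1; align = 64 would instead give bit_align = 16.
 */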
1711*4882a593Smuzhiyun
1712*4882a593Smuzhiyun if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1713*4882a593Smuzhiyun !is_power_of_2(align))) {
1714*4882a593Smuzhiyun WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1715*4882a593Smuzhiyun size, align);
1716*4882a593Smuzhiyun return NULL;
1717*4882a593Smuzhiyun }
1718*4882a593Smuzhiyun
1719*4882a593Smuzhiyun type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg);
1720*4882a593Smuzhiyun if (unlikely(type == PCPU_FAIL_ALLOC))
1721*4882a593Smuzhiyun return NULL;
1722*4882a593Smuzhiyun pcpu_slot = pcpu_chunk_list(type);
1723*4882a593Smuzhiyun
1724*4882a593Smuzhiyun if (!is_atomic) {
1725*4882a593Smuzhiyun /*
1726*4882a593Smuzhiyun * pcpu_balance_workfn() allocates memory under this mutex,
1727*4882a593Smuzhiyun * and it may wait for memory reclaim. Allow current task
1728*4882a593Smuzhiyun * to become OOM victim, in case of memory pressure.
1729*4882a593Smuzhiyun */
1730*4882a593Smuzhiyun if (gfp & __GFP_NOFAIL) {
1731*4882a593Smuzhiyun mutex_lock(&pcpu_alloc_mutex);
1732*4882a593Smuzhiyun } else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1733*4882a593Smuzhiyun pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1734*4882a593Smuzhiyun return NULL;
1735*4882a593Smuzhiyun }
1736*4882a593Smuzhiyun }
1737*4882a593Smuzhiyun
1738*4882a593Smuzhiyun spin_lock_irqsave(&pcpu_lock, flags);
1739*4882a593Smuzhiyun
1740*4882a593Smuzhiyun /* serve reserved allocations from the reserved chunk if available */
1741*4882a593Smuzhiyun if (reserved && pcpu_reserved_chunk) {
1742*4882a593Smuzhiyun chunk = pcpu_reserved_chunk;
1743*4882a593Smuzhiyun
1744*4882a593Smuzhiyun off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1745*4882a593Smuzhiyun if (off < 0) {
1746*4882a593Smuzhiyun err = "alloc from reserved chunk failed";
1747*4882a593Smuzhiyun goto fail_unlock;
1748*4882a593Smuzhiyun }
1749*4882a593Smuzhiyun
1750*4882a593Smuzhiyun off = pcpu_alloc_area(chunk, bits, bit_align, off);
1751*4882a593Smuzhiyun if (off >= 0)
1752*4882a593Smuzhiyun goto area_found;
1753*4882a593Smuzhiyun
1754*4882a593Smuzhiyun err = "alloc from reserved chunk failed";
1755*4882a593Smuzhiyun goto fail_unlock;
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun restart:
1759*4882a593Smuzhiyun /* search through normal chunks */
1760*4882a593Smuzhiyun for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1761*4882a593Smuzhiyun list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) {
1762*4882a593Smuzhiyun off = pcpu_find_block_fit(chunk, bits, bit_align,
1763*4882a593Smuzhiyun is_atomic);
1764*4882a593Smuzhiyun if (off < 0) {
1765*4882a593Smuzhiyun if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1766*4882a593Smuzhiyun pcpu_chunk_move(chunk, 0);
1767*4882a593Smuzhiyun continue;
1768*4882a593Smuzhiyun }
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun off = pcpu_alloc_area(chunk, bits, bit_align, off);
1771*4882a593Smuzhiyun if (off >= 0)
1772*4882a593Smuzhiyun goto area_found;
1773*4882a593Smuzhiyun
1774*4882a593Smuzhiyun }
1775*4882a593Smuzhiyun }
1776*4882a593Smuzhiyun
1777*4882a593Smuzhiyun spin_unlock_irqrestore(&pcpu_lock, flags);
1778*4882a593Smuzhiyun
1779*4882a593Smuzhiyun /*
1780*4882a593Smuzhiyun * No space left. Create a new chunk. We don't want multiple
1781*4882a593Smuzhiyun * tasks to create chunks simultaneously. Serialize and create iff
1782*4882a593Smuzhiyun * there's still no empty chunk after grabbing the mutex.
1783*4882a593Smuzhiyun */
1784*4882a593Smuzhiyun if (is_atomic) {
1785*4882a593Smuzhiyun err = "atomic alloc failed, no space left";
1786*4882a593Smuzhiyun goto fail;
1787*4882a593Smuzhiyun }
1788*4882a593Smuzhiyun
1789*4882a593Smuzhiyun if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
1790*4882a593Smuzhiyun chunk = pcpu_create_chunk(type, pcpu_gfp);
1791*4882a593Smuzhiyun if (!chunk) {
1792*4882a593Smuzhiyun err = "failed to allocate new chunk";
1793*4882a593Smuzhiyun goto fail;
1794*4882a593Smuzhiyun }
1795*4882a593Smuzhiyun
1796*4882a593Smuzhiyun spin_lock_irqsave(&pcpu_lock, flags);
1797*4882a593Smuzhiyun pcpu_chunk_relocate(chunk, -1);
1798*4882a593Smuzhiyun } else {
1799*4882a593Smuzhiyun spin_lock_irqsave(&pcpu_lock, flags);
1800*4882a593Smuzhiyun }
1801*4882a593Smuzhiyun
1802*4882a593Smuzhiyun goto restart;
1803*4882a593Smuzhiyun
1804*4882a593Smuzhiyun area_found:
1805*4882a593Smuzhiyun pcpu_stats_area_alloc(chunk, size);
1806*4882a593Smuzhiyun spin_unlock_irqrestore(&pcpu_lock, flags);
1807*4882a593Smuzhiyun
1808*4882a593Smuzhiyun /* populate if not all pages are already there */
1809*4882a593Smuzhiyun if (!is_atomic) {
1810*4882a593Smuzhiyun unsigned int page_start, page_end, rs, re;
1811*4882a593Smuzhiyun
1812*4882a593Smuzhiyun page_start = PFN_DOWN(off);
1813*4882a593Smuzhiyun page_end = PFN_UP(off + size);
1814*4882a593Smuzhiyun
1815*4882a593Smuzhiyun bitmap_for_each_clear_region(chunk->populated, rs, re,
1816*4882a593Smuzhiyun page_start, page_end) {
1817*4882a593Smuzhiyun WARN_ON(chunk->immutable);
1818*4882a593Smuzhiyun
1819*4882a593Smuzhiyun ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1820*4882a593Smuzhiyun
1821*4882a593Smuzhiyun spin_lock_irqsave(&pcpu_lock, flags);
1822*4882a593Smuzhiyun if (ret) {
1823*4882a593Smuzhiyun pcpu_free_area(chunk, off);
1824*4882a593Smuzhiyun err = "failed to populate";
1825*4882a593Smuzhiyun goto fail_unlock;
1826*4882a593Smuzhiyun }
1827*4882a593Smuzhiyun pcpu_chunk_populated(chunk, rs, re);
1828*4882a593Smuzhiyun spin_unlock_irqrestore(&pcpu_lock, flags);
1829*4882a593Smuzhiyun }
1830*4882a593Smuzhiyun
1831*4882a593Smuzhiyun mutex_unlock(&pcpu_alloc_mutex);
1832*4882a593Smuzhiyun }
1833*4882a593Smuzhiyun
1834*4882a593Smuzhiyun if (pcpu_nr_empty_pop_pages[type] < PCPU_EMPTY_POP_PAGES_LOW)
1835*4882a593Smuzhiyun pcpu_schedule_balance_work();
1836*4882a593Smuzhiyun
1837*4882a593Smuzhiyun /* clear the areas and return address relative to base address */
1838*4882a593Smuzhiyun for_each_possible_cpu(cpu)
1839*4882a593Smuzhiyun memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1842*4882a593Smuzhiyun kmemleak_alloc_percpu(ptr, size, gfp);
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1845*4882a593Smuzhiyun chunk->base_addr, off, ptr);
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1848*4882a593Smuzhiyun
1849*4882a593Smuzhiyun return ptr;
1850*4882a593Smuzhiyun
1851*4882a593Smuzhiyun fail_unlock:
1852*4882a593Smuzhiyun spin_unlock_irqrestore(&pcpu_lock, flags);
1853*4882a593Smuzhiyun fail:
1854*4882a593Smuzhiyun trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1855*4882a593Smuzhiyun
1856*4882a593Smuzhiyun if (!is_atomic && do_warn && warn_limit) {
1857*4882a593Smuzhiyun pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1858*4882a593Smuzhiyun size, align, is_atomic, err);
1859*4882a593Smuzhiyun dump_stack();
1860*4882a593Smuzhiyun if (!--warn_limit)
1861*4882a593Smuzhiyun pr_info("limit reached, disable warning\n");
1862*4882a593Smuzhiyun }
1863*4882a593Smuzhiyun if (is_atomic) {
1864*4882a593Smuzhiyun /* see the flag handling in pcpu_balance_workfn() */
1865*4882a593Smuzhiyun pcpu_atomic_alloc_failed = true;
1866*4882a593Smuzhiyun pcpu_schedule_balance_work();
1867*4882a593Smuzhiyun } else {
1868*4882a593Smuzhiyun mutex_unlock(&pcpu_alloc_mutex);
1869*4882a593Smuzhiyun }
1870*4882a593Smuzhiyun
1871*4882a593Smuzhiyun pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1872*4882a593Smuzhiyun
1873*4882a593Smuzhiyun return NULL;
1874*4882a593Smuzhiyun }
1875*4882a593Smuzhiyun
1876*4882a593Smuzhiyun /**
1877*4882a593Smuzhiyun * __alloc_percpu_gfp - allocate dynamic percpu area
1878*4882a593Smuzhiyun * @size: size of area to allocate in bytes
1879*4882a593Smuzhiyun * @align: alignment of area (max PAGE_SIZE)
1880*4882a593Smuzhiyun * @gfp: allocation flags
1881*4882a593Smuzhiyun *
1882*4882a593Smuzhiyun * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1883*4882a593Smuzhiyun * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1884*4882a593Smuzhiyun * be called from any context but is a lot more likely to fail. If @gfp
1885*4882a593Smuzhiyun * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1886*4882a593Smuzhiyun * allocation requests.
1887*4882a593Smuzhiyun *
1888*4882a593Smuzhiyun * RETURNS:
1889*4882a593Smuzhiyun * Percpu pointer to the allocated area on success, NULL on failure.
1890*4882a593Smuzhiyun */
1891*4882a593Smuzhiyun void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1892*4882a593Smuzhiyun {
1893*4882a593Smuzhiyun return pcpu_alloc(size, align, false, gfp);
1894*4882a593Smuzhiyun }
1895*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
1896*4882a593Smuzhiyun
1897*4882a593Smuzhiyun /**
1898*4882a593Smuzhiyun * __alloc_percpu - allocate dynamic percpu area
1899*4882a593Smuzhiyun * @size: size of area to allocate in bytes
1900*4882a593Smuzhiyun * @align: alignment of area (max PAGE_SIZE)
1901*4882a593Smuzhiyun *
1902*4882a593Smuzhiyun * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1903*4882a593Smuzhiyun */
1904*4882a593Smuzhiyun void __percpu *__alloc_percpu(size_t size, size_t align)
1905*4882a593Smuzhiyun {
1906*4882a593Smuzhiyun return pcpu_alloc(size, align, false, GFP_KERNEL);
1907*4882a593Smuzhiyun }
1908*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(__alloc_percpu);
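/*
 * Illustrative usage sketch, not taken from this file ("struct foo_stats"
 * and "stats" are hypothetical names). A typical caller allocates once,
 * updates the local CPU's copy on fast paths, and sums across all possible
 * CPUs on slow paths:
 *
 *	struct foo_stats { u64 events; };
 *	struct foo_stats __percpu *stats;
 *	u64 total = 0;
 *	int cpu;
 *
 *	stats = alloc_percpu(struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->events);
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu_ptr(stats, cpu)->events;
 *	free_percpu(stats);
 */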
1909*4882a593Smuzhiyun
1910*4882a593Smuzhiyun /**
1911*4882a593Smuzhiyun * __alloc_reserved_percpu - allocate reserved percpu area
1912*4882a593Smuzhiyun * @size: size of area to allocate in bytes
1913*4882a593Smuzhiyun * @align: alignment of area (max PAGE_SIZE)
1914*4882a593Smuzhiyun *
1915*4882a593Smuzhiyun * Allocate zero-filled percpu area of @size bytes aligned at @align
1916*4882a593Smuzhiyun * from reserved percpu area if arch has set it up; otherwise,
1917*4882a593Smuzhiyun * allocation is served from the same dynamic area. Might sleep.
1918*4882a593Smuzhiyun * Might trigger writeouts.
1919*4882a593Smuzhiyun *
1920*4882a593Smuzhiyun * CONTEXT:
1921*4882a593Smuzhiyun * Does GFP_KERNEL allocation.
1922*4882a593Smuzhiyun *
1923*4882a593Smuzhiyun * RETURNS:
1924*4882a593Smuzhiyun * Percpu pointer to the allocated area on success, NULL on failure.
1925*4882a593Smuzhiyun */
1926*4882a593Smuzhiyun void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1927*4882a593Smuzhiyun {
1928*4882a593Smuzhiyun return pcpu_alloc(size, align, true, GFP_KERNEL);
1929*4882a593Smuzhiyun }
1930*4882a593Smuzhiyun
1931*4882a593Smuzhiyun /**
1932*4882a593Smuzhiyun * __pcpu_balance_workfn - manage the number of free chunks and populated pages
1933*4882a593Smuzhiyun * @type: chunk type
1934*4882a593Smuzhiyun *
1935*4882a593Smuzhiyun * Reclaim all fully free chunks except for the first one. This is also
1936*4882a593Smuzhiyun * responsible for maintaining the pool of empty populated pages. However,
1937*4882a593Smuzhiyun * it is possible that this is called when physical memory is scarce, causing
1938*4882a593Smuzhiyun * the OOM killer to be triggered. We should avoid doing so until an actual
1939*4882a593Smuzhiyun * allocation fails, as it is possible that requests can still be serviced
1940*4882a593Smuzhiyun * from already backed regions.
1941*4882a593Smuzhiyun */
1942*4882a593Smuzhiyun static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
1943*4882a593Smuzhiyun {
1944*4882a593Smuzhiyun /* gfp flags passed to underlying allocators */
1945*4882a593Smuzhiyun const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
1946*4882a593Smuzhiyun LIST_HEAD(to_free);
1947*4882a593Smuzhiyun struct list_head *pcpu_slot = pcpu_chunk_list(type);
1948*4882a593Smuzhiyun struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1949*4882a593Smuzhiyun struct pcpu_chunk *chunk, *next;
1950*4882a593Smuzhiyun int slot, nr_to_pop, ret;
1951*4882a593Smuzhiyun
1952*4882a593Smuzhiyun /*
1953*4882a593Smuzhiyun * There's no reason to keep around multiple unused chunks and VM
1954*4882a593Smuzhiyun * areas can be scarce. Destroy all free chunks except for one.
1955*4882a593Smuzhiyun */
1956*4882a593Smuzhiyun mutex_lock(&pcpu_alloc_mutex);
1957*4882a593Smuzhiyun spin_lock_irq(&pcpu_lock);
1958*4882a593Smuzhiyun
1959*4882a593Smuzhiyun list_for_each_entry_safe(chunk, next, free_head, list) {
1960*4882a593Smuzhiyun WARN_ON(chunk->immutable);
1961*4882a593Smuzhiyun
1962*4882a593Smuzhiyun /* spare the first one */
1963*4882a593Smuzhiyun if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1964*4882a593Smuzhiyun continue;
1965*4882a593Smuzhiyun
1966*4882a593Smuzhiyun list_move(&chunk->list, &to_free);
1967*4882a593Smuzhiyun }
1968*4882a593Smuzhiyun
1969*4882a593Smuzhiyun spin_unlock_irq(&pcpu_lock);
1970*4882a593Smuzhiyun
1971*4882a593Smuzhiyun list_for_each_entry_safe(chunk, next, &to_free, list) {
1972*4882a593Smuzhiyun unsigned int rs, re;
1973*4882a593Smuzhiyun
1974*4882a593Smuzhiyun bitmap_for_each_set_region(chunk->populated, rs, re, 0,
1975*4882a593Smuzhiyun chunk->nr_pages) {
1976*4882a593Smuzhiyun pcpu_depopulate_chunk(chunk, rs, re);
1977*4882a593Smuzhiyun spin_lock_irq(&pcpu_lock);
1978*4882a593Smuzhiyun pcpu_chunk_depopulated(chunk, rs, re);
1979*4882a593Smuzhiyun spin_unlock_irq(&pcpu_lock);
1980*4882a593Smuzhiyun }
1981*4882a593Smuzhiyun pcpu_destroy_chunk(chunk);
1982*4882a593Smuzhiyun cond_resched();
1983*4882a593Smuzhiyun }
1984*4882a593Smuzhiyun
1985*4882a593Smuzhiyun /*
1986*4882a593Smuzhiyun * Ensure there are certain number of free populated pages for
1987*4882a593Smuzhiyun * atomic allocs. Fill up from the most packed so that atomic
1988*4882a593Smuzhiyun * allocs don't increase fragmentation. If atomic allocation
1989*4882a593Smuzhiyun * failed previously, always populate the maximum amount. This
1990*4882a593Smuzhiyun * should prevent atomic allocs larger than PAGE_SIZE from keeping
1991*4882a593Smuzhiyun * failing indefinitely; however, large atomic allocs are not
1992*4882a593Smuzhiyun * something we support properly and can be highly unreliable and
1993*4882a593Smuzhiyun * inefficient.
1994*4882a593Smuzhiyun */
1995*4882a593Smuzhiyun retry_pop:
1996*4882a593Smuzhiyun if (pcpu_atomic_alloc_failed) {
1997*4882a593Smuzhiyun nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1998*4882a593Smuzhiyun /* best effort anyway, don't worry about synchronization */
1999*4882a593Smuzhiyun pcpu_atomic_alloc_failed = false;
2000*4882a593Smuzhiyun } else {
2001*4882a593Smuzhiyun nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
2002*4882a593Smuzhiyun pcpu_nr_empty_pop_pages[type],
2003*4882a593Smuzhiyun 0, PCPU_EMPTY_POP_PAGES_HIGH);
2004*4882a593Smuzhiyun }
2005*4882a593Smuzhiyun
2006*4882a593Smuzhiyun for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
2007*4882a593Smuzhiyun unsigned int nr_unpop = 0, rs, re;
2008*4882a593Smuzhiyun
2009*4882a593Smuzhiyun if (!nr_to_pop)
2010*4882a593Smuzhiyun break;
2011*4882a593Smuzhiyun
2012*4882a593Smuzhiyun spin_lock_irq(&pcpu_lock);
2013*4882a593Smuzhiyun list_for_each_entry(chunk, &pcpu_slot[slot], list) {
2014*4882a593Smuzhiyun nr_unpop = chunk->nr_pages - chunk->nr_populated;
2015*4882a593Smuzhiyun if (nr_unpop)
2016*4882a593Smuzhiyun break;
2017*4882a593Smuzhiyun }
2018*4882a593Smuzhiyun spin_unlock_irq(&pcpu_lock);
2019*4882a593Smuzhiyun
2020*4882a593Smuzhiyun if (!nr_unpop)
2021*4882a593Smuzhiyun continue;
2022*4882a593Smuzhiyun
2023*4882a593Smuzhiyun /* @chunk can't go away while pcpu_alloc_mutex is held */
2024*4882a593Smuzhiyun bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
2025*4882a593Smuzhiyun chunk->nr_pages) {
2026*4882a593Smuzhiyun int nr = min_t(int, re - rs, nr_to_pop);
2027*4882a593Smuzhiyun
2028*4882a593Smuzhiyun ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2029*4882a593Smuzhiyun if (!ret) {
2030*4882a593Smuzhiyun nr_to_pop -= nr;
2031*4882a593Smuzhiyun spin_lock_irq(&pcpu_lock);
2032*4882a593Smuzhiyun pcpu_chunk_populated(chunk, rs, rs + nr);
2033*4882a593Smuzhiyun spin_unlock_irq(&pcpu_lock);
2034*4882a593Smuzhiyun } else {
2035*4882a593Smuzhiyun nr_to_pop = 0;
2036*4882a593Smuzhiyun }
2037*4882a593Smuzhiyun
2038*4882a593Smuzhiyun if (!nr_to_pop)
2039*4882a593Smuzhiyun break;
2040*4882a593Smuzhiyun }
2041*4882a593Smuzhiyun }
2042*4882a593Smuzhiyun
2043*4882a593Smuzhiyun if (nr_to_pop) {
2044*4882a593Smuzhiyun /* ran out of chunks to populate, create a new one and retry */
2045*4882a593Smuzhiyun chunk = pcpu_create_chunk(type, gfp);
2046*4882a593Smuzhiyun if (chunk) {
2047*4882a593Smuzhiyun spin_lock_irq(&pcpu_lock);
2048*4882a593Smuzhiyun pcpu_chunk_relocate(chunk, -1);
2049*4882a593Smuzhiyun spin_unlock_irq(&pcpu_lock);
2050*4882a593Smuzhiyun goto retry_pop;
2051*4882a593Smuzhiyun }
2052*4882a593Smuzhiyun }
2053*4882a593Smuzhiyun
2054*4882a593Smuzhiyun mutex_unlock(&pcpu_alloc_mutex);
2055*4882a593Smuzhiyun }
2056*4882a593Smuzhiyun
2057*4882a593Smuzhiyun /**
2058*4882a593Smuzhiyun * pcpu_balance_workfn - manage the number of free chunks and populated pages
2059*4882a593Smuzhiyun * @work: unused
2060*4882a593Smuzhiyun *
2061*4882a593Smuzhiyun * Call __pcpu_balance_workfn() for each chunk type.
2062*4882a593Smuzhiyun */
2063*4882a593Smuzhiyun static void pcpu_balance_workfn(struct work_struct *work)
2064*4882a593Smuzhiyun {
2065*4882a593Smuzhiyun enum pcpu_chunk_type type;
2066*4882a593Smuzhiyun
2067*4882a593Smuzhiyun for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
2068*4882a593Smuzhiyun __pcpu_balance_workfn(type);
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun
2071*4882a593Smuzhiyun /**
2072*4882a593Smuzhiyun * free_percpu - free percpu area
2073*4882a593Smuzhiyun * @ptr: pointer to area to free
2074*4882a593Smuzhiyun *
2075*4882a593Smuzhiyun * Free percpu area @ptr.
2076*4882a593Smuzhiyun *
2077*4882a593Smuzhiyun * CONTEXT:
2078*4882a593Smuzhiyun * Can be called from atomic context.
2079*4882a593Smuzhiyun */
2080*4882a593Smuzhiyun void free_percpu(void __percpu *ptr)
2081*4882a593Smuzhiyun {
2082*4882a593Smuzhiyun void *addr;
2083*4882a593Smuzhiyun struct pcpu_chunk *chunk;
2084*4882a593Smuzhiyun unsigned long flags;
2085*4882a593Smuzhiyun int size, off;
2086*4882a593Smuzhiyun bool need_balance = false;
2087*4882a593Smuzhiyun struct list_head *pcpu_slot;
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyun if (!ptr)
2090*4882a593Smuzhiyun return;
2091*4882a593Smuzhiyun
2092*4882a593Smuzhiyun kmemleak_free_percpu(ptr);
2093*4882a593Smuzhiyun
2094*4882a593Smuzhiyun addr = __pcpu_ptr_to_addr(ptr);
2095*4882a593Smuzhiyun
2096*4882a593Smuzhiyun spin_lock_irqsave(&pcpu_lock, flags);
2097*4882a593Smuzhiyun
2098*4882a593Smuzhiyun chunk = pcpu_chunk_addr_search(addr);
2099*4882a593Smuzhiyun off = addr - chunk->base_addr;
2100*4882a593Smuzhiyun
2101*4882a593Smuzhiyun size = pcpu_free_area(chunk, off);
2102*4882a593Smuzhiyun
2103*4882a593Smuzhiyun pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
2104*4882a593Smuzhiyun
2105*4882a593Smuzhiyun pcpu_memcg_free_hook(chunk, off, size);
2106*4882a593Smuzhiyun
2107*4882a593Smuzhiyun /* if there is more than one fully free chunk, wake up the grim reaper */
2108*4882a593Smuzhiyun if (chunk->free_bytes == pcpu_unit_size) {
2109*4882a593Smuzhiyun struct pcpu_chunk *pos;
2110*4882a593Smuzhiyun
2111*4882a593Smuzhiyun list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
2112*4882a593Smuzhiyun if (pos != chunk) {
2113*4882a593Smuzhiyun need_balance = true;
2114*4882a593Smuzhiyun break;
2115*4882a593Smuzhiyun }
2116*4882a593Smuzhiyun }
2117*4882a593Smuzhiyun
2118*4882a593Smuzhiyun trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2119*4882a593Smuzhiyun
2120*4882a593Smuzhiyun spin_unlock_irqrestore(&pcpu_lock, flags);
2121*4882a593Smuzhiyun
2122*4882a593Smuzhiyun if (need_balance)
2123*4882a593Smuzhiyun pcpu_schedule_balance_work();
2124*4882a593Smuzhiyun }
2125*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(free_percpu);
2126*4882a593Smuzhiyun
2127*4882a593Smuzhiyun bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2128*4882a593Smuzhiyun {
2129*4882a593Smuzhiyun #ifdef CONFIG_SMP
2130*4882a593Smuzhiyun const size_t static_size = __per_cpu_end - __per_cpu_start;
2131*4882a593Smuzhiyun void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2132*4882a593Smuzhiyun unsigned int cpu;
2133*4882a593Smuzhiyun
2134*4882a593Smuzhiyun for_each_possible_cpu(cpu) {
2135*4882a593Smuzhiyun void *start = per_cpu_ptr(base, cpu);
2136*4882a593Smuzhiyun void *va = (void *)addr;
2137*4882a593Smuzhiyun
2138*4882a593Smuzhiyun if (va >= start && va < start + static_size) {
2139*4882a593Smuzhiyun if (can_addr) {
2140*4882a593Smuzhiyun *can_addr = (unsigned long) (va - start);
2141*4882a593Smuzhiyun *can_addr += (unsigned long)
2142*4882a593Smuzhiyun per_cpu_ptr(base, get_boot_cpu_id());
2143*4882a593Smuzhiyun }
2144*4882a593Smuzhiyun return true;
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun }
2147*4882a593Smuzhiyun #endif
2148*4882a593Smuzhiyun /* on UP, can't distinguish from other static vars, always false */
2149*4882a593Smuzhiyun return false;
2150*4882a593Smuzhiyun }
2151*4882a593Smuzhiyun
2152*4882a593Smuzhiyun /**
2153*4882a593Smuzhiyun * is_kernel_percpu_address - test whether address is from static percpu area
2154*4882a593Smuzhiyun * @addr: address to test
2155*4882a593Smuzhiyun *
2156*4882a593Smuzhiyun * Test whether @addr belongs to in-kernel static percpu area. Module
2157*4882a593Smuzhiyun * static percpu areas are not considered. For those, use
2158*4882a593Smuzhiyun * is_module_percpu_address().
2159*4882a593Smuzhiyun *
2160*4882a593Smuzhiyun * RETURNS:
2161*4882a593Smuzhiyun * %true if @addr is from in-kernel static percpu area, %false otherwise.
2162*4882a593Smuzhiyun */
2163*4882a593Smuzhiyun bool is_kernel_percpu_address(unsigned long addr)
2164*4882a593Smuzhiyun {
2165*4882a593Smuzhiyun return __is_kernel_percpu_address(addr, NULL);
2166*4882a593Smuzhiyun }
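
/*
 * Illustrative sketch (names hypothetical): normalising an address that may
 * point into any CPU's copy of a static percpu variable to one canonical
 * address, using the @can_addr output of __is_kernel_percpu_address() above.
 *
 *	unsigned long addr = (unsigned long)obj;
 *	unsigned long canon;
 *
 *	if (__is_kernel_percpu_address(addr, &canon)) {
 *		// canon is the corresponding address in the boot CPU's copy,
 *		// so every CPU's copy of the same variable maps to one value
 *	}
 */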
2167*4882a593Smuzhiyun
2168*4882a593Smuzhiyun /**
2169*4882a593Smuzhiyun * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2170*4882a593Smuzhiyun * @addr: the address to be converted to physical address
2171*4882a593Smuzhiyun *
2172*4882a593Smuzhiyun  * Given @addr, which is a dereferenceable address obtained via one of
2173*4882a593Smuzhiyun  * the percpu access macros, this function translates it into its
2174*4882a593Smuzhiyun  * physical address. The caller is responsible for ensuring @addr stays
2175*4882a593Smuzhiyun  * valid until this function finishes.
2176*4882a593Smuzhiyun *
2177*4882a593Smuzhiyun  * The percpu allocator has a special setup for the first chunk, which
2178*4882a593Smuzhiyun  * currently supports either embedding in the linear address space or a
2179*4882a593Smuzhiyun  * vmalloc mapping; from the second chunk onwards, the backing allocator
2180*4882a593Smuzhiyun  * (currently either vm or km) provides the translation.
2181*4882a593Smuzhiyun  *
2182*4882a593Smuzhiyun  * @addr could be translated without checking whether it falls into the
2183*4882a593Smuzhiyun  * first chunk, but the current code better reflects how the percpu
2184*4882a593Smuzhiyun  * allocator actually works, and the verification can discover bugs both
2185*4882a593Smuzhiyun  * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
2186*4882a593Smuzhiyun  * So we keep the current code.
2187*4882a593Smuzhiyun *
2188*4882a593Smuzhiyun * RETURNS:
2189*4882a593Smuzhiyun * The physical address for @addr.
2190*4882a593Smuzhiyun */
2191*4882a593Smuzhiyun phys_addr_t per_cpu_ptr_to_phys(void *addr)
2192*4882a593Smuzhiyun {
2193*4882a593Smuzhiyun void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2194*4882a593Smuzhiyun bool in_first_chunk = false;
2195*4882a593Smuzhiyun unsigned long first_low, first_high;
2196*4882a593Smuzhiyun unsigned int cpu;
2197*4882a593Smuzhiyun
2198*4882a593Smuzhiyun /*
2199*4882a593Smuzhiyun * The following test on unit_low/high isn't strictly
2200*4882a593Smuzhiyun * necessary but will speed up lookups of addresses which
2201*4882a593Smuzhiyun * aren't in the first chunk.
2202*4882a593Smuzhiyun *
2203*4882a593Smuzhiyun * The address check is against full chunk sizes. pcpu_base_addr
2204*4882a593Smuzhiyun * points to the beginning of the first chunk including the
2205*4882a593Smuzhiyun * static region. Assumes good intent as the first chunk may
2206*4882a593Smuzhiyun * not be full (ie. < pcpu_unit_pages in size).
2207*4882a593Smuzhiyun */
2208*4882a593Smuzhiyun first_low = (unsigned long)pcpu_base_addr +
2209*4882a593Smuzhiyun pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2210*4882a593Smuzhiyun first_high = (unsigned long)pcpu_base_addr +
2211*4882a593Smuzhiyun pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2212*4882a593Smuzhiyun if ((unsigned long)addr >= first_low &&
2213*4882a593Smuzhiyun (unsigned long)addr < first_high) {
2214*4882a593Smuzhiyun for_each_possible_cpu(cpu) {
2215*4882a593Smuzhiyun void *start = per_cpu_ptr(base, cpu);
2216*4882a593Smuzhiyun
2217*4882a593Smuzhiyun if (addr >= start && addr < start + pcpu_unit_size) {
2218*4882a593Smuzhiyun in_first_chunk = true;
2219*4882a593Smuzhiyun break;
2220*4882a593Smuzhiyun }
2221*4882a593Smuzhiyun }
2222*4882a593Smuzhiyun }
2223*4882a593Smuzhiyun
2224*4882a593Smuzhiyun if (in_first_chunk) {
2225*4882a593Smuzhiyun if (!is_vmalloc_addr(addr))
2226*4882a593Smuzhiyun return __pa(addr);
2227*4882a593Smuzhiyun else
2228*4882a593Smuzhiyun return page_to_phys(vmalloc_to_page(addr)) +
2229*4882a593Smuzhiyun offset_in_page(addr);
2230*4882a593Smuzhiyun } else
2231*4882a593Smuzhiyun return page_to_phys(pcpu_addr_to_page(addr)) +
2232*4882a593Smuzhiyun offset_in_page(addr);
2233*4882a593Smuzhiyun }
2234*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(per_cpu_ptr_to_phys);
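
/*
 * Illustrative sketch: obtaining the physical address of one CPU's copy of
 * a dynamically allocated percpu object, e.g. to hand it to firmware or a
 * hypervisor.  "state" is a hypothetical allocation and "cpu" a CPU number;
 * the object must stay allocated as long as the physical address is in use.
 *
 *	u64 __percpu *state = alloc_percpu(u64);
 *	phys_addr_t pa;
 *
 *	if (!state)
 *		return -ENOMEM;
 *	pa = per_cpu_ptr_to_phys(per_cpu_ptr(state, cpu));
 *	// pa names the page backing @cpu's copy of *state
 */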
2235*4882a593Smuzhiyun
2236*4882a593Smuzhiyun /**
2237*4882a593Smuzhiyun * pcpu_alloc_alloc_info - allocate percpu allocation info
2238*4882a593Smuzhiyun * @nr_groups: the number of groups
2239*4882a593Smuzhiyun * @nr_units: the number of units
2240*4882a593Smuzhiyun *
2241*4882a593Smuzhiyun * Allocate ai which is large enough for @nr_groups groups containing
2242*4882a593Smuzhiyun * @nr_units units. The returned ai's groups[0].cpu_map points to the
2243*4882a593Smuzhiyun * cpu_map array which is long enough for @nr_units and filled with
2244*4882a593Smuzhiyun  * NR_CPUS. It's the caller's responsibility to initialize the cpu_map
2245*4882a593Smuzhiyun  * pointers of the other groups.
2246*4882a593Smuzhiyun *
2247*4882a593Smuzhiyun * RETURNS:
2248*4882a593Smuzhiyun * Pointer to the allocated pcpu_alloc_info on success, NULL on
2249*4882a593Smuzhiyun * failure.
2250*4882a593Smuzhiyun */
2251*4882a593Smuzhiyun struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2252*4882a593Smuzhiyun int nr_units)
2253*4882a593Smuzhiyun {
2254*4882a593Smuzhiyun struct pcpu_alloc_info *ai;
2255*4882a593Smuzhiyun size_t base_size, ai_size;
2256*4882a593Smuzhiyun void *ptr;
2257*4882a593Smuzhiyun int unit;
2258*4882a593Smuzhiyun
2259*4882a593Smuzhiyun base_size = ALIGN(struct_size(ai, groups, nr_groups),
2260*4882a593Smuzhiyun __alignof__(ai->groups[0].cpu_map[0]));
2261*4882a593Smuzhiyun ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2262*4882a593Smuzhiyun
2263*4882a593Smuzhiyun ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2264*4882a593Smuzhiyun if (!ptr)
2265*4882a593Smuzhiyun return NULL;
2266*4882a593Smuzhiyun ai = ptr;
2267*4882a593Smuzhiyun ptr += base_size;
2268*4882a593Smuzhiyun
2269*4882a593Smuzhiyun ai->groups[0].cpu_map = ptr;
2270*4882a593Smuzhiyun
2271*4882a593Smuzhiyun for (unit = 0; unit < nr_units; unit++)
2272*4882a593Smuzhiyun ai->groups[0].cpu_map[unit] = NR_CPUS;
2273*4882a593Smuzhiyun
2274*4882a593Smuzhiyun ai->nr_groups = nr_groups;
2275*4882a593Smuzhiyun ai->__ai_size = PFN_ALIGN(ai_size);
2276*4882a593Smuzhiyun
2277*4882a593Smuzhiyun return ai;
2278*4882a593Smuzhiyun }
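
/*
 * Illustrative sketch, not lifted from an arch: filling in a two-group
 * alloc_info.  Only groups[0].cpu_map is set up by pcpu_alloc_alloc_info();
 * the caller carves out the rest of the cpu_map array for the remaining
 * groups, e.g.:
 *
 *	struct pcpu_alloc_info *ai = pcpu_alloc_alloc_info(2, 4);
 *
 *	if (!ai)
 *		return -ENOMEM;
 *	ai->groups[0].nr_units = 2;
 *	ai->groups[1].cpu_map = ai->groups[0].cpu_map + 2;
 *	ai->groups[1].nr_units = 2;
 *	// unit_size, the cpu_map entries etc. remain the caller's job
 */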
2279*4882a593Smuzhiyun
2280*4882a593Smuzhiyun /**
2281*4882a593Smuzhiyun * pcpu_free_alloc_info - free percpu allocation info
2282*4882a593Smuzhiyun * @ai: pcpu_alloc_info to free
2283*4882a593Smuzhiyun *
2284*4882a593Smuzhiyun * Free @ai which was allocated by pcpu_alloc_alloc_info().
2285*4882a593Smuzhiyun */
2286*4882a593Smuzhiyun void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2287*4882a593Smuzhiyun {
2288*4882a593Smuzhiyun memblock_free_early(__pa(ai), ai->__ai_size);
2289*4882a593Smuzhiyun }
2290*4882a593Smuzhiyun
2291*4882a593Smuzhiyun /**
2292*4882a593Smuzhiyun * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2293*4882a593Smuzhiyun * @lvl: loglevel
2294*4882a593Smuzhiyun * @ai: allocation info to dump
2295*4882a593Smuzhiyun *
2296*4882a593Smuzhiyun * Print out information about @ai using loglevel @lvl.
2297*4882a593Smuzhiyun */
2298*4882a593Smuzhiyun static void pcpu_dump_alloc_info(const char *lvl,
2299*4882a593Smuzhiyun const struct pcpu_alloc_info *ai)
2300*4882a593Smuzhiyun {
2301*4882a593Smuzhiyun int group_width = 1, cpu_width = 1, width;
2302*4882a593Smuzhiyun char empty_str[] = "--------";
2303*4882a593Smuzhiyun int alloc = 0, alloc_end = 0;
2304*4882a593Smuzhiyun int group, v;
2305*4882a593Smuzhiyun int upa, apl; /* units per alloc, allocs per line */
2306*4882a593Smuzhiyun
2307*4882a593Smuzhiyun v = ai->nr_groups;
2308*4882a593Smuzhiyun while (v /= 10)
2309*4882a593Smuzhiyun group_width++;
2310*4882a593Smuzhiyun
2311*4882a593Smuzhiyun v = num_possible_cpus();
2312*4882a593Smuzhiyun while (v /= 10)
2313*4882a593Smuzhiyun cpu_width++;
2314*4882a593Smuzhiyun empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2315*4882a593Smuzhiyun
2316*4882a593Smuzhiyun upa = ai->alloc_size / ai->unit_size;
2317*4882a593Smuzhiyun width = upa * (cpu_width + 1) + group_width + 3;
2318*4882a593Smuzhiyun apl = rounddown_pow_of_two(max(60 / width, 1));
2319*4882a593Smuzhiyun
2320*4882a593Smuzhiyun printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2321*4882a593Smuzhiyun lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2322*4882a593Smuzhiyun ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2323*4882a593Smuzhiyun
2324*4882a593Smuzhiyun for (group = 0; group < ai->nr_groups; group++) {
2325*4882a593Smuzhiyun const struct pcpu_group_info *gi = &ai->groups[group];
2326*4882a593Smuzhiyun int unit = 0, unit_end = 0;
2327*4882a593Smuzhiyun
2328*4882a593Smuzhiyun BUG_ON(gi->nr_units % upa);
2329*4882a593Smuzhiyun for (alloc_end += gi->nr_units / upa;
2330*4882a593Smuzhiyun alloc < alloc_end; alloc++) {
2331*4882a593Smuzhiyun if (!(alloc % apl)) {
2332*4882a593Smuzhiyun pr_cont("\n");
2333*4882a593Smuzhiyun printk("%spcpu-alloc: ", lvl);
2334*4882a593Smuzhiyun }
2335*4882a593Smuzhiyun pr_cont("[%0*d] ", group_width, group);
2336*4882a593Smuzhiyun
2337*4882a593Smuzhiyun for (unit_end += upa; unit < unit_end; unit++)
2338*4882a593Smuzhiyun if (gi->cpu_map[unit] != NR_CPUS)
2339*4882a593Smuzhiyun pr_cont("%0*d ",
2340*4882a593Smuzhiyun cpu_width, gi->cpu_map[unit]);
2341*4882a593Smuzhiyun else
2342*4882a593Smuzhiyun pr_cont("%s ", empty_str);
2343*4882a593Smuzhiyun }
2344*4882a593Smuzhiyun }
2345*4882a593Smuzhiyun pr_cont("\n");
2346*4882a593Smuzhiyun }
2347*4882a593Smuzhiyun
2348*4882a593Smuzhiyun /**
2349*4882a593Smuzhiyun * pcpu_setup_first_chunk - initialize the first percpu chunk
2350*4882a593Smuzhiyun  * @ai: pcpu_alloc_info describing how the percpu area is shaped
2351*4882a593Smuzhiyun * @base_addr: mapped address
2352*4882a593Smuzhiyun *
2353*4882a593Smuzhiyun * Initialize the first percpu chunk which contains the kernel static
2354*4882a593Smuzhiyun * percpu area. This function is to be called from arch percpu area
2355*4882a593Smuzhiyun * setup path.
2356*4882a593Smuzhiyun *
2357*4882a593Smuzhiyun * @ai contains all information necessary to initialize the first
2358*4882a593Smuzhiyun * chunk and prime the dynamic percpu allocator.
2359*4882a593Smuzhiyun *
2360*4882a593Smuzhiyun * @ai->static_size is the size of static percpu area.
2361*4882a593Smuzhiyun *
2362*4882a593Smuzhiyun * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2363*4882a593Smuzhiyun * reserve after the static area in the first chunk. This reserves
2364*4882a593Smuzhiyun * the first chunk such that it's available only through reserved
2365*4882a593Smuzhiyun * percpu allocation. This is primarily used to serve module percpu
2366*4882a593Smuzhiyun * static areas on architectures where the addressing model has
2367*4882a593Smuzhiyun * limited offset range for symbol relocations to guarantee module
2368*4882a593Smuzhiyun * percpu symbols fall inside the relocatable range.
2369*4882a593Smuzhiyun *
2370*4882a593Smuzhiyun * @ai->dyn_size determines the number of bytes available for dynamic
2371*4882a593Smuzhiyun * allocation in the first chunk. The area between @ai->static_size +
2372*4882a593Smuzhiyun * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2373*4882a593Smuzhiyun *
2374*4882a593Smuzhiyun * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2375*4882a593Smuzhiyun * and equal to or larger than @ai->static_size + @ai->reserved_size +
2376*4882a593Smuzhiyun * @ai->dyn_size.
2377*4882a593Smuzhiyun *
2378*4882a593Smuzhiyun * @ai->atom_size is the allocation atom size and used as alignment
2379*4882a593Smuzhiyun * for vm areas.
2380*4882a593Smuzhiyun *
2381*4882a593Smuzhiyun * @ai->alloc_size is the allocation size and always multiple of
2382*4882a593Smuzhiyun * @ai->atom_size. This is larger than @ai->atom_size if
2383*4882a593Smuzhiyun * @ai->unit_size is larger than @ai->atom_size.
2384*4882a593Smuzhiyun *
2385*4882a593Smuzhiyun * @ai->nr_groups and @ai->groups describe virtual memory layout of
2386*4882a593Smuzhiyun * percpu areas. Units which should be colocated are put into the
2387*4882a593Smuzhiyun * same group. Dynamic VM areas will be allocated according to these
2388*4882a593Smuzhiyun * groupings. If @ai->nr_groups is zero, a single group containing
2389*4882a593Smuzhiyun * all units is assumed.
2390*4882a593Smuzhiyun *
2391*4882a593Smuzhiyun * The caller should have mapped the first chunk at @base_addr and
2392*4882a593Smuzhiyun * copied static data to each unit.
2393*4882a593Smuzhiyun *
2394*4882a593Smuzhiyun * The first chunk will always contain a static and a dynamic region.
2395*4882a593Smuzhiyun * However, the static region is not managed by any chunk. If the first
2396*4882a593Smuzhiyun * chunk also contains a reserved region, it is served by two chunks -
2397*4882a593Smuzhiyun * one for the reserved region and one for the dynamic region. They
2398*4882a593Smuzhiyun * share the same vm, but use offset regions in the area allocation map.
2399*4882a593Smuzhiyun * The chunk serving the dynamic region is circulated in the chunk slots
2400*4882a593Smuzhiyun * and available for dynamic allocation like any other chunk.
2401*4882a593Smuzhiyun */
2402*4882a593Smuzhiyun void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2403*4882a593Smuzhiyun void *base_addr)
2404*4882a593Smuzhiyun {
2405*4882a593Smuzhiyun size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2406*4882a593Smuzhiyun size_t static_size, dyn_size;
2407*4882a593Smuzhiyun struct pcpu_chunk *chunk;
2408*4882a593Smuzhiyun unsigned long *group_offsets;
2409*4882a593Smuzhiyun size_t *group_sizes;
2410*4882a593Smuzhiyun unsigned long *unit_off;
2411*4882a593Smuzhiyun unsigned int cpu;
2412*4882a593Smuzhiyun int *unit_map;
2413*4882a593Smuzhiyun int group, unit, i;
2414*4882a593Smuzhiyun int map_size;
2415*4882a593Smuzhiyun unsigned long tmp_addr;
2416*4882a593Smuzhiyun size_t alloc_size;
2417*4882a593Smuzhiyun enum pcpu_chunk_type type;
2418*4882a593Smuzhiyun
2419*4882a593Smuzhiyun #define PCPU_SETUP_BUG_ON(cond) do { \
2420*4882a593Smuzhiyun if (unlikely(cond)) { \
2421*4882a593Smuzhiyun pr_emerg("failed to initialize, %s\n", #cond); \
2422*4882a593Smuzhiyun pr_emerg("cpu_possible_mask=%*pb\n", \
2423*4882a593Smuzhiyun cpumask_pr_args(cpu_possible_mask)); \
2424*4882a593Smuzhiyun pcpu_dump_alloc_info(KERN_EMERG, ai); \
2425*4882a593Smuzhiyun BUG(); \
2426*4882a593Smuzhiyun } \
2427*4882a593Smuzhiyun } while (0)
2428*4882a593Smuzhiyun
2429*4882a593Smuzhiyun /* sanity checks */
2430*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2431*4882a593Smuzhiyun #ifdef CONFIG_SMP
2432*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(!ai->static_size);
2433*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2434*4882a593Smuzhiyun #endif
2435*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(!base_addr);
2436*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2437*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2438*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2439*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2440*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2441*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2442*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(!ai->dyn_size);
2443*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2444*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2445*4882a593Smuzhiyun IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2446*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2447*4882a593Smuzhiyun
2448*4882a593Smuzhiyun /* process group information and build config tables accordingly */
2449*4882a593Smuzhiyun alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2450*4882a593Smuzhiyun group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2451*4882a593Smuzhiyun if (!group_offsets)
2452*4882a593Smuzhiyun panic("%s: Failed to allocate %zu bytes\n", __func__,
2453*4882a593Smuzhiyun alloc_size);
2454*4882a593Smuzhiyun
2455*4882a593Smuzhiyun alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2456*4882a593Smuzhiyun group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2457*4882a593Smuzhiyun if (!group_sizes)
2458*4882a593Smuzhiyun panic("%s: Failed to allocate %zu bytes\n", __func__,
2459*4882a593Smuzhiyun alloc_size);
2460*4882a593Smuzhiyun
2461*4882a593Smuzhiyun alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2462*4882a593Smuzhiyun unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2463*4882a593Smuzhiyun if (!unit_map)
2464*4882a593Smuzhiyun panic("%s: Failed to allocate %zu bytes\n", __func__,
2465*4882a593Smuzhiyun alloc_size);
2466*4882a593Smuzhiyun
2467*4882a593Smuzhiyun alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2468*4882a593Smuzhiyun unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2469*4882a593Smuzhiyun if (!unit_off)
2470*4882a593Smuzhiyun panic("%s: Failed to allocate %zu bytes\n", __func__,
2471*4882a593Smuzhiyun alloc_size);
2472*4882a593Smuzhiyun
2473*4882a593Smuzhiyun for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2474*4882a593Smuzhiyun unit_map[cpu] = UINT_MAX;
2475*4882a593Smuzhiyun
2476*4882a593Smuzhiyun pcpu_low_unit_cpu = NR_CPUS;
2477*4882a593Smuzhiyun pcpu_high_unit_cpu = NR_CPUS;
2478*4882a593Smuzhiyun
2479*4882a593Smuzhiyun for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2480*4882a593Smuzhiyun const struct pcpu_group_info *gi = &ai->groups[group];
2481*4882a593Smuzhiyun
2482*4882a593Smuzhiyun group_offsets[group] = gi->base_offset;
2483*4882a593Smuzhiyun group_sizes[group] = gi->nr_units * ai->unit_size;
2484*4882a593Smuzhiyun
2485*4882a593Smuzhiyun for (i = 0; i < gi->nr_units; i++) {
2486*4882a593Smuzhiyun cpu = gi->cpu_map[i];
2487*4882a593Smuzhiyun if (cpu == NR_CPUS)
2488*4882a593Smuzhiyun continue;
2489*4882a593Smuzhiyun
2490*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2491*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2492*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2493*4882a593Smuzhiyun
2494*4882a593Smuzhiyun unit_map[cpu] = unit + i;
2495*4882a593Smuzhiyun unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2496*4882a593Smuzhiyun
2497*4882a593Smuzhiyun /* determine low/high unit_cpu */
2498*4882a593Smuzhiyun if (pcpu_low_unit_cpu == NR_CPUS ||
2499*4882a593Smuzhiyun unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2500*4882a593Smuzhiyun pcpu_low_unit_cpu = cpu;
2501*4882a593Smuzhiyun if (pcpu_high_unit_cpu == NR_CPUS ||
2502*4882a593Smuzhiyun unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2503*4882a593Smuzhiyun pcpu_high_unit_cpu = cpu;
2504*4882a593Smuzhiyun }
2505*4882a593Smuzhiyun }
2506*4882a593Smuzhiyun pcpu_nr_units = unit;
2507*4882a593Smuzhiyun
2508*4882a593Smuzhiyun for_each_possible_cpu(cpu)
2509*4882a593Smuzhiyun PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2510*4882a593Smuzhiyun
2511*4882a593Smuzhiyun /* we're done parsing the input, undefine BUG macro and dump config */
2512*4882a593Smuzhiyun #undef PCPU_SETUP_BUG_ON
2513*4882a593Smuzhiyun pcpu_dump_alloc_info(KERN_DEBUG, ai);
2514*4882a593Smuzhiyun
2515*4882a593Smuzhiyun pcpu_nr_groups = ai->nr_groups;
2516*4882a593Smuzhiyun pcpu_group_offsets = group_offsets;
2517*4882a593Smuzhiyun pcpu_group_sizes = group_sizes;
2518*4882a593Smuzhiyun pcpu_unit_map = unit_map;
2519*4882a593Smuzhiyun pcpu_unit_offsets = unit_off;
2520*4882a593Smuzhiyun
2521*4882a593Smuzhiyun /* determine basic parameters */
2522*4882a593Smuzhiyun pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2523*4882a593Smuzhiyun pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2524*4882a593Smuzhiyun pcpu_atom_size = ai->atom_size;
2525*4882a593Smuzhiyun pcpu_chunk_struct_size = struct_size(chunk, populated,
2526*4882a593Smuzhiyun BITS_TO_LONGS(pcpu_unit_pages));
2527*4882a593Smuzhiyun
2528*4882a593Smuzhiyun pcpu_stats_save_ai(ai);
2529*4882a593Smuzhiyun
2530*4882a593Smuzhiyun /*
2531*4882a593Smuzhiyun * Allocate chunk slots. The additional last slot is for
2532*4882a593Smuzhiyun * empty chunks.
2533*4882a593Smuzhiyun */
2534*4882a593Smuzhiyun pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
2535*4882a593Smuzhiyun pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
2536*4882a593Smuzhiyun sizeof(pcpu_chunk_lists[0]) *
2537*4882a593Smuzhiyun PCPU_NR_CHUNK_TYPES,
2538*4882a593Smuzhiyun SMP_CACHE_BYTES);
2539*4882a593Smuzhiyun if (!pcpu_chunk_lists)
2540*4882a593Smuzhiyun panic("%s: Failed to allocate %zu bytes\n", __func__,
2541*4882a593Smuzhiyun pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) *
2542*4882a593Smuzhiyun PCPU_NR_CHUNK_TYPES);
2543*4882a593Smuzhiyun
2544*4882a593Smuzhiyun for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
2545*4882a593Smuzhiyun for (i = 0; i < pcpu_nr_slots; i++)
2546*4882a593Smuzhiyun INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]);
2547*4882a593Smuzhiyun
2548*4882a593Smuzhiyun /*
2549*4882a593Smuzhiyun * The end of the static region needs to be aligned with the
2550*4882a593Smuzhiyun * minimum allocation size as this offsets the reserved and
2551*4882a593Smuzhiyun * dynamic region. The first chunk ends page aligned by
2552*4882a593Smuzhiyun * expanding the dynamic region, therefore the dynamic region
2553*4882a593Smuzhiyun * can be shrunk to compensate while still staying above the
2554*4882a593Smuzhiyun * configured sizes.
2555*4882a593Smuzhiyun */
2556*4882a593Smuzhiyun static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2557*4882a593Smuzhiyun dyn_size = ai->dyn_size - (static_size - ai->static_size);
2558*4882a593Smuzhiyun
2559*4882a593Smuzhiyun /*
2560*4882a593Smuzhiyun * Initialize first chunk.
2561*4882a593Smuzhiyun * If the reserved_size is non-zero, this initializes the reserved
2562*4882a593Smuzhiyun * chunk. If the reserved_size is zero, the reserved chunk is NULL
2563*4882a593Smuzhiyun * and the dynamic region is initialized here. The first chunk,
2564*4882a593Smuzhiyun * pcpu_first_chunk, will always point to the chunk that serves
2565*4882a593Smuzhiyun * the dynamic region.
2566*4882a593Smuzhiyun */
2567*4882a593Smuzhiyun tmp_addr = (unsigned long)base_addr + static_size;
2568*4882a593Smuzhiyun map_size = ai->reserved_size ?: dyn_size;
2569*4882a593Smuzhiyun chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2570*4882a593Smuzhiyun
2571*4882a593Smuzhiyun /* init dynamic chunk if necessary */
2572*4882a593Smuzhiyun if (ai->reserved_size) {
2573*4882a593Smuzhiyun pcpu_reserved_chunk = chunk;
2574*4882a593Smuzhiyun
2575*4882a593Smuzhiyun tmp_addr = (unsigned long)base_addr + static_size +
2576*4882a593Smuzhiyun ai->reserved_size;
2577*4882a593Smuzhiyun map_size = dyn_size;
2578*4882a593Smuzhiyun chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2579*4882a593Smuzhiyun }
2580*4882a593Smuzhiyun
2581*4882a593Smuzhiyun /* link the first chunk in */
2582*4882a593Smuzhiyun pcpu_first_chunk = chunk;
2583*4882a593Smuzhiyun pcpu_nr_empty_pop_pages[PCPU_CHUNK_ROOT] = pcpu_first_chunk->nr_empty_pop_pages;
2584*4882a593Smuzhiyun pcpu_chunk_relocate(pcpu_first_chunk, -1);
2585*4882a593Smuzhiyun
2586*4882a593Smuzhiyun /* include all regions of the first chunk */
2587*4882a593Smuzhiyun pcpu_nr_populated += PFN_DOWN(size_sum);
2588*4882a593Smuzhiyun
2589*4882a593Smuzhiyun pcpu_stats_chunk_alloc();
2590*4882a593Smuzhiyun trace_percpu_create_chunk(base_addr);
2591*4882a593Smuzhiyun
2592*4882a593Smuzhiyun /* we're done */
2593*4882a593Smuzhiyun pcpu_base_addr = base_addr;
2594*4882a593Smuzhiyun }
2595*4882a593Smuzhiyun
2596*4882a593Smuzhiyun #ifdef CONFIG_SMP
2597*4882a593Smuzhiyun
2598*4882a593Smuzhiyun const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2599*4882a593Smuzhiyun [PCPU_FC_AUTO] = "auto",
2600*4882a593Smuzhiyun [PCPU_FC_EMBED] = "embed",
2601*4882a593Smuzhiyun [PCPU_FC_PAGE] = "page",
2602*4882a593Smuzhiyun };
2603*4882a593Smuzhiyun
2604*4882a593Smuzhiyun enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2605*4882a593Smuzhiyun
2606*4882a593Smuzhiyun static int __init percpu_alloc_setup(char *str)
2607*4882a593Smuzhiyun {
2608*4882a593Smuzhiyun if (!str)
2609*4882a593Smuzhiyun return -EINVAL;
2610*4882a593Smuzhiyun
2611*4882a593Smuzhiyun if (0)
2612*4882a593Smuzhiyun /* nada */;
2613*4882a593Smuzhiyun #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2614*4882a593Smuzhiyun else if (!strcmp(str, "embed"))
2615*4882a593Smuzhiyun pcpu_chosen_fc = PCPU_FC_EMBED;
2616*4882a593Smuzhiyun #endif
2617*4882a593Smuzhiyun #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2618*4882a593Smuzhiyun else if (!strcmp(str, "page"))
2619*4882a593Smuzhiyun pcpu_chosen_fc = PCPU_FC_PAGE;
2620*4882a593Smuzhiyun #endif
2621*4882a593Smuzhiyun else
2622*4882a593Smuzhiyun pr_warn("unknown allocator %s specified\n", str);
2623*4882a593Smuzhiyun
2624*4882a593Smuzhiyun return 0;
2625*4882a593Smuzhiyun }
2626*4882a593Smuzhiyun early_param("percpu_alloc", percpu_alloc_setup);
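
/*
 * The handler above is wired to the "percpu_alloc=" boot parameter.  For
 * example, booting with
 *
 *	percpu_alloc=page
 *
 * forces the page-by-page first chunk allocator on configs that provide it;
 * an unknown value only triggers the warning above and keeps the default.
 */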
2627*4882a593Smuzhiyun
2628*4882a593Smuzhiyun /*
2629*4882a593Smuzhiyun * pcpu_embed_first_chunk() is used by the generic percpu setup.
2630*4882a593Smuzhiyun  * Build it if the arch config needs it or if the generic setup is
2631*4882a593Smuzhiyun  * going to be used.
2632*4882a593Smuzhiyun */
2633*4882a593Smuzhiyun #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2634*4882a593Smuzhiyun !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2635*4882a593Smuzhiyun #define BUILD_EMBED_FIRST_CHUNK
2636*4882a593Smuzhiyun #endif
2637*4882a593Smuzhiyun
2638*4882a593Smuzhiyun /* build pcpu_page_first_chunk() iff needed by the arch config */
2639*4882a593Smuzhiyun #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2640*4882a593Smuzhiyun #define BUILD_PAGE_FIRST_CHUNK
2641*4882a593Smuzhiyun #endif
2642*4882a593Smuzhiyun
2643*4882a593Smuzhiyun /* pcpu_build_alloc_info() is used by both embed and page first chunk */
2644*4882a593Smuzhiyun #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2645*4882a593Smuzhiyun /**
2646*4882a593Smuzhiyun * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2647*4882a593Smuzhiyun * @reserved_size: the size of reserved percpu area in bytes
2648*4882a593Smuzhiyun * @dyn_size: minimum free size for dynamic allocation in bytes
2649*4882a593Smuzhiyun * @atom_size: allocation atom size
2650*4882a593Smuzhiyun * @cpu_distance_fn: callback to determine distance between cpus, optional
2651*4882a593Smuzhiyun *
2652*4882a593Smuzhiyun * This function determines grouping of units, their mappings to cpus
2653*4882a593Smuzhiyun * and other parameters considering needed percpu size, allocation
2654*4882a593Smuzhiyun * atom size and distances between CPUs.
2655*4882a593Smuzhiyun *
2656*4882a593Smuzhiyun * Groups are always multiples of atom size and CPUs which are of
2657*4882a593Smuzhiyun * LOCAL_DISTANCE both ways are grouped together and share space for
2658*4882a593Smuzhiyun * units in the same group. The returned configuration is guaranteed
2659*4882a593Smuzhiyun * to have CPUs on different nodes on different groups and >=75% usage
2660*4882a593Smuzhiyun * of allocated virtual address space.
2661*4882a593Smuzhiyun *
2662*4882a593Smuzhiyun * RETURNS:
2663*4882a593Smuzhiyun * On success, pointer to the new allocation_info is returned. On
2664*4882a593Smuzhiyun * failure, ERR_PTR value is returned.
2665*4882a593Smuzhiyun */
2666*4882a593Smuzhiyun static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
2667*4882a593Smuzhiyun size_t reserved_size, size_t dyn_size,
2668*4882a593Smuzhiyun size_t atom_size,
2669*4882a593Smuzhiyun pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2670*4882a593Smuzhiyun {
2671*4882a593Smuzhiyun static int group_map[NR_CPUS] __initdata;
2672*4882a593Smuzhiyun static int group_cnt[NR_CPUS] __initdata;
2673*4882a593Smuzhiyun const size_t static_size = __per_cpu_end - __per_cpu_start;
2674*4882a593Smuzhiyun int nr_groups = 1, nr_units = 0;
2675*4882a593Smuzhiyun size_t size_sum, min_unit_size, alloc_size;
2676*4882a593Smuzhiyun int upa, max_upa, best_upa; /* units_per_alloc */
2677*4882a593Smuzhiyun int last_allocs, group, unit;
2678*4882a593Smuzhiyun unsigned int cpu, tcpu;
2679*4882a593Smuzhiyun struct pcpu_alloc_info *ai;
2680*4882a593Smuzhiyun unsigned int *cpu_map;
2681*4882a593Smuzhiyun
2682*4882a593Smuzhiyun /* this function may be called multiple times */
2683*4882a593Smuzhiyun memset(group_map, 0, sizeof(group_map));
2684*4882a593Smuzhiyun memset(group_cnt, 0, sizeof(group_cnt));
2685*4882a593Smuzhiyun
2686*4882a593Smuzhiyun /* calculate size_sum and ensure dyn_size is enough for early alloc */
2687*4882a593Smuzhiyun size_sum = PFN_ALIGN(static_size + reserved_size +
2688*4882a593Smuzhiyun max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2689*4882a593Smuzhiyun dyn_size = size_sum - static_size - reserved_size;
2690*4882a593Smuzhiyun
2691*4882a593Smuzhiyun /*
2692*4882a593Smuzhiyun * Determine min_unit_size, alloc_size and max_upa such that
2693*4882a593Smuzhiyun * alloc_size is multiple of atom_size and is the smallest
2694*4882a593Smuzhiyun * which can accommodate 4k aligned segments which are equal to
2695*4882a593Smuzhiyun * or larger than min_unit_size.
2696*4882a593Smuzhiyun */
2697*4882a593Smuzhiyun min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2698*4882a593Smuzhiyun
2699*4882a593Smuzhiyun /* determine the maximum # of units that can fit in an allocation */
2700*4882a593Smuzhiyun alloc_size = roundup(min_unit_size, atom_size);
2701*4882a593Smuzhiyun upa = alloc_size / min_unit_size;
2702*4882a593Smuzhiyun while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2703*4882a593Smuzhiyun upa--;
2704*4882a593Smuzhiyun max_upa = upa;
2705*4882a593Smuzhiyun
2706*4882a593Smuzhiyun /* group cpus according to their proximity */
2707*4882a593Smuzhiyun for_each_possible_cpu(cpu) {
2708*4882a593Smuzhiyun group = 0;
2709*4882a593Smuzhiyun next_group:
2710*4882a593Smuzhiyun for_each_possible_cpu(tcpu) {
2711*4882a593Smuzhiyun if (cpu == tcpu)
2712*4882a593Smuzhiyun break;
2713*4882a593Smuzhiyun if (group_map[tcpu] == group && cpu_distance_fn &&
2714*4882a593Smuzhiyun (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
2715*4882a593Smuzhiyun cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
2716*4882a593Smuzhiyun group++;
2717*4882a593Smuzhiyun nr_groups = max(nr_groups, group + 1);
2718*4882a593Smuzhiyun goto next_group;
2719*4882a593Smuzhiyun }
2720*4882a593Smuzhiyun }
2721*4882a593Smuzhiyun group_map[cpu] = group;
2722*4882a593Smuzhiyun group_cnt[group]++;
2723*4882a593Smuzhiyun }
2724*4882a593Smuzhiyun
2725*4882a593Smuzhiyun /*
2726*4882a593Smuzhiyun * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2727*4882a593Smuzhiyun * Expand the unit_size until we use >= 75% of the units allocated.
2728*4882a593Smuzhiyun * Related to atom_size, which could be much larger than the unit_size.
2729*4882a593Smuzhiyun */
2730*4882a593Smuzhiyun last_allocs = INT_MAX;
2731*4882a593Smuzhiyun for (upa = max_upa; upa; upa--) {
2732*4882a593Smuzhiyun int allocs = 0, wasted = 0;
2733*4882a593Smuzhiyun
2734*4882a593Smuzhiyun if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2735*4882a593Smuzhiyun continue;
2736*4882a593Smuzhiyun
2737*4882a593Smuzhiyun for (group = 0; group < nr_groups; group++) {
2738*4882a593Smuzhiyun int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2739*4882a593Smuzhiyun allocs += this_allocs;
2740*4882a593Smuzhiyun wasted += this_allocs * upa - group_cnt[group];
2741*4882a593Smuzhiyun }
2742*4882a593Smuzhiyun
2743*4882a593Smuzhiyun /*
2744*4882a593Smuzhiyun * Don't accept if wastage is over 1/3. The
2745*4882a593Smuzhiyun * greater-than comparison ensures upa==1 always
2746*4882a593Smuzhiyun * passes the following check.
2747*4882a593Smuzhiyun */
2748*4882a593Smuzhiyun if (wasted > num_possible_cpus() / 3)
2749*4882a593Smuzhiyun continue;
2750*4882a593Smuzhiyun
2751*4882a593Smuzhiyun /* and then don't consume more memory */
2752*4882a593Smuzhiyun if (allocs > last_allocs)
2753*4882a593Smuzhiyun break;
2754*4882a593Smuzhiyun last_allocs = allocs;
2755*4882a593Smuzhiyun best_upa = upa;
2756*4882a593Smuzhiyun }
2757*4882a593Smuzhiyun upa = best_upa;
2758*4882a593Smuzhiyun
2759*4882a593Smuzhiyun /* allocate and fill alloc_info */
2760*4882a593Smuzhiyun for (group = 0; group < nr_groups; group++)
2761*4882a593Smuzhiyun nr_units += roundup(group_cnt[group], upa);
2762*4882a593Smuzhiyun
2763*4882a593Smuzhiyun ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2764*4882a593Smuzhiyun if (!ai)
2765*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
2766*4882a593Smuzhiyun cpu_map = ai->groups[0].cpu_map;
2767*4882a593Smuzhiyun
2768*4882a593Smuzhiyun for (group = 0; group < nr_groups; group++) {
2769*4882a593Smuzhiyun ai->groups[group].cpu_map = cpu_map;
2770*4882a593Smuzhiyun cpu_map += roundup(group_cnt[group], upa);
2771*4882a593Smuzhiyun }
2772*4882a593Smuzhiyun
2773*4882a593Smuzhiyun ai->static_size = static_size;
2774*4882a593Smuzhiyun ai->reserved_size = reserved_size;
2775*4882a593Smuzhiyun ai->dyn_size = dyn_size;
2776*4882a593Smuzhiyun ai->unit_size = alloc_size / upa;
2777*4882a593Smuzhiyun ai->atom_size = atom_size;
2778*4882a593Smuzhiyun ai->alloc_size = alloc_size;
2779*4882a593Smuzhiyun
2780*4882a593Smuzhiyun for (group = 0, unit = 0; group < nr_groups; group++) {
2781*4882a593Smuzhiyun struct pcpu_group_info *gi = &ai->groups[group];
2782*4882a593Smuzhiyun
2783*4882a593Smuzhiyun /*
2784*4882a593Smuzhiyun * Initialize base_offset as if all groups are located
2785*4882a593Smuzhiyun * back-to-back. The caller should update this to
2786*4882a593Smuzhiyun * reflect actual allocation.
2787*4882a593Smuzhiyun */
2788*4882a593Smuzhiyun gi->base_offset = unit * ai->unit_size;
2789*4882a593Smuzhiyun
2790*4882a593Smuzhiyun for_each_possible_cpu(cpu)
2791*4882a593Smuzhiyun if (group_map[cpu] == group)
2792*4882a593Smuzhiyun gi->cpu_map[gi->nr_units++] = cpu;
2793*4882a593Smuzhiyun gi->nr_units = roundup(gi->nr_units, upa);
2794*4882a593Smuzhiyun unit += gi->nr_units;
2795*4882a593Smuzhiyun }
2796*4882a593Smuzhiyun BUG_ON(unit != nr_units);
2797*4882a593Smuzhiyun
2798*4882a593Smuzhiyun return ai;
2799*4882a593Smuzhiyun }
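
/*
 * Worked example of the sizing above, with hypothetical numbers: assume
 * size_sum is 44KB and atom_size is 2MB.  min_unit_size becomes 44KB,
 * alloc_size rounds up to 2MB, and the first upa guess is 2MB / 44KB = 46.
 * upa is then walked down until alloc_size splits into equal, page-aligned
 * units; the first such value is 32, giving a 64KB unit_size.  The 75%
 * usage loop may lower upa further if the group CPU counts would leave too
 * many of those units unused.
 */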
2800*4882a593Smuzhiyun #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
2801*4882a593Smuzhiyun
2802*4882a593Smuzhiyun #if defined(BUILD_EMBED_FIRST_CHUNK)
2803*4882a593Smuzhiyun /**
2804*4882a593Smuzhiyun * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
2805*4882a593Smuzhiyun * @reserved_size: the size of reserved percpu area in bytes
2806*4882a593Smuzhiyun * @dyn_size: minimum free size for dynamic allocation in bytes
2807*4882a593Smuzhiyun * @atom_size: allocation atom size
2808*4882a593Smuzhiyun * @cpu_distance_fn: callback to determine distance between cpus, optional
2809*4882a593Smuzhiyun * @alloc_fn: function to allocate percpu page
2810*4882a593Smuzhiyun * @free_fn: function to free percpu page
2811*4882a593Smuzhiyun *
2812*4882a593Smuzhiyun * This is a helper to ease setting up embedded first percpu chunk and
2813*4882a593Smuzhiyun * can be called where pcpu_setup_first_chunk() is expected.
2814*4882a593Smuzhiyun *
2815*4882a593Smuzhiyun * If this function is used to setup the first chunk, it is allocated
2816*4882a593Smuzhiyun * by calling @alloc_fn and used as-is without being mapped into
2817*4882a593Smuzhiyun * vmalloc area. Allocations are always whole multiples of @atom_size
2818*4882a593Smuzhiyun * aligned to @atom_size.
2819*4882a593Smuzhiyun *
2820*4882a593Smuzhiyun * This enables the first chunk to piggy back on the linear physical
2821*4882a593Smuzhiyun * mapping which often uses larger page size. Please note that this
2822*4882a593Smuzhiyun * can result in very sparse cpu->unit mapping on NUMA machines thus
2823*4882a593Smuzhiyun * requiring large vmalloc address space. Don't use this allocator if
2824*4882a593Smuzhiyun * vmalloc space is not orders of magnitude larger than distances
2825*4882a593Smuzhiyun * between node memory addresses (ie. 32bit NUMA machines).
2826*4882a593Smuzhiyun *
2827*4882a593Smuzhiyun * @dyn_size specifies the minimum dynamic area size.
2828*4882a593Smuzhiyun *
2829*4882a593Smuzhiyun * If the needed size is smaller than the minimum or specified unit
2830*4882a593Smuzhiyun * size, the leftover is returned using @free_fn.
2831*4882a593Smuzhiyun *
2832*4882a593Smuzhiyun * RETURNS:
2833*4882a593Smuzhiyun * 0 on success, -errno on failure.
2834*4882a593Smuzhiyun */
2835*4882a593Smuzhiyun int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2836*4882a593Smuzhiyun size_t atom_size,
2837*4882a593Smuzhiyun pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
2838*4882a593Smuzhiyun pcpu_fc_alloc_fn_t alloc_fn,
2839*4882a593Smuzhiyun pcpu_fc_free_fn_t free_fn)
2840*4882a593Smuzhiyun {
2841*4882a593Smuzhiyun void *base = (void *)ULONG_MAX;
2842*4882a593Smuzhiyun void **areas = NULL;
2843*4882a593Smuzhiyun struct pcpu_alloc_info *ai;
2844*4882a593Smuzhiyun size_t size_sum, areas_size;
2845*4882a593Smuzhiyun unsigned long max_distance;
2846*4882a593Smuzhiyun int group, i, highest_group, rc = 0;
2847*4882a593Smuzhiyun
2848*4882a593Smuzhiyun ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
2849*4882a593Smuzhiyun cpu_distance_fn);
2850*4882a593Smuzhiyun if (IS_ERR(ai))
2851*4882a593Smuzhiyun return PTR_ERR(ai);
2852*4882a593Smuzhiyun
2853*4882a593Smuzhiyun size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2854*4882a593Smuzhiyun areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
2855*4882a593Smuzhiyun
2856*4882a593Smuzhiyun areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
2857*4882a593Smuzhiyun if (!areas) {
2858*4882a593Smuzhiyun rc = -ENOMEM;
2859*4882a593Smuzhiyun goto out_free;
2860*4882a593Smuzhiyun }
2861*4882a593Smuzhiyun
2862*4882a593Smuzhiyun /* allocate, copy and determine base address & max_distance */
2863*4882a593Smuzhiyun highest_group = 0;
2864*4882a593Smuzhiyun for (group = 0; group < ai->nr_groups; group++) {
2865*4882a593Smuzhiyun struct pcpu_group_info *gi = &ai->groups[group];
2866*4882a593Smuzhiyun unsigned int cpu = NR_CPUS;
2867*4882a593Smuzhiyun void *ptr;
2868*4882a593Smuzhiyun
2869*4882a593Smuzhiyun for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2870*4882a593Smuzhiyun cpu = gi->cpu_map[i];
2871*4882a593Smuzhiyun BUG_ON(cpu == NR_CPUS);
2872*4882a593Smuzhiyun
2873*4882a593Smuzhiyun /* allocate space for the whole group */
2874*4882a593Smuzhiyun ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2875*4882a593Smuzhiyun if (!ptr) {
2876*4882a593Smuzhiyun rc = -ENOMEM;
2877*4882a593Smuzhiyun goto out_free_areas;
2878*4882a593Smuzhiyun }
2879*4882a593Smuzhiyun /* kmemleak tracks the percpu allocations separately */
2880*4882a593Smuzhiyun kmemleak_free(ptr);
2881*4882a593Smuzhiyun areas[group] = ptr;
2882*4882a593Smuzhiyun
2883*4882a593Smuzhiyun base = min(ptr, base);
2884*4882a593Smuzhiyun if (ptr > areas[highest_group])
2885*4882a593Smuzhiyun highest_group = group;
2886*4882a593Smuzhiyun }
2887*4882a593Smuzhiyun max_distance = areas[highest_group] - base;
2888*4882a593Smuzhiyun max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
2889*4882a593Smuzhiyun
2890*4882a593Smuzhiyun /* warn if maximum distance is further than 75% of vmalloc space */
2891*4882a593Smuzhiyun if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2892*4882a593Smuzhiyun pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
2893*4882a593Smuzhiyun max_distance, VMALLOC_TOTAL);
2894*4882a593Smuzhiyun #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2895*4882a593Smuzhiyun /* and fail if we have fallback */
2896*4882a593Smuzhiyun rc = -EINVAL;
2897*4882a593Smuzhiyun goto out_free_areas;
2898*4882a593Smuzhiyun #endif
2899*4882a593Smuzhiyun }
2900*4882a593Smuzhiyun
2901*4882a593Smuzhiyun /*
2902*4882a593Smuzhiyun * Copy data and free unused parts. This should happen after all
2903*4882a593Smuzhiyun * allocations are complete; otherwise, we may end up with
2904*4882a593Smuzhiyun * overlapping groups.
2905*4882a593Smuzhiyun */
2906*4882a593Smuzhiyun for (group = 0; group < ai->nr_groups; group++) {
2907*4882a593Smuzhiyun struct pcpu_group_info *gi = &ai->groups[group];
2908*4882a593Smuzhiyun void *ptr = areas[group];
2909*4882a593Smuzhiyun
2910*4882a593Smuzhiyun for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2911*4882a593Smuzhiyun if (gi->cpu_map[i] == NR_CPUS) {
2912*4882a593Smuzhiyun /* unused unit, free whole */
2913*4882a593Smuzhiyun free_fn(ptr, ai->unit_size);
2914*4882a593Smuzhiyun continue;
2915*4882a593Smuzhiyun }
2916*4882a593Smuzhiyun /* copy and return the unused part */
2917*4882a593Smuzhiyun memcpy(ptr, __per_cpu_load, ai->static_size);
2918*4882a593Smuzhiyun free_fn(ptr + size_sum, ai->unit_size - size_sum);
2919*4882a593Smuzhiyun }
2920*4882a593Smuzhiyun }
2921*4882a593Smuzhiyun
2922*4882a593Smuzhiyun /* base address is now known, determine group base offsets */
2923*4882a593Smuzhiyun for (group = 0; group < ai->nr_groups; group++) {
2924*4882a593Smuzhiyun ai->groups[group].base_offset = areas[group] - base;
2925*4882a593Smuzhiyun }
2926*4882a593Smuzhiyun
2927*4882a593Smuzhiyun pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
2928*4882a593Smuzhiyun PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2929*4882a593Smuzhiyun ai->dyn_size, ai->unit_size);
2930*4882a593Smuzhiyun
2931*4882a593Smuzhiyun pcpu_setup_first_chunk(ai, base);
2932*4882a593Smuzhiyun goto out_free;
2933*4882a593Smuzhiyun
2934*4882a593Smuzhiyun out_free_areas:
2935*4882a593Smuzhiyun for (group = 0; group < ai->nr_groups; group++)
2936*4882a593Smuzhiyun if (areas[group])
2937*4882a593Smuzhiyun free_fn(areas[group],
2938*4882a593Smuzhiyun ai->groups[group].nr_units * ai->unit_size);
2939*4882a593Smuzhiyun out_free:
2940*4882a593Smuzhiyun pcpu_free_alloc_info(ai);
2941*4882a593Smuzhiyun if (areas)
2942*4882a593Smuzhiyun memblock_free_early(__pa(areas), areas_size);
2943*4882a593Smuzhiyun return rc;
2944*4882a593Smuzhiyun }
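
/*
 * Illustrative sketch of an arch wiring this up on NUMA (hypothetical
 * names, loosely modelled on the x86 setup): the distance callback is what
 * lets CPUs on the same node share a group, while the alloc/free callbacks
 * can be as simple as the memblock wrappers used by the generic setup
 * further down in this file.
 *
 *	static int __init my_cpu_distance(unsigned int from, unsigned int to)
 *	{
 *		return cpu_to_node(from) == cpu_to_node(to) ?
 *			LOCAL_DISTANCE : REMOTE_DISTANCE;
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    my_cpu_distance, my_alloc, my_free);
 */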
2945*4882a593Smuzhiyun #endif /* BUILD_EMBED_FIRST_CHUNK */
2946*4882a593Smuzhiyun
2947*4882a593Smuzhiyun #ifdef BUILD_PAGE_FIRST_CHUNK
2948*4882a593Smuzhiyun /**
2949*4882a593Smuzhiyun * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2950*4882a593Smuzhiyun * @reserved_size: the size of reserved percpu area in bytes
2951*4882a593Smuzhiyun * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2952*4882a593Smuzhiyun * @free_fn: function to free percpu page, always called with PAGE_SIZE
2953*4882a593Smuzhiyun * @populate_pte_fn: function to populate pte
2954*4882a593Smuzhiyun *
2955*4882a593Smuzhiyun * This is a helper to ease setting up page-remapped first percpu
2956*4882a593Smuzhiyun * chunk and can be called where pcpu_setup_first_chunk() is expected.
2957*4882a593Smuzhiyun *
2958*4882a593Smuzhiyun * This is the basic allocator. Static percpu area is allocated
2959*4882a593Smuzhiyun * page-by-page into vmalloc area.
2960*4882a593Smuzhiyun *
2961*4882a593Smuzhiyun * RETURNS:
2962*4882a593Smuzhiyun * 0 on success, -errno on failure.
2963*4882a593Smuzhiyun */
2964*4882a593Smuzhiyun int __init pcpu_page_first_chunk(size_t reserved_size,
2965*4882a593Smuzhiyun pcpu_fc_alloc_fn_t alloc_fn,
2966*4882a593Smuzhiyun pcpu_fc_free_fn_t free_fn,
2967*4882a593Smuzhiyun pcpu_fc_populate_pte_fn_t populate_pte_fn)
2968*4882a593Smuzhiyun {
2969*4882a593Smuzhiyun static struct vm_struct vm;
2970*4882a593Smuzhiyun struct pcpu_alloc_info *ai;
2971*4882a593Smuzhiyun char psize_str[16];
2972*4882a593Smuzhiyun int unit_pages;
2973*4882a593Smuzhiyun size_t pages_size;
2974*4882a593Smuzhiyun struct page **pages;
2975*4882a593Smuzhiyun int unit, i, j, rc = 0;
2976*4882a593Smuzhiyun int upa;
2977*4882a593Smuzhiyun int nr_g0_units;
2978*4882a593Smuzhiyun
2979*4882a593Smuzhiyun snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2980*4882a593Smuzhiyun
2981*4882a593Smuzhiyun ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2982*4882a593Smuzhiyun if (IS_ERR(ai))
2983*4882a593Smuzhiyun return PTR_ERR(ai);
2984*4882a593Smuzhiyun BUG_ON(ai->nr_groups != 1);
2985*4882a593Smuzhiyun upa = ai->alloc_size/ai->unit_size;
2986*4882a593Smuzhiyun nr_g0_units = roundup(num_possible_cpus(), upa);
2987*4882a593Smuzhiyun if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
2988*4882a593Smuzhiyun pcpu_free_alloc_info(ai);
2989*4882a593Smuzhiyun return -EINVAL;
2990*4882a593Smuzhiyun }
2991*4882a593Smuzhiyun
2992*4882a593Smuzhiyun unit_pages = ai->unit_size >> PAGE_SHIFT;
2993*4882a593Smuzhiyun
2994*4882a593Smuzhiyun /* unaligned allocations can't be freed, round up to page size */
2995*4882a593Smuzhiyun pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2996*4882a593Smuzhiyun sizeof(pages[0]));
2997*4882a593Smuzhiyun pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
2998*4882a593Smuzhiyun if (!pages)
2999*4882a593Smuzhiyun panic("%s: Failed to allocate %zu bytes\n", __func__,
3000*4882a593Smuzhiyun pages_size);
3001*4882a593Smuzhiyun
3002*4882a593Smuzhiyun /* allocate pages */
3003*4882a593Smuzhiyun j = 0;
3004*4882a593Smuzhiyun for (unit = 0; unit < num_possible_cpus(); unit++) {
3005*4882a593Smuzhiyun unsigned int cpu = ai->groups[0].cpu_map[unit];
3006*4882a593Smuzhiyun for (i = 0; i < unit_pages; i++) {
3007*4882a593Smuzhiyun void *ptr;
3008*4882a593Smuzhiyun
3009*4882a593Smuzhiyun ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
3010*4882a593Smuzhiyun if (!ptr) {
3011*4882a593Smuzhiyun pr_warn("failed to allocate %s page for cpu%u\n",
3012*4882a593Smuzhiyun psize_str, cpu);
3013*4882a593Smuzhiyun goto enomem;
3014*4882a593Smuzhiyun }
3015*4882a593Smuzhiyun /* kmemleak tracks the percpu allocations separately */
3016*4882a593Smuzhiyun kmemleak_free(ptr);
3017*4882a593Smuzhiyun pages[j++] = virt_to_page(ptr);
3018*4882a593Smuzhiyun }
3019*4882a593Smuzhiyun }
3020*4882a593Smuzhiyun
3021*4882a593Smuzhiyun /* allocate vm area, map the pages and copy static data */
3022*4882a593Smuzhiyun vm.flags = VM_ALLOC;
3023*4882a593Smuzhiyun vm.size = num_possible_cpus() * ai->unit_size;
3024*4882a593Smuzhiyun vm_area_register_early(&vm, PAGE_SIZE);
3025*4882a593Smuzhiyun
3026*4882a593Smuzhiyun for (unit = 0; unit < num_possible_cpus(); unit++) {
3027*4882a593Smuzhiyun unsigned long unit_addr =
3028*4882a593Smuzhiyun (unsigned long)vm.addr + unit * ai->unit_size;
3029*4882a593Smuzhiyun
3030*4882a593Smuzhiyun for (i = 0; i < unit_pages; i++)
3031*4882a593Smuzhiyun populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
3032*4882a593Smuzhiyun
3033*4882a593Smuzhiyun /* pte already populated, the following shouldn't fail */
3034*4882a593Smuzhiyun rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3035*4882a593Smuzhiyun unit_pages);
3036*4882a593Smuzhiyun if (rc < 0)
3037*4882a593Smuzhiyun panic("failed to map percpu area, err=%d\n", rc);
3038*4882a593Smuzhiyun
3039*4882a593Smuzhiyun /*
3040*4882a593Smuzhiyun * FIXME: Archs with virtual cache should flush local
3041*4882a593Smuzhiyun * cache for the linear mapping here - something
3042*4882a593Smuzhiyun * equivalent to flush_cache_vmap() on the local cpu.
3043*4882a593Smuzhiyun * flush_cache_vmap() can't be used as most supporting
3044*4882a593Smuzhiyun * data structures are not set up yet.
3045*4882a593Smuzhiyun */
3046*4882a593Smuzhiyun
3047*4882a593Smuzhiyun /* copy static data */
3048*4882a593Smuzhiyun memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
3049*4882a593Smuzhiyun }
3050*4882a593Smuzhiyun
3051*4882a593Smuzhiyun /* we're ready, commit */
3052*4882a593Smuzhiyun pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
3053*4882a593Smuzhiyun unit_pages, psize_str, ai->static_size,
3054*4882a593Smuzhiyun ai->reserved_size, ai->dyn_size);
3055*4882a593Smuzhiyun
3056*4882a593Smuzhiyun pcpu_setup_first_chunk(ai, vm.addr);
3057*4882a593Smuzhiyun goto out_free_ar;
3058*4882a593Smuzhiyun
3059*4882a593Smuzhiyun enomem:
3060*4882a593Smuzhiyun while (--j >= 0)
3061*4882a593Smuzhiyun free_fn(page_address(pages[j]), PAGE_SIZE);
3062*4882a593Smuzhiyun rc = -ENOMEM;
3063*4882a593Smuzhiyun out_free_ar:
3064*4882a593Smuzhiyun memblock_free_early(__pa(pages), pages_size);
3065*4882a593Smuzhiyun pcpu_free_alloc_info(ai);
3066*4882a593Smuzhiyun return rc;
3067*4882a593Smuzhiyun }
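
/*
 * Illustrative sketch of the callbacks an arch passes here (hypothetical
 * names): pages come in PAGE_SIZE pieces and the arch must make sure page
 * tables exist for every address that gets mapped.
 *
 *	static void * __init my_pcpu_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init my_pcpu_free(void *ptr, size_t size)
 *	{
 *		memblock_free_early(__pa(ptr), size);
 *	}
 *
 *	static void __init my_populate_pte(unsigned long addr)
 *	{
 *		// allocate/walk pgd..pte so that addr can be mapped
 *	}
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, my_pcpu_alloc,
 *				   my_pcpu_free, my_populate_pte);
 */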
3068*4882a593Smuzhiyun #endif /* BUILD_PAGE_FIRST_CHUNK */
3069*4882a593Smuzhiyun
3070*4882a593Smuzhiyun #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
3071*4882a593Smuzhiyun /*
3072*4882a593Smuzhiyun * Generic SMP percpu area setup.
3073*4882a593Smuzhiyun *
3074*4882a593Smuzhiyun * The embedding helper is used because its behavior closely resembles
3075*4882a593Smuzhiyun * the original non-dynamic generic percpu area setup. This is
3076*4882a593Smuzhiyun * important because many archs have addressing restrictions and might
3077*4882a593Smuzhiyun * fail if the percpu area is located far away from the previous
3078*4882a593Smuzhiyun * location. As an added bonus, in non-NUMA cases, embedding is
3079*4882a593Smuzhiyun * generally a good idea TLB-wise because percpu area can piggy back
3080*4882a593Smuzhiyun * on the physical linear memory mapping which uses large page
3081*4882a593Smuzhiyun * mappings on applicable archs.
3082*4882a593Smuzhiyun */
3083*4882a593Smuzhiyun unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3084*4882a593Smuzhiyun EXPORT_SYMBOL(__per_cpu_offset);
3085*4882a593Smuzhiyun
3086*4882a593Smuzhiyun static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
3087*4882a593Smuzhiyun size_t align)
3088*4882a593Smuzhiyun {
3089*4882a593Smuzhiyun return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
3090*4882a593Smuzhiyun }
3091*4882a593Smuzhiyun
3092*4882a593Smuzhiyun static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
3093*4882a593Smuzhiyun {
3094*4882a593Smuzhiyun memblock_free_early(__pa(ptr), size);
3095*4882a593Smuzhiyun }
3096*4882a593Smuzhiyun
3097*4882a593Smuzhiyun void __init setup_per_cpu_areas(void)
3098*4882a593Smuzhiyun {
3099*4882a593Smuzhiyun unsigned long delta;
3100*4882a593Smuzhiyun unsigned int cpu;
3101*4882a593Smuzhiyun int rc;
3102*4882a593Smuzhiyun
3103*4882a593Smuzhiyun /*
3104*4882a593Smuzhiyun * Always reserve area for module percpu variables. That's
3105*4882a593Smuzhiyun * what the legacy allocator did.
3106*4882a593Smuzhiyun */
3107*4882a593Smuzhiyun rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
3108*4882a593Smuzhiyun PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
3109*4882a593Smuzhiyun pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
3110*4882a593Smuzhiyun if (rc < 0)
3111*4882a593Smuzhiyun panic("Failed to initialize percpu areas.");
3112*4882a593Smuzhiyun
3113*4882a593Smuzhiyun delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3114*4882a593Smuzhiyun for_each_possible_cpu(cpu)
3115*4882a593Smuzhiyun __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3116*4882a593Smuzhiyun }
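
/*
 * Rough sketch of what the offsets computed above mean: each CPU's copy of
 * a static percpu variable lives at the variable's link address plus that
 * CPU's __per_cpu_offset[].  Ignoring the sparse annotations the real
 * accessors carry, per_cpu_ptr() boils down to:
 *
 *	DEFINE_PER_CPU(int, demo_counter);	// hypothetical variable
 *
 *	int *p = (int *)((unsigned long)&demo_counter +
 *			 __per_cpu_offset[cpu]);
 *	// equivalent to per_cpu_ptr(&demo_counter, cpu)
 */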
3117*4882a593Smuzhiyun #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
3118*4882a593Smuzhiyun
3119*4882a593Smuzhiyun #else /* CONFIG_SMP */
3120*4882a593Smuzhiyun
3121*4882a593Smuzhiyun /*
3122*4882a593Smuzhiyun * UP percpu area setup.
3123*4882a593Smuzhiyun *
3124*4882a593Smuzhiyun * UP always uses km-based percpu allocator with identity mapping.
3125*4882a593Smuzhiyun * Static percpu variables are indistinguishable from the usual static
3126*4882a593Smuzhiyun * variables and don't require any special preparation.
3127*4882a593Smuzhiyun */
3128*4882a593Smuzhiyun void __init setup_per_cpu_areas(void)
3129*4882a593Smuzhiyun {
3130*4882a593Smuzhiyun const size_t unit_size =
3131*4882a593Smuzhiyun roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3132*4882a593Smuzhiyun PERCPU_DYNAMIC_RESERVE));
3133*4882a593Smuzhiyun struct pcpu_alloc_info *ai;
3134*4882a593Smuzhiyun void *fc;
3135*4882a593Smuzhiyun
3136*4882a593Smuzhiyun ai = pcpu_alloc_alloc_info(1, 1);
3137*4882a593Smuzhiyun fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3138*4882a593Smuzhiyun if (!ai || !fc)
3139*4882a593Smuzhiyun panic("Failed to allocate memory for percpu areas.");
3140*4882a593Smuzhiyun /* kmemleak tracks the percpu allocations separately */
3141*4882a593Smuzhiyun kmemleak_free(fc);
3142*4882a593Smuzhiyun
3143*4882a593Smuzhiyun ai->dyn_size = unit_size;
3144*4882a593Smuzhiyun ai->unit_size = unit_size;
3145*4882a593Smuzhiyun ai->atom_size = unit_size;
3146*4882a593Smuzhiyun ai->alloc_size = unit_size;
3147*4882a593Smuzhiyun ai->groups[0].nr_units = 1;
3148*4882a593Smuzhiyun ai->groups[0].cpu_map[0] = 0;
3149*4882a593Smuzhiyun
3150*4882a593Smuzhiyun pcpu_setup_first_chunk(ai, fc);
3151*4882a593Smuzhiyun pcpu_free_alloc_info(ai);
3152*4882a593Smuzhiyun }
3153*4882a593Smuzhiyun
3154*4882a593Smuzhiyun #endif /* CONFIG_SMP */
3155*4882a593Smuzhiyun
3156*4882a593Smuzhiyun /*
3157*4882a593Smuzhiyun * pcpu_nr_pages - calculate total number of populated backing pages
3158*4882a593Smuzhiyun *
3159*4882a593Smuzhiyun * This reflects the number of pages populated to back chunks. Metadata is
3160*4882a593Smuzhiyun * excluded in the number exposed in meminfo as the number of backing pages
3161*4882a593Smuzhiyun * scales with the number of cpus and can quickly outweigh the memory used for
3162*4882a593Smuzhiyun * metadata. It also keeps this calculation nice and simple.
3163*4882a593Smuzhiyun *
3164*4882a593Smuzhiyun * RETURNS:
3165*4882a593Smuzhiyun * Total number of populated backing pages in use by the allocator.
3166*4882a593Smuzhiyun */
3167*4882a593Smuzhiyun unsigned long pcpu_nr_pages(void)
3168*4882a593Smuzhiyun {
3169*4882a593Smuzhiyun return pcpu_nr_populated * pcpu_nr_units;
3170*4882a593Smuzhiyun }
3171*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pcpu_nr_pages);
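
/*
 * For reference, this count is what /proc/meminfo reports on its "Percpu:"
 * line (converted to kilobytes there): populated backing pages times the
 * number of units, excluding allocator metadata.
 *
 *	unsigned long kb = pcpu_nr_pages() << (PAGE_SHIFT - 10);
 */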
3172*4882a593Smuzhiyun
3173*4882a593Smuzhiyun /*
3174*4882a593Smuzhiyun  * The percpu allocator is initialized early during boot when neither slab
3175*4882a593Smuzhiyun  * nor workqueue is available. Plug async management until everything is
3176*4882a593Smuzhiyun  * up and running.
3177*4882a593Smuzhiyun */
3178*4882a593Smuzhiyun static int __init percpu_enable_async(void)
3179*4882a593Smuzhiyun {
3180*4882a593Smuzhiyun pcpu_async_enabled = true;
3181*4882a593Smuzhiyun return 0;
3182*4882a593Smuzhiyun }
3183*4882a593Smuzhiyun subsys_initcall(percpu_enable_async);