// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <trace/events/cma.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

#include "cma.h"

extern void lru_cache_disable(void);
extern void lru_cache_enable(void);

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}
EXPORT_SYMBOL_GPL(cma_get_name);

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The returned value is in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

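/*
 * Worked example (illustrative, not part of the original source): with
 * order_per_bit == 2, each bitmap bit covers 4 pages. For align_order == 4
 * (a 16-page alignment), cma_bitmap_aligned_mask() returns
 * (1UL << (4 - 2)) - 1 == 3, i.e. the low bitmap-index bits that must be
 * zero for an aligned slot, and cma_bitmap_pages_to_bits(cma, 10) rounds
 * the 10 pages up to 12 and returns 3 bits.
 */
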
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	if (IS_ENABLED(CONFIG_CMA_INACTIVE))
		goto out;
	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA reserved range to be
	 * in the same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

out:
	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy; they are useless for CMA. */
	for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
		free_reserved_page(pfn_to_page(pfn));
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	phys_addr_t alignment;
#endif

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;
#endif

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

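/*
 * Illustrative sketch only (not part of the original file): how early boot
 * code might hand an already memblock-reserved range over to CMA. The
 * "example_" names are hypothetical.
 */
#if 0
static struct cma *example_cma;

static int __init example_cma_init(phys_addr_t base, phys_addr_t size)
{
	/* The range must already have been reserved via memblock_reserve(). */
	return cma_init_reserved_mem(base, size, 0, "example", &example_cma);
}
#endif
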
/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve a contiguous area at exactly @base. If false,
 * reserve in the range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the buddy allocator. In that case
	 * a later contiguous allocation could fail, which is not what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
#endif
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base, the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using the first 4GB so as not to interfere with
		 * constrained zones like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
#else
	pr_info("Reserved %ld KiB at %pa\n", (unsigned long)size / SZ_1K,
		&base);
#endif
	return 0;

free_mem:
	memblock_free(base, size);
err:
#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
#else
	pr_err("Failed to reserve %ld KiB\n", (unsigned long)size / SZ_1K);
#endif
	return ret;
}

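/*
 * Illustrative sketch only (not part of the original file): an arch- or
 * driver-specific early reservation that lets memblock place a 64 MiB CMA
 * area anywhere (base == 0, limit == 0, fixed == false). The "example_"
 * names are hypothetical.
 */
#if 0
static struct cma *example_cma;

static void __init example_cma_reserve(void)
{
	if (cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
				       "example", &example_cma, NUMA_NO_NODE))
		pr_warn("example: CMA reservation failed\n");
}
#endif
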
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the cma allocation.
 *
 * This function allocates part of contiguous memory from a specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;
	int num_attempts = 0;
	int max_retries = 5;
	s64 ts;
	struct cma_alloc_info cma_info = {0};

	trace_android_vh_cma_alloc_start(&ts);

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %zu, align %d gfp_mask 0x%x)\n", __func__,
		 (void *)cma, count, align, gfp_mask);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	lru_cache_disable();
	for (;;) {
		struct acr_info info = {0};

		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			if ((num_attempts < max_retries) && (ret == -EBUSY)) {
				mutex_unlock(&cma->lock);

				if (fatal_signal_pending(current) ||
				    (gfp_mask & __GFP_NORETRY))
					break;

				/*
				 * A page may be momentarily pinned by some
				 * other process which has been scheduled out,
				 * e.g. in the exit path, during an unmap call,
				 * or during a process fork, and so cannot be
				 * freed there. Sleep for 100ms and retry the
				 * allocation.
				 */
				start = 0;
				ret = -ENOMEM;
				schedule_timeout_killable(msecs_to_jiffies(100));
				num_attempts++;
				continue;
			} else {
				mutex_unlock(&cma->lock);
				break;
			}
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		if (IS_ENABLED(CONFIG_CMA_INACTIVE)) {
			page = pfn_to_page(pfn);
			lru_cache_enable();
			goto out;
		}
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask, &info);
		mutex_unlock(&cma_mutex);
		cma_info.nr_migrated += info.nr_migrated;
		cma_info.nr_reclaimed += info.nr_reclaimed;
		cma_info.nr_mapped += info.nr_mapped;
		if (info.err) {
			if (info.err & ACR_ERR_ISOLATE)
				cma_info.nr_isolate_fail++;
			if (info.err & ACR_ERR_MIGRATE)
				cma_info.nr_migrate_fail++;
			if (info.err & ACR_ERR_TEST)
				cma_info.nr_test_fail++;
		}
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);

		if (info.failed_pfn && gfp_mask & __GFP_NORETRY) {
			/* try again from the page following the failed one */
			start = (pfn_max_align_up(info.failed_pfn + 1) -
				 cma->base_pfn) >> cma->order_per_bit;

		} else {
			/* try again with a slightly different memory target */
			start = bitmap_no + mask + 1;
		}
	}

	lru_cache_enable();
	trace_cma_alloc_finish(cma->name, pfn, page, count, align);
	trace_cma_alloc_info(cma->name, page, count, align, &cma_info);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: %s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	trace_android_vh_cma_alloc_finish(cma, page, count, align, gfp_mask, ts);
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
EXPORT_SYMBOL_GPL(cma_alloc);

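/*
 * Illustrative sketch only (not part of the original file): allocating a
 * physically contiguous buffer whose alignment matches its size, the way
 * DMA-oriented callers typically do. The "example_" name is hypothetical.
 */
#if 0
static struct page *example_alloc_buffer(struct cma *cma, size_t bytes)
{
	size_t nr_pages = PAGE_ALIGN(bytes) >> PAGE_SHIFT;

	/* @align is a page order; get_order() derives it from the size */
	return cma_alloc(cma, nr_pages, get_order(PAGE_ALIGN(bytes)),
			 GFP_KERNEL);
}
#endif
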
/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p, count %u)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
	if (!IS_ENABLED(CONFIG_CMA_INACTIVE))
		free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}
EXPORT_SYMBOL_GPL(cma_release);

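/*
 * Illustrative sketch only (not part of the original file): the release
 * side of the cma_alloc() example above; @count must match the original
 * allocation. The "example_" name is hypothetical.
 */
#if 0
static void example_free_buffer(struct cma *cma, struct page *pages,
				unsigned int count)
{
	if (!cma_release(cma, pages, count))
		pr_warn("example: pages not from CMA area %s\n",
			cma_get_name(cma));
}
#endif
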
#ifdef CONFIG_NO_GKI
unsigned long cma_used_pages(void)
{
	struct cma *cma;
	unsigned long used;
	unsigned long val = 0;
	int i;

	for (i = 0; i < cma_area_count; i++) {
		cma = &cma_areas[i];
		mutex_lock(&cma->lock);
		used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
		mutex_unlock(&cma->lock);
		val += used << cma->order_per_bit;
	}
	return val;
}
EXPORT_SYMBOL_GPL(cma_used_pages);
#endif

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cma_for_each_area);

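/*
 * Illustrative sketch only (not part of the original file): a callback for
 * cma_for_each_area() that logs every registered area; returning non-zero
 * from the callback stops the iteration. The "example_" name is
 * hypothetical.
 */
#if 0
static int example_show_area(struct cma *cma, void *data)
{
	phys_addr_t base = cma_get_base(cma);

	pr_info("%s: base %pa, size %lu bytes\n",
		cma_get_name(cma), &base, cma_get_size(cma));
	return 0;
}

/* Usage: cma_for_each_area(example_show_area, NULL); */
#endif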