Lines matching full:cma in mm/cma.c

15 #define pr_fmt(fmt) "cma: " fmt
32 #include <linux/cma.h>
38 #include <trace/events/cma.h>
43 #include "cma.h"
48 struct cma cma_areas[MAX_CMA_AREAS];
52 phys_addr_t cma_get_base(const struct cma *cma) in cma_get_base() argument
54 return PFN_PHYS(cma->base_pfn); in cma_get_base()
57 unsigned long cma_get_size(const struct cma *cma) in cma_get_size() argument
59 return cma->count << PAGE_SHIFT; in cma_get_size()
62 const char *cma_get_name(const struct cma *cma) in cma_get_name() argument
64 return cma->name; in cma_get_name()
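
For context, a minimal sketch (not among the matched lines) of how a caller outside mm/ could combine these accessors with cma_for_each_area(), which appears at the end of this listing; the my_-prefixed names are hypothetical, only the CMA APIs are real:

	#include <linux/cma.h>
	#include <linux/printk.h>

	/* Hypothetical callback: report one CMA area per invocation. */
	static int my_cma_dump_one(struct cma *cma, void *data)
	{
		phys_addr_t base = cma_get_base(cma);

		pr_info("CMA %s: base %pa, size %lu bytes\n",
			cma_get_name(cma), &base, cma_get_size(cma));
		return 0;	/* non-zero would stop the iteration */
	}

	/* Walk every registered CMA area. */
	static void my_cma_dump_all(void)
	{
		cma_for_each_area(my_cma_dump_one, NULL);
	}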
68 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, in cma_bitmap_aligned_mask() argument
71 if (align_order <= cma->order_per_bit) in cma_bitmap_aligned_mask()
73 return (1UL << (align_order - cma->order_per_bit)) - 1; in cma_bitmap_aligned_mask()
80 static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, in cma_bitmap_aligned_offset() argument
83 return (cma->base_pfn & ((1UL << align_order) - 1)) in cma_bitmap_aligned_offset()
84 >> cma->order_per_bit; in cma_bitmap_aligned_offset()
87 static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, in cma_bitmap_pages_to_bits() argument
90 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
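
A worked example of the order_per_bit bookkeeping above (illustrative values, not from the source): each bitmap bit covers 2^order_per_bit pages.

	/*
	 * With order_per_bit = 2, one bitmap bit covers 1 << 2 = 4 pages, so:
	 *
	 *   cma_bitmap_pages_to_bits(cma, 5) == ALIGN(5, 4) >> 2 == 2 bits
	 *   cma_bitmap_aligned_mask(cma, 4)  == (1 << (4 - 2)) - 1 == 3
	 *
	 * i.e. a request for 5 pages consumes 2 bits, and an allocation
	 * aligned to 2^4 pages may only start at a bitmap offset that is a
	 * multiple of 4 bits (adjusted by cma_bitmap_aligned_offset()).
	 */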
93 static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, in cma_clear_bitmap() argument
98 bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit; in cma_clear_bitmap()
99 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_clear_bitmap()
101 mutex_lock(&cma->lock); in cma_clear_bitmap()
102 bitmap_clear(cma->bitmap, bitmap_no, bitmap_count); in cma_clear_bitmap()
103 mutex_unlock(&cma->lock); in cma_clear_bitmap()
106 static void __init cma_activate_area(struct cma *cma) in cma_activate_area() argument
108 unsigned long base_pfn = cma->base_pfn, pfn; in cma_activate_area()
111 cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL); in cma_activate_area()
112 if (!cma->bitmap) in cma_activate_area()
119 * same zone. Simplify by forcing the entire CMA resv range to be in the in cma_activate_area()
124 for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) { in cma_activate_area()
130 for (pfn = base_pfn; pfn < base_pfn + cma->count; in cma_activate_area()
135 mutex_init(&cma->lock); in cma_activate_area()
138 INIT_HLIST_HEAD(&cma->mem_head); in cma_activate_area()
139 spin_lock_init(&cma->mem_head_lock); in cma_activate_area()
145 bitmap_free(cma->bitmap); in cma_activate_area()
147 /* Expose all pages to the buddy, they are useless for CMA. */ in cma_activate_area()
148 for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++) in cma_activate_area()
150 totalcma_pages -= cma->count; in cma_activate_area()
151 cma->count = 0; in cma_activate_area()
152 pr_err("CMA area %s could not be activated\n", cma->name); in cma_activate_area()
175 * @res_cma: Pointer to store the created cma region.
182 struct cma **res_cma) in cma_init_reserved_mem()
184 struct cma *cma; in cma_init_reserved_mem() local
191 pr_err("Not enough slots for CMA reserved regions!\n"); in cma_init_reserved_mem()
215 cma = &cma_areas[cma_area_count]; in cma_init_reserved_mem()
218 snprintf(cma->name, CMA_MAX_NAME, name); in cma_init_reserved_mem()
220 snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); in cma_init_reserved_mem()
222 cma->base_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
223 cma->count = size >> PAGE_SHIFT; in cma_init_reserved_mem()
224 cma->order_per_bit = order_per_bit; in cma_init_reserved_mem()
225 *res_cma = cma; in cma_init_reserved_mem()
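
A minimal sketch of an early-boot caller of cma_init_reserved_mem() (not among the matched lines); the region name, base and size are hypothetical, and the range must already be reserved (e.g. via memblock) and aligned to the pageblock-based minimum this file enforces:

	#include <linux/cma.h>
	#include <linux/printk.h>

	static struct cma *my_cma;	/* hypothetical handle */

	static int __init my_cma_setup(phys_addr_t base, phys_addr_t size)
	{
		int ret;

		ret = cma_init_reserved_mem(base, size, 0 /* order_per_bit */,
					    "my-region", &my_cma);
		if (ret)
			pr_err("my-region: CMA init failed: %d\n", ret);
		return ret;
	}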
237 * @alignment: Alignment for the CMA area, should be power of 2 or zero
241 * @res_cma: Pointer to store the created cma region.
255 bool fixed, const char *name, struct cma **res_cma, in cma_declare_contiguous_nid()
273 pr_err("Not enough slots for CMA reserved regions!\n"); in cma_declare_contiguous_nid()
286 * Pages both ends in CMA area could be merged into adjacent unmovable in cma_declare_contiguous_nid()
360 * It will place the new cma area close to the start of the node in cma_declare_contiguous_nid()
362 * cma area and not into it. in cma_declare_contiguous_nid()
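
A minimal sketch of declaring a new CMA area during early/arch init (not among the matched lines); the size and name are hypothetical, and cma_declare_contiguous() is the NUMA_NO_NODE wrapper around cma_declare_contiguous_nid() shown above:

	#include <linux/cma.h>
	#include <linux/sizes.h>
	#include <linux/printk.h>

	static struct cma *my_boot_cma;	/* hypothetical handle */

	static void __init my_reserve_cma(void)
	{
		int ret;

		/* base = 0, limit = 0: let memblock pick any suitable range. */
		ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
					     "my-boot-cma", &my_boot_cma);
		if (ret)
			pr_warn("my-boot-cma: reservation failed: %d\n", ret);
	}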
417 static void cma_debug_show_areas(struct cma *cma) in cma_debug_show_areas() argument
422 unsigned long nbits = cma_bitmap_maxno(cma); in cma_debug_show_areas()
424 mutex_lock(&cma->lock); in cma_debug_show_areas()
427 next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); in cma_debug_show_areas()
430 next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); in cma_debug_show_areas()
432 nr_part = nr_zero << cma->order_per_bit; in cma_debug_show_areas()
438 pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); in cma_debug_show_areas()
439 mutex_unlock(&cma->lock); in cma_debug_show_areas()
442 static inline void cma_debug_show_areas(struct cma *cma) { } in cma_debug_show_areas() argument
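
The debug dump above scans the allocation bitmap for runs of zero bits. An illustrative, self-contained variant of that scan pattern (not from the source, my_ name hypothetical) using the same find_next_zero_bit()/find_next_bit() helpers:

	#include <linux/bitmap.h>
	#include <linux/printk.h>

	/* Report every free run in a bitmap of nbits bits. */
	static void my_dump_free_runs(const unsigned long *bitmap,
				      unsigned long nbits)
	{
		unsigned long start = 0, next_zero, next_set;

		for (;;) {
			next_zero = find_next_zero_bit(bitmap, nbits, start);
			if (next_zero >= nbits)
				break;
			next_set = find_next_bit(bitmap, nbits, next_zero);
			pr_info("free run: bits %lu-%lu (%lu bits)\n",
				next_zero, next_set - 1, next_set - next_zero);
			start = next_set;
		}
	}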
447 * @cma: Contiguous memory region for which the allocation is performed.
450 * @gfp_mask: GFP mask to use during the cma allocation.
455 struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, in cma_alloc() argument
472 if (!cma || !cma->count || !cma->bitmap) in cma_alloc()
475 pr_debug("%s(cma %p, count %zu, align %d gfp_mask 0x%x)\n", __func__, in cma_alloc()
476 (void *)cma, count, align, gfp_mask); in cma_alloc()
481 trace_cma_alloc_start(cma->name, count, align); in cma_alloc()
483 mask = cma_bitmap_aligned_mask(cma, align); in cma_alloc()
484 offset = cma_bitmap_aligned_offset(cma, align); in cma_alloc()
485 bitmap_maxno = cma_bitmap_maxno(cma); in cma_alloc()
486 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_alloc()
495 mutex_lock(&cma->lock); in cma_alloc()
496 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, in cma_alloc()
501 mutex_unlock(&cma->lock); in cma_alloc()
520 mutex_unlock(&cma->lock); in cma_alloc()
524 bitmap_set(cma->bitmap, bitmap_no, bitmap_count); in cma_alloc()
530 mutex_unlock(&cma->lock); in cma_alloc()
532 pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); in cma_alloc()
557 cma_clear_bitmap(cma, pfn, count); in cma_alloc()
564 trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), in cma_alloc()
570 cma->base_pfn) >> cma->order_per_bit; in cma_alloc()
579 trace_cma_alloc_finish(cma->name, pfn, page, count, align); in cma_alloc()
580 trace_cma_alloc_info(cma->name, page, count, align, &cma_info); in cma_alloc()
583 * CMA can allocate multiple page blocks, which results in different in cma_alloc()
594 __func__, cma->name, count, ret); in cma_alloc()
595 cma_debug_show_areas(cma); in cma_alloc()
600 trace_android_vh_cma_alloc_finish(cma, page, count, align, gfp_mask, ts); in cma_alloc()
603 cma_sysfs_account_success_pages(cma, count); in cma_alloc()
606 if (cma) in cma_alloc()
607 cma_sysfs_account_fail_pages(cma, count); in cma_alloc()
616 * @cma: Contiguous memory region for which the allocation is performed.
624 bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) in cma_release() argument
628 if (!cma || !pages) in cma_release()
635 if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) in cma_release()
638 VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); in cma_release()
641 cma_clear_bitmap(cma, pfn, count); in cma_release()
642 trace_cma_release(cma->name, pfn, pages, count); in cma_release()
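
A minimal sketch of a driver pairing cma_alloc() and cma_release() (not among the matched lines); the my_ helpers are hypothetical, and note that in this tree cma_alloc() takes a gfp_mask as its last argument (mainline uses a bool no_warn instead):

	#include <linux/cma.h>
	#include <linux/gfp.h>
	#include <linux/printk.h>

	static struct page *my_grab_buffer(struct cma *cma, size_t nr_pages)
	{
		/* align = 0: no alignment beyond a single page. */
		return cma_alloc(cma, nr_pages, 0, GFP_KERNEL);
	}

	static void my_put_buffer(struct cma *cma, struct page *pages,
				  size_t nr_pages)
	{
		if (!cma_release(cma, pages, nr_pages))
			pr_warn("pages were not part of the CMA area\n");
	}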
651 struct cma *cma; in cma_used_pages() local
657 cma = &cma_areas[i]; in cma_used_pages()
658 mutex_lock(&cma->lock); in cma_used_pages()
659 used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma)); in cma_used_pages()
660 mutex_unlock(&cma->lock); in cma_used_pages()
661 val += used << cma->order_per_bit; in cma_used_pages()
668 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) in cma_for_each_area() argument