Lines Matching +full:reserved +full:- +full:memory
1 // SPDX-License-Identifier: GPL-2.0+
3 * Contiguous Memory Allocator for DMA mapping framework
4 * Copyright (c) 2010-2011 by Samsung Electronics.
9 * Contiguous Memory Allocator
11 * The Contiguous Memory Allocator (CMA) makes it possible to
12 * allocate big contiguous chunks of memory after the system has
17 * Various devices on embedded systems have no scatter/gather and/or
18 * IO map support and require contiguous blocks of memory to
22 * Such devices often require big memory buffers (a full HD frame
24 * MB of memory), which makes mechanisms such as kmalloc() or
27 * At the same time, a solution where a big memory region is
28 * reserved for a device is suboptimal since often more memory is
29 * reserved than strictly required and, moreover, the memory is
32 * CMA tries to solve this issue by operating on memory regions
34 * can use the memory for pagecache and when device driver requests
51 #include <linux/dma-map-ops.h>
68 * The size can be set in bytes or as a percentage of the total memory
76 static phys_addr_t size_cmdline __initdata = -1;
84 return -EINVAL; in early_cma()
91 if (*p != '-') { in early_cma()
154 pr_debug("%s: reserved %llu MiB on node %d\n", __func__, in dma_pernuma_cma_reserve()
161 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
162 * @limit: End address of the reserved memory (optional, 0 for any).
164 * This function reserves memory from early allocator. It should be
166 * has been activated and all other subsystems have already allocated/reserved
167 * memory.
178 if (size_cmdline != -1) { in dma_contiguous_reserve()
213 * dma_contiguous_reserve_area() - reserve custom contiguous area
214 * @size: Size of the reserved area (in bytes),
215 * @base: Base address of the reserved area optional, use 0 for any
216 * @limit: End address of the reserved memory (optional, 0 for any).
218 * @fixed: hint about where to place the reserved area
220 * This function reserves memory from early allocator. It should be
222 * has been activated and all other subsystems have already allocated/reserved
223 * memory. This function allows to create custom reserved areas for specific
236 "reserved", res_cma); in dma_contiguous_reserve_area()
240 /* Architecture specific contiguous memory fixup. */ in dma_contiguous_reserve_area()
248 * dma_alloc_from_contiguous() - allocate pages from contiguous area
254 * This function allocates memory buffer for specified device. It uses
255 * device specific contiguous memory area if available or the default
270 * dma_release_from_contiguous() - release allocated pages
275 * This function releases memory allocated by dma_alloc_from_contiguous().
294 * dma_alloc_contiguous() - allocate contiguous pages
299 * tries to use device specific contiguous memory area if available, or it
300 * tries to use per-numa cma, if the allocation fails, it will fallback to
303 * Note that it bypasses one-page allocations from the per-numa and
318 if (dev->cma_area) in dma_alloc_contiguous()
319 return cma_alloc_aligned(dev->cma_area, size, gfp); in dma_alloc_contiguous()
346 * dma_free_contiguous() - release allocated pages
351 * This function releases memory allocated by dma_alloc_contiguous(). As the
354 * upon a false-return.
361 if (dev->cma_area) { in dma_free_contiguous()
362 if (cma_release(dev->cma_area, page, count)) in dma_free_contiguous()
366 * otherwise, page is from either per-numa cma or default cma in dma_free_contiguous()
382 * Support for reserved memory regions defined in device tree
394 dev->cma_area = rmem->priv; in rmem_cma_device_init()
401 dev->cma_area = NULL; in rmem_cma_device_release()
411 phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); in rmem_cma_setup()
412 phys_addr_t mask = align - 1; in rmem_cma_setup()
413 unsigned long node = rmem->fdt_node; in rmem_cma_setup()
414 bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL); in rmem_cma_setup()
418 if (size_cmdline != -1 && default_cma) { in rmem_cma_setup()
419 pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n", in rmem_cma_setup()
420 rmem->name); in rmem_cma_setup()
421 return -EBUSY; in rmem_cma_setup()
425 of_get_flat_dt_prop(node, "no-map", NULL)) in rmem_cma_setup()
426 return -EINVAL; in rmem_cma_setup()
428 if ((rmem->base & mask) || (rmem->size & mask)) { in rmem_cma_setup()
429 pr_err("Reserved memory: incorrect alignment of CMA region\n"); in rmem_cma_setup()
430 return -EINVAL; in rmem_cma_setup()
433 err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma); in rmem_cma_setup()
435 pr_err("Reserved memory: unable to setup CMA region\n"); in rmem_cma_setup()
438 /* Architecture specific contiguous memory fixup. */ in rmem_cma_setup()
439 dma_contiguous_early_fixup(rmem->base, rmem->size); in rmem_cma_setup()
444 rmem->ops = &rmem_cma_ops; in rmem_cma_setup()
445 rmem->priv = cma; in rmem_cma_setup()
447 pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n", in rmem_cma_setup()
448 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_cma_setup()
452 RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);