// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
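
/*
 * Illustrative sketch (not part of the allocator): the free list is encoded
 * in-band.  The first sizeof(int) bytes of every free block hold the offset
 * of the next free block within the same page, so with a hypothetical
 * size = 64 and allocation = PAGE_SIZE the freshly initialised chain is
 * simply 0 -> 64 -> 128 -> ... -> allocation.  dma_pool_alloc() pops the
 * head of that chain and dma_pool_free() pushes the block back on the front.
 */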

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
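
/*
 * Minimal usage sketch (hypothetical driver code, not part of this file):
 * assuming "dev" is a hypothetical struct device that will perform the DMA,
 * a driver might carve out 64-byte descriptors aligned to 16 bytes that
 * never cross a 4 KiB boundary like this:
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("descs", dev, 64, 16, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	dma_pool_destroy(pool);		(once every block has been freed)
 */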

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
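
/*
 * Worked illustration (hypothetical numbers, not taken from any caller):
 * with size = 96, boundary = 256 and allocation = 4096 the loop above links
 * the free chain as 0 -> 96 -> 256 -> 352 -> 512 -> ..., skipping the slice
 * at offset 192 because a block starting there would straddle the 256-byte
 * boundary; the skipped bytes are simply never handed out.
 */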

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(gfpflags_allow_blocking(mem_flags));

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
				       __func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
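
/*
 * Minimal usage sketch (hypothetical driver code, not part of this file):
 * given a pool created as in the dma_pool_create() example above, a block is
 * taken and returned like this; "desc" and "desc_dma" are assumed names.
 *
 *	dma_addr_t desc_dma;
 *	void *desc;
 *
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	...	(program the device with desc_dma, the CPU uses desc)
 *	dma_pool_free(pool, desc, desc_dma);
 */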

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
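
/*
 * Minimal usage sketch (hypothetical driver code, not part of this file):
 * the managed variant is tied to the device's devres list, so probe() can
 * simply create it and forget about teardown; the pool is destroyed for it
 * on driver detach.
 *
 *	pool = dmam_pool_create("descs", dev, 64, 16, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *	(no matching dmam_pool_destroy() call is needed on the normal path)
 */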

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);