// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <xen/swiotlb-xen.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL	    0
#define COHERENT    1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
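
/*
 * As an illustration only (hypothetical driver code, not part of this
 * file): a driver bouncing a pre-mapped buffer between CPU and device
 * would express the two ownership transitions with the dma_sync_* calls,
 * roughly like so:
 *
 *	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
 *	// ... device performs the transfer ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	// the CPU may now safely read the buffer again
 */
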
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}
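
/*
 * A minimal sketch of the driver-facing sequence that lands in the two
 * hooks above (hypothetical example, not part of this file):
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... start the device on 'dma', wait for completion ...
 *	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 */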

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static int arm_dma_supported(struct device *dev, u64 mask)
{
	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	return dma_to_pfn(dev, mask) >= max_dma_pfn;
}
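
/*
 * For illustration (hypothetical driver code): the 24-bit example from the
 * comment above would reach arm_dma_supported() via the generic mask
 * setter, e.g.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24)))
 *		return -EIO;	// platform cannot satisfy a 24-bit mask
 */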

const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.alloc_pages		= dma_direct_alloc_pages,
	.free_pages		= dma_direct_free_pages,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.map_resource		= dma_direct_map_resource,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs);

const struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.alloc_pages		= dma_direct_alloc_pages,
	.free_pages		= dma_direct_free_pages,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.map_resource		= dma_direct_map_resource,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
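
/*
 * Usage note (boot-time configuration, not code in this file): the default
 * pool size above can be overridden on the kernel command line, e.g.
 *
 *	coherent_pool=2M
 *
 * memparse() accepts the usual K/M/G suffixes.
 */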

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = dma_common_contiguous_remap(page, size, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = dma_common_contiguous_remap(page, size, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			dma_common_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}
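
/*
 * For illustration (hypothetical driver code): a caller selects the
 * write-combine variant of the pgprot above through the attrs argument
 * of the generic API, e.g.
 *
 *	void *va = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
 *				   DMA_ATTR_WRITE_COMBINE);
 */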

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		dma_common_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}
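
/*
 * A minimal sketch of the driver-side call that ends up in arm_dma_alloc()
 * (hypothetical example, not part of this file):
 *
 *	dma_addr_t dma;
 *	void *va = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	// ... use va from the CPU and dma from the device ...
 *	dma_free_coherent(dev, SZ_4K, va, dma);
 */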

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
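
/*
 * For illustration (hypothetical driver code): a character device would
 * typically hand a coherent buffer to userspace from its ->mmap handler
 * through the generic wrapper that lands here:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(dev, vma, va, dma, size);
 *	}
 *
 * where va/dma came from an earlier dma_alloc_coherent() call; my_mmap()
 * is a made-up name.
 */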

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 unsigned long attrs)
{
	unsigned long pfn = dma_to_pfn(dev, handle);
	struct page *page;
	int ret;

	/* If the PFN is not valid, we do not have a struct page */
	if (!pfn_valid(pfn))
		return -ENXIO;

	page = pfn_to_page(pfn);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
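
/*
 * A minimal sketch of the scatter-gather sequence from a driver's point
 * of view (hypothetical example, not part of this file):
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, n, i)
 *		program_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	// ... run the transfer ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * program_descriptor() stands in for device-specific setup.
 */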
1001*4882a593Smuzhiyun
1002*4882a593Smuzhiyun /**
1003*4882a593Smuzhiyun * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1004*4882a593Smuzhiyun * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1005*4882a593Smuzhiyun * @sg: list of buffers
1006*4882a593Smuzhiyun * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1007*4882a593Smuzhiyun * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1008*4882a593Smuzhiyun *
1009*4882a593Smuzhiyun * Unmap a set of streaming mode DMA translations. Again, CPU access
1010*4882a593Smuzhiyun * rules concerning calls here are the same as for dma_unmap_single().
1011*4882a593Smuzhiyun */
arm_dma_unmap_sg(struct device * dev,struct scatterlist * sg,int nents,enum dma_data_direction dir,unsigned long attrs)1012*4882a593Smuzhiyun void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1013*4882a593Smuzhiyun enum dma_data_direction dir, unsigned long attrs)
1014*4882a593Smuzhiyun {
1015*4882a593Smuzhiyun const struct dma_map_ops *ops = get_dma_ops(dev);
1016*4882a593Smuzhiyun struct scatterlist *s;
1017*4882a593Smuzhiyun
1018*4882a593Smuzhiyun int i;
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun for_each_sg(sg, s, nents, i)
1021*4882a593Smuzhiyun ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
1022*4882a593Smuzhiyun }
1023*4882a593Smuzhiyun
1024*4882a593Smuzhiyun /**
1025*4882a593Smuzhiyun * arm_dma_sync_sg_for_cpu
1026*4882a593Smuzhiyun * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1027*4882a593Smuzhiyun * @sg: list of buffers
1028*4882a593Smuzhiyun * @nents: number of buffers to map (returned from dma_map_sg)
1029*4882a593Smuzhiyun * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1030*4882a593Smuzhiyun */
arm_dma_sync_sg_for_cpu(struct device * dev,struct scatterlist * sg,int nents,enum dma_data_direction dir)1031*4882a593Smuzhiyun void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1032*4882a593Smuzhiyun int nents, enum dma_data_direction dir)
1033*4882a593Smuzhiyun {
1034*4882a593Smuzhiyun const struct dma_map_ops *ops = get_dma_ops(dev);
1035*4882a593Smuzhiyun struct scatterlist *s;
1036*4882a593Smuzhiyun int i;
1037*4882a593Smuzhiyun
1038*4882a593Smuzhiyun for_each_sg(sg, s, nents, i)
1039*4882a593Smuzhiyun ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
1040*4882a593Smuzhiyun dir);
1041*4882a593Smuzhiyun }
1042*4882a593Smuzhiyun
1043*4882a593Smuzhiyun /**
1044*4882a593Smuzhiyun * arm_dma_sync_sg_for_device
1045*4882a593Smuzhiyun * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1046*4882a593Smuzhiyun * @sg: list of buffers
1047*4882a593Smuzhiyun * @nents: number of buffers to map (returned from dma_map_sg)
1048*4882a593Smuzhiyun * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1049*4882a593Smuzhiyun */
arm_dma_sync_sg_for_device(struct device * dev,struct scatterlist * sg,int nents,enum dma_data_direction dir)1050*4882a593Smuzhiyun void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1051*4882a593Smuzhiyun int nents, enum dma_data_direction dir)
1052*4882a593Smuzhiyun {
1053*4882a593Smuzhiyun const struct dma_map_ops *ops = get_dma_ops(dev);
1054*4882a593Smuzhiyun struct scatterlist *s;
1055*4882a593Smuzhiyun int i;
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyun for_each_sg(sg, s, nents, i)
1058*4882a593Smuzhiyun ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
1059*4882a593Smuzhiyun dir);
1060*4882a593Smuzhiyun }
1061*4882a593Smuzhiyun
arm_get_dma_map_ops(bool coherent)1062*4882a593Smuzhiyun static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
1063*4882a593Smuzhiyun {
1064*4882a593Smuzhiyun /*
1065*4882a593Smuzhiyun * When CONFIG_ARM_LPAE is set, physical address can extend above
1066*4882a593Smuzhiyun * 32-bits, which then can't be addressed by devices that only support
1067*4882a593Smuzhiyun * 32-bit DMA.
1068*4882a593Smuzhiyun * Use the generic dma-direct / swiotlb ops code in that case, as that
1069*4882a593Smuzhiyun * handles bounce buffering for us.
1070*4882a593Smuzhiyun */
1071*4882a593Smuzhiyun if (IS_ENABLED(CONFIG_ARM_LPAE))
1072*4882a593Smuzhiyun return NULL;
1073*4882a593Smuzhiyun return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
1074*4882a593Smuzhiyun }
1075*4882a593Smuzhiyun
1076*4882a593Smuzhiyun #ifdef CONFIG_ARM_DMA_USE_IOMMU
1077*4882a593Smuzhiyun
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return prot;
	}
}

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

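/*
 * Allocate an IO virtual address range from the mapping's bitmaps. The
 * start of the range is aligned to the allocation size, capped at
 * CONFIG_ARM_DMA_IOMMU_ALIGNMENT. As a sketch, assuming 4 KiB pages: a
 * 16 KiB request scans each bitmap for four consecutive clear bits at a
 * four-page boundary, and the resulting address is
 * base + bitmap_index * mapping_size + start * PAGE_SIZE.
 */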
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else {
		count = size >> PAGE_SHIFT;
	}

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

/*
 * We'll try 2M, 1M, 64K, and finally 4K chunks (assuming 4 KiB pages);
 * the array must end with 0!
 */
static const int iommu_order_array[] = { 9, 8, 4, 0 };

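/*
 * Allocate the backing pages for an IOMMU buffer, opportunistically using
 * high-order chunks and stepping down iommu_order_array under memory
 * pressure. As a sketch: a 1 MiB request first tries order-8 (1 MiB)
 * allocations with __GFP_NORETRY; if that fails, the remainder is retried
 * with 64 KiB chunks, and finally with single pages.
 */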
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, unsigned long attrs,
					  int coherent_flag)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order,
						 gfp & __GFP_NOWARN);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size, coherent_flag);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * The IOMMU can map any pages, so highmem can also be used here.
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
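/*
 * Physically contiguous runs in the pages[] array are coalesced below, so
 * a buffer whose backing pages happen to be contiguous is mapped with a
 * single iommu_map() call rather than one call per page.
 */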
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova - dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_MAPPING_ERROR;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return cpu_addr;

	return dma_common_find_pages(cpu_addr);
}

static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
				  dma_addr_t *handle, int coherent_flag,
				  unsigned long attrs)
{
	struct page *page;
	void *addr;

	if (coherent_flag == COHERENT)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else
		addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_mapping;

	return addr;

err_mapping:
	/* Free via the same route the buffer was allocated from above. */
	if (coherent_flag == COHERENT)
		__dma_free_buffer(virt_to_page(addr), size);
	else
		__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size, int coherent_flag)
{
	__iommu_remove_mapping(dev, handle, size);
	if (coherent_flag == COHERENT)
		__dma_free_buffer(virt_to_page(cpu_addr), size);
	else
		__free_from_pool(cpu_addr, size);
}

static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
	    int coherent_flag)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_MAPPING_ERROR;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them. The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~__GFP_COMP;

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_buffer;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return pages;

	addr = dma_common_pages_remap(pages, size, prot,
				      __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
}

static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
}

static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    unsigned long attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int err;

	if (!pages)
		return -ENXIO;

	if (vma->vm_pgoff >= nr_pages)
		return -ENXIO;

	err = vm_map_pages(vma, pages, nr_pages);
	if (err)
		pr_err("Remapping memory failed: %d\n", err);

	return err;
}

static int arm_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

static int arm_coherent_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
 * Free a buffer allocated by __arm_iommu_alloc_attrs().
 * Must not be called with IRQs disabled.
 */
static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, unsigned long attrs, int coherent_flag)
{
	struct page **pages;

	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		dma_common_free_remap(cpu_addr, size);

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static void arm_iommu_free_attrs(struct device *dev, size_t size,
				 void *cpu_addr, dma_addr_t handle,
				 unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
}

static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
	    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
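/*
 * For a non-coherent device the CPU caches are cleaned for each entry
 * before mapping (unless DMA_ATTR_SKIP_CPU_SYNC is set), so a chunk of,
 * say, three 4 KiB entries ends up as one contiguous 12 KiB IOVA region
 * backed by three iommu_map() calls.
 */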
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, unsigned long attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_MAPPING_ERROR;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_MAPPING_ERROR)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_info_to_prot(dir, attrs);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

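/*
 * Walk the scatterlist and map it in page-aligned, size-limited chunks.
 * The returned count is the number of merged DMA segments, which may be
 * smaller than nents; callers must iterate sg_dma_address()/sg_dma_len()
 * over that count, as with any dma_map_sg() implementation.
 */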
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs,
			  bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_MAPPING_ERROR;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs, bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
static void arm_coherent_iommu_unmap_sg(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
static void arm_iommu_unmap_sg(struct device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_cpu(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}

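/*
 * Note on the map/unmap page helpers below: the DMA handle returned to the
 * caller keeps the in-page offset, so the unmap and sync paths mask it off
 * again with PAGE_MASK to recover the IOVA and the offset into the page.
 */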
/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_map_resource - map a device resource for DMA
 * @dev: valid struct device pointer
 * @phys_addr: physical address of resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static dma_addr_t arm_iommu_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot;
	phys_addr_t addr = phys_addr & PAGE_MASK;
	unsigned int offset = phys_addr & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;

	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_unmap_resource - unmap a device DMA resource
 * @dev: valid struct device pointer
 * @dma_handle: DMA address of resource
 * @size: size of resource to unmap (same as was passed to map)
 * @dir: DMA transfer direction
 */
static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = dma_handle & PAGE_MASK;
	unsigned int offset = dma_handle & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

static const struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.map_resource		= arm_iommu_map_resource,
	.unmap_resource		= arm_iommu_unmap_resource,

	.dma_supported		= arm_dma_supported,
};

static const struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_coherent_iommu_alloc_attrs,
	.free		= arm_coherent_iommu_free_attrs,
	.mmap		= arm_coherent_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.map_resource	= arm_iommu_map_resource,
	.unmap_resource	= arm_iommu_unmap_resource,

	.dma_supported	= arm_dma_supported,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with
 * arm_iommu_attach_device().
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
				   GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
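
/*
 * Typical driver-side usage (a minimal sketch; error handling is elided
 * and the base/size values are illustrative only):
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_256M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping))
 *		goto release;
 *	...
 *	arm_iommu_detach_device(dev);
 * release:
 *	arm_iommu_release_mapping(mapping);
 */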

static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

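/*
 * Grow the mapping by one more bitmap, allocated atomically since this is
 * called under mapping->lock from __alloc_iova(). Each extension adds
 * bitmap_size * BITS_PER_BYTE more IOVA pages; with 4 KiB pages and a
 * PAGE_SIZE bitmap that is another 128 MiB of IO address space.
 */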
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	to_dma_iommu_mapping(dev) = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}
2148*4882a593Smuzhiyun
2149*4882a593Smuzhiyun /**
2150*4882a593Smuzhiyun * arm_iommu_attach_device
2151*4882a593Smuzhiyun * @dev: valid struct device pointer
2152*4882a593Smuzhiyun * @mapping: io address space mapping structure (returned from
2153*4882a593Smuzhiyun * arm_iommu_create_mapping)
2154*4882a593Smuzhiyun *
2155*4882a593Smuzhiyun * Attaches specified io address space mapping to the provided device.
2156*4882a593Smuzhiyun * This replaces the dma operations (dma_map_ops pointer) with the
2157*4882a593Smuzhiyun * IOMMU aware version.
2158*4882a593Smuzhiyun *
2159*4882a593Smuzhiyun * More than one client might be attached to the same io address space
2160*4882a593Smuzhiyun * mapping.
2161*4882a593Smuzhiyun */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
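
/*
 * Example usage (illustrative sketch, guarded out): a driver creating a
 * private 64 MiB IOVA space at bus address 0 and attaching it before any
 * dma_map_*() calls. The probe function name is hypothetical.
 */
#if 0
static int example_probe(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int err;

	mapping = arm_iommu_create_mapping(dev->bus, 0, SZ_64M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	err = arm_iommu_attach_device(dev, mapping);
	if (err) {
		/* Drop the initial reference taken at creation time. */
		arm_iommu_release_mapping(mapping);
		return err;
	}

	return 0;
}
#endif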

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;
	set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
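
/*
 * Teardown counterpart to the sketch above (again guarded out and
 * hypothetical): detaching drops the attach-time reference, so the
 * creator still releases the reference from arm_iommu_create_mapping().
 */
#if 0
static void example_remove(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);	/* may free the mapping */
}
#endif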

static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
	return coherent ? &iommu_coherent_ops : &iommu_ops;
}

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
			size, dev_name(dev));
		return false;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
			dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif	/* CONFIG_ARM_DMA_USE_IOMMU */

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;
#ifdef CONFIG_SWIOTLB
	dev->dma_coherent = coherent;
#endif

	/*
	 * Don't override the dma_ops if they have already been set. Ideally
	 * this should be the only location where dma_ops are set; remove this
	 * check once all other callers of set_dma_ops() have disappeared.
	 */
	if (dev->dma_ops)
		return;

	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);

#ifdef CONFIG_XEN
	if (xen_initial_domain())
		dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
	dev->archdata.dma_ops_setup = true;
}
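
/*
 * Illustrative call site (guarded out): in practice generic firmware glue
 * such as of_dma_configure() calls arch_setup_dma_ops() with a window and
 * coherency parsed from firmware; the values below are made up.
 */
#if 0
static void example_configure(struct device *dev,
			      const struct iommu_ops *iommu)
{
	/* 4 GiB window at bus address 0; device claims coherent DMA. */
	arch_setup_dma_ops(dev, 0, SZ_4G, iommu, true);
}
#endif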

void arch_teardown_dma_ops(struct device *dev)
{
	if (!dev->archdata.dma_ops_setup)
		return;

	arm_teardown_iommu_dma_ops(dev);
	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
	set_dma_ops(dev, NULL);
}

#ifdef CONFIG_SWIOTLB
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}
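
/*
 * Sketch of the driver-visible pattern that funnels into the two hooks
 * above (guarded out; the helper name is hypothetical): for a
 * DMA_FROM_DEVICE buffer, unmapping syncs for the CPU before it reads.
 */
#if 0
static void example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	/* ... device writes into the buffer via DMA ... */

	/* Ends up in arch_sync_dma_for_cpu() on this SWIOTLB path. */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}
#endif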

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, dma_handle, gfp,
			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
			   attrs, __builtin_return_address(0));
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
}
#endif /* CONFIG_SWIOTLB */
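
/*
 * Sketch of the generic entry point that reaches arch_dma_alloc() and
 * arch_dma_free() on this path (guarded out; the helper is hypothetical):
 * a driver allocating a small coherent descriptor ring.
 */
#if 0
static void *example_alloc_ring(struct device *dev, dma_addr_t *dma)
{
	/* Backed by __dma_alloc() above on this configuration. */
	return dma_alloc_coherent(dev, SZ_4K, dma, GFP_KERNEL);
}
#endif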