/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/android_kabi.h>

struct cma;

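/*
 * Dispatch model (summarized here for implementers; see kernel/dma for the
 * authoritative behaviour): the dma_* API entry points look up a device's
 * ops via get_dma_ops() and call the matching callback below.  When
 * get_dma_ops() returns NULL the request is handled by the dma-direct path
 * instead, and callbacks that an implementation does not provide are
 * typically left NULL.
 */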
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	void *(*alloc_noncoherent)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	unsigned long (*get_merge_boundary)(struct device *dev);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
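
/*
 * Usage sketch (illustrative only, not part of this header): code that
 * supplies its own dma_map_ops, e.g. an IOMMU or bus layer, installs them
 * per device with set_dma_ops().  All names below are hypothetical.
 *
 *	static const struct dma_map_ops my_bus_dma_ops = {
 *		.alloc		= my_bus_dma_alloc,
 *		.free		= my_bus_dma_free,
 *		.map_page	= my_bus_dma_map_page,
 *		.unmap_page	= my_bus_dma_unmap_page,
 *	};
 *
 *	static void my_bus_setup_dma(struct device *dev)
 *	{
 *		set_dma_ops(dev, &my_bus_dma_ops);
 *	}
 */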

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
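
/*
 * Calling-pattern sketch for the contiguous helpers above (hedged; the
 * surrounding allocator is hypothetical): dma_alloc_contiguous() tries the
 * device's CMA area and may return NULL, in which case callers are expected
 * to fall back to the normal page allocator, while dma_free_contiguous()
 * copes with pages from either source.
 *
 *	struct page *page;
 *
 *	page = dma_alloc_contiguous(dev, size, gfp);
 *	if (!page)
 *		page = alloc_pages(gfp, get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 */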

#ifdef CONFIG_DMA_PERNUMA_CMA
void dma_pernuma_cma_reserve(void);
#else
static inline void dma_pernuma_cma_reserve(void) { }
#endif /* CONFIG_DMA_PERNUMA_CMA */

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);

#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }

static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
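
/*
 * Hedged sketch: dma_declare_coherent_memory() hands a device-local memory
 * region (e.g. on-chip SRAM) to the coherent allocator so that
 * dma_alloc_coherent() for this device is satisfied from that region.  The
 * resource used below is purely illustrative.
 *
 *	ret = dma_declare_coherent_memory(dev, res->start, res->start,
 *					  resource_size(res));
 *	if (ret)
 *		return ret;
 */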

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);
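
/*
 * Note (hedged): the pool helpers above back non-blocking allocations.  When
 * a caller cannot sleep (e.g. GFP_ATOMIC) but the buffer has to be remapped
 * or kept non-cacheable, the memory is expected to come from a pre-populated
 * atomic pool via dma_alloc_from_pool(), with the phys_addr_ok() callback
 * rejecting pages the device cannot address.
 */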

#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

/*
 * If there is no system cache pgprot, then fallback to dmacoherent
 * pgprot, as the expectation is that the device is not coherent.
 */
#ifndef pgprot_syscached
#define pgprot_syscached(prot)		pgprot_dmacoherent(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
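
/*
 * Hedged sketch of how dma_pgprot() is intended to be used by mmap/remap
 * implementations: derive the protection for a userspace mapping of a DMA
 * buffer from the vma's protection plus the allocation attributes (this
 * mirrors what dma_common_mmap() does, simplified).
 *
 *	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */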

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
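
/*
 * Hedged pairing sketch for the cache maintenance hooks above, simplified
 * from what the dma-direct code does for non-coherent devices: write back
 * caches before the device reads the buffer, invalidate before the CPU
 * reads it back.
 *
 *	if (!dev_is_dma_coherent(dev))
 *		arch_sync_dma_for_device(paddr, size, dir);
 *	... device performs DMA ...
 *	if (!dev_is_dma_coherent(dev))
 *		arch_sync_dma_for_cpu(paddr, size, dir);
 */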

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
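
/*
 * Note (hedged): arch_setup_dma_ops() is normally reached from the
 * firmware-driven configuration paths (e.g. of_dma_configure() or its ACPI
 * equivalent) so the architecture can choose dma_map_ops and record
 * coherency before the device is probed; arch_teardown_dma_ops() below is
 * the counterpart used when the device is deconfigured.
 */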

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

#endif /* _LINUX_DMA_MAP_OPS_H */