/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#ifndef _ION_KERNEL_H
#define _ION_KERNEL_H

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <uapi/linux/ion.h>

/**
 * struct ion_buffer - metadata for a particular buffer
 * @list:		element in list of deferred freeable buffers
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @private_flags:	internal buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		heap-private data for the buffer, stored as
 *			a void *
 * @lock:		protects the buffer's kmap_cnt and vaddr fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @sg_table:		the sg table for the buffer
 * @attachments:	list of devices attached to this buffer
 */
struct ion_buffer {
	struct list_head list;
	struct ion_heap *heap;
	unsigned long flags;
	unsigned long private_flags;
	size_t size;
	void *priv_virt;
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	struct sg_table *sg_table;
	struct list_head attachments;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @shrink:		shrink the pools backing the heap, releasing pages
 *			back to the system
 * @get_pool_size:	get pool size in pages
 *
 * allocate returns 0 on success, -errno on error.
 * @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
 * the buffer's private_flags when called from a shrinker. In that
 * case, the pages being freed must be truly freed back to the
 * system, not put in a page pool or otherwise cached.
 */
struct ion_heap_ops {
	int (*allocate)(struct ion_heap *heap,
			struct ion_buffer *buffer, unsigned long len,
			unsigned long flags);
	void (*free)(struct ion_buffer *buffer);
	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
	long (*get_pool_size)(struct ion_heap *heap);
};

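/*
 * Example: a heap implementation wires its callbacks into an
 * ion_heap_ops instance. This is a minimal illustrative sketch; the
 * my_heap_* names are hypothetical and not part of this header:
 *
 *	static int my_heap_allocate(struct ion_heap *heap,
 *				    struct ion_buffer *buffer,
 *				    unsigned long len, unsigned long flags)
 *	{
 *		// allocate pages and fill buffer->sg_table here
 *		return 0;
 *	}
 *
 *	static void my_heap_free(struct ion_buffer *buffer)
 *	{
 *		// if ION_PRIV_FLAG_SHRINKER_FREE is set in
 *		// buffer->private_flags, return pages to the system
 *		// instead of caching them in a pool
 *	}
 *
 *	static struct ion_heap_ops my_heap_ops = {
 *		.allocate	= my_heap_allocate,
 *		.free		= my_heap_free,
 *	};
 */
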
/*
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE BIT(0)

/*
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0)

/**
 * struct ion_heap - represents a heap in the system
 * @node:		node in the device's priority list of heaps
 * @type:		type of heap
 * @ops:		ops struct as above
 * @buf_ops:		dma_buf ops specific to the heap implementation.
 * @flags:		flags
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating.  These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 * @owner:		kernel module that implements this heap
 * @shrinker:		a shrinker for the heap
 * @free_list:		free list head if deferred free is used
 * @free_list_size:	size of the deferred free list in bytes
 * @free_lock:		protects the free list
 * @waitqueue:		queue to wait on from deferred free thread
 * @task:		task struct of deferred free thread
 * @num_of_buffers:	the number of currently allocated buffers
 * @num_of_alloc_bytes:	the number of allocated bytes
 * @alloc_bytes_wm:	the high watermark of allocated bytes
 * @stat_lock:		protects the heap statistics
 * @debugfs_dir:	the heap's debugfs root directory
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct plist_node node;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	struct dma_buf_ops buf_ops;
	unsigned long flags;
	unsigned int id;
	const char *name;
	struct module *owner;

	/* deferred free support */
	struct shrinker shrinker;
	struct list_head free_list;
	size_t free_list_size;
	spinlock_t free_lock;
	wait_queue_head_t waitqueue;
	struct task_struct *task;

	/* heap statistics */
	u64 num_of_buffers;
	u64 num_of_alloc_bytes;
	u64 alloc_bytes_wm;

	/* protect heap statistics */
	spinlock_t stat_lock;

	/* heap's debugfs root */
	struct dentry *debugfs_dir;
};

#define ion_device_add_heap(heap) __ion_device_add_heap(heap, THIS_MODULE)

/**
 * struct ion_dma_buf_attachment - hold device-table attachment data for buffer
 * @dev:	device attached to the buffer.
 * @table:	cached mapping.
 * @list:	list of ion_dma_buf_attachment.
 * @mapped:	set while @table is mapped for DMA.
 */
struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped:1;
};

#ifdef CONFIG_ION

/**
 * __ion_device_add_heap - adds a heap to the ion device
 *
 * @heap:               the heap to add
 * @owner:              the kernel module that owns the heap
 *
 * Returns 0 on success, negative error otherwise.
 */
int __ion_device_add_heap(struct ion_heap *heap, struct module *owner);

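/*
 * Example: registering and removing a heap from a module, assuming a
 * my_heap_ops as sketched above (illustrative names only). The
 * ion_device_add_heap() wrapper supplies THIS_MODULE as @owner:
 *
 *	static struct ion_heap my_heap = {
 *		.type = ION_HEAP_TYPE_CUSTOM,
 *		.ops  = &my_heap_ops,
 *		.name = "my_heap",
 *	};
 *
 *	static int __init my_heap_init(void)
 *	{
 *		return ion_device_add_heap(&my_heap);
 *	}
 *	module_init(my_heap_init);
 *
 *	static void __exit my_heap_exit(void)
 *	{
 *		ion_device_remove_heap(&my_heap);
 *	}
 *	module_exit(my_heap_exit);
 */
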
/**
 * ion_device_remove_heap - removes a heap from the ion device
 *
 * @heap:		pointer to the heap to be removed
 */
void ion_device_remove_heap(struct ion_heap *heap);

/**
 * ion_heap_init_shrinker - set up a shrinker for a heap
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op,
 * this function will be called to set up a shrinker that shrinks the
 * freelists and calls the heap's shrink op.
 */
int ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free - initialize deferred free functionality
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag, this function will
 * be called to set up deferred frees. Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);

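/*
 * Example: enabling deferred free for a heap. Whether the ion core
 * invokes these initializers itself when it sees ION_HEAP_FLAG_DEFER_FREE,
 * or the heap driver must call them after registration, depends on the
 * core wiring; this hedged sketch assumes the driver calls them:
 *
 *	my_heap.flags |= ION_HEAP_FLAG_DEFER_FREE;
 *	ret = ion_device_add_heap(&my_heap);
 *	if (!ret)
 *		ret = ion_heap_init_deferred_free(&my_heap) ?:
 *		      ion_heap_init_shrinker(&my_heap);
 */
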
/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:		the heap
 * @buffer:		the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist immediately.
 * Returns the total amount freed.  The total freed may be higher depending
 * on the size of the items in the list, or lower if there is insufficient
 * total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free
 *				list, skipping any heap-specific
 *				pooling or caching mechanisms
 *
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist immediately.
 * Returns the total amount freed.  The total freed may be higher depending
 * on the size of the items in the list, or lower if there is insufficient
 * total memory on the freelist.
 *
 * Unlike with ion_heap_freelist_drain(), don't put any pages back into
 * page pools or otherwise cache the pages. Everything must be
 * genuinely freed back to the system. If you're freeing from a
 * shrinker you probably want to use this. Note that this relies on
 * the heap's ops->free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 * flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap,
				size_t size);

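/*
 * Example: how a shrinker scan callback would use this (paraphrased
 * sketch, not a verbatim copy of the core). nr_to_scan is a page
 * count, while the freelist helpers work in bytes, hence the
 * PAGE_SHIFT conversions:
 *
 *	freed = ion_heap_freelist_shrink(heap,
 *					 nr_to_scan << PAGE_SHIFT) >> PAGE_SHIFT;
 *	if (heap->ops->shrink)
 *		freed += heap->ops->shrink(heap, sc->gfp_mask,
 *					   nr_to_scan - freed);
 */
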
/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:		the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);

/**
 * ion_heap_map_kernel - map the ion_buffer into kernel virtual address space
 *
 * @heap:               the heap
 * @buffer:             buffer to be mapped
 *
 * Maps the buffer using vmap(). The function respects cache flags for the
 * buffer and creates the page table entries accordingly. Returns the virtual
 * address of the beginning of the buffer, or an ERR_PTR on failure.
 */
void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_unmap_kernel - unmap ion_buffer
 *
 * @heap:               the heap the buffer belongs to
 * @buffer:             buffer to be unmapped
 *
 * ION wrapper for vunmap() of the ion buffer.
 */
void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_map_user - map given ion buffer into the provided vma
 *
 * @heap:               the heap this buffer belongs to
 * @buffer:             ion buffer to be mapped
 * @vma:                vma of the process where the buffer should be mapped
 *
 * Maps the buffer using remap_pfn_range() into the process's vma starting
 * at vma->vm_start. The vma size is expected to be >= the ion buffer size.
 * If not, a partial buffer mapping may be created. Returns 0 on success.
 */
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma);

/**
 * ion_buffer_zero - zeroes out an ion buffer respecting the ION_FLAGs
 *
 * @buffer:		ion_buffer to zero
 *
 * Returns 0 on success, negative error otherwise.
 */
int ion_buffer_zero(struct ion_buffer *buffer);

/**
 * ion_buffer_prep_noncached - flush cache before non-cached mapping
 *
 * @buffer:		ion_buffer to flush
 *
 * The memory allocated by the heap could be in the CPU cache. To map
 * this memory as non-cached, we need to flush the associated cache
 * first. Without the flush, it is possible for stale dirty cache lines
 * to be evicted after the ION client has started writing into this
 * buffer, leading to data corruption.
 */
void ion_buffer_prep_noncached(struct ion_buffer *buffer);

/**
 * ion_alloc - Allocates an ion buffer of given size from given heap
 *
 * @len:               size of the buffer to be allocated.
 * @heap_id_mask:      a bitwise mask of heap ids to allocate from
 * @flags:             ION_BUFFER_XXXX flags for the new buffer.
 *
 * The function exports a dma_buf object for the new ion buffer internally
 * and returns that to the caller. So, the buffer is ready to be used by other
 * drivers immediately. Returns ERR_PTR in case of failure.
 */
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
			  unsigned int flags);

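/*
 * Example: a hypothetical in-kernel client allocating 1 MiB of cached
 * memory from a heap id it has already discovered (my_heap_id is
 * illustrative), then dropping its reference through the dma-buf API:
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = ion_alloc(SZ_1M, 1 << my_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	...
 *	dma_buf_put(dmabuf);
 */
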
/**
 * ion_free - releases the ion buffer
 *
 * @buffer:             ion buffer to be released
 *
 * Returns 0 on success, negative error otherwise.
 */
int ion_free(struct ion_buffer *buffer);

/**
 * ion_query_heaps_kernel - returns information about available heaps to
 * in-kernel clients
 *
 * @hdata:             pointer to array of struct ion_heap_data.
 * @size:              size of @hdata array.
 *
 * Returns the number of available heaps and populates @hdata with information
 * regarding the same. When invoked with @size as 0, the function will return
 * the number of available heaps without modifying @hdata. When the number of
 * available heaps is higher than @size, @size is returned instead of the
 * actual number of available heaps.
 */
size_t ion_query_heaps_kernel(struct ion_heap_data *hdata, size_t size);
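
/*
 * Example: the usual two-call discovery pattern suggested by the
 * semantics above; query the count with @size == 0, then fetch the
 * data into a suitably sized array (sketch, error handling trimmed):
 *
 *	size_t num = ion_query_heaps_kernel(NULL, 0);
 *	struct ion_heap_data *hdata;
 *
 *	hdata = kcalloc(num, sizeof(*hdata), GFP_KERNEL);
 *	if (hdata)
 *		num = ion_query_heaps_kernel(hdata, num);
 */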
#else

static inline int __ion_device_add_heap(struct ion_heap *heap,
					struct module *owner)
{
	return -ENODEV;
}

static inline int ion_heap_init_shrinker(struct ion_heap *heap)
{
	return -ENODEV;
}

static inline int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	return -ENODEV;
}

static inline void ion_heap_freelist_add(struct ion_heap *heap,
					 struct ion_buffer *buffer) {}

/*
 * The freelist helpers return a size_t byte count; without ION there
 * is nothing to drain, so report 0 rather than an -errno value that
 * would wrap to a huge unsigned number.
 */
static inline size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	return 0;
}

static inline size_t ion_heap_freelist_shrink(struct ion_heap *heap,
					      size_t size)
{
	return 0;
}

static inline size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	return 0;
}

static inline void *ion_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return ERR_PTR(-ENODEV);
}

static inline void ion_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer) {}

static inline int ion_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	return -ENODEV;
}

static inline int ion_buffer_zero(struct ion_buffer *buffer)
{
	return -EINVAL;
}

static inline void ion_buffer_prep_noncached(struct ion_buffer *buffer) {}

static inline struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
					unsigned int flags)
{
	return ERR_PTR(-ENOMEM);
}

static inline int ion_free(struct ion_buffer *buffer)
{
	return 0;
}

static inline size_t ion_query_heaps_kernel(struct ion_heap_data *hdata,
					    size_t size)
{
	return 0;
}
#endif /* CONFIG_ION */
#endif /* _ION_KERNEL_H */