xref: /OK3568_Linux_fs/external/mpp/osal/allocator/ion.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * include/linux/ion.h
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
16*4882a593Smuzhiyun 
#ifndef _LINUX_ION_H
#define _LINUX_ION_H

#include <linux/types.h>

/* Version string for this copy of the ion user-space API. */
#define ION_VERSION     "1.0"

/* Opaque cookie the kernel returns for an allocation (see
 * ion_allocation_data.handle); user space passes it back in later
 * ioctls to refer to the same buffer. */
typedef int ion_user_handle_t;
24*4882a593Smuzhiyun 
/**
 * enum ion_heap_type - list of all possible types of heaps
 * @ION_HEAP_TYPE_SYSTEM:        memory allocated via vmalloc
 * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
 * @ION_HEAP_TYPE_CARVEOUT:     memory allocated from a prereserved
 *               carveout heap, allocations are physically
 *               contiguous
 * @ION_HEAP_TYPE_CHUNK:        memory allocated from a chunk heap
 * @ION_HEAP_TYPE_DMA:          memory allocated via DMA API
 * @ION_HEAP_TYPE_CUSTOM:       first id available for device-specific
 *               heaps; must be last so those heaps always sit at the
 *               end of this enum
 * @ION_NUM_HEAPS:      helper for iterating over heaps, a bit mask
 *               is used to identify the heaps
 *               (NOTE(review): the original comment claimed up to 32
 *               heap types fit in the mask, but the value here is 16 —
 *               confirm which limit the kernel side enforces)
 */
enum ion_heap_type {
    ION_HEAP_TYPE_SYSTEM,
    ION_HEAP_TYPE_SYSTEM_CONTIG,
    ION_HEAP_TYPE_CARVEOUT,
    ION_HEAP_TYPE_CHUNK,
    ION_HEAP_TYPE_DMA,
    ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
                 are at the end of this enum */
    ION_NUM_HEAPS = 16,
};
/*
 * Well-known heap ids on this platform.  Low ids name generic heaps;
 * ids >= 16 appear to be device-specific (VPU / camera / UI) —
 * NOTE(review): semantics inferred from the names, confirm against the
 * board file that registers these heaps.
 */
enum ion_heap_ids {
    ION_NOR_HEAP_ID = 0,
    ION_CMA_HEAP_ID = 1,

    ION_VPU_ID = 16,
    ION_CAM_ID = 17,
    ION_UI_ID = 18,
};
55*4882a593Smuzhiyun 
/*
 * Convenience heap-selection masks built from the generic heap types
 * (one bit per enum ion_heap_type value).
 */
#define ION_HEAP_SYSTEM_MASK        (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK      (1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK      (1 << ION_HEAP_TYPE_DMA)
60*4882a593Smuzhiyun 
#ifdef __KERNEL__
/* Kernel-internal objects; opaque to users of this header. */
struct ion_device;
struct ion_heap;
struct ion_mapper;
struct ion_client;
struct ion_buffer;

/* This should be removed some day when phys_addr_t's are fully
   plumbed in the kernel, and all instances of ion_phys_addr_t should
   be converted to phys_addr_t.  For the time being many kernel interfaces
   do not accept phys_addr_t's that would have to */
#define ion_phys_addr_t unsigned long
73*4882a593Smuzhiyun 
/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:   type of the heap from ion_heap_type enum
 * @id:     unique identifier for heap.  When allocating, lower numbers
 *      will be allocated from first
 * @name:   used for debug purposes
 * @base:   base address of heap in physical memory if applicable
 * @size:   size of the heap in bytes if applicable
 *
 * Provided by the board file.
 */
struct ion_platform_heap {
    enum ion_heap_type type;
    unsigned int id;
    const char *name;
    ion_phys_addr_t base;
    size_t size;
};
92*4882a593Smuzhiyun 
/**
 * struct ion_platform_data - array of platform heaps passed from board file
 * @nr:     number of structures in the array
 * @heaps:  flexible array of ion_platform_heap structures
 *
 * Provided by the board file in the form of platform data to a platform device.
 */
struct ion_platform_data {
    int nr;
    struct ion_platform_heap heaps[];
};
104*4882a593Smuzhiyun 
/**
 * ion_client_create() - allocate a client and return it
 * @dev:       the global ion device
 * @heap_mask: mask of heaps this client can allocate from
 * @name:      used for debugging
 */
struct ion_client *ion_client_create(struct ion_device *dev,
                                     unsigned int heap_mask, const char *name);
113*4882a593Smuzhiyun 
/**
 * ion_client_destroy() - free a client and all its handles
 * @client: the client
 *
 * Free the provided client and all of its resources including
 * any handles it is holding.
 */
void ion_client_destroy(struct ion_client *client);
122*4882a593Smuzhiyun 
/**
 * ion_alloc - allocate ion memory
 * @client: the client
 * @len:    size of the allocation
 * @align:  requested allocation alignment, lots of hardware blocks have
 *      alignment requirements of some kind
 * @flags:  mask of heaps to allocate from, if multiple bits are set
 *      heaps will be tried in order from lowest to highest order bit
 *
 * Allocate memory in one of the heaps provided in heap mask and return
 * an opaque handle to it.
 */
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int flags);
137*4882a593Smuzhiyun 
/**
 * ion_free - free a handle
 * @client: the client
 * @handle: the handle to free
 *
 * Free the provided handle.
 */
void ion_free(struct ion_client *client, struct ion_handle *handle);
146*4882a593Smuzhiyun 
/**
 * ion_phys - returns the physical address and len of a handle
 * @client: the client
 * @handle: the handle
 * @addr:   a pointer to put the address in
 * @len:    a pointer to put the length in
 *
 * This function queries the heap for a particular handle to get the
 * handle's physical address.  Its output is only correct if
 * a heap returns physically contiguous memory -- in other cases
 * this api should not be implemented -- ion_map_dma should be used
 * instead.  Returns -EINVAL if the handle is invalid.  This has
 * no implications on the reference counting of the handle --
 * the returned value may not be valid if the caller is not
 * holding a reference.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len);
165*4882a593Smuzhiyun 
/**
 * ion_map_kernel - create mapping for the given handle
 * @client: the client
 * @handle: handle to map
 *
 * Map the given handle into the kernel and return a kernel address that
 * can be used to access this address.
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
175*4882a593Smuzhiyun 
/**
 * ion_unmap_kernel() - destroy a kernel mapping for a handle
 * @client: the client
 * @handle: handle to unmap
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
182*4882a593Smuzhiyun 
/**
 * ion_map_dma - create a dma mapping for a given handle
 * @client: the client
 * @handle: handle to map
 *
 * Return an sglist describing the given handle
 */
struct scatterlist *ion_map_dma(struct ion_client *client,
                                struct ion_handle *handle);
192*4882a593Smuzhiyun 
/**
 * ion_unmap_dma() - destroy a dma mapping for a handle
 * @client: the client
 * @handle: handle to unmap
 */
void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
199*4882a593Smuzhiyun 
/**
 * ion_share() - given a handle, obtain a buffer to pass to other clients
 * @client: the client
 * @handle: the handle to share
 *
 * Given a handle, return a buffer, which exists in a global name
 * space, and can be passed to other clients.  Should be passed into ion_import
 * to obtain a new handle for this buffer.
 *
 * NOTE: This function does not take an extra reference.  The burden is on the
 * caller to make sure the buffer doesn't go away while it's being passed to
 * another client.  That is, ion_free should not be called on this handle until
 * the buffer has been imported into the other client.
 */
struct ion_buffer *ion_share(struct ion_client *client,
                             struct ion_handle *handle);
216*4882a593Smuzhiyun 
/**
 * ion_import() - given a buffer in another client, import it
 * @client: the client that will own the new handle
 * @buffer: the buffer to import (as obtained from ion_share)
 *
 * Given a buffer, add it to the client and return the handle to use to refer
 * to it further.  This is called to share a handle from one kernel client to
 * another.
 */
struct ion_handle *ion_import(struct ion_client *client,
                              struct ion_buffer *buffer);
228*4882a593Smuzhiyun 
/**
 * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
 * @client: the client that will own the new handle
 * @fd:     the fd
 *
 * A helper function for drivers that will be receiving ion buffers shared
 * with them from userspace.  These buffers are represented by a file
 * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
 * This function converts that fd into the underlying buffer, and returns
 * the handle to use to refer to it further.
 */
struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
#endif /* __KERNEL__ */
242*4882a593Smuzhiyun 
/**
 * DOC: Ion Userspace API
 *
 * Create a client by opening /dev/ion.
 * Most operations are handled via the following ioctls.
 *
 */
250*4882a593Smuzhiyun 
/**
 * struct ion_allocation_data - metadata passed from userspace for allocations
 * @len:          size of the allocation
 * @align:        required alignment of the allocation
 * @heap_id_mask: mask of heap ids to allocate from
 * @flags:        flags passed to heap
 * @handle:       populated by the kernel with a cookie used to refer
 *                to this allocation in later ioctls
 *
 * Provided by userspace as an argument to the ioctl
 */
struct ion_allocation_data {
    size_t len;
    size_t align;
    unsigned int heap_id_mask;
    unsigned int flags;
    ion_user_handle_t handle;
};
268*4882a593Smuzhiyun 
/**
 * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
 * @handle: a handle
 * @fd:     a file descriptor representing that handle
 *
 * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
 * the handle returned from ion alloc, and the kernel returns the file
 * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
 * provides the file descriptor and the kernel returns the handle.
 */
struct ion_fd_data {
    ion_user_handle_t handle;
    int fd;
};
283*4882a593Smuzhiyun 
/**
 * struct ion_handle_data - a handle passed to/from the kernel
 * @handle: a handle
 */
struct ion_handle_data {
    ion_user_handle_t handle;
};
291*4882a593Smuzhiyun 
/**
 * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
 * @cmd:    the custom ioctl function to call
 * @arg:    additional data to pass to the custom ioctl, typically a user
 *      pointer to a predefined structure
 *
 * This works just like the regular cmd and arg fields of an ioctl.
 */
struct ion_custom_data {
    unsigned int cmd;
    unsigned long arg;
};
304*4882a593Smuzhiyun 
/* struct ion_flush_data - data passed to ion for flushing caches
 *
 * @handle: handle with data to flush
 * @fd:     fd to flush
 * @vaddr:  userspace virtual address mapped with mmap
 * @offset: offset into the handle to flush
 * @length: length of handle to flush
 *
 * Performs cache operations on the handle. If p is the start address
 * of the handle, p + offset through p + offset + length will have
 * the cache operations performed
 */
struct ion_flush_data {
    /* was a struct ion_handle * in older kernels */
    ion_user_handle_t handle;
    int fd;
    void *vaddr;
    unsigned int offset;
    unsigned int length;
};
325*4882a593Smuzhiyun 
/* Not available in new ion kernels.  Carries the physical address and size
 * of a buffer for ION_IOC_GET_PHYS / legacy ION_CUSTOM_GET_PHYS. */
struct ion_phys_data {
    /* was a struct ion_handle * in older kernels */
    ion_user_handle_t handle;
    unsigned long phys;
    unsigned long size;
};
333*4882a593Smuzhiyun 
/* Legacy cache-maintenance request (used by ION_CUSTOM_CACHE_OP). */
struct ion_cacheop_data {
#define ION_CACHE_FLUSH     0
#define ION_CACHE_CLEAN     1
#define ION_CACHE_INV       2
    unsigned int type;          /* one of the ION_CACHE_* operations above */
    struct ion_handle *handle;
    void *virt;
};
/* Physical address and size of a single ion buffer. */
struct ion_buffer_info {
    unsigned long phys;
    unsigned long size;
};
/* Per-client buffer accounting (returned by the legacy
 * ION_CUSTOM_GET_CLIENT_INFO ioctl). */
struct ion_client_info {
#define MAX_BUFFER_COUNT    127
    unsigned int count;         /* number of valid entries in buf[] */
    unsigned long total_size;   /* sum of the buffer sizes */
    struct ion_buffer_info buf[MAX_BUFFER_COUNT];
};
/* Per-heap usage statistics (returned by the legacy
 * ION_CUSTOM_GET_HEAP_INFO ioctl). */
struct ion_heap_info {
    unsigned int id;
    unsigned long allocated_size;
    unsigned long max_allocated;
    unsigned long total_size;
};
358*4882a593Smuzhiyun 
/* fd / opaque-object pair exchanged via ION_IOC_GET_SHARE and
 * ION_IOC_SET_SHARE. */
struct ion_share_obj_data {
    int fd;
    void *obj;
};
363*4882a593Smuzhiyun ///////////////////////////////////////////////////
364*4882a593Smuzhiyun 
#define ION_IOC_MAGIC       'I'

/**
 * DOC: ION_IOC_ALLOC - allocate memory
 *
 * Takes an ion_allocation_data struct and returns it with the handle field
 * populated with the opaque handle for the allocation.
 */
#define ION_IOC_ALLOC       _IOWR(ION_IOC_MAGIC, 0, \
                      struct ion_allocation_data)

/**
 * DOC: ION_IOC_FREE - free memory
 *
 * Takes an ion_handle_data struct and frees the handle.
 */
#define ION_IOC_FREE        _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)

/**
 * DOC: ION_IOC_MAP - get a file descriptor to mmap
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be used as an argument to mmap.
 */
#define ION_IOC_MAP     _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)

/**
 * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be passed to another process.  The corresponding opaque handle can
 * be retrieved via ION_IOC_IMPORT.
 */
#define ION_IOC_SHARE       _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)

/**
 * DOC: ION_IOC_IMPORT - imports a shared file descriptor
 *
 * Takes an ion_fd_data struct with the fd field populated with a valid file
 * descriptor obtained from ION_IOC_SHARE and returns the struct with the
 * handle field set to the corresponding opaque handle.
 */
#define ION_IOC_IMPORT      _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)

/**
 * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
 *
 * Deprecated in favor of using the dma_buf api's correctly (syncing
 * will happen automatically when the buffer is mapped to a device).
 * If necessary should be used after touching a cached buffer from the cpu,
 * this will make the buffer in memory coherent.
 */
#define ION_IOC_SYNC        _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)

/**
 * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
 *
 * Takes the argument of the architecture specific ioctl to call and
 * passes appropriate userdata for that ioctl
 */
#define ION_IOC_CUSTOM          _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
430*4882a593Smuzhiyun 
431*4882a593Smuzhiyun 
/*
 * Rockchip-specific ion extensions.  The active (#if 1) branch is the API
 * for current kernels; the #else branch keeps the legacy custom-ioctl
 * variants for reference only.
 *
 * NOTE(review): ION_IOC_CLEAN_CACHES, ION_IOC_INV_CACHES and
 * ION_IOC_CLEAN_INV_CACHES were previously #define'd a second time further
 * down with token-identical expansions; the redundant duplicates have been
 * removed.
 */
#if 1
#define ION_IOC_ROCKCHIP_MAGIC 'R'

/**
 * Clean the caches of the handle specified.
 */
#define ION_IOC_CLEAN_CACHES    _IOWR(ION_IOC_ROCKCHIP_MAGIC, 0, \
                        struct ion_flush_data)
/**
 * Invalidate the caches of the handle specified.
 */
#define ION_IOC_INV_CACHES  _IOWR(ION_IOC_ROCKCHIP_MAGIC, 1, \
                        struct ion_flush_data)
/**
 * Clean and invalidate the caches of the handle specified.
 */
#define ION_IOC_CLEAN_INV_CACHES    _IOWR(ION_IOC_ROCKCHIP_MAGIC, 2, \
                        struct ion_flush_data)

/**
 * Get phys addr of the handle specified.
 */
#define ION_IOC_GET_PHYS    _IOWR(ION_IOC_ROCKCHIP_MAGIC, 3, \
                        struct ion_phys_data)

/**
 * Exchange an fd / opaque share object pair (struct ion_share_obj_data).
 */
#define ION_IOC_GET_SHARE _IOWR(ION_IOC_ROCKCHIP_MAGIC, 4, struct ion_share_obj_data)
#define ION_IOC_SET_SHARE _IOWR(ION_IOC_ROCKCHIP_MAGIC, 5, struct ion_share_obj_data)

#else
/* Not available in new ion kernels. */
#define ION_CUSTOM_GET_PHYS     _IOWR(ION_IOC_MAGIC, 15, \
                            struct ion_phys_data)

#define ION_CUSTOM_CACHE_OP     _IOWR(ION_IOC_MAGIC, 8, \
                            struct ion_cacheop_data)

#define ION_CUSTOM_GET_CLIENT_INFO  _IOWR(ION_IOC_MAGIC, 9, \
                            struct ion_client_info)

#define ION_CUSTOM_GET_HEAP_INFO    _IOWR(ION_IOC_MAGIC, 10, \
                            struct ion_heap_info)
/* Compatible with pmem */
struct ion_pmem_region {
    unsigned long offset;
    unsigned long len;
};
#define ION_PMEM_GET_PHYS       _IOW('p', 1, unsigned int)
#define ION_PMEM_CACHE_FLUSH        _IOW('p', 8, unsigned int)
#endif
486*4882a593Smuzhiyun ///////////////////////////////////////////
487*4882a593Smuzhiyun 
488*4882a593Smuzhiyun #endif /* _LINUX_ION_H */
489