xref: /rockchip-linux_mpp/osal/allocator/ion.h (revision 437bfbeb9567cca9cd9080e3f6954aa9d6a94f18)
1 /*
2  * include/linux/ion.h
3  *
4  * Copyright (C) 2011 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16 
17 #ifndef _LINUX_ION_H
18 #define _LINUX_ION_H
19 
20 #include <linux/types.h>
21 #define ION_VERSION     "1.0"
22 
23 typedef int ion_user_handle_t;
24 
/**
 * enum ion_heap_types - list of all possible types of heaps
 * @ION_HEAP_TYPE_SYSTEM:    memory allocated via vmalloc
 * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
 * @ION_HEAP_TYPE_CARVEOUT:  memory allocated from a prereserved
 *               carveout heap, allocations are physically
 *               contiguous
 * @ION_HEAP_TYPE_CHUNK:     chunk heap (NOTE(review): description not
 *               visible here; in mainline ion this allocates
 *               from fixed-size chunks -- confirm against the
 *               matching kernel)
 * @ION_HEAP_TYPE_DMA:       memory allocated via DMA API
 * @ION_HEAP_TYPE_CUSTOM:    first device specific heap type; must be
 *               last so device specific heaps are always at
 *               the end of this enum
 * @ION_NUM_HEAPS:       helper for iterating over heaps, a bit mask
 *               is used to identify the heaps, so only 32
 *               total heap types are supported
 */
enum ion_heap_type {
    ION_HEAP_TYPE_SYSTEM,
    ION_HEAP_TYPE_SYSTEM_CONTIG,
    ION_HEAP_TYPE_CARVEOUT,
    ION_HEAP_TYPE_CHUNK,
    ION_HEAP_TYPE_DMA,
    ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
                 are at the end of this enum */
    ION_NUM_HEAPS = 16,
};
/*
 * Platform heap ids.  Ids below 16 are generic heaps; ids from 16 up look
 * like client/IP-block specific heaps (VPU/CAM/UI presumably video
 * processor, camera and display -- inferred from the names, confirm
 * against the kernel's board/heap configuration).
 */
enum ion_heap_ids {
    ION_NOR_HEAP_ID = 0,  /* presumably the "normal" system heap -- confirm */
    ION_CMA_HEAP_ID = 1,  /* contiguous-memory-allocator backed heap */

    ION_VPU_ID = 16,
    ION_CAM_ID = 17,
    ION_UI_ID = 18,
};
55 
/*
 * Bit masks selecting a heap by type, for use in heap-mask arguments
 * (ion_client_create()/ion_alloc() heap_mask, ion_allocation_data).
 * NOTE(review): ION_HEAP_TYPE_DMA_MASK does not follow the naming pattern
 * of the other three (which drop the "TYPE_" infix); kept as-is for
 * source compatibility with existing callers.
 */
#define ION_HEAP_SYSTEM_MASK        (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK      (1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK      (1 << ION_HEAP_TYPE_DMA)
60 
#ifdef __KERNEL__
/* Opaque kernel-side objects; their definitions live in the ion driver. */
struct ion_device;
struct ion_heap;
struct ion_mapper;
struct ion_client;
struct ion_buffer;

/* This should be removed some day when phys_addr_t's are fully
   plumbed in the kernel, and all instances of ion_phys_addr_t should
   be converted to phys_addr_t.  For the time being many kernel interfaces
   do not accept phys_addr_t's that would have to */
#define ion_phys_addr_t unsigned long
73 
/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:   type of the heap from ion_heap_type enum
 * @id:     unique identifier for heap.  When allocating, lower id
 *      numbers will be allocated from first
 * @name:   used for debug purposes
 * @base:   base address of heap in physical memory if applicable
 * @size:   size of the heap in bytes if applicable
 *
 * Provided by the board file.
 */
struct ion_platform_heap {
    enum ion_heap_type type;
    unsigned int id;
    const char *name;
    ion_phys_addr_t base;
    size_t size;
};
92 
/**
 * struct ion_platform_data - array of platform heaps passed from board file
 * @nr:     number of structures in the array
 * @heaps:  array of platform_heap structures (C99 flexible array member;
 *      the board file allocates nr trailing entries)
 *
 * Provided by the board file in the form of platform data to a platform device.
 */
struct ion_platform_data {
    int nr;
    struct ion_platform_heap heaps[];
};
104 
/**
 * ion_client_create() -  allocate a client and returns it
 * @dev:    the global ion device
 * @heap_mask:  mask of heaps this client can allocate from
 * @name:   used for debugging
 */
struct ion_client *ion_client_create(struct ion_device *dev,
                                     unsigned int heap_mask, const char *name);

/**
 * ion_client_destroy() -  frees a client and all its handles
 * @client: the client
 *
 * Free the provided client and all its resources including
 * any handles it is holding.
 */
void ion_client_destroy(struct ion_client *client);

/**
 * ion_alloc - allocate ion memory
 * @client: the client
 * @len:    size of the allocation
 * @align:  requested allocation alignment, lots of hardware blocks have
 *      alignment requirements of some kind
 * @flags:  mask of heaps to allocate from, if multiple bits are set
 *      heaps will be tried in order from lowest to highest order bit
 *
 * Allocate memory in one of the heaps provided in heap mask and return
 * an opaque handle to it.
 */
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int flags);

/**
 * ion_free - free a handle
 * @client: the client
 * @handle: the handle to free
 *
 * Free the provided handle.
 */
void ion_free(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_phys - returns the physical address and len of a handle
 * @client: the client
 * @handle: the handle
 * @addr:   a pointer to put the address in
 * @len:    a pointer to put the length in
 *
 * This function queries the heap for a particular handle to get the
 * handle's physical address.  Its output is only correct if
 * a heap returns physically contiguous memory -- in other cases
 * this api should not be implemented -- ion_map_dma should be used
 * instead.  Returns -EINVAL if the handle is invalid.  This has
 * no implications on the reference counting of the handle --
 * the returned value may not be valid if the caller is not
 * holding a reference.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len);

/**
 * ion_map_kernel - create mapping for the given handle
 * @client: the client
 * @handle: handle to map
 *
 * Map the given handle into the kernel and return a kernel address that
 * can be used to access this address.
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_unmap_kernel() - destroy a kernel mapping for a handle
 * @client: the client
 * @handle: handle to unmap
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_map_dma - create a dma mapping for a given handle
 * @client: the client
 * @handle: handle to map
 *
 * Return an sglist describing the given handle
 */
struct scatterlist *ion_map_dma(struct ion_client *client,
                                struct ion_handle *handle);

/**
 * ion_unmap_dma() - destroy a dma mapping for a handle
 * @client: the client
 * @handle: handle to unmap
 */
void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_share() - given a handle, obtain a buffer to pass to other clients
 * @client: the client
 * @handle: the handle to share
 *
 * Given a handle, return a buffer, which exists in a global name
 * space, and can be passed to other clients.  Should be passed into ion_import
 * to obtain a new handle for this buffer.
 *
 * NOTE: This function does not take an extra reference.  The burden is on the
 * caller to make sure the buffer doesn't go away while it's being passed to
 * another client.  That is, ion_free should not be called on this handle until
 * the buffer has been imported into the other client.
 */
struct ion_buffer *ion_share(struct ion_client *client,
                             struct ion_handle *handle);

/**
 * ion_import() - given a buffer in another client, import it
 * @client: the client to import the buffer into
 * @buffer: the buffer to import (as obtained from ion_share)
 *
 * Given a buffer, add it to the client and return the handle to use to refer
 * to it further.  This is called to share a handle from one kernel client to
 * another.
 */
struct ion_handle *ion_import(struct ion_client *client,
                              struct ion_buffer *buffer);

/**
 * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
 * @client: the client to import the buffer into
 * @fd:     the fd
 *
 * A helper function for drivers that will be receiving ion buffers shared
 * with them from userspace.  These buffers are represented by a file
 * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
 * This function converts that fd into the underlying buffer, and returns
 * the handle to use to refer to it further.
 */
struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
241 #endif /* __KERNEL__ */
242 
243 /**
244  * DOC: Ion Userspace API
245  *
246  * create a client by opening /dev/ion
247  * most operations handled via following ioctls
248  *
249  */
250 
/**
 * struct ion_allocation_data - metadata passed from userspace for allocations
 * @len:    size of the allocation
 * @align:  required alignment of the allocation
 * @heap_id_mask: mask of heap ids to allocate from
 * @flags:  flags passed to heap
 * @handle: populated by the kernel with a cookie used to refer
 *      to this allocation in subsequent ioctls
 *
 * Provided by userspace as an argument to the ION_IOC_ALLOC ioctl
 */
struct ion_allocation_data {
    size_t len;
    size_t align;
    unsigned int heap_id_mask;
    unsigned int flags;
    ion_user_handle_t handle;
};
268 
/**
 * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
 * @handle: a handle
 * @fd:     a file descriptor representing that handle
 *
 * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
 * the handle returned from ion alloc, and the kernel returns the file
 * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
 * provides the file descriptor and the kernel returns the handle.
 */
struct ion_fd_data {
    ion_user_handle_t handle;
    int fd;
};
283 
/**
 * struct ion_handle_data - a handle passed to/from the kernel
 * @handle: a handle
 *
 * Used as the argument to ION_IOC_FREE.
 */
struct ion_handle_data {
    ion_user_handle_t handle;
};
291 
/**
 * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
 * @cmd:    the custom ioctl function to call
 * @arg:    additional data to pass to the custom ioctl, typically a user
 *      pointer to a predefined structure
 *
 * This works just like the regular cmd and arg fields of an ioctl.
 */
struct ion_custom_data {
    unsigned int cmd;
    unsigned long arg;
};
304 
/* struct ion_flush_data - data passed to ion for flushing caches
 *
 * @handle: handle with data to flush
 * @fd:     fd to flush
 * @vaddr:  userspace virtual address mapped with mmap
 * @offset: offset into the handle to flush
 * @length: length of handle to flush
 *
 * Performs cache operations on the handle. If p is the start address
 * of the handle, p + offset through p + offset + length will have
 * the cache operations performed
 */
struct ion_flush_data {
    /* handle was a kernel pointer (struct ion_handle *) in an older ABI;
     * it is now the userspace cookie type */
    ion_user_handle_t handle;
    int fd;
    void *vaddr;
    unsigned int offset;
    unsigned int length;
};
325 
/* Not available in the newer ion kernel interface; argument for the
 * rockchip ION_IOC_GET_PHYS ioctl below. */
struct ion_phys_data {
    /* handle was a kernel pointer (struct ion_handle *) in an older ABI */
    ion_user_handle_t handle;  /* in: handle to query */
    unsigned long phys;        /* out: physical address of the buffer */
    unsigned long size;        /* presumably out: buffer size -- confirm against driver */
};
333 
/* Argument for the legacy ION_CUSTOM_CACHE_OP ioctl (disabled branch below). */
struct ion_cacheop_data {
/* selectors for @type */
#define ION_CACHE_FLUSH     0
#define ION_CACHE_CLEAN     1
#define ION_CACHE_INV       2
    unsigned int type;
    /* NOTE(review): kernel pointer in a userspace-facing struct; legacy ABI */
    struct ion_handle *handle;
    void *virt;
};
/* Per-buffer record reported by the legacy client-info ioctl. */
struct ion_buffer_info {
    unsigned long phys;   /* physical address (by field name; confirm) */
    unsigned long size;   /* size in bytes */
};
/* Per-client buffer listing (legacy ION_CUSTOM_GET_CLIENT_INFO ioctl). */
struct ion_client_info {
/* upper bound on buffers reported per client */
#define MAX_BUFFER_COUNT    127
    unsigned int count;          /* number of valid entries in buf[] */
    unsigned long total_size;    /* presumably sum of buffer sizes -- confirm */
    struct ion_buffer_info buf[MAX_BUFFER_COUNT];
};
/* Per-heap usage statistics (legacy ION_CUSTOM_GET_HEAP_INFO ioctl). */
struct ion_heap_info {
    unsigned int id;               /* heap id, see enum ion_heap_ids */
    unsigned long allocated_size;  /* presumably bytes currently allocated -- confirm */
    unsigned long max_allocated;   /* presumably allocation high-water mark -- confirm */
    unsigned long total_size;      /* presumably total heap capacity -- confirm */
};
358 
/* Argument for the rockchip ION_IOC_GET_SHARE / ION_IOC_SET_SHARE ioctls.
 * @fd:  file descriptor exchanged with the kernel -- TODO confirm semantics
 * @obj: opaque share-object pointer exchanged with the kernel
 */
struct ion_share_obj_data {
    int fd;
    void *obj;
};
363 ///////////////////////////////////////////////////
364 
#define ION_IOC_MAGIC       'I'

/**
 * DOC: ION_IOC_ALLOC - allocate memory
 *
 * Takes an ion_allocation_data struct and returns it with the handle field
 * populated with the opaque handle for the allocation.
 */
#define ION_IOC_ALLOC       _IOWR(ION_IOC_MAGIC, 0, \
                      struct ion_allocation_data)

/**
 * DOC: ION_IOC_FREE - free memory
 *
 * Takes an ion_handle_data struct and frees the handle.
 */
#define ION_IOC_FREE        _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)

/**
 * DOC: ION_IOC_MAP - get a file descriptor to mmap
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be used as an argument to mmap.
 */
#define ION_IOC_MAP     _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)

/**
 * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be passed to another process.  The corresponding opaque handle can
 * be retrieved via ION_IOC_IMPORT.
 */
#define ION_IOC_SHARE       _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)

/**
 * DOC: ION_IOC_IMPORT - imports a shared file descriptor
 *
 * Takes an ion_fd_data struct with the fd field populated with a valid file
 * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
 * field set to the corresponding opaque handle.
 */
#define ION_IOC_IMPORT      _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)

/**
 * DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory
 *
 * Deprecated in favor of using the dma_buf api's correctly (syncing
 * will happen automatically when the buffer is mapped to a device).
 * If necessary should be used after touching a cached buffer from the cpu,
 * this will make the buffer in memory coherent.
 */
#define ION_IOC_SYNC        _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)

/**
 * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
 *
 * Takes the argument of the architecture specific ioctl to call and
 * passes appropriate userdata for that ioctl
 */
#define ION_IOC_CUSTOM          _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
430 
431 
#if 1
/* Rockchip-specific ioctls, distinguished from the generic ion ioctls by
 * their own magic number. */
#define ION_IOC_ROCKCHIP_MAGIC 'R'

/**
 * Clean the caches of the handle specified.
 */
#define ION_IOC_CLEAN_CACHES    _IOWR(ION_IOC_ROCKCHIP_MAGIC, 0, \
                        struct ion_flush_data)
/**
 * Invalidate the caches of the handle specified.
 */
#define ION_IOC_INV_CACHES  _IOWR(ION_IOC_ROCKCHIP_MAGIC, 1, \
                        struct ion_flush_data)
/**
 * Clean and invalidate the caches of the handle specified.
 */
#define ION_IOC_CLEAN_INV_CACHES    _IOWR(ION_IOC_ROCKCHIP_MAGIC, 2, \
                        struct ion_flush_data)

/**
 * Get phys addr of the handle specified.
 */
#define ION_IOC_GET_PHYS    _IOWR(ION_IOC_ROCKCHIP_MAGIC, 3, \
                        struct ion_phys_data)

/* NOTE(review): a second, token-identical set of definitions for
 * ION_IOC_CLEAN_CACHES, ION_IOC_INV_CACHES and ION_IOC_CLEAN_INV_CACHES
 * (apparently pasted from an auto-generated header) used to follow here;
 * removed as redundant.  The expansions above are unchanged. */

#define ION_IOC_GET_SHARE _IOWR(ION_IOC_ROCKCHIP_MAGIC, 4, struct ion_share_obj_data)
#define ION_IOC_SET_SHARE _IOWR(ION_IOC_ROCKCHIP_MAGIC, 5, struct ion_share_obj_data)

#else
/* Legacy custom ioctls; not available in the newer ion kernel interface. */
#define ION_CUSTOM_GET_PHYS     _IOWR(ION_IOC_MAGIC, 15, \
                            struct ion_phys_data)

#define ION_CUSTOM_CACHE_OP     _IOWR(ION_IOC_MAGIC, 8, \
                            struct ion_cacheop_data)

#define ION_CUSTOM_GET_CLIENT_INFO  _IOWR(ION_IOC_MAGIC, 9, \
                            struct ion_client_info)

#define ION_CUSTOM_GET_HEAP_INFO    _IOWR(ION_IOC_MAGIC, 10, \
                            struct ion_heap_info)
/* Compatible with pmem */
struct ion_pmem_region {
    unsigned long offset;
    unsigned long len;
};
#define ION_PMEM_GET_PHYS       _IOW('p', 1, unsigned int)
#define ION_PMEM_CACHE_FLUSH        _IOW('p', 8, unsigned int)
#endif
486 ///////////////////////////////////////////
487 
488 #endif /* _LINUX_ION_H */
489