// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "ion_private.h"

#define ION_CURRENT_ABI_VERSION 2

static struct ion_device *internal_dev;

/* Entry into ION allocator for rest of the kernel */
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
			  unsigned int flags)
{
	return ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
}
EXPORT_SYMBOL_GPL(ion_alloc);

int ion_free(struct ion_buffer *buffer)
{
	return ion_buffer_destroy(internal_dev, buffer);
}
EXPORT_SYMBOL_GPL(ion_free);

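/*
 * Allocate a dma-buf and return a new file descriptor referencing it for
 * userspace; the dma-buf reference is dropped if no fd can be installed.
 */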
static int ion_alloc_fd(size_t len, unsigned int heap_id_mask,
			unsigned int flags)
{
	int fd;
	struct dma_buf *dmabuf;

	dmabuf = ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

size_t ion_query_heaps_kernel(struct ion_heap_data *hdata, size_t size)
{
	struct ion_device *dev = internal_dev;
	size_t i = 0, num_heaps = 0;
	struct ion_heap *heap;

	down_read(&dev->lock);

	// If size is 0, return without updating hdata.
	if (size == 0) {
		num_heaps = dev->heap_cnt;
		goto out;
	}

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata[i].name, heap->name, MAX_HEAP_NAME);
		hdata[i].name[MAX_HEAP_NAME - 1] = '\0';
		hdata[i].type = heap->type;
		hdata[i].heap_id = heap->id;

		i++;
		if (i >= size)
			break;
	}

	num_heaps = i;
out:
	up_read(&dev->lock);
	return num_heaps;
}
EXPORT_SYMBOL_GPL(ion_query_heaps_kernel);

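/*
 * Userspace counterpart of ion_query_heaps_kernel(): copy one ion_heap_data
 * record per heap into query->heaps, or just report the number of heaps
 * when no destination buffer is supplied.
 */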
static int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

union ion_ioctl_arg {
	struct ion_allocation_data allocation;
	struct ion_heap_query query;
	u32 ion_abi_version;
};

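/* Reserved fields must be zero; reject any argument that sets them. */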
static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
	switch (cmd) {
	case ION_IOC_HEAP_QUERY:
		if (arg->query.reserved0 ||
		    arg->query.reserved1 ||
		    arg->query.reserved2)
			return -EINVAL;
		break;
	default:
		break;
	}

	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	union ion_ioctl_arg data;

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	/*
	 * The copy_from_user() is done unconditionally, for both read and
	 * write ioctls, so that the argument can be validated. If the ioctl
	 * has no write direction, the buffer is cleared before use.
	 */
	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = validate_ioctl_arg(cmd, &data);
	if (ret) {
		pr_warn_once("%s: ioctl validate failed\n", __func__);
		return ret;
	}

	if (!(_IOC_DIR(cmd) & _IOC_WRITE))
		memset(&data, 0, sizeof(data));

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		int fd;

		fd = ion_alloc_fd(data.allocation.len,
				  data.allocation.heap_id_mask,
				  data.allocation.flags);
		if (fd < 0)
			return fd;

		data.allocation.fd = fd;

		break;
	}
	case ION_IOC_HEAP_QUERY:
		ret = ion_query_heaps(&data.query);
		break;
	case ION_IOC_ABI_VERSION:
		data.ion_abi_version = ION_CURRENT_ABI_VERSION;
		break;
	default:
		return -ENOTTY;
	}

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
			return -EFAULT;
	}
	return ret;
}

static const struct file_operations ion_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ion_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

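/*
 * debugfs "<heap>_shrink" interface: writing N asks the heap's shrinker to
 * scan up to N objects, and writing 0 drains everything it currently counts;
 * reading returns the shrinker's current object count.
 */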
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

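/*
 * Pick a heap id bit for @heap in @dev->heap_ids: system heaps get the fixed
 * ION_HEAP_SYSTEM bit, while DMA and custom heaps are placed within their
 * reserved id ranges.
 */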
static int ion_assign_heap_id(struct ion_heap *heap, struct ion_device *dev)
{
	int id_bit = -EINVAL;
	int start_bit = -1, end_bit = -1;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM:
		id_bit = __ffs(ION_HEAP_SYSTEM);
		break;
	case ION_HEAP_TYPE_DMA:
		start_bit = __ffs(ION_HEAP_DMA_START);
		end_bit = __ffs(ION_HEAP_DMA_END);
		break;
	case ION_HEAP_TYPE_CUSTOM ... ION_HEAP_TYPE_MAX:
		start_bit = __ffs(ION_HEAP_CUSTOM_START);
		end_bit = __ffs(ION_HEAP_CUSTOM_END);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * For DMA and custom heaps, we first let the heap choose its own ID.
	 * This preserves the old behaviour of knowing the heap IDs of these
	 * types of heaps in advance in user space. If a heap with that ID
	 * already exists, it is an error.
	 *
	 * If the heap hasn't picked an ID by itself, we assign it one.
	 */
	if (id_bit < 0) {
		if (heap->id) {
			id_bit = __ffs(heap->id);
			if (id_bit < start_bit || id_bit > end_bit)
				return -EINVAL;
		} else {
			id_bit = find_next_zero_bit(dev->heap_ids, end_bit + 1,
						    start_bit);
			if (id_bit > end_bit)
				return -ENOSPC;
		}
	}

	if (test_and_set_bit(id_bit, dev->heap_ids))
		return -EEXIST;
	heap->id = id_bit;
	dev->heap_cnt++;

	return 0;
}

int __ion_device_add_heap(struct ion_heap *heap, struct module *owner)
{
	struct ion_device *dev = internal_dev;
	int ret;
	struct dentry *heap_root;
	char debug_name[64];

	if (!heap || !heap->ops || !heap->ops->allocate || !heap->ops->free) {
		pr_err("%s: invalid heap or heap_ops\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	heap->owner = owner;
	spin_lock_init(&heap->free_lock);
	spin_lock_init(&heap->stat_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
		ret = ion_heap_init_deferred_free(heap);
		if (ret)
			goto out_heap_cleanup;
	}

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret) {
			pr_err("%s: Failed to register shrinker\n", __func__);
			goto out_heap_cleanup;
		}
	}

	heap->num_of_buffers = 0;
	heap->num_of_alloc_bytes = 0;
	heap->alloc_bytes_wm = 0;

	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
	debugfs_create_u64("num_of_buffers",
			   0444, heap_root,
			   &heap->num_of_buffers);
	debugfs_create_u64("num_of_alloc_bytes",
			   0444,
			   heap_root,
			   &heap->num_of_alloc_bytes);
	debugfs_create_u64("alloc_bytes_wm",
			   0444,
			   heap_root,
			   &heap->alloc_bytes_wm);

	if (heap->shrinker.count_objects &&
	    heap->shrinker.scan_objects) {
		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name,
				    0644,
				    heap_root,
				    heap,
				    &debug_shrink_fops);
	}

	heap->debugfs_dir = heap_root;
	down_write(&dev->lock);
	ret = ion_assign_heap_id(heap, dev);
	if (ret) {
		pr_err("%s: Failed to assign heap id for heap type %x\n",
		       __func__, heap->type);
		up_write(&dev->lock);
		goto out_debugfs_cleanup;
	}

	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	up_write(&dev->lock);

	return 0;

out_debugfs_cleanup:
	debugfs_remove_recursive(heap->debugfs_dir);
out_heap_cleanup:
	ion_heap_cleanup(heap);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(__ion_device_add_heap);

void ion_device_remove_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;

	if (!heap) {
		pr_err("%s: Invalid argument\n", __func__);
		return;
	}

	// take semaphore and remove the heap from dev->heap list
	down_write(&dev->lock);
	/* So no new allocations can happen from this heap */
	plist_del(&heap->node, &dev->heaps);
	if (ion_heap_cleanup(heap) != 0) {
		pr_warn("%s: failed to cleanup heap (%s)\n",
			__func__, heap->name);
	}
	debugfs_remove_recursive(heap->debugfs_dir);
	clear_bit(heap->id, dev->heap_ids);
	dev->heap_cnt--;
	up_write(&dev->lock);
}
EXPORT_SYMBOL_GPL(ion_device_remove_heap);

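/*
 * /sys/kernel/ion/ statistics: total bytes currently allocated from all
 * heaps and total bytes sitting in heap page pools, both reported in KiB.
 */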
static ssize_t
total_heaps_kb_show(struct kobject *kobj, struct kobj_attribute *attr,
		    char *buf)
{
	return sprintf(buf, "%llu\n",
		       div_u64(ion_get_total_heap_bytes(), 1024));
}

static ssize_t
total_pools_kb_show(struct kobject *kobj, struct kobj_attribute *attr,
		    char *buf)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap *heap;
	u64 total_pages = 0;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node)
		if (heap->ops->get_pool_size)
			total_pages += heap->ops->get_pool_size(heap);
	up_read(&dev->lock);

	return sprintf(buf, "%llu\n", total_pages * (PAGE_SIZE / 1024));
}

static struct kobj_attribute total_heaps_kb_attr =
	__ATTR_RO(total_heaps_kb);

static struct kobj_attribute total_pools_kb_attr =
	__ATTR_RO(total_pools_kb);

static struct attribute *ion_device_attrs[] = {
	&total_heaps_kb_attr.attr,
	&total_pools_kb_attr.attr,
	NULL,
};

ATTRIBUTE_GROUPS(ion_device);

static int ion_init_sysfs(void)
{
	struct kobject *ion_kobj;
	int ret;

	ion_kobj = kobject_create_and_add("ion", kernel_kobj);
	if (!ion_kobj)
		return -ENOMEM;

	ret = sysfs_create_groups(ion_kobj, ion_device_groups);
	if (ret) {
		kobject_put(ion_kobj);
		return ret;
	}

	return 0;
}

static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		goto err_reg;
	}

	ret = ion_init_sysfs();
	if (ret) {
		pr_err("ion: failed to add sysfs attributes.\n");
		goto err_sysfs;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;

err_sysfs:
	misc_deregister(&idev->dev);
err_reg:
	kfree(idev);
	return ret;
}
subsys_initcall(ion_device_create);