// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator generic heap helpers
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#include "ion_private.h"

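/*
 * Shrinker "count" callback: report how many pages could be reclaimed,
 * i.e. everything on the deferred-free list plus whatever the heap-specific
 * shrink op reports (a nr_to_scan of 0 asks it to only count, not free).
 */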
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
					   struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int total = 0;

	total = ion_heap_freelist_size(heap) / PAGE_SIZE;

	if (heap->ops->shrink)
		total += heap->ops->shrink(heap, sc->gfp_mask, 0);

	return total;
}

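/*
 * Shrinker "scan" callback: free up to sc->nr_to_scan pages, draining the
 * deferred-free list before invoking the heap-specific shrink op.
 */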
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int freed = 0;
	int to_scan = sc->nr_to_scan;

	if (to_scan == 0)
		return 0;

	/*
	 * Shrink the free list first; there is no point in zeroing memory
	 * we are just going to reclaim. Also skip any possible page pooling.
	 */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
			PAGE_SIZE;

	to_scan -= freed;
	if (to_scan <= 0)
		return freed;

	if (heap->ops->shrink)
		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);

	return freed;
}

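/*
 * Common helper for draining the deferred-free list. A size of 0 means
 * "drain everything currently queued". When skip_pools is true the buffers
 * are marked ION_PRIV_FLAG_SHRINKER_FREE so that their pages skip any page
 * pooling on release. The free_lock is dropped around ion_buffer_release(),
 * which must not run under the spinlock.
 */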
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
				       bool skip_pools)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		if (skip_pools)
			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
		total_drained += buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_release(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}

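/* Per-heap kthread that releases queued buffers outside the caller's context. */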
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     (ion_heap_freelist_size(heap) > 0 ||
				      kthread_should_stop()));

		spin_lock(&heap->free_lock);
		if (list_empty(&heap->free_list)) {
			spin_unlock(&heap->free_lock);
			if (!kthread_should_stop())
				continue;
			break;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_release(buffer);
	}

	return 0;
}

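/*
 * Map a buffer's pages into the kernel with vmap(). Cached buffers get a
 * PAGE_KERNEL mapping, uncached buffers a write-combined one.
 */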
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(array_size(npages,
						 sizeof(struct page *)));
	struct page **tmp = pages;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}
EXPORT_SYMBOL_GPL(ion_heap_map_kernel);

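/* Tear down a mapping created by ion_heap_map_kernel(). */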
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}
EXPORT_SYMBOL_GPL(ion_heap_unmap_kernel);

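/*
 * Map a buffer into userspace by walking the scatterlist and calling
 * remap_pfn_range() on each (partial) entry, honouring vma->vm_pgoff.
 */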
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ion_heap_map_user);

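/* Queue a buffer for deferred freeing and wake the free thread. */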
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	spin_lock(&heap->free_lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	wake_up(&heap->waitqueue);
}

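/* Number of bytes currently queued on the deferred-free list. */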
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	spin_lock(&heap->free_lock);
	size = heap->free_list_size;
	spin_unlock(&heap->free_lock);

	return size;
}

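/* Drain up to @size bytes from the free list, allowing page pooling. */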
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, false);
}

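/*
 * Drain up to @size bytes from the free list on behalf of the shrinker,
 * skipping any page pooling so the memory is returned to the system.
 */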
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, true);
}

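/* Set up the free list and start the deferred-free thread for a heap. */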
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	INIT_LIST_HEAD(&heap->free_list);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR_OR_ZERO(heap->task);
	}
	sched_set_normal(heap->task, 19);

	return 0;
}

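/* Register the heap's shrinker callbacks with the MM shrinker core. */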
int ion_heap_init_shrinker(struct ion_heap *heap)
{
	heap->shrinker.count_objects = ion_heap_shrink_count;
	heap->shrinker.scan_objects = ion_heap_shrink_scan;
	heap->shrinker.seeks = DEFAULT_SEEKS;
	heap->shrinker.batch = 0;

	return register_shrinker(&heap->shrinker);
}

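/*
 * Undo ion_heap_init_deferred_free() and ion_heap_init_shrinker(): drain any
 * pending buffers, stop the free thread and unregister the shrinker.
 * Returns -EBUSY if the free list could not be fully drained.
 */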
int ion_heap_cleanup(struct ion_heap *heap)
{
	int ret;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE &&
	    !IS_ERR_OR_NULL(heap->task)) {
		size_t free_list_size = ion_heap_freelist_size(heap);
		size_t total_drained = ion_heap_freelist_drain(heap, 0);

		if (total_drained != free_list_size) {
			pr_err("%s: %s heap drained %zu bytes, requested %zu\n",
			       __func__, heap->name, total_drained,
			       free_list_size);
			return -EBUSY;
		}
		ret = kthread_stop(heap->task);
		if (ret < 0) {
			pr_err("%s: failed to stop heap free thread\n",
			       __func__);
			return ret;
		}
	}

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		unregister_shrinker(&heap->shrinker);

	return 0;
}