// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator - dmabuf interface
 *
 * Copyright (c) 2019, Google, Inc.
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ion_private.h"

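/*
 * Duplicate the buffer's scatter/gather table so each attachment gets its
 * own copy to map; DMA addresses are cleared so the copy starts out unmapped.
 */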
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

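/* Free a table previously created by dup_sg_table(). */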
static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

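/*
 * Record a new device attachment. Heaps may override this via buf_ops;
 * otherwise a per-attachment copy of the sg_table is created and tracked
 * on the buffer's attachment list for later cache maintenance.
 */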
static int ion_dma_buf_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (heap->buf_ops.attach)
		return heap->buf_ops.attach(dmabuf, attachment);

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

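/* Tear down an attachment created by ion_dma_buf_attach(). */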
static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (heap->buf_ops.detach)
		return heap->buf_ops.detach(dmabuf, attachment);

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);
	free_duped_table(a->table);

	kfree(a);
}

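/*
 * Map the attachment's private sg_table for DMA. Uncached buffers set
 * DMA_ATTR_SKIP_CPU_SYNC, since no CPU cache maintenance is needed for them.
 */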
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = attachment->dmabuf->priv;
	struct ion_heap *heap = buffer->heap;
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	unsigned long attrs = attachment->dma_map_attrs;

	if (heap->buf_ops.map_dma_buf)
		return heap->buf_ops.map_dma_buf(attachment, direction);

	a = attachment->priv;
	table = a->table;

	if (!(buffer->flags & ION_FLAG_CACHED))
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	if (!dma_map_sg_attrs(attachment->dev, table->sgl, table->nents,
			      direction, attrs))
		return ERR_PTR(-ENOMEM);

	a->mapped = true;

	return table;
}

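/* Undo ion_map_dma_buf(): unmap the attachment's sg_table for DMA. */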
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = attachment->dmabuf->priv;
	struct ion_heap *heap = buffer->heap;
	struct ion_dma_buf_attachment *a = attachment->priv;
	unsigned long attrs = attachment->dma_map_attrs;

	if (heap->buf_ops.unmap_dma_buf)
		return heap->buf_ops.unmap_dma_buf(attachment, table,
						   direction);

	a->mapped = false;

	if (!(buffer->flags & ION_FLAG_CACHED))
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
			   direction, attrs);
}

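/* Called when the last reference to the dma-buf is dropped; frees the buffer. */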
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (heap->buf_ops.release)
		return heap->buf_ops.release(dmabuf);

	ion_free(buffer);
}

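/*
 * Prepare the buffer for CPU access by syncing every currently mapped
 * attachment for the CPU. Uncached buffers need no maintenance.
 */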
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;
	struct ion_dma_buf_attachment *a;

	if (heap->buf_ops.begin_cpu_access)
		return heap->buf_ops.begin_cpu_access(dmabuf, direction);

	mutex_lock(&buffer->lock);
	if (!(buffer->flags & ION_FLAG_CACHED))
		goto unlock;

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}

unlock:
	mutex_unlock(&buffer->lock);
	return 0;
}

static int
ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
				     enum dma_data_direction direction,
				     unsigned int offset, unsigned int len)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	/*
	 * Partial cache flush / invalidate is only allowed if the heap
	 * implements it; it is typically vendor specific, so the ion core
	 * does not provide a default implementation.
	 */
	if (!heap->buf_ops.begin_cpu_access_partial)
		return -EOPNOTSUPP;

	return heap->buf_ops.begin_cpu_access_partial(dmabuf, direction, offset,
						      len);
}

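/*
 * Finish CPU access by syncing every currently mapped attachment back to
 * the device. Uncached buffers need no maintenance.
 */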
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;
	struct ion_dma_buf_attachment *a;

	if (heap->buf_ops.end_cpu_access)
		return heap->buf_ops.end_cpu_access(dmabuf, direction);

	mutex_lock(&buffer->lock);
	if (!(buffer->flags & ION_FLAG_CACHED))
		goto unlock;

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
unlock:
	mutex_unlock(&buffer->lock);

	return 0;
}

static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
					      enum dma_data_direction direction,
					      unsigned int offset,
					      unsigned int len)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	/*
	 * Partial cache flush / invalidate is only allowed if the heap
	 * implements it; it is typically vendor specific, so the ion core
	 * does not provide a default implementation.
	 */
	if (!heap->buf_ops.end_cpu_access_partial)
		return -EOPNOTSUPP;

	return heap->buf_ops.end_cpu_access_partial(dmabuf, direction, offset,
						    len);
}

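/* Map the buffer into userspace; uncached buffers are mapped write-combined. */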
static int ion_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;
	int ret;

	/* now map it to userspace */
	if (heap->buf_ops.mmap) {
		ret = heap->buf_ops.mmap(dmabuf, vma);
	} else {
		mutex_lock(&buffer->lock);
		if (!(buffer->flags & ION_FLAG_CACHED))
			vma->vm_page_prot =
				pgprot_writecombine(vma->vm_page_prot);

		ret = ion_heap_map_user(heap, buffer, vma);
		mutex_unlock(&buffer->lock);
	}

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n", __func__);

	return ret;
}

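/* Return a kernel virtual mapping of the buffer, creating it on first use. */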
static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;
	void *vaddr;

	if (heap->buf_ops.vmap)
		return heap->buf_ops.vmap(dmabuf);

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);

	return vaddr;
}

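/* Drop a kernel virtual mapping taken with ion_dma_buf_vmap(). */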
static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (heap->buf_ops.vunmap) {
		heap->buf_ops.vunmap(dmabuf, vaddr);
		return;
	}

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

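/* Report the buffer's ION_FLAG_* flags; only supported if the heap provides it. */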
static int ion_dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (!heap->buf_ops.get_flags)
		return -EOPNOTSUPP;

	return heap->buf_ops.get_flags(dmabuf, flags);
}

static const struct dma_buf_ops dma_buf_ops = {
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detatch,
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.begin_cpu_access_partial = ion_dma_buf_begin_cpu_access_partial,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.end_cpu_access_partial = ion_dma_buf_end_cpu_access_partial,
	.mmap = ion_dma_buf_mmap,
	.vmap = ion_dma_buf_vmap,
	.vunmap = ion_dma_buf_vunmap,
	.get_flags = ion_dma_buf_get_flags,
};

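/**
 * ion_dmabuf_alloc - allocate an ION buffer and export it as a dma-buf
 * @dev:          the ION device to allocate from
 * @len:          size of the allocation in bytes
 * @heap_id_mask: mask of heap IDs that may satisfy the allocation
 * @flags:        ION_FLAG_* allocation flags
 *
 * Returns the exported dma-buf on success or an ERR_PTR() on failure.
 *
 * Illustrative use from a caller (heap_id is assumed to be a valid heap ID
 * on the system; error handling is minimal):
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = ion_dmabuf_alloc(dev, SZ_1M, 1 << heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 */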
struct dma_buf *ion_dmabuf_alloc(struct ion_device *dev, size_t len,
				 unsigned int heap_id_mask,
				 unsigned int flags)
{
	struct ion_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);

	buffer = ion_buffer_alloc(dev, len, heap_id_mask, flags);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		ion_buffer_destroy(dev, buffer);

	return dmabuf;
}