// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf-cache.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include <linux/genalloc.h>
#include <linux/pagemap.h>
#include <linux/rockchip/rockchip_sip.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

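/*
 * DRAM bank-address bit layout used to spread non-contiguous allocations
 * across banks. The defaults below are fallbacks; they are updated via
 * rockchip_gem_get_ddr_info() when the firmware reports the real layout.
 */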
static u32 bank_bit_first = 12;
static u32 bank_bit_mask = 0x7;

struct page_info {
	struct page *page;
	struct list_head list;
};

#define PG_ROUND 8

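/*
 * Reserve an IOVA range for the object in the private drm_mm and map its
 * sg table through the shared IOMMU domain.
 */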
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
				prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	iommu_flush_iotlb_all(private->domain);

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

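/* Tear down the IOMMU mapping and release the object's IOVA node. */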
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

static void rockchip_gem_free_list(struct list_head lists[])
{
	struct page_info *info, *tmp_info;
	int i;

	for (i = 0; i < PG_ROUND; i++) {
		list_for_each_entry_safe(info, tmp_info, &lists[i], list) {
			list_del(&info->list);
			kfree(info);
		}
	}
}

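/*
 * Query the DRAM address-map layout from the Rockchip SIP firmware interface
 * and cache the bank bit position/mask used by rockchip_gem_get_pages().
 */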
void rockchip_gem_get_ddr_info(void)
{
	struct dram_addrmap_info *ddr_map_info;

	ddr_map_info = sip_smc_get_dram_map();
	if (ddr_map_info) {
		bank_bit_first = ddr_map_info->bank_bit_first;
		bank_bit_mask = ddr_map_info->bank_bit_mask;
	}
}

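/*
 * Allocate backing pages from shmem and reorder them: physically contiguous
 * runs of at least PG_ROUND pages are kept as-is, while shorter runs are
 * binned by their DRAM bank bits and re-emitted round-robin across the
 * banks, presumably to reduce bank-conflict pressure when the buffer is
 * scanned out.
 */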
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;
	unsigned int cur_page;
	struct page **pages, **dst_pages;
	int j;
	int n_pages;
	unsigned long chunk_pages;
	unsigned long remain;
	struct list_head lists[PG_ROUND];
	dma_addr_t phys;
	int end = 0;
	unsigned int bit_index;
	unsigned int block_index[PG_ROUND] = {0};
	struct page_info *info;
	unsigned int maximum;

	for (i = 0; i < PG_ROUND; i++)
		INIT_LIST_HEAD(&lists[i]);

	pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	rk_obj->pages = pages;

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	n_pages = rk_obj->num_pages;

	dst_pages = __vmalloc(sizeof(struct page *) * n_pages,
			      GFP_KERNEL | __GFP_HIGHMEM);
	if (!dst_pages) {
		ret = -ENOMEM;
		goto err_put_pages;
	}

	DRM_DEBUG_KMS("bank_bit_first = 0x%x, bank_bit_mask = 0x%x\n",
		      bank_bit_first, bank_bit_mask);

	cur_page = 0;
	remain = n_pages;
	/* look for the end of the current chunk */
	while (remain) {
		for (j = cur_page + 1; j < n_pages; ++j) {
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_pages = j - cur_page;
		if (chunk_pages >= PG_ROUND) {
			for (i = 0; i < chunk_pages; i++)
				dst_pages[end + i] = pages[cur_page + i];
			end += chunk_pages;
		} else {
			for (i = 0; i < chunk_pages; i++) {
				info = kmalloc(sizeof(*info), GFP_KERNEL);
				if (!info) {
					ret = -ENOMEM;
					goto err_put_list;
				}

				INIT_LIST_HEAD(&info->list);
				info->page = pages[cur_page + i];
				phys = page_to_phys(info->page);
				bit_index = ((phys >> bank_bit_first) & bank_bit_mask) % PG_ROUND;
				list_add_tail(&info->list, &lists[bit_index]);
				block_index[bit_index]++;
			}
		}

		cur_page = j;
		remain -= chunk_pages;
	}

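	/*
	 * Drain the per-bank lists round-robin so pages from different banks
	 * end up interleaved in the destination array.
	 */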
	maximum = block_index[0];
	for (i = 1; i < PG_ROUND; i++)
		maximum = max(maximum, block_index[i]);

	for (i = 0; i < maximum; i++) {
		for (j = 0; j < PG_ROUND; j++) {
			if (!list_empty(&lists[j])) {
				struct page_info *info;

				info = list_first_entry(&lists[j],
							struct page_info, list);
				dst_pages[end++] = info->page;
				list_del(&info->list);
				kfree(info);
			}
		}
	}

	DRM_DEBUG_KMS("%s, %d, end = %d, n_pages = %d\n", __func__, __LINE__,
		      end, n_pages);
	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    dst_pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_list;
	}

	rk_obj->pages = dst_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sgtable_sg(rk_obj->sgt, s, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

	kvfree(pages);

	return 0;

err_put_list:
	rockchip_gem_free_list(lists);
	kvfree(dst_pages);
err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

static inline void *drm_calloc_large(size_t nmemb, size_t size);
static inline void drm_free_large(void *ptr);
static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj);
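/*
 * Allocate a physically contiguous buffer through the DMA API (CMA) and
 * build both an sg table and a page array for it so the rest of the driver
 * can treat it like a paged object.
 */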
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret, i;
	struct scatterlist *s;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_handle, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
		return -ENOMEM;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_dma_free;
	}

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_handle, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		goto err_sgt_free;
	}

	for_each_sg(sgt->sgl, s, sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->pages = drm_calloc_large(rk_obj->num_pages,
					 sizeof(*rk_obj->pages));
	if (!rk_obj->pages) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate pages.\n");
		goto err_sg_table_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(sgt, rk_obj->pages, NULL,
					     rk_obj->num_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_page_free;
	}

	rk_obj->sgt = sgt;

	return 0;

err_page_free:
	drm_free_large(rk_obj->pages);
err_sg_table_free:
	sg_free_table(sgt);
err_sgt_free:
	kfree(sgt);
err_dma_free:
	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr,
		       rk_obj->dma_handle, rk_obj->dma_attrs);

	return ret;
}

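/*
 * Local reimplementation of the old drm_calloc_large()/drm_free_large()
 * helpers: small arrays come from kcalloc(), larger ones from vmalloc(),
 * and kvfree() handles either case on release.
 */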
static inline void *drm_calloc_large(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
		return kcalloc(nmemb, size, GFP_KERNEL);

	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}

static inline void drm_free_large(void *ptr)
{
	kvfree(ptr);
}

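/*
 * Carve a protected buffer out of the reserved secure memory pool and build
 * the page array/sg table for it. Secure buffers are always contiguous and
 * are never kmapped or mmapped by this driver.
 */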
static int rockchip_gem_alloc_secure(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	unsigned long paddr;
	struct sg_table *sgt;
	int ret = 0, i;

	if (!private->secure_buffer_pool) {
		DRM_ERROR("No secure buffer pool found\n");
		return -ENOMEM;
	}

	paddr = gen_pool_alloc(private->secure_buffer_pool, rk_obj->base.size);
	if (!paddr) {
		DRM_ERROR("failed to allocate secure buffer\n");
		return -ENOMEM;
	}

	rk_obj->dma_handle = paddr;
	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->pages = drm_calloc_large(rk_obj->num_pages,
					 sizeof(*rk_obj->pages));
	if (!rk_obj->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_buf_free;
	}

	i = 0;
	while (i < rk_obj->num_pages) {
		rk_obj->pages[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
		i++;
	}
	sgt = drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_free_pages;
	}

	rk_obj->sgt = sgt;

	return 0;

err_free_pages:
	drm_free_large(rk_obj->pages);
err_buf_free:
	/* paddr was advanced by the loop above, free the original base */
	gen_pool_free(private->secure_buffer_pool, rk_obj->dma_handle,
		      rk_obj->base.size);

	return ret;
}

static void rockchip_gem_free_secure(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	drm_free_large(rk_obj->pages);
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	gen_pool_free(private->secure_buffer_pool, rk_obj->dma_handle,
		      rk_obj->base.size);
}

static inline bool is_vop_enabled(void)
{
	return (IS_ENABLED(CONFIG_ROCKCHIP_VOP) || IS_ENABLED(CONFIG_ROCKCHIP_VOP2));
}

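/*
 * Pick the backing store for a new object: the secure pool for protected
 * buffers, the DMA API (CMA) for contiguous buffers (forced when a VOP is
 * enabled but no IOMMU domain is available), and shmem pages otherwise.
 * When an IOMMU domain exists the buffer is then mapped to obtain its
 * device address.
 */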
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int ret = 0;

	if (!private->domain && is_vop_enabled())
		rk_obj->flags |= ROCKCHIP_BO_CONTIG;

	if (rk_obj->flags & ROCKCHIP_BO_SECURE) {
		rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_SECURE;
		rk_obj->flags |= ROCKCHIP_BO_CONTIG;
		if (alloc_kmap) {
			DRM_ERROR("Not allow alloc secure buffer with kmap\n");
			return -EINVAL;
		}
		ret = rockchip_gem_alloc_secure(rk_obj);
		if (ret)
			return ret;
	} else if (rk_obj->flags & ROCKCHIP_BO_CONTIG) {
		rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_CMA;
		ret = rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
		if (ret)
			return ret;
	} else {
		rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_SHMEM;
		ret = rockchip_gem_get_pages(rk_obj);
		if (ret < 0)
			return ret;

		if (alloc_kmap) {
			rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages,
					      VM_MAP,
					      pgprot_writecombine(PAGE_KERNEL));
			if (!rk_obj->kvaddr) {
				DRM_ERROR("failed to vmap() buffer\n");
				ret = -ENOMEM;
				goto err_free;
			}
		}
	}

	if (private->domain) {
		ret = rockchip_gem_iommu_map(rk_obj);
		if (ret < 0)
			goto err_free;
	} else if (is_vop_enabled()) {
		WARN_ON(!rk_obj->dma_handle);
		rk_obj->dma_addr = rk_obj->dma_handle;
	}

	return 0;

err_free:
	if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE)
		rockchip_gem_free_secure(rk_obj);
	else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_CMA)
		rockchip_gem_free_dma(rk_obj);
	else
		rockchip_gem_put_pages(rk_obj);
	return ret;
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	drm_free_large(rk_obj->pages);
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr,
		       rk_obj->dma_handle, rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		rockchip_gem_iommu_unmap(rk_obj);

	if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SHMEM) {
		vunmap(rk_obj->kvaddr);
		rockchip_gem_put_pages(rk_obj);
	} else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE) {
		rockchip_gem_free_secure(rk_obj);
	} else {
		rockchip_gem_free_dma(rk_obj);
	}
}

static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

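/*
 * Common mmap path: default to write-combined mappings (cacheable only when
 * the BO was created with ROCKCHIP_BO_CACHABLE), refuse to map secure
 * buffers, and dispatch to the page-array or DMA-API helper depending on
 * how the buffer is backed.
 */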
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/* default is wc. */
	if (rk_obj->flags & ROCKCHIP_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE) {
		DRM_ERROR("Disallow mmap for secure buffer\n");
		ret = -EINVAL;
	} else if (rk_obj->pages) {
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	} else {
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
	}

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size,
			  unsigned int flags)
{
	struct address_space *mapping;
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

#ifdef CONFIG_ARM_LPAE
	gfp_t gfp_mask = GFP_HIGHUSER | __GFP_RECLAIMABLE | __GFP_DMA32;
#else
	gfp_t gfp_mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
#endif

	if (flags & ROCKCHIP_BO_DMA32)
		gfp_mask |= __GFP_DMA32;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_set_gfp_mask(mapping, gfp_mask);

	return rk_obj;
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap, unsigned int flags)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size, flags);
	if (IS_ERR(rk_obj))
		return rk_obj;
	rk_obj->flags = flags;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_destroy - destroy an imported gem object
 *
 * dma_buf_unmap_attachment() and dma_buf_detach() are redefined when
 * CONFIG_DMABUF_CACHE is enabled.
 *
 * Otherwise the same as drm_prime_gem_destroy().
 */
static void rockchip_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
					  DMA_BIDIRECTIONAL, 0);
		}
		drm_free_large(rk_obj->pages);
		rockchip_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle, unsigned int flags)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;
	bool alloc_kmap = flags & ROCKCHIP_BO_ALLOC_KMAP ? true : false;

	rk_obj = rockchip_gem_create_object(drm, size, alloc_kmap, flags);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle returned to userspace is that ID.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required; wrap
 * it in your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	u32 min_pitch = args->width * DIV_ROUND_UP(args->bpp, 8);

	/*
	 * align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle, args->flags);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);

	if (err)
		return err;

	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

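/*
 * Import a PRIME sg table: map it through the IOMMU domain when one exists,
 * otherwise map it with the DMA API and require it to be contiguous, then
 * build the page array that userspace mmap relies on.
 */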
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size, 0);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
	rk_obj->pages = drm_calloc_large(rk_obj->num_pages, sizeof(*rk_obj->pages));
	if (!rk_obj->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_rk_obj;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sg, rk_obj->pages, NULL, rk_obj->num_pages);
	if (ret < 0) {
		DRM_ERROR("invalid sgtable.\n");
		drm_free_large(rk_obj->pages);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

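/*
 * Kernel virtual mapping for PRIME: vmap() the page array when one exists,
 * otherwise fall back to the kernel address returned by the DMA allocator
 * (unless the buffer was allocated with DMA_ATTR_NO_KERNEL_MAPPING).
 */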
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}

int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_rockchip_gem_create *args = data;
	struct rockchip_gem_object *rk_obj;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle, args->flags);
	return PTR_ERR_OR_ZERO(rk_obj);
}

int rockchip_gem_map_offset_ioctl(struct drm_device *drm, void *data,
				  struct drm_file *file_priv)
{
	struct drm_rockchip_gem_map_off *args = data;

	return drm_gem_dumb_map_offset(file_priv, drm, args->handle,
				       &args->offset);
}

int rockchip_gem_get_phys_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_rockchip_gem_phys *args = data;
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret = 0;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}
	rk_obj = to_rockchip_obj(obj);

	if (!(rk_obj->flags & ROCKCHIP_BO_CONTIG)) {
		DRM_ERROR("Can't get phys address from non-contiguous buffer.\n");
		ret = -EINVAL;
		goto out;
	}

	args->phy_addr = page_to_phys(rk_obj->pages[0]);

out:
	drm_gem_object_put(obj);

	return ret;
}

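/*
 * dma-buf begin/end CPU access: bracket CPU accesses with cache maintenance
 * over the object's sg table so the CPU and device views stay coherent.
 */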
int rockchip_gem_prime_begin_cpu_access(struct drm_gem_object *obj,
					enum dma_data_direction dir)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	if (!rk_obj->sgt)
		return 0;

	dma_sync_sg_for_cpu(drm->dev, rk_obj->sgt->sgl,
			    rk_obj->sgt->nents, dir);
	return 0;
}

int rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj,
				      enum dma_data_direction dir)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	if (!rk_obj->sgt)
		return 0;

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl,
			       rk_obj->sgt->nents, dir);
	return 0;
}