Lines Matching full:buffer

61 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_attach() local
64 size_t size = buffer->pagecount << PAGE_SHIFT; in rk_cma_heap_attach()
78 sg_set_page(table->sgl, buffer->cma_pages, PAGE_ALIGN(size), 0); in rk_cma_heap_attach()
86 buffer->attached = true; in rk_cma_heap_attach()
88 mutex_lock(&buffer->lock); in rk_cma_heap_attach()
89 list_add(&a->list, &buffer->attachments); in rk_cma_heap_attach()
90 mutex_unlock(&buffer->lock); in rk_cma_heap_attach()
98 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_detach() local
101 mutex_lock(&buffer->lock); in rk_cma_heap_detach()
103 mutex_unlock(&buffer->lock); in rk_cma_heap_detach()
105 buffer->attached = false; in rk_cma_heap_detach()
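
Lines 61-105 above cover the attach/detach bookkeeping: attach wraps the CMA pages in a one-entry sg_table, marks the buffer attached, and links a per-attachment record onto buffer->attachments under buffer->lock; detach presumably unlinks it under the same lock (line 102 is not in the matches) and clears buffer->attached. A minimal sketch of the per-attachment record those lines imply; the struct name and exact fields are assumptions, not taken from the driver:

/* Assumed per-attachment bookkeeping, inferred from lines 61-105. */
struct rk_cma_heap_attachment {
	struct device *dev;		/* importing device handed to attach */
	struct sg_table table;		/* single entry; sg_set_page() at line 78 fills it */
	struct list_head list;		/* linked on buffer->attachments (line 89) */
	bool mapped;			/* assumed: set once map_dma_buf succeeds */
};
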
143 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_dma_buf_begin_cpu_access_partial() local
146 if (buffer->vmap_cnt) in rk_cma_heap_dma_buf_begin_cpu_access_partial()
147 invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); in rk_cma_heap_dma_buf_begin_cpu_access_partial()
149 mutex_lock(&buffer->lock); in rk_cma_heap_dma_buf_begin_cpu_access_partial()
150 list_for_each_entry(a, &buffer->attachments, list) { in rk_cma_heap_dma_buf_begin_cpu_access_partial()
157 if (buffer->phys && !buffer->attached) in rk_cma_heap_dma_buf_begin_cpu_access_partial()
158 dma_sync_single_for_cpu(rk_dma_heap_get_dev(buffer->heap->heap), in rk_cma_heap_dma_buf_begin_cpu_access_partial()
159 buffer->phys + offset, in rk_cma_heap_dma_buf_begin_cpu_access_partial()
162 mutex_unlock(&buffer->lock); in rk_cma_heap_dma_buf_begin_cpu_access_partial()
173 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_dma_buf_end_cpu_access_partial() local
176 if (buffer->vmap_cnt) in rk_cma_heap_dma_buf_end_cpu_access_partial()
177 flush_kernel_vmap_range(buffer->vaddr, buffer->len); in rk_cma_heap_dma_buf_end_cpu_access_partial()
179 mutex_lock(&buffer->lock); in rk_cma_heap_dma_buf_end_cpu_access_partial()
180 list_for_each_entry(a, &buffer->attachments, list) { in rk_cma_heap_dma_buf_end_cpu_access_partial()
187 if (buffer->phys && !buffer->attached) in rk_cma_heap_dma_buf_end_cpu_access_partial()
188 dma_sync_single_for_device(rk_dma_heap_get_dev(buffer->heap->heap), in rk_cma_heap_dma_buf_end_cpu_access_partial()
189 buffer->phys + offset, in rk_cma_heap_dma_buf_end_cpu_access_partial()
192 mutex_unlock(&buffer->lock); in rk_cma_heap_dma_buf_end_cpu_access_partial()
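
Both *_partial hooks above share one shape: invalidate or flush the kernel vmap if one exists, sync every attachment, then sync only the requested window by physical address when no importer is attached. A hedged sketch of the begin side; the callback signature follows the Android/vendor partial-access extension, and the per-attachment loop body (lines 151-156, elided from the matches) plus the attachment fields are assumptions:

/* Sketch of the flow implied by lines 143-162; the loop body and the
 * rk_cma_heap_attachment fields are assumptions. */
static int
rk_cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
					     enum dma_data_direction dir,
					     unsigned int offset,
					     unsigned int len)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap_attachment *a;

	/* The kernel mapping may hold stale cache lines for this range. */
	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)				/* assumed field */
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, dir);
	}

	/* No importer attached: sync just the requested window through the
	 * heap device by physical address (lines 157-160). */
	if (buffer->phys && !buffer->attached)
		dma_sync_single_for_cpu(rk_dma_heap_get_dev(buffer->heap->heap),
					buffer->phys + offset, len, dir);
	mutex_unlock(&buffer->lock);

	return 0;
}

The end_cpu_access_partial side (lines 173-192) mirrors this with flush_kernel_vmap_range() and the *_for_device sync variants.
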
200 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_dma_buf_begin_cpu_access() local
201 unsigned int len = buffer->pagecount * PAGE_SIZE; in rk_cma_heap_dma_buf_begin_cpu_access()
209 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_dma_buf_end_cpu_access() local
210 unsigned int len = buffer->pagecount * PAGE_SIZE; in rk_cma_heap_dma_buf_end_cpu_access()
217 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_mmap() local
221 ret = remap_pfn_range(vma, vma->vm_start, __phys_to_pfn(buffer->phys), in rk_cma_heap_mmap()
229 static void *rk_cma_heap_do_vmap(struct rk_cma_heap_buffer *buffer) in rk_cma_heap_do_vmap() argument
234 vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot); in rk_cma_heap_do_vmap()
243 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_vmap() local
246 mutex_lock(&buffer->lock); in rk_cma_heap_vmap()
247 if (buffer->vmap_cnt) { in rk_cma_heap_vmap()
248 buffer->vmap_cnt++; in rk_cma_heap_vmap()
249 vaddr = buffer->vaddr; in rk_cma_heap_vmap()
253 vaddr = rk_cma_heap_do_vmap(buffer); in rk_cma_heap_vmap()
257 buffer->vaddr = vaddr; in rk_cma_heap_vmap()
258 buffer->vmap_cnt++; in rk_cma_heap_vmap()
260 mutex_unlock(&buffer->lock); in rk_cma_heap_vmap()
267 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_vunmap() local
269 mutex_lock(&buffer->lock); in rk_cma_heap_vunmap()
270 if (!--buffer->vmap_cnt) { in rk_cma_heap_vunmap()
271 vunmap(buffer->vaddr); in rk_cma_heap_vunmap()
272 buffer->vaddr = NULL; in rk_cma_heap_vunmap()
274 mutex_unlock(&buffer->lock); in rk_cma_heap_vunmap()
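
Lines 243-274 show a refcounted kernel mapping managed under buffer->lock. A hedged sketch of that pattern; the dma_buf vmap/vunmap callback signatures differ across kernel versions, so the older void-pointer form and the error path here are assumptions:

/* Sketch of the refcounted vmap/vunmap pattern implied by lines 243-274.
 * Callback signatures and error handling are assumptions. */
static void *rk_cma_heap_vmap(struct dma_buf *dmabuf)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		/* Already mapped: reuse the mapping, bump the count. */
		buffer->vmap_cnt++;
		vaddr = buffer->vaddr;
		goto out;
	}

	vaddr = rk_cma_heap_do_vmap(buffer);	/* vmap(buffer->pages, pagecount, VM_MAP, pgprot) */
	if (IS_ERR(vaddr))
		goto out;

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
out:
	mutex_unlock(&buffer->lock);
	return vaddr;
}

static void rk_cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		/* Last user gone: tear down the kernel mapping. */
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
}
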
280 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_remove_dmabuf_list() local
281 struct rk_cma_heap *cma_heap = buffer->heap; in rk_cma_heap_remove_dmabuf_list()
303 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_add_dmabuf_list() local
304 struct rk_cma_heap *cma_heap = buffer->heap; in rk_cma_heap_add_dmabuf_list()
313 buf->start = buffer->phys; in rk_cma_heap_add_dmabuf_list()
314 buf->end = buf->start + buffer->len - 1; in rk_cma_heap_add_dmabuf_list()
386 struct rk_cma_heap_buffer *buffer = dmabuf->priv; in rk_cma_heap_dma_buf_release() local
387 struct rk_cma_heap *cma_heap = buffer->heap; in rk_cma_heap_dma_buf_release()
390 if (buffer->vmap_cnt > 0) { in rk_cma_heap_dma_buf_release()
391 WARN(1, "%s: buffer still mapped in the kernel\n", __func__); in rk_cma_heap_dma_buf_release()
392 vunmap(buffer->vaddr); in rk_cma_heap_dma_buf_release()
398 kfree(buffer->pages); in rk_cma_heap_dma_buf_release()
400 cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); in rk_cma_heap_dma_buf_release()
401 rk_dma_heap_total_dec(heap, buffer->len); in rk_cma_heap_dma_buf_release()
403 kfree(buffer); in rk_cma_heap_dma_buf_release()
429 struct rk_cma_heap_buffer *buffer; in rk_cma_heap_allocate() local
439 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in rk_cma_heap_allocate()
440 if (!buffer) in rk_cma_heap_allocate()
443 INIT_LIST_HEAD(&buffer->attachments); in rk_cma_heap_allocate()
444 mutex_init(&buffer->lock); in rk_cma_heap_allocate()
445 buffer->len = size; in rk_cma_heap_allocate()
477 buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), in rk_cma_heap_allocate()
479 if (!buffer->pages) { in rk_cma_heap_allocate()
485 buffer->pages[pg] = &cma_pages[pg]; in rk_cma_heap_allocate()
487 buffer->cma_pages = cma_pages; in rk_cma_heap_allocate()
488 buffer->heap = cma_heap; in rk_cma_heap_allocate()
489 buffer->pagecount = pagecount; in rk_cma_heap_allocate()
494 exp_info.size = buffer->len; in rk_cma_heap_allocate()
496 exp_info.priv = buffer; in rk_cma_heap_allocate()
503 buffer->phys = page_to_phys(cma_pages); in rk_cma_heap_allocate()
504 dma_sync_single_for_cpu(rk_dma_heap_get_dev(heap), buffer->phys, in rk_cma_heap_allocate()
505 buffer->pagecount * PAGE_SIZE, in rk_cma_heap_allocate()
512 rk_dma_heap_total_inc(heap, buffer->len); in rk_cma_heap_allocate()
519 kfree(buffer->pages); in rk_cma_heap_allocate()
523 kfree(buffer); in rk_cma_heap_allocate()
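
Every buffer field touched in this listing comes from one structure. A reconstruction from the accesses above, with types assumed from how each field is used rather than taken from the driver's header:

/* Reconstructed from the field accesses in the matches above; types are
 * assumptions based on usage, not the driver's actual definition. */
struct rk_cma_heap_buffer {
	struct rk_cma_heap *heap;	/* owning heap, used for cma_release() and stats */
	struct list_head attachments;	/* attach()/detach() list, protected by lock */
	struct mutex lock;		/* protects attachments, vmap_cnt, vaddr */
	unsigned long len;		/* byte length (exp_info.size, line 494) */
	struct page *cma_pages;		/* first page of the CMA allocation (line 487) */
	struct page **pages;		/* per-page array fed to vmap() (lines 477-485) */
	pgoff_t pagecount;		/* number of pages; len >> PAGE_SHIFT */
	void *vaddr;			/* kernel mapping from rk_cma_heap_do_vmap() */
	int vmap_cnt;			/* vmap() refcount (lines 247-258, 270) */
	phys_addr_t phys;		/* page_to_phys(cma_pages), used for cache sync */
	bool attached;			/* set in attach() (line 86), cleared in detach() */
};
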