Lines Matching full:buffer
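
The matches below are consistent with a CMA dma-buf heap exporter in the style of drivers/dma-buf/heaps/cma_heap.c, here with vendor extensions (an uncached buffer mode, partial CPU-access ops, and a physical-address query). As orientation for the fragments that follow, this is a minimal sketch of the bookkeeping types the matched lines dereference; the field types and the attachment structure are inferred from how the fields are used and are assumptions, not a verbatim copy of the file being indexed.

#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* One heap instance: the dma-heap handle plus its backing CMA area. */
struct cma_heap {
        struct dma_heap *heap;
        struct cma *cma;
};

/* Per-buffer state referenced by the matches (field types inferred). */
struct cma_heap_buffer {
        struct cma_heap *heap;
        struct list_head attachments;   /* list of cma_heap_attachment */
        struct mutex lock;              /* protects attachments and vmap state */
        unsigned long len;              /* buffer size in bytes */
        struct page *cma_pages;         /* first page of the contiguous CMA block */
        struct page **pages;            /* per-page array used by attach/mmap/vmap */
        pgoff_t pagecount;
        int vmap_cnt;                   /* kernel-mapping refcount */
        void *vaddr;                    /* kernel mapping while vmap_cnt > 0 */
        bool uncached;                  /* vendor extension: write-combining mappings */
};

/* Per-attachment state kept on buffer->attachments. */
struct cma_heap_attachment {
        struct device *dev;
        struct sg_table table;
        struct list_head list;
        bool mapped;
        bool uncached;
};
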

60 	struct cma_heap_buffer *buffer = dmabuf->priv;  in cma_heap_attach()  local
68 ret = sg_alloc_table_from_pages(&a->table, buffer->pages, in cma_heap_attach()
69 buffer->pagecount, 0, in cma_heap_attach()
70 buffer->pagecount << PAGE_SHIFT, in cma_heap_attach()
81 a->uncached = buffer->uncached; in cma_heap_attach()
84 mutex_lock(&buffer->lock); in cma_heap_attach()
85 list_add(&a->list, &buffer->attachments); in cma_heap_attach()
86 mutex_unlock(&buffer->lock); in cma_heap_attach()
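
Lines 60-86 show the attach callback wrapping the buffer's page array in a scatterlist table and registering the attachment on the buffer's list under its mutex. A sketch using the types above; the details not visible in the matches (a->dev, a->mapped, attachment->priv) follow the common pattern and are assumptions.

static int cma_heap_attach(struct dma_buf *dmabuf,
                           struct dma_buf_attachment *attachment)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct cma_heap_attachment *a;
        int ret;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        /* Describe the whole buffer through its page array. */
        ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
                                        buffer->pagecount, 0,
                                        buffer->pagecount << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret) {
                kfree(a);
                return ret;
        }

        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);
        a->mapped = false;
        a->uncached = buffer->uncached;
        attachment->priv = a;

        /* Track the attachment so the CPU-access ops can sync its mapping. */
        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}
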
94 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_detach() local
97 mutex_lock(&buffer->lock); in cma_heap_detach()
99 mutex_unlock(&buffer->lock); in cma_heap_detach()
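
The detach matches at lines 94-99 only show the lock/unlock around the list update; the list_del and the sg_table/attachment teardown in between are assumed from the usual pairing with the attach path above.

static void cma_heap_detach(struct dma_buf *dmabuf,
                            struct dma_buf_attachment *attachment)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct cma_heap_attachment *a = attachment->priv;

        /* Unlink under the buffer lock, then free the attachment. */
        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        sg_free_table(&a->table);
        kfree(a);
}
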
144 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_begin_cpu_access_partial() local
145 phys_addr_t phys = page_to_phys(buffer->cma_pages); in cma_heap_dma_buf_begin_cpu_access_partial()
147 if (buffer->vmap_cnt) in cma_heap_dma_buf_begin_cpu_access_partial()
148 invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); in cma_heap_dma_buf_begin_cpu_access_partial()
150 if (buffer->uncached) in cma_heap_dma_buf_begin_cpu_access_partial()
153 mutex_lock(&buffer->lock); in cma_heap_dma_buf_begin_cpu_access_partial()
154 dma_sync_single_for_cpu(dma_heap_get_dev(buffer->heap->heap), in cma_heap_dma_buf_begin_cpu_access_partial()
158 mutex_unlock(&buffer->lock); in cma_heap_dma_buf_begin_cpu_access_partial()
169 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_end_cpu_access_partial() local
170 phys_addr_t phys = page_to_phys(buffer->cma_pages); in cma_heap_dma_buf_end_cpu_access_partial()
172 if (buffer->vmap_cnt) in cma_heap_dma_buf_end_cpu_access_partial()
173 flush_kernel_vmap_range(buffer->vaddr, buffer->len); in cma_heap_dma_buf_end_cpu_access_partial()
175 if (buffer->uncached) in cma_heap_dma_buf_end_cpu_access_partial()
178 mutex_lock(&buffer->lock); in cma_heap_dma_buf_end_cpu_access_partial()
179 dma_sync_single_for_device(dma_heap_get_dev(buffer->heap->heap), in cma_heap_dma_buf_end_cpu_access_partial()
183 mutex_unlock(&buffer->lock); in cma_heap_dma_buf_end_cpu_access_partial()
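
Lines 144-183 belong to the partial CPU-access ops, an extension carried by Android/vendor kernels rather than the upstream dma_buf_ops. The sketch below reconstructs the begin side; the offset/len parameters, the early return for uncached buffers, and the sync arguments between the visible matches are assumptions. Note that the physical address is handed to dma_sync_single_for_cpu() as a DMA address, which presumes the heap device is direct-mapped.

static int cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
                                                     enum dma_data_direction dir,
                                                     unsigned int offset,
                                                     unsigned int len)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        phys_addr_t phys = page_to_phys(buffer->cma_pages);

        /* Drop stale cache lines from any kernel vmap of the buffer. */
        if (buffer->vmap_cnt)
                invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

        /* Uncached buffers need no cache maintenance. */
        if (buffer->uncached)
                return 0;

        mutex_lock(&buffer->lock);
        dma_sync_single_for_cpu(dma_heap_get_dev(buffer->heap->heap),
                                phys + offset, len, dir);
        mutex_unlock(&buffer->lock);

        return 0;
}

The end_cpu_access_partial counterpart at lines 169-183 mirrors this: it flushes (rather than invalidates) the kernel vmap range and calls dma_sync_single_for_device() over the same sub-range.
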
191 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_begin_cpu_access() local
194 if (buffer->vmap_cnt) in cma_heap_dma_buf_begin_cpu_access()
195 invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); in cma_heap_dma_buf_begin_cpu_access()
197 mutex_lock(&buffer->lock); in cma_heap_dma_buf_begin_cpu_access()
198 list_for_each_entry(a, &buffer->attachments, list) { in cma_heap_dma_buf_begin_cpu_access()
203 mutex_unlock(&buffer->lock); in cma_heap_dma_buf_begin_cpu_access()
211 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_end_cpu_access() local
214 if (buffer->vmap_cnt) in cma_heap_dma_buf_end_cpu_access()
215 flush_kernel_vmap_range(buffer->vaddr, buffer->len); in cma_heap_dma_buf_end_cpu_access()
217 mutex_lock(&buffer->lock); in cma_heap_dma_buf_end_cpu_access()
218 list_for_each_entry(a, &buffer->attachments, list) { in cma_heap_dma_buf_end_cpu_access()
223 mutex_unlock(&buffer->lock); in cma_heap_dma_buf_end_cpu_access()
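
Lines 191-223 are the whole-buffer CPU-access ops: after handling any kernel vmap, they walk the attachment list under the buffer lock and sync each attachment's scatterlist. The loop body (skipping attachments that are not currently DMA-mapped) is not visible in the matches and is an assumption based on the usual pattern; end_cpu_access is symmetric, using flush_kernel_vmap_range() and dma_sync_sgtable_for_device().

static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                             enum dma_data_direction direction)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct cma_heap_attachment *a;

        if (buffer->vmap_cnt)
                invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                /* Only attachments with a live DMA mapping need syncing. */
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}
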
231 struct cma_heap_buffer *buffer = vma->vm_private_data; in cma_heap_vm_fault() local
233 if (vmf->pgoff > buffer->pagecount) in cma_heap_vm_fault()
236 vmf->page = buffer->pages[vmf->pgoff]; in cma_heap_vm_fault()
248 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_mmap() local
253 if (buffer->uncached) in cma_heap_mmap()
257 vma->vm_private_data = buffer; in cma_heap_mmap()
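
Lines 231-257 implement mmap by installing a fault handler that serves pages out of buffer->pages, with uncached buffers mapped write-combining. In the sketch below the vm_operations_struct name and the VM_SHARED check are assumptions. One correction worth noting: the match at line 233 compares vmf->pgoff with '>', which lets an offset equal to pagecount through; the bounds check below uses '>=', which is also what upstream later switched to.

static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct cma_heap_buffer *buffer = vma->vm_private_data;

        /* pgoff == pagecount is already past the end of the buffer. */
        if (vmf->pgoff >= buffer->pagecount)
                return VM_FAULT_SIGBUS;

        vmf->page = buffer->pages[vmf->pgoff];
        get_page(vmf->page);

        return 0;
}

static const struct vm_operations_struct cma_heap_vm_ops = {
        .fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        /* Userspace sees uncached buffers as write-combining. */
        if (buffer->uncached)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        vma->vm_ops = &cma_heap_vm_ops;
        vma->vm_private_data = buffer;

        return 0;
}
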
262 static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) in cma_heap_do_vmap() argument
267 if (buffer->uncached) in cma_heap_do_vmap()
270 vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot); in cma_heap_do_vmap()
279 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_vmap() local
282 mutex_lock(&buffer->lock); in cma_heap_vmap()
283 if (buffer->vmap_cnt) { in cma_heap_vmap()
284 buffer->vmap_cnt++; in cma_heap_vmap()
285 vaddr = buffer->vaddr; in cma_heap_vmap()
289 vaddr = cma_heap_do_vmap(buffer); in cma_heap_vmap()
293 buffer->vaddr = vaddr; in cma_heap_vmap()
294 buffer->vmap_cnt++; in cma_heap_vmap()
296 mutex_unlock(&buffer->lock); in cma_heap_vmap()
303 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_vunmap() local
305 mutex_lock(&buffer->lock); in cma_heap_vunmap()
306 if (!--buffer->vmap_cnt) { in cma_heap_vunmap()
307 vunmap(buffer->vaddr); in cma_heap_vunmap()
308 buffer->vaddr = NULL; in cma_heap_vunmap()
310 mutex_unlock(&buffer->lock); in cma_heap_vunmap()
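
Lines 262-310 cover the kernel-mapping path: cma_heap_do_vmap() builds a vmap of the page array (write-combining when the buffer is uncached), and cma_heap_vmap()/cma_heap_vunmap() refcount that mapping under the buffer lock so repeated vmap calls reuse a single mapping. The vmap/vunmap prototypes differ across kernel versions; the matches (line 285, "vaddr = buffer->vaddr;") suggest the older pointer-returning form sketched below, whereas newer kernels fill an iosys_map instead, so treat the signatures as assumptions.

static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
        pgprot_t pgprot = PAGE_KERNEL;
        void *vaddr;

        /* Match the userspace view: uncached buffers map write-combining. */
        if (buffer->uncached)
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

static void *cma_heap_vmap(struct dma_buf *dmabuf)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        void *vaddr;

        mutex_lock(&buffer->lock);
        if (buffer->vmap_cnt) {
                /* Already mapped: hand out the existing mapping. */
                buffer->vmap_cnt++;
                vaddr = buffer->vaddr;
                goto out;
        }

        vaddr = cma_heap_do_vmap(buffer);
        if (IS_ERR(vaddr))
                goto out;

        buffer->vaddr = vaddr;
        buffer->vmap_cnt++;
out:
        mutex_unlock(&buffer->lock);

        return vaddr;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        /* Tear the mapping down only when the last user is gone. */
        if (!--buffer->vmap_cnt) {
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }
        mutex_unlock(&buffer->lock);
}
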
315 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_release() local
316 struct cma_heap *cma_heap = buffer->heap; in cma_heap_dma_buf_release()
318 if (buffer->vmap_cnt > 0) { in cma_heap_dma_buf_release()
319 WARN(1, "%s: buffer still mapped in the kernel\n", __func__); in cma_heap_dma_buf_release()
320 vunmap(buffer->vaddr); in cma_heap_dma_buf_release()
324 kfree(buffer->pages); in cma_heap_dma_buf_release()
326 cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); in cma_heap_dma_buf_release()
327 kfree(buffer); in cma_heap_dma_buf_release()
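
Lines 315-327 are the release path: warn about (and undo) a leaked kernel mapping, free the per-page pointer array, then hand the contiguous block back to the CMA area. Only the vaddr reset inside the warning branch is not visible in the matches and is assumed.

static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct cma_heap *cma_heap = buffer->heap;

        /* A leftover kernel mapping at release time is a caller bug. */
        if (buffer->vmap_cnt > 0) {
                WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }

        /* Free the page-pointer array, then return the memory to CMA. */
        kfree(buffer->pages);
        cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
        kfree(buffer);
}
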
353 struct cma_heap_buffer *buffer; in cma_heap_do_allocate() local
364 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in cma_heap_do_allocate()
365 if (!buffer) in cma_heap_do_allocate()
368 buffer->uncached = uncached; in cma_heap_do_allocate()
370 INIT_LIST_HEAD(&buffer->attachments); in cma_heap_do_allocate()
371 mutex_init(&buffer->lock); in cma_heap_do_allocate()
372 buffer->len = size; in cma_heap_do_allocate()
404 buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL); in cma_heap_do_allocate()
405 if (!buffer->pages) { in cma_heap_do_allocate()
411 buffer->pages[pg] = &cma_pages[pg]; in cma_heap_do_allocate()
413 buffer->cma_pages = cma_pages; in cma_heap_do_allocate()
414 buffer->heap = cma_heap; in cma_heap_do_allocate()
415 buffer->pagecount = pagecount; in cma_heap_do_allocate()
420 exp_info.size = buffer->len; in cma_heap_do_allocate()
422 exp_info.priv = buffer; in cma_heap_do_allocate()
429 if (buffer->uncached) { in cma_heap_do_allocate()
430 dma = dma_map_page(dma_heap_get_dev(heap), buffer->cma_pages, 0, in cma_heap_do_allocate()
431 buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE); in cma_heap_do_allocate()
433 buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE); in cma_heap_do_allocate()
439 kfree(buffer->pages); in cma_heap_do_allocate()
443 kfree(buffer); in cma_heap_do_allocate()
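
Lines 353-443 are the allocation path: set up the buffer bookkeeping, carve a contiguous block out of the heap's CMA area, build the page array, export the dma-buf, and for uncached buffers perform one dma_map_page()/dma_unmap_page() cycle so cache maintenance is done before anyone maps the buffer write-combining. Much of the body (size/alignment computation, the zeroing of the pages, the exact error labels, and the unmap after the initial sync) lies between the visible matches, so the sketch below fills those gaps with assumptions; the dma_buf_ops wiring is omitted.

static struct dma_buf *cma_heap_do_allocate(struct dma_heap *heap,
                                            unsigned long len,
                                            unsigned long fd_flags,
                                            unsigned long heap_flags,
                                            bool uncached)
{
        struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
        struct cma_heap_buffer *buffer;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        size_t size = PAGE_ALIGN(len);
        pgoff_t pagecount = size >> PAGE_SHIFT;
        unsigned long align = get_order(size);
        struct page *cma_pages;
        struct dma_buf *dmabuf;
        int ret = -ENOMEM;
        pgoff_t pg;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->uncached = uncached;
        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        buffer->len = size;

        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        /* Carve a physically contiguous block out of the CMA area. */
        cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
        if (!cma_pages)
                goto free_buffer;

        /* (The real allocator zeroes the pages at this point; elided.) */

        /* Page array so attach/mmap/vmap can treat this as a page list. */
        buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
        if (!buffer->pages)
                goto free_cma;
        for (pg = 0; pg < pagecount; pg++)
                buffer->pages[pg] = &cma_pages[pg];

        buffer->cma_pages = cma_pages;
        buffer->heap = cma_heap;
        buffer->pagecount = pagecount;

        /* Export the buffer as a dma-buf; exp_info.ops (the dma_buf_ops
         * table collecting the callbacks above) is omitted in this sketch. */
        exp_info.exp_name = dma_heap_get_name(heap);
        exp_info.size = buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;
        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto free_pages;
        }

        if (buffer->uncached) {
                /* Initial cache maintenance so CPU-cached (zeroed) data
                 * cannot later conflict with uncached mappings. */
                dma_addr_t dma = dma_map_page(dma_heap_get_dev(heap), buffer->cma_pages, 0,
                                              buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
                dma_unmap_page(dma_heap_get_dev(heap), dma,
                               buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
        }

        return dmabuf;

free_pages:
        kfree(buffer->pages);
free_cma:
        cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
        kfree(buffer);
        return ERR_PTR(ret);
}
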
461 struct cma_heap_buffer *buffer; in cma_heap_get_phys() local
473 buffer = dmabuf->priv; in cma_heap_get_phys()
474 if (IS_ERR_OR_NULL(buffer)) in cma_heap_get_phys()
477 if (buffer->heap != cma_heap) in cma_heap_get_phys()
480 phys->paddr = page_to_phys(buffer->cma_pages); in cma_heap_get_phys()
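
Lines 461-480 are a physical-address query, a vendor extension rather than an upstream dma-buf heap interface. The sketch below is speculative: the query structure (cma_heap_phys_query) and the fd-based dma_buf lookup are hypothetical names introduced only to make the fragment self-contained; the visible matches contribute the priv/heap sanity checks and the final page_to_phys().

struct cma_heap_phys_query {            /* hypothetical */
        int fd;                         /* dma-buf fd supplied by the caller */
        phys_addr_t paddr;              /* filled in on success */
};

static int cma_heap_get_phys(struct dma_heap *heap,
                             struct cma_heap_phys_query *phys)
{
        struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
        struct cma_heap_buffer *buffer;
        struct dma_buf *dmabuf;
        int ret = 0;

        dmabuf = dma_buf_get(phys->fd);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        buffer = dmabuf->priv;
        if (IS_ERR_OR_NULL(buffer)) {
                ret = -EINVAL;
                goto out;
        }

        /* Only answer for buffers that actually belong to this heap. */
        if (buffer->heap != cma_heap) {
                ret = -EINVAL;
                goto out;
        }

        /* CMA buffers are physically contiguous, so one address suffices. */
        phys->paddr = page_to_phys(buffer->cma_pages);
out:
        dma_buf_put(dmabuf);
        return ret;
}
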