Lines matching refs: alloc. Each match below is a use of the alloc pointer in the binder buffer allocator; the trailing "in <function>() argument" / "local" note records the function containing the match and how alloc is declared there.
61 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, in binder_alloc_buffer_size() argument
64 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
65 return alloc->buffer + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
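The two matches from binder_alloc_buffer_size() show how a buffer's size is derived: it runs from its own user_data either to the end of the mapped area (when it is the last entry on alloc->buffers) or, implicitly, to the start of the next buffer. A minimal userspace sketch of that rule, with simplified stand-in types (buf, area and their fields are hypothetical, not the kernel structures):

#include <stddef.h>

struct buf {
    struct buf *next;        /* next buffer in address order, NULL for the last one */
    char       *user_data;   /* start of this buffer's payload */
};

struct area {
    char   *buffer;          /* start of the whole mapped area */
    size_t  buffer_size;     /* total size of the mapped area */
};

/* Size = gap to the next buffer, or to the end of the area for the last buffer,
 * mirroring the list_is_last() branch visible above. */
size_t buf_size(const struct area *a, const struct buf *b)
{
    if (b->next == NULL)
        return (size_t)(a->buffer + a->buffer_size - b->user_data);
    return (size_t)(b->next->user_data - b->user_data);
}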
69 static void binder_insert_free_buffer(struct binder_alloc *alloc, in binder_insert_free_buffer() argument
72 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
80 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); in binder_insert_free_buffer()
84 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
91 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
99 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
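binder_insert_free_buffer() walks the free_buffers tree comparing sizes and links the new node where the walk falls off, then rebalances with rb_insert_color(). The same ordered-insert idea on a plain (unbalanced) binary tree, as a sketch only (free_node and insert_free are made-up names):

#include <stddef.h>

struct free_node {
    size_t size;                      /* key: the free buffer's size */
    struct free_node *left, *right;
};

/* Descend by size and attach the node at the empty slot the walk ends on;
 * the kernel version additionally keeps the tree balanced (red-black). */
void insert_free(struct free_node **root, struct free_node *node)
{
    while (*root) {
        if (node->size < (*root)->size)
            root = &(*root)->left;
        else
            root = &(*root)->right;
    }
    node->left = node->right = NULL;
    *root = node;
}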
103 struct binder_alloc *alloc, struct binder_buffer *new_buffer) in binder_insert_allocated_buffer_locked() argument
105 struct rb_node **p = &alloc->allocated_buffers.rb_node; in binder_insert_allocated_buffer_locked()
124 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); in binder_insert_allocated_buffer_locked()
128 struct binder_alloc *alloc, in binder_alloc_prepare_to_free_locked() argument
131 struct rb_node *n = alloc->allocated_buffers.rb_node; in binder_alloc_prepare_to_free_locked()
171 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, in binder_alloc_prepare_to_free() argument
176 mutex_lock(&alloc->mutex); in binder_alloc_prepare_to_free()
177 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
178 mutex_unlock(&alloc->mutex); in binder_alloc_prepare_to_free()
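binder_alloc_prepare_to_free() is the usual locked/unlocked split: the public function takes alloc->mutex and forwards to the _locked helper that does the tree lookup. A pthread sketch of the same wrapper shape (the names and the lookup body here are placeholders):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *prepare_to_free_locked(unsigned long user_ptr)
{
    /* ... look user_ptr up in the allocated-buffers tree ... */
    (void)user_ptr;
    return NULL;
}

/* Public entry point: take the per-alloc mutex, delegate, release. */
void *prepare_to_free(unsigned long user_ptr)
{
    void *buffer;

    pthread_mutex_lock(&alloc_mutex);
    buffer = prepare_to_free_locked(user_ptr);
    pthread_mutex_unlock(&alloc_mutex);
    return buffer;
}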
182 static int binder_update_page_range(struct binder_alloc *alloc, int allocate, in binder_update_page_range() argument
193 "%d: %s pages %pK-%pK\n", alloc->pid, in binder_update_page_range()
199 trace_binder_update_page_range(alloc, allocate, start, end); in binder_update_page_range()
205 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; in binder_update_page_range()
212 if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) in binder_update_page_range()
213 mm = alloc->vma_vm_mm; in binder_update_page_range()
217 vma = alloc->vma; in binder_update_page_range()
223 alloc->pid); in binder_update_page_range()
232 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
233 page = &alloc->pages[index]; in binder_update_page_range()
236 trace_binder_alloc_lru_start(alloc, index); in binder_update_page_range()
241 trace_binder_alloc_lru_end(alloc, index); in binder_update_page_range()
248 trace_binder_alloc_page_start(alloc, index); in binder_update_page_range()
254 alloc->pid, page_addr); in binder_update_page_range()
257 page->alloc = alloc; in binder_update_page_range()
264 alloc->pid, user_page_addr); in binder_update_page_range()
268 if (index + 1 > alloc->pages_high) in binder_update_page_range()
269 alloc->pages_high = index + 1; in binder_update_page_range()
271 trace_binder_alloc_page_end(alloc, index); in binder_update_page_range()
284 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
285 page = &alloc->pages[index]; in binder_update_page_range()
287 trace_binder_free_lru_start(alloc, index); in binder_update_page_range()
292 trace_binder_free_lru_end(alloc, index); in binder_update_page_range()
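Inside binder_update_page_range() every address in the affected range is mapped to its bookkeeping slot with (page_addr - alloc->buffer) / PAGE_SIZE, and that slot is what the allocate and free paths (and the LRU tracepoints) operate on. A small sketch of the index computation, assuming 4 KiB pages (page_slot and slot_for are illustrative names, not kernel types):

#include <stddef.h>

#define PAGE_SIZE 4096UL   /* assumption: 4 KiB pages */

/* Hypothetical per-page bookkeeping slot; the kernel slot also carries an
 * LRU hook and a back-pointer to its alloc. */
struct page_slot {
    void *page_ptr;        /* NULL until the backing page is allocated */
};

/* Translate an address inside the buffer area into its pages[] slot, the way
 * binder_update_page_range() computes (page_addr - alloc->buffer) / PAGE_SIZE. */
struct page_slot *slot_for(struct page_slot *pages, char *area_start, char *page_addr)
{
    size_t index = (size_t)(page_addr - area_start) / PAGE_SIZE;

    return &pages[index];
}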
314 static inline void binder_alloc_set_vma(struct binder_alloc *alloc, in binder_alloc_set_vma() argument
318 alloc->vma_vm_mm = vma->vm_mm; in binder_alloc_set_vma()
326 alloc->vma = vma; in binder_alloc_set_vma()
330 struct binder_alloc *alloc) in binder_alloc_get_vma() argument
334 if (alloc->vma) { in binder_alloc_get_vma()
337 vma = alloc->vma; in binder_alloc_get_vma()
342 static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) in debug_low_async_space_locked() argument
356 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in debug_low_async_space_locked()
363 total_alloc_size += binder_alloc_buffer_size(alloc, buffer) in debug_low_async_space_locked()
373 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { in debug_low_async_space_locked()
376 alloc->pid, pid, num_buffers, total_alloc_size); in debug_low_async_space_locked()
377 if (!alloc->oneway_spam_detected) { in debug_low_async_space_locked()
378 alloc->oneway_spam_detected = true; in debug_low_async_space_locked()
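debug_low_async_space_locked() tallies, per sending pid, how many async buffers are outstanding and how much space they pin, then flags the sender once either threshold visible above is crossed (more than 50 buffers, or more than a quarter of the whole area). The check itself, extracted as a sketch (the function and parameter names are made up):

#include <stdbool.h>
#include <stddef.h>

/* Thresholds taken from the matches above: > 50 outstanding async buffers,
 * or more than buffer_size / 4 pinned by one sender. */
bool looks_like_oneway_spam(size_t num_buffers, size_t total_alloc_size,
                            size_t buffer_size)
{
    return num_buffers > 50 || total_alloc_size > buffer_size / 4;
}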
386 struct binder_alloc *alloc, in binder_alloc_new_buf_locked() argument
393 struct rb_node *n = alloc->free_buffers.rb_node; in binder_alloc_new_buf_locked()
402 if (!binder_alloc_get_vma(alloc)) { in binder_alloc_new_buf_locked()
405 alloc->pid); in binder_alloc_new_buf_locked()
415 alloc->pid, data_size, offsets_size); in binder_alloc_new_buf_locked()
422 alloc->pid, extra_buffers_size); in binder_alloc_new_buf_locked()
425 trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async); in binder_alloc_new_buf_locked()
427 alloc->free_async_space < size + sizeof(struct binder_buffer)) { in binder_alloc_new_buf_locked()
430 alloc->pid, size); in binder_alloc_new_buf_locked()
440 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
460 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in binder_alloc_new_buf_locked()
463 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
469 for (n = rb_first(&alloc->free_buffers); n != NULL; in binder_alloc_new_buf_locked()
472 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
480 alloc->pid, size); in binder_alloc_new_buf_locked()
490 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
495 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
504 ret = binder_update_page_range(alloc, 1, (void __user *) in binder_alloc_new_buf_locked()
515 __func__, alloc->pid); in binder_alloc_new_buf_locked()
521 binder_insert_free_buffer(alloc, new_buffer); in binder_alloc_new_buf_locked()
524 rb_erase(best_fit, &alloc->free_buffers); in binder_alloc_new_buf_locked()
527 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
530 alloc->pid, size, buffer); in binder_alloc_new_buf_locked()
538 alloc->free_async_space -= size + sizeof(struct binder_buffer); in binder_alloc_new_buf_locked()
541 alloc->pid, size, alloc->free_async_space); in binder_alloc_new_buf_locked()
542 if (alloc->free_async_space < alloc->buffer_size / 10) { in binder_alloc_new_buf_locked()
548 buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid); in binder_alloc_new_buf_locked()
550 alloc->oneway_spam_detected = false; in binder_alloc_new_buf_locked()
556 binder_update_page_range(alloc, 0, (void __user *) in binder_alloc_new_buf_locked()
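binder_alloc_new_buf_locked() does a best-fit search of the size-ordered free_buffers tree: descend, remember the smallest node that is still large enough, and fall back to that remembered node when no exact-size match exists. A compact sketch of that walk over an ordinary binary search tree (free_buf and best_fit_search are stand-in names; the locking, page provisioning and async-space accounting from the matches above are omitted):

#include <stddef.h>

struct free_buf {
    size_t buffer_size;               /* key: size of this free buffer */
    struct free_buf *left, *right;
};

/* Remember the smallest buffer that still fits and keep looking for a tighter
 * fit to the left; a node that is too small sends the walk to the right. */
struct free_buf *best_fit_search(struct free_buf *root, size_t size)
{
    struct free_buf *best = NULL;

    while (root) {
        if (size <= root->buffer_size) {
            best = root;
            root = root->left;
        } else {
            root = root->right;
        }
    }
    return best;
}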
578 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, in binder_alloc_new_buf() argument
587 mutex_lock(&alloc->mutex); in binder_alloc_new_buf()
588 buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, in binder_alloc_new_buf()
590 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
605 static void binder_delete_free_buffer(struct binder_alloc *alloc, in binder_delete_free_buffer() argument
611 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
618 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
622 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
628 alloc->pid, in binder_delete_free_buffer()
637 alloc->pid, buffer->user_data); in binder_delete_free_buffer()
644 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
647 binder_update_page_range(alloc, 0, buffer_start_page(buffer), in binder_delete_free_buffer()
654 static void binder_free_buf_locked(struct binder_alloc *alloc, in binder_free_buf_locked() argument
659 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
667 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
672 BUG_ON(buffer->user_data < alloc->buffer); in binder_free_buf_locked()
673 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); in binder_free_buf_locked()
676 alloc->free_async_space += buffer_size + sizeof(struct binder_buffer); in binder_free_buf_locked()
680 alloc->pid, size, alloc->free_async_space); in binder_free_buf_locked()
683 binder_update_page_range(alloc, 0, in binder_free_buf_locked()
688 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
690 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
694 rb_erase(&next->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
695 binder_delete_free_buffer(alloc, next); in binder_free_buf_locked()
698 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
702 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
703 rb_erase(&prev->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
707 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
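binder_free_buf_locked() returns the buffer's pages, removes it from allocated_buffers, and then coalesces: a free successor on alloc->buffers is absorbed into this buffer, and a free predecessor absorbs this buffer, so two adjacent free entries never coexist. A sketch of that merge step on a plain doubly linked, address-ordered list (range and release_range are illustrative names; the rb-tree and kfree bookkeeping are left out):

#include <stdbool.h>
#include <stddef.h>

/* Address-ordered list of buffers, as alloc->buffers is. */
struct range {
    struct range *prev, *next;
    bool free;
};

/* Release a range and merge it with any free neighbour. */
void release_range(struct range *r)
{
    r->free = true;

    if (r->next && r->next->free) {     /* absorb the free successor */
        struct range *n = r->next;
        r->next = n->next;
        if (n->next)
            n->next->prev = r;
    }
    if (r->prev && r->prev->free) {     /* let the free predecessor absorb us */
        struct range *p = r->prev;
        p->next = r->next;
        if (r->next)
            r->next->prev = p;
    }
}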
710 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
719 void binder_alloc_free_buf(struct binder_alloc *alloc, in binder_alloc_free_buf() argument
731 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_free_buf()
734 mutex_lock(&alloc->mutex); in binder_alloc_free_buf()
735 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
736 mutex_unlock(&alloc->mutex); in binder_alloc_free_buf()
752 int binder_alloc_mmap_handler(struct binder_alloc *alloc, in binder_alloc_mmap_handler() argument
760 if (alloc->buffer_size) { in binder_alloc_mmap_handler()
765 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, in binder_alloc_mmap_handler()
769 alloc->buffer = (void __user *)vma->vm_start; in binder_alloc_mmap_handler()
771 alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, in binder_alloc_mmap_handler()
772 sizeof(alloc->pages[0]), in binder_alloc_mmap_handler()
774 if (alloc->pages == NULL) { in binder_alloc_mmap_handler()
787 buffer->user_data = alloc->buffer; in binder_alloc_mmap_handler()
788 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
790 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
791 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
792 binder_alloc_set_vma(alloc, vma); in binder_alloc_mmap_handler()
793 mmgrab(alloc->vma_vm_mm); in binder_alloc_mmap_handler()
798 kfree(alloc->pages); in binder_alloc_mmap_handler()
799 alloc->pages = NULL; in binder_alloc_mmap_handler()
801 alloc->buffer = NULL; in binder_alloc_mmap_handler()
803 alloc->buffer_size = 0; in binder_alloc_mmap_handler()
808 alloc->pid, vma->vm_start, vma->vm_end, in binder_alloc_mmap_handler()
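binder_alloc_mmap_handler() refuses a second mmap, records the area's start and size, allocates one pages[] slot per page with kcalloc(), seeds the buffer list with a single free buffer covering the whole area, and reserves half the area for async transactions (free_async_space = buffer_size / 2). A simplified userspace sketch of that setup (alloc_area and area_setup are invented names; 4 KiB pages assumed):

#include <stddef.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL   /* assumption: 4 KiB pages */

struct alloc_area {
    char   *buffer;             /* userspace start of the mapping */
    size_t  buffer_size;
    void  **pages;              /* one slot per page, lazily populated */
    size_t  free_async_space;
};

int area_setup(struct alloc_area *a, char *vm_start, size_t vm_len)
{
    if (a->buffer_size)                      /* already mapped once */
        return -1;

    a->buffer_size = vm_len;
    a->buffer = vm_start;
    a->pages = calloc(a->buffer_size / PAGE_SIZE, sizeof(a->pages[0]));
    if (!a->pages)
        return -1;

    a->free_async_space = a->buffer_size / 2;  /* half the area for oneway work */
    return 0;
}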
814 void binder_alloc_deferred_release(struct binder_alloc *alloc) in binder_alloc_deferred_release() argument
821 mutex_lock(&alloc->mutex); in binder_alloc_deferred_release()
822 BUG_ON(alloc->vma); in binder_alloc_deferred_release()
824 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
831 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_deferred_release()
834 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
838 while (!list_empty(&alloc->buffers)) { in binder_alloc_deferred_release()
839 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
844 WARN_ON_ONCE(!list_empty(&alloc->buffers)); in binder_alloc_deferred_release()
849 if (alloc->pages) { in binder_alloc_deferred_release()
852 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
856 if (!alloc->pages[i].page_ptr) in binder_alloc_deferred_release()
860 &alloc->pages[i].lru); in binder_alloc_deferred_release()
861 page_addr = alloc->buffer + i * PAGE_SIZE; in binder_alloc_deferred_release()
864 __func__, alloc->pid, i, page_addr, in binder_alloc_deferred_release()
866 __free_page(alloc->pages[i].page_ptr); in binder_alloc_deferred_release()
869 kfree(alloc->pages); in binder_alloc_deferred_release()
871 mutex_unlock(&alloc->mutex); in binder_alloc_deferred_release()
872 if (alloc->vma_vm_mm) in binder_alloc_deferred_release()
873 mmdrop(alloc->vma_vm_mm); in binder_alloc_deferred_release()
877 __func__, alloc->pid, buffers, page_count); in binder_alloc_deferred_release()
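binder_alloc_deferred_release() tears everything down under alloc->mutex: it frees whatever is still on allocated_buffers, drains the alloc->buffers list, and finally walks the pages[] array releasing any slot that was ever populated before freeing the array itself. The page-freeing walk, as a userspace sketch (area_teardown is a made-up name; LRU removal and tracing are omitted):

#include <stddef.h>
#include <stdlib.h>

void area_teardown(void **pages, size_t nr_pages)
{
    size_t i;

    for (i = 0; i < nr_pages; i++) {
        if (!pages[i])
            continue;            /* never faulted in, nothing to free */
        free(pages[i]);
    }
    free(pages);
}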
899 struct binder_alloc *alloc) in binder_alloc_print_allocated() argument
903 mutex_lock(&alloc->mutex); in binder_alloc_print_allocated()
904 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_print_allocated()
907 mutex_unlock(&alloc->mutex); in binder_alloc_print_allocated()
916 struct binder_alloc *alloc) in binder_alloc_print_pages() argument
924 mutex_lock(&alloc->mutex); in binder_alloc_print_pages()
929 if (binder_alloc_get_vma(alloc) != NULL) { in binder_alloc_print_pages()
930 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
931 page = &alloc->pages[i]; in binder_alloc_print_pages()
940 mutex_unlock(&alloc->mutex); in binder_alloc_print_pages()
942 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); in binder_alloc_print_pages()
951 int binder_alloc_get_allocated_count(struct binder_alloc *alloc) in binder_alloc_get_allocated_count() argument
956 mutex_lock(&alloc->mutex); in binder_alloc_get_allocated_count()
957 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
959 mutex_unlock(&alloc->mutex); in binder_alloc_get_allocated_count()
972 void binder_alloc_vma_close(struct binder_alloc *alloc) in binder_alloc_vma_close() argument
974 binder_alloc_set_vma(alloc, NULL); in binder_alloc_vma_close()
996 struct binder_alloc *alloc; in binder_alloc_free_page() local
1001 alloc = page->alloc; in binder_alloc_free_page()
1002 if (!mutex_trylock(&alloc->mutex)) in binder_alloc_free_page()
1008 index = page - alloc->pages; in binder_alloc_free_page()
1009 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
1011 mm = alloc->vma_vm_mm; in binder_alloc_free_page()
1016 vma = binder_alloc_get_vma(alloc); in binder_alloc_free_page()
1022 trace_binder_unmap_user_start(alloc, index); in binder_alloc_free_page()
1026 trace_binder_unmap_user_end(alloc, index); in binder_alloc_free_page()
1031 trace_binder_unmap_kernel_start(alloc, index); in binder_alloc_free_page()
1036 trace_binder_unmap_kernel_end(alloc, index); in binder_alloc_free_page()
1039 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1046 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
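binder_alloc_free_page() is the reclaim (shrinker) callback: it recovers the owning alloc from page->alloc and only trylocks alloc->mutex, backing off when the lock is contended instead of blocking, before unmapping the user page and freeing the backing page. The trylock-and-back-off shape, sketched with pthreads (try_reclaim is a made-up name):

#include <pthread.h>
#include <stdbool.h>

bool try_reclaim(pthread_mutex_t *owner_mutex)
{
    if (pthread_mutex_trylock(owner_mutex) != 0)
        return false;            /* owner is busy; skip this page for now */

    /* ... unmap the user page and free the backing page here ... */

    pthread_mutex_unlock(owner_mutex);
    return true;
}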
1081 void binder_alloc_init(struct binder_alloc *alloc) in binder_alloc_init() argument
1083 alloc->pid = current->group_leader->pid; in binder_alloc_init()
1084 mutex_init(&alloc->mutex); in binder_alloc_init()
1085 INIT_LIST_HEAD(&alloc->buffers); in binder_alloc_init()
1119 static inline bool check_buffer(struct binder_alloc *alloc, in check_buffer() argument
1123 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer()
1151 static struct page *binder_alloc_get_page(struct binder_alloc *alloc, in binder_alloc_get_page() argument
1157 (buffer->user_data - alloc->buffer); in binder_alloc_get_page()
1162 lru_page = &alloc->pages[index]; in binder_alloc_get_page()
1174 static void binder_alloc_clear_buf(struct binder_alloc *alloc, in binder_alloc_clear_buf() argument
1177 size_t bytes = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_clear_buf()
1186 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
1210 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, in binder_alloc_copy_user_to_buffer() argument
1216 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_copy_user_to_buffer()
1226 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1241 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc, in binder_alloc_do_buffer_copy() argument
1249 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_do_buffer_copy()
1259 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1281 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, in binder_alloc_copy_to_buffer() argument
1287 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, in binder_alloc_copy_to_buffer()
1291 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, in binder_alloc_copy_from_buffer() argument
1297 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, in binder_alloc_copy_from_buffer()
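The copy helpers at the bottom (binder_alloc_copy_user_to_buffer() and binder_alloc_do_buffer_copy()) first validate the offset and length with check_buffer(), then copy page by page: each iteration resolves the page for the current offset via binder_alloc_get_page() and copies at most up to the end of that page. The chunking loop, as a userspace sketch where the pages are just separately allocated byte arrays (copy_to_paged is a made-up name; 4 KiB pages assumed):

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096UL   /* assumption: 4 KiB pages */

/* Copy bytes into a "buffer" made of non-contiguous pages, never crossing a
 * page boundary within a single memcpy(). */
void copy_to_paged(char **pages, size_t buffer_offset,
                   const char *src, size_t bytes)
{
    while (bytes) {
        size_t index = buffer_offset / PAGE_SIZE;
        size_t pgoff = buffer_offset % PAGE_SIZE;
        size_t chunk = bytes < PAGE_SIZE - pgoff ? bytes : PAGE_SIZE - pgoff;

        memcpy(pages[index] + pgoff, src, chunk);
        bytes         -= chunk;
        buffer_offset += chunk;
        src           += chunk;
    }
}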