Lines matching full:buffer in drivers/android/binder_alloc.c

51 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) in binder_buffer_next() argument
53 return list_entry(buffer->entry.next, struct binder_buffer, entry); in binder_buffer_next()
56 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) in binder_buffer_prev() argument
58 return list_entry(buffer->entry.prev, struct binder_buffer, entry); in binder_buffer_prev()
62 struct binder_buffer *buffer) in binder_alloc_buffer_size() argument
64 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
65 return alloc->buffer + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
66 return binder_buffer_next(buffer)->user_data - buffer->user_data; in binder_alloc_buffer_size()
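binder_alloc_buffer_size() never stores a size field: a buffer's size is the distance from its start to the next buffer's start in the address-ordered list, or to the end of the mapping for the last entry. A minimal userspace sketch of that technique, with hypothetical stand-in types (a plain next pointer instead of the kernel's list_head):

#include <stdio.h>
#include <stddef.h>

struct buf {
    char       *start;  /* where this buffer begins in the mapping */
    struct buf *next;   /* next buffer in address order, NULL if last */
};

/* Mirrors binder_alloc_buffer_size(): the last buffer runs to the end
 * of the region; any other buffer runs up to its successor's start. */
static size_t buf_size(char *region, size_t region_size, struct buf *b)
{
    if (!b->next)
        return region + region_size - b->start;
    return b->next->start - b->start;
}

int main(void)
{
    char region[4096];
    struct buf b2 = { region + 1024, NULL };
    struct buf b1 = { region,        &b2  };

    printf("b1: %zu bytes, b2: %zu bytes\n",
           buf_size(region, sizeof(region), &b1),
           buf_size(region, sizeof(region), &b2));  /* 1024, 3072 */
    return 0;
}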
74 struct binder_buffer *buffer; in binder_insert_free_buffer() local
83 "%d: add free buffer, size %zd, at %pK\n", in binder_insert_free_buffer()
88 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_free_buffer()
89 BUG_ON(!buffer->free); in binder_insert_free_buffer()
91 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
107 struct binder_buffer *buffer; in binder_insert_allocated_buffer_locked() local
113 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_allocated_buffer_locked()
114 BUG_ON(buffer->free); in binder_insert_allocated_buffer_locked()
116 if (new_buffer->user_data < buffer->user_data) in binder_insert_allocated_buffer_locked()
118 else if (new_buffer->user_data > buffer->user_data) in binder_insert_allocated_buffer_locked()
132 struct binder_buffer *buffer; in binder_alloc_prepare_to_free_locked() local
138 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_prepare_to_free_locked()
139 BUG_ON(buffer->free); in binder_alloc_prepare_to_free_locked()
141 if (uptr < buffer->user_data) in binder_alloc_prepare_to_free_locked()
143 else if (uptr > buffer->user_data) in binder_alloc_prepare_to_free_locked()
148 * free the buffer when in use by kernel or in binder_alloc_prepare_to_free_locked()
151 if (!buffer->allow_user_free) in binder_alloc_prepare_to_free_locked()
153 buffer->allow_user_free = 0; in binder_alloc_prepare_to_free_locked()
154 return buffer; in binder_alloc_prepare_to_free_locked()
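binder_alloc_prepare_to_free_locked() consumes allow_user_free under the allocator lock, so userspace can hand a buffer back at most once. A hypothetical userspace model of that one-shot handshake, with a pthread mutex standing in for the allocator lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
    bool allow_user_free;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns the buffer exactly once; a second caller gets NULL, the
 * same way the locked helper refuses a buffer whose allow_user_free
 * permit was already consumed. */
static struct buf *prepare_to_free(struct buf *b)
{
    struct buf *ret = NULL;

    pthread_mutex_lock(&lock);
    if (b->allow_user_free) {
        b->allow_user_free = false;   /* consume the one free permit */
        ret = b;
    }
    pthread_mutex_unlock(&lock);
    return ret;
}

int main(void)
{
    struct buf b = { .allow_user_free = true };

    printf("first:  %p\n", (void *)prepare_to_free(&b)); /* &b   */
    printf("second: %p\n", (void *)prepare_to_free(&b)); /* NULL */
    return 0;
}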
161 * binder_alloc_prepare_to_free() - get buffer given user ptr
163 * @user_ptr: User pointer to buffer data
165 * Validate userspace pointer to buffer data and return buffer corresponding to
166 * that user pointer. Search the rb tree for buffer that matches user data
169 * Return: Pointer to buffer or NULL
174 struct binder_buffer *buffer; in binder_alloc_prepare_to_free() local
177 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
179 return buffer; in binder_alloc_prepare_to_free()
205 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; in binder_update_page_range()
232 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
284 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
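All three hits use the same arithmetic: round the address down to a page boundary, subtract the mapping base, divide by the page size to index alloc->pages[]. A standalone sketch of that computation, assuming 4 KiB pages:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MY_PAGE_SIZE 4096u

int main(void)
{
    uintptr_t base = 0x10000000;                    /* mapping start (alloc->buffer) */
    uintptr_t addr = base + 3 * MY_PAGE_SIZE + 123; /* an address inside it */

    /* Round down to the page, then the same computation as
     * (page_addr - alloc->buffer) / PAGE_SIZE. */
    uintptr_t page_addr = addr & ~(uintptr_t)(MY_PAGE_SIZE - 1);
    size_t index = (page_addr - base) / MY_PAGE_SIZE;

    printf("page index = %zu\n", index);            /* 3 */
    return 0;
}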
320 * If we see alloc->vma is not NULL, buffer data structures set up in binder_alloc_set_vma()
352 struct binder_buffer *buffer; in debug_low_async_space_locked() local
358 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_low_async_space_locked()
359 if (buffer->pid != pid) in debug_low_async_space_locked()
361 if (!buffer->async_transaction) in debug_low_async_space_locked()
363 total_alloc_size += binder_alloc_buffer_size(alloc, buffer) in debug_low_async_space_locked()
370 * async space (which is 25% of total buffer size). Oneway spam is only in debug_low_async_space_locked()
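debug_low_async_space_locked() walks the allocated-buffers tree and totals the async buffers one PID holds. A simplified sketch over an array instead of the rbtree; the 50% cutoff here is illustrative only, the kernel's actual heuristic lives in the lines elided from this listing:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct buf {
    int    pid;
    bool   async_transaction;
    size_t size;               /* stands in for binder_alloc_buffer_size() */
};

static bool low_async_space(const struct buf *bufs, size_t n,
                            int pid, size_t async_space)
{
    size_t total = 0;

    for (size_t i = 0; i < n; i++) {
        if (bufs[i].pid != pid || !bufs[i].async_transaction)
            continue;          /* skip other PIDs and sync buffers */
        total += bufs[i].size;
    }
    /* Assumed policy: suspect the PID once it holds more than half
     * of the async space. */
    return total > async_space / 2;
}

int main(void)
{
    struct buf bufs[] = {
        { 100, true, 600 }, { 100, true, 700 }, { 200, true, 100 },
    };

    printf("%d\n", low_async_space(bufs, 3, 100, 2048)); /* 1 */
    return 0;
}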
394 struct binder_buffer *buffer; in binder_alloc_new_buf_locked() local
438 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
439 BUG_ON(!buffer->free); in binder_alloc_new_buf_locked()
440 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
462 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
463 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
471 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
472 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
489 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
490 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
494 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", in binder_alloc_new_buf_locked()
495 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
498 (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); in binder_alloc_new_buf_locked()
501 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); in binder_alloc_new_buf_locked()
505 PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); in binder_alloc_new_buf_locked()
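The free-tree walk above (partly elided in this listing) is a standard best-fit descent over a size-keyed tree: whenever the current node fits, remember it and go left looking for something tighter; otherwise go right. A sketch with a hand-rolled binary search tree standing in for the kernel rbtree:

#include <stddef.h>
#include <stdio.h>

struct fnode {
    size_t size;               /* key: implied size of the free chunk */
    struct fnode *left, *right;
};

static struct fnode *best_fit(struct fnode *n, size_t want)
{
    struct fnode *best = NULL;

    while (n) {
        if (want <= n->size) {
            best = n;          /* fits; look for something tighter */
            n = n->left;
        } else {
            n = n->right;      /* too small; need a bigger chunk */
        }
    }
    return best;               /* NULL means no free chunk is large enough */
}

int main(void)
{
    struct fnode a = { 128, NULL, NULL };
    struct fnode c = { 512, NULL, NULL };
    struct fnode b = { 256, &a, &c };   /* root */

    struct fnode *hit = best_fit(&b, 200);
    printf("best fit: %zu\n", hit ? hit->size : 0);  /* 256 */
    return 0;
}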
512 new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in binder_alloc_new_buf_locked()
514 pr_err("%s: %d failed to alloc new buffer struct\n", in binder_alloc_new_buf_locked()
518 new_buffer->user_data = (u8 __user *)buffer->user_data + size; in binder_alloc_new_buf_locked()
519 list_add(&new_buffer->entry, &buffer->entry); in binder_alloc_new_buf_locked()
525 buffer->free = 0; in binder_alloc_new_buf_locked()
526 buffer->allow_user_free = 0; in binder_alloc_new_buf_locked()
527 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
530 alloc->pid, size, buffer); in binder_alloc_new_buf_locked()
531 buffer->data_size = data_size; in binder_alloc_new_buf_locked()
532 buffer->offsets_size = offsets_size; in binder_alloc_new_buf_locked()
533 buffer->async_transaction = is_async; in binder_alloc_new_buf_locked()
534 buffer->extra_buffers_size = extra_buffers_size; in binder_alloc_new_buf_locked()
535 buffer->pid = pid; in binder_alloc_new_buf_locked()
536 buffer->oneway_spam_suspect = false; in binder_alloc_new_buf_locked()
546 * buffer size). in binder_alloc_new_buf_locked()
548 buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid); in binder_alloc_new_buf_locked()
553 return buffer; in binder_alloc_new_buf_locked()
557 PAGE_ALIGN((uintptr_t)buffer->user_data), in binder_alloc_new_buf_locked()
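When the chosen free chunk is larger than the request, lines 512-519 split it: a new descriptor is kzalloc'd for the tail at buffer->user_data + size and linked immediately after the current entry, so the remainder stays available. A simplified singly linked model of that split:

#include <stdio.h>
#include <stdlib.h>

struct buf {
    char *start;
    int   free;
    struct buf *next;
};

static int split(struct buf *b, size_t size)
{
    struct buf *tail = calloc(1, sizeof(*tail));

    if (!tail)
        return -1;                   /* kernel path logs and bails out */
    tail->start = b->start + size;   /* tail begins right after the request */
    tail->free = 1;
    tail->next = b->next;            /* link tail after b */
    b->next = tail;
    b->free = 0;                     /* b is now allocated */
    return 0;
}

int main(void)
{
    char region[1024];
    struct buf head = { region, 1, NULL };

    if (split(&head, 256) == 0)
        printf("tail starts at offset %td\n",
               head.next->start - region);   /* 256 */
    return 0;
}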
563 * binder_alloc_new_buf() - Allocate a new binder buffer
565 * @data_size: size of user data buffer
566 * @offsets_size: user specified buffer offset
568 * @is_async: buffer for async transaction
571 * Allocate a new buffer given the requested sizes. Returns
572 * the kernel version of the buffer pointer. The size allocated
576 * Return: The allocated buffer or %NULL if error
585 struct binder_buffer *buffer; in binder_alloc_new_buf() local
588 buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, in binder_alloc_new_buf()
591 return buffer; in binder_alloc_new_buf()
594 static void __user *buffer_start_page(struct binder_buffer *buffer) in buffer_start_page() argument
596 return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK); in buffer_start_page()
599 static void __user *prev_buffer_end_page(struct binder_buffer *buffer) in prev_buffer_end_page() argument
602 (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK); in prev_buffer_end_page()
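These two helpers drive the page-sharing tests in binder_delete_free_buffer(): a buffer whose start is page aligned does not share its first page with its predecessor, while an unaligned one does. Standalone versions of the arithmetic, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define MY_PAGE_MASK (~(uintptr_t)4095)

static uintptr_t start_page(uintptr_t addr)    { return addr & MY_PAGE_MASK; }
static uintptr_t prev_end_page(uintptr_t addr) { return (addr - 1) & MY_PAGE_MASK; }

int main(void)
{
    uintptr_t a = 0x10001000;   /* exactly page aligned */
    uintptr_t b = 0x10001234;   /* mid page */

    /* An aligned buffer does NOT share its first page with its
     * predecessor; an unaligned one does. */
    printf("%d %d\n",
           prev_end_page(a) == start_page(a),    /* 0: different pages */
           prev_end_page(b) == start_page(b));   /* 1: same page       */
    return 0;
}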
606 struct binder_buffer *buffer) in binder_delete_free_buffer() argument
611 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
612 prev = binder_buffer_prev(buffer); in binder_delete_free_buffer()
614 if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) { in binder_delete_free_buffer()
617 "%d: merge free, buffer %pK share page with %pK\n", in binder_delete_free_buffer()
618 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
622 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
623 next = binder_buffer_next(buffer); in binder_delete_free_buffer()
624 if (buffer_start_page(next) == buffer_start_page(buffer)) { in binder_delete_free_buffer()
627 "%d: merge free, buffer %pK share page with %pK\n", in binder_delete_free_buffer()
629 buffer->user_data, in binder_delete_free_buffer()
634 if (PAGE_ALIGNED(buffer->user_data)) { in binder_delete_free_buffer()
636 "%d: merge free, buffer start %pK is page aligned\n", in binder_delete_free_buffer()
637 alloc->pid, buffer->user_data); in binder_delete_free_buffer()
643 "%d: merge free, buffer %pK do not share page with %pK or %pK\n", in binder_delete_free_buffer()
644 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
647 binder_update_page_range(alloc, 0, buffer_start_page(buffer), in binder_delete_free_buffer()
648 buffer_start_page(buffer) + PAGE_SIZE); in binder_delete_free_buffer()
650 list_del(&buffer->entry); in binder_delete_free_buffer()
651 kfree(buffer); in binder_delete_free_buffer()
655 struct binder_buffer *buffer) in binder_free_buf_locked() argument
659 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
661 size = ALIGN(buffer->data_size, sizeof(void *)) + in binder_free_buf_locked()
662 ALIGN(buffer->offsets_size, sizeof(void *)) + in binder_free_buf_locked()
663 ALIGN(buffer->extra_buffers_size, sizeof(void *)); in binder_free_buf_locked()
667 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
669 BUG_ON(buffer->free); in binder_free_buf_locked()
671 BUG_ON(buffer->transaction != NULL); in binder_free_buf_locked()
672 BUG_ON(buffer->user_data < alloc->buffer); in binder_free_buf_locked()
673 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); in binder_free_buf_locked()
675 if (buffer->async_transaction) { in binder_free_buf_locked()
684 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data), in binder_free_buf_locked()
686 buffer->user_data + buffer_size) & PAGE_MASK)); in binder_free_buf_locked()
688 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
689 buffer->free = 1; in binder_free_buf_locked()
690 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
691 struct binder_buffer *next = binder_buffer_next(buffer); in binder_free_buf_locked()
698 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
699 struct binder_buffer *prev = binder_buffer_prev(buffer); in binder_free_buf_locked()
702 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
704 buffer = prev; in binder_free_buf_locked()
707 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
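The free path coalesces: a freed buffer absorbs a free successor, then folds into a free predecessor, so the list never holds two adjacent free entries. A hypothetical doubly linked model of that merge (the kernel's binder_delete_free_buffer() additionally returns now-unused pages, omitted here):

#include <stdio.h>
#include <stdlib.h>

struct buf {
    int free;
    struct buf *prev, *next;
};

static void delete_buf(struct buf *b)   /* stand-in for list_del + kfree */
{
    if (b->prev) b->prev->next = b->next;
    if (b->next) b->next->prev = b->prev;
    free(b);
}

static struct buf *free_buf(struct buf *b)
{
    b->free = 1;
    if (b->next && b->next->free)        /* absorb free successor */
        delete_buf(b->next);
    if (b->prev && b->prev->free) {      /* fold into free predecessor */
        struct buf *prev = b->prev;
        delete_buf(b);
        b = prev;
    }
    return b;                            /* survivor re-enters the free tree */
}

int main(void)
{
    struct buf *a = calloc(1, sizeof(*a));
    struct buf *m = calloc(1, sizeof(*m));
    struct buf *z = calloc(1, sizeof(*z));

    a->free = 1; a->next = m;
    m->prev = a; m->next = z;
    z->prev = m; z->free = 1;

    struct buf *s = free_buf(m);         /* merges all three into one */
    printf("survivor==a: %d, alone: %d\n", s == a, !s->next && !s->prev);
    free(s);
    return 0;
}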
711 struct binder_buffer *buffer);
713 * binder_alloc_free_buf() - free a binder buffer
715 * @buffer: kernel pointer to buffer
717 * Free the buffer allocated via binder_alloc_new_buf()
720 struct binder_buffer *buffer) in binder_alloc_free_buf() argument
730 if (buffer->clear_on_free) { in binder_alloc_free_buf()
731 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_free_buf()
732 buffer->clear_on_free = false; in binder_alloc_free_buf()
735 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
757 struct binder_buffer *buffer; in binder_alloc_mmap_handler() local
769 alloc->buffer = (void __user *)vma->vm_start; in binder_alloc_mmap_handler()
780 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in binder_alloc_mmap_handler()
781 if (!buffer) { in binder_alloc_mmap_handler()
783 failure_string = "alloc buffer struct"; in binder_alloc_mmap_handler()
787 buffer->user_data = alloc->buffer; in binder_alloc_mmap_handler()
788 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
789 buffer->free = 1; in binder_alloc_mmap_handler()
790 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
801 alloc->buffer = NULL; in binder_alloc_mmap_handler()
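On success the mmap handler seeds the allocator with a single free descriptor at the mapping base; because it is the only list entry, its implied size (see binder_alloc_buffer_size()) is the entire region. A minimal model of that setup:

#include <stdio.h>
#include <stdlib.h>

struct buf { char *start; int free; struct buf *next; };

int main(void)
{
    char region[64 * 1024];              /* hypothetical stand-in for the vma */
    struct buf *first = calloc(1, sizeof(*first));

    if (!first)
        return 1;                        /* kernel: "alloc buffer struct" path */
    first->start = region;               /* buffer->user_data = alloc->buffer */
    first->free = 1;                     /* whole mapping is one free buffer */
    /* Only entry => it is "last" => implied size = whole region. */
    printf("initial free buffer: %zu bytes\n",
           (size_t)(region + sizeof(region) - first->start));
    free(first);
    return 0;
}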
818 struct binder_buffer *buffer; in binder_alloc_deferred_release() local
825 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_deferred_release()
828 BUG_ON(buffer->transaction); in binder_alloc_deferred_release()
830 if (buffer->clear_on_free) { in binder_alloc_deferred_release()
831 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_deferred_release()
832 buffer->clear_on_free = false; in binder_alloc_deferred_release()
834 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
839 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
841 WARN_ON(!buffer->free); in binder_alloc_deferred_release()
843 list_del(&buffer->entry); in binder_alloc_deferred_release()
845 kfree(buffer); in binder_alloc_deferred_release()
861 page_addr = alloc->buffer + i * PAGE_SIZE; in binder_alloc_deferred_release()
881 struct binder_buffer *buffer) in print_binder_buffer() argument
884 prefix, buffer->debug_id, buffer->user_data, in print_binder_buffer()
885 buffer->data_size, buffer->offsets_size, in print_binder_buffer()
886 buffer->extra_buffers_size, in print_binder_buffer()
887 buffer->transaction ? "active" : "delivered"); in print_binder_buffer()
891 * binder_alloc_print_allocated() - print buffer info
895 * Prints information about every buffer associated with
905 print_binder_buffer(m, " buffer", in binder_alloc_print_allocated()
1009 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
1101 * check_buffer() - verify that buffer/offset is safe to access
1103 * @buffer: binder buffer to be accessed
1104 * @offset: offset into @buffer data
1108 * @buffer and that the buffer is currently active and not freeable.
1110 * allowed to touch the buffer in two cases:
1112 * 1) when the buffer is being created:
1113 * (buffer->free == 0 && buffer->allow_user_free == 0)
1114 * 2) when the buffer is being torn down:
1115 * (buffer->free == 0 && buffer->transaction == NULL).
1117 * Return: true if the buffer is safe to access
1120 struct binder_buffer *buffer, in check_buffer() argument
1123 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer()
1128 !buffer->free && in check_buffer()
1129 (!buffer->allow_user_free || !buffer->transaction); in check_buffer()
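The size test in check_buffer() (elided from this listing) has the overflow-safe shape "offset <= size && bytes <= size - offset" rather than the naive "offset + bytes <= size", which can wrap around for hostile inputs. A standalone demonstration of why that matters:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool range_ok(size_t size, size_t offset, size_t bytes)
{
    /* Neither comparison can overflow: size - offset is only computed
     * after offset <= size is known to hold. */
    return offset <= size && bytes <= size - offset;
}

int main(void)
{
    /* The naive check wraps: offset + bytes overflows to a small
     * value and falsely passes. */
    size_t size = 4096, offset = 4000, bytes = (size_t)-8;

    printf("safe: %d, naive: %d\n",
           range_ok(size, offset, bytes),
           offset + bytes <= size);   /* safe: 0, naive: 1 */
    return 0;
}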
1133 * binder_alloc_get_page() - get kernel pointer for given buffer offset
1135 * @buffer: binder buffer to be accessed
1136 * @buffer_offset: offset into @buffer data
1140 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
1144 * to a valid address within the @buffer and that @buffer is
1152 struct binder_buffer *buffer, in binder_alloc_get_page() argument
1157 (buffer->user_data - alloc->buffer); in binder_alloc_get_page()
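binder_alloc_get_page() translates a buffer offset into a pages[] index plus an offset within that page, measured from the mapping base. The same arithmetic in standalone form, assuming 4 KiB pages:

#include <stddef.h>
#include <stdio.h>

#define MY_PAGE_SIZE 4096u

static void get_page_index(size_t buffer_off_in_mapping, size_t buffer_offset,
                           size_t *index, size_t *pgoff)
{
    size_t off = buffer_off_in_mapping + buffer_offset;

    *index = off / MY_PAGE_SIZE;   /* which pages[] entry */
    *pgoff = off % MY_PAGE_SIZE;   /* where inside that page */
}

int main(void)
{
    size_t index, pgoff;

    /* buffer starts 5000 bytes into the mapping; access offset 200 */
    get_page_index(5000, 200, &index, &pgoff);
    printf("index=%zu pgoff=%zu\n", index, pgoff);   /* 1, 1104 */
    return 0;
}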
1168 * binder_alloc_clear_buf() - zero out buffer
1170 * @buffer: binder buffer to be cleared
1172 * memset the given buffer to 0
1175 struct binder_buffer *buffer) in binder_alloc_clear_buf() argument
1177 size_t bytes = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_clear_buf()
1186 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
1200 * @buffer: binder buffer to be accessed
1201 * @buffer_offset: offset into @buffer data
1202 * @from: userspace pointer to source buffer
1205 * Copy bytes from source userspace to target buffer.
1211 struct binder_buffer *buffer, in binder_alloc_copy_user_to_buffer() argument
1216 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_copy_user_to_buffer()
1226 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1243 struct binder_buffer *buffer, in binder_alloc_do_buffer_copy() argument
1249 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_do_buffer_copy()
1259 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1282 struct binder_buffer *buffer, in binder_alloc_copy_to_buffer() argument
1287 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, in binder_alloc_copy_to_buffer()
1293 struct binder_buffer *buffer, in binder_alloc_copy_from_buffer() argument
1297 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, in binder_alloc_copy_from_buffer()
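binder_alloc_clear_buf(), binder_alloc_copy_user_to_buffer() and binder_alloc_do_buffer_copy() all share one loop shape: clamp each step to the current page boundary, look up that page, operate, advance. Pages are looked up (and mapped) one at a time, so no single step may cross a boundary. A userspace sketch of the chunking, with memcpy standing in for the per-page kernel copy:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MY_PAGE_SIZE 4096u
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void copy_chunked(char *dst, const char *src, size_t off, size_t bytes)
{
    while (bytes) {
        size_t pgoff = off % MY_PAGE_SIZE;
        /* Never copy past the end of the current page. */
        size_t chunk = MIN(bytes, MY_PAGE_SIZE - pgoff);

        memcpy(dst + off, src, chunk);   /* kernel: look up page, map, copy */
        src   += chunk;
        off   += chunk;
        bytes -= chunk;
    }
}

int main(void)
{
    static char region[3 * MY_PAGE_SIZE];
    const char msg[] = "crosses a page boundary";

    /* Start 8 bytes before a page boundary so the copy is split. */
    copy_chunked(region, msg, MY_PAGE_SIZE - 8, sizeof(msg));
    printf("%s\n", region + MY_PAGE_SIZE - 8);
    return 0;
}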