xref: /OK3568_Linux_fs/kernel/drivers/android/binder_alloc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder_alloc.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2017 Google, Inc.
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/list.h>
12 #include <linux/sched/mm.h>
13 #include <linux/module.h>
14 #include <linux/rtmutex.h>
15 #include <linux/rbtree.h>
16 #include <linux/seq_file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/list_lru.h>
21 #include <linux/ratelimit.h>
22 #include <asm/cacheflush.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/sizes.h>
26 #include "binder_alloc.h"
27 #include "binder_trace.h"
28 #include <trace/hooks/binder.h>
29 
30 struct list_lru binder_alloc_lru;
31 
32 static DEFINE_MUTEX(binder_alloc_mmap_lock);
33 
34 enum {
35 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
36 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
37 	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
38 	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
39 };
40 static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
41 
42 module_param_named(debug_mask, binder_alloc_debug_mask,
43 		   uint, 0644);
44 
45 #define binder_alloc_debug(mask, x...) \
46 	do { \
47 		if (binder_alloc_debug_mask & mask) \
48 			pr_info_ratelimited(x); \
49 	} while (0)
50 
51 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
52 {
53 	return list_entry(buffer->entry.next, struct binder_buffer, entry);
54 }
55 
56 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
57 {
58 	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
59 }
60 
61 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
62 				       struct binder_buffer *buffer)
63 {
64 	if (list_is_last(&buffer->entry, &alloc->buffers))
65 		return alloc->buffer + alloc->buffer_size - buffer->user_data;
66 	return binder_buffer_next(buffer)->user_data - buffer->user_data;
67 }
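/*
 * A note on sizing: buffer sizes are implicit. A binder_buffer records
 * only its start address; its size is the gap to the next buffer in
 * address order, or to the end of the mapping for the last buffer. For
 * example, a buffer whose user_data begins at offset 0x100 with a
 * successor at offset 0x300 is 0x200 bytes long.
 */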
68 
69 static void binder_insert_free_buffer(struct binder_alloc *alloc,
70 				      struct binder_buffer *new_buffer)
71 {
72 	struct rb_node **p = &alloc->free_buffers.rb_node;
73 	struct rb_node *parent = NULL;
74 	struct binder_buffer *buffer;
75 	size_t buffer_size;
76 	size_t new_buffer_size;
77 
78 	BUG_ON(!new_buffer->free);
79 
80 	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
81 
82 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
83 		     "%d: add free buffer, size %zd, at %pK\n",
84 		      alloc->pid, new_buffer_size, new_buffer);
85 
86 	while (*p) {
87 		parent = *p;
88 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
89 		BUG_ON(!buffer->free);
90 
91 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
92 
93 		if (new_buffer_size < buffer_size)
94 			p = &parent->rb_left;
95 		else
96 			p = &parent->rb_right;
97 	}
98 	rb_link_node(&new_buffer->rb_node, parent, p);
99 	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
100 }
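/*
 * Free buffers live in an rbtree keyed by size, which makes the
 * allocator below a best-fit allocator: lookups walk left when the
 * request is smaller than the current node and right otherwise.
 * Equal-sized entries are permitted here (ties go right), unlike the
 * allocated tree, which is keyed by unique user_data address.
 */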
101 
102 static void binder_insert_allocated_buffer_locked(
103 		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
104 {
105 	struct rb_node **p = &alloc->allocated_buffers.rb_node;
106 	struct rb_node *parent = NULL;
107 	struct binder_buffer *buffer;
108 
109 	BUG_ON(new_buffer->free);
110 
111 	while (*p) {
112 		parent = *p;
113 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
114 		BUG_ON(buffer->free);
115 
116 		if (new_buffer->user_data < buffer->user_data)
117 			p = &parent->rb_left;
118 		else if (new_buffer->user_data > buffer->user_data)
119 			p = &parent->rb_right;
120 		else
121 			BUG();
122 	}
123 	rb_link_node(&new_buffer->rb_node, parent, p);
124 	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
125 }
126 
127 static struct binder_buffer *binder_alloc_prepare_to_free_locked(
128 		struct binder_alloc *alloc,
129 		uintptr_t user_ptr)
130 {
131 	struct rb_node *n = alloc->allocated_buffers.rb_node;
132 	struct binder_buffer *buffer;
133 	void __user *uptr;
134 
135 	uptr = (void __user *)user_ptr;
136 
137 	while (n) {
138 		buffer = rb_entry(n, struct binder_buffer, rb_node);
139 		BUG_ON(buffer->free);
140 
141 		if (uptr < buffer->user_data)
142 			n = n->rb_left;
143 		else if (uptr > buffer->user_data)
144 			n = n->rb_right;
145 		else {
146 			/*
147 			 * Guard against user threads attempting to
148 			 * free the buffer when in use by kernel or
149 			 * after it's already been freed.
150 			 */
151 			if (!buffer->allow_user_free)
152 				return ERR_PTR(-EPERM);
153 			buffer->allow_user_free = 0;
154 			return buffer;
155 		}
156 	}
157 	return NULL;
158 }
159 
160 /**
161  * binder_alloc_prepare_to_free() - get buffer given user ptr
162  * @alloc:	binder_alloc for this proc
163  * @user_ptr:	User pointer to buffer data
164  *
165  * Validate the userspace pointer to buffer data and return the buffer
166  * corresponding to that pointer. Searches the rb tree of allocated
167  * buffers for a buffer that matches the user data pointer.
168  *
169  * Return:	Pointer to buffer, NULL if none matches, or ERR_PTR(-EPERM)
170  */
171 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
172 						   uintptr_t user_ptr)
173 {
174 	struct binder_buffer *buffer;
175 
176 	mutex_lock(&alloc->mutex);
177 	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
178 	mutex_unlock(&alloc->mutex);
179 	return buffer;
180 }
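/*
 * A sketch of the expected calling pattern (binder.c handles the
 * BC_FREE_BUFFER command roughly along these lines; names abbreviated):
 *
 *	buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
 *	if (IS_ERR_OR_NULL(buffer))
 *		break;		(stale, in-kernel-use, or double free: ignore)
 *	binder_free_buf(proc, buffer);
 */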
181 
182 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
183 				    void __user *start, void __user *end)
184 {
185 	void __user *page_addr;
186 	unsigned long user_page_addr;
187 	struct binder_lru_page *page;
188 	struct vm_area_struct *vma = NULL;
189 	struct mm_struct *mm = NULL;
190 	bool need_mm = false;
191 
192 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
193 		     "%d: %s pages %pK-%pK\n", alloc->pid,
194 		     allocate ? "allocate" : "free", start, end);
195 
196 	if (end <= start)
197 		return 0;
198 
199 	trace_binder_update_page_range(alloc, allocate, start, end);
200 
201 	if (allocate == 0)
202 		goto free_range;
203 
204 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
205 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
206 		if (!page->page_ptr) {
207 			need_mm = true;
208 			break;
209 		}
210 	}
211 
212 	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
213 		mm = alloc->vma_vm_mm;
214 
215 	if (mm) {
216 		mmap_write_lock(mm);
217 		vma = alloc->vma;
218 	}
219 
220 	if (!vma && need_mm) {
221 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
222 				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
223 				   alloc->pid);
224 		goto err_no_vma;
225 	}
226 
227 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
228 		int ret;
229 		bool on_lru;
230 		size_t index;
231 
232 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
233 		page = &alloc->pages[index];
234 
235 		if (page->page_ptr) {
236 			trace_binder_alloc_lru_start(alloc, index);
237 
238 			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
239 			WARN_ON(!on_lru);
240 
241 			trace_binder_alloc_lru_end(alloc, index);
242 			continue;
243 		}
244 
245 		if (WARN_ON(!vma))
246 			goto err_page_ptr_cleared;
247 
248 		trace_binder_alloc_page_start(alloc, index);
249 		page->page_ptr = alloc_page(GFP_KERNEL |
250 					    __GFP_HIGHMEM |
251 					    __GFP_ZERO);
252 		if (!page->page_ptr) {
253 			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
254 				alloc->pid, page_addr);
255 			goto err_alloc_page_failed;
256 		}
257 		page->alloc = alloc;
258 		INIT_LIST_HEAD(&page->lru);
259 
260 		user_page_addr = (uintptr_t)page_addr;
261 		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
262 		if (ret) {
263 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
264 			       alloc->pid, user_page_addr);
265 			goto err_vm_insert_page_failed;
266 		}
267 
268 		if (index + 1 > alloc->pages_high)
269 			alloc->pages_high = index + 1;
270 
271 		trace_binder_alloc_page_end(alloc, index);
272 	}
273 	if (mm) {
274 		mmap_write_unlock(mm);
275 		mmput(mm);
276 	}
277 	return 0;
278 
279 free_range:
280 	for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
281 		bool ret;
282 		size_t index;
283 
284 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
285 		page = &alloc->pages[index];
286 
287 		trace_binder_free_lru_start(alloc, index);
288 
289 		ret = list_lru_add(&binder_alloc_lru, &page->lru);
290 		WARN_ON(!ret);
291 
292 		trace_binder_free_lru_end(alloc, index);
293 		if (page_addr == start)
294 			break;
295 		continue;
296 
297 err_vm_insert_page_failed:
298 		__free_page(page->page_ptr);
299 		page->page_ptr = NULL;
300 err_alloc_page_failed:
301 err_page_ptr_cleared:
302 		if (page_addr == start)
303 			break;
304 	}
305 err_no_vma:
306 	if (mm) {
307 		mmap_write_unlock(mm);
308 		mmput(mm);
309 	}
310 	return vma ? -ENOMEM : -ESRCH;
311 }
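/*
 * binder_update_page_range() is the single entry point for both
 * populating (allocate == 1) and retiring (allocate == 0) the pages
 * backing [start, end). Retired pages are not freed immediately: they
 * are parked on binder_alloc_lru and only released by the shrinker
 * (binder_alloc_free_page()) under memory pressure, which makes
 * re-allocating the same range cheap.
 */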
312 
313 
314 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
315 		struct vm_area_struct *vma)
316 {
317 	if (vma)
318 		alloc->vma_vm_mm = vma->vm_mm;
319 	/*
320 	 * If we see alloc->vma is not NULL, the buffer data structures are set
321 	 * up completely. See the smp_rmb() side in binder_alloc_get_vma().
322 	 * We also want to guarantee the new alloc->vma_vm_mm is always visible
323 	 * if alloc->vma is set.
324 	 */
325 	smp_wmb();
326 	alloc->vma = vma;
327 }
328 
329 static inline struct vm_area_struct *binder_alloc_get_vma(
330 		struct binder_alloc *alloc)
331 {
332 	struct vm_area_struct *vma = NULL;
333 
334 	if (alloc->vma) {
335 		/* Look at description in binder_alloc_set_vma */
336 		smp_rmb();
337 		vma = alloc->vma;
338 	}
339 	return vma;
340 }
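/*
 * The smp_wmb()/smp_rmb() pair above implements a one-way publish flag:
 * binder_alloc_set_vma() stores alloc->vma only after the fields it
 * guards (notably alloc->vma_vm_mm) are written, so a reader that
 * observes a non-NULL alloc->vma is guaranteed to see those earlier
 * writes as well.
 */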
341 
342 static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
343 {
344 	/*
345 	 * Find the number and total size of buffers allocated by the current caller;
346 	 * The idea is that once we cross the threshold, whoever is responsible
347 	 * for the low async space is likely to try to send another async txn,
348 	 * and at some point we'll catch them in the act. This is more efficient
349 	 * than keeping a map per pid.
350 	 */
351 	struct rb_node *n;
352 	struct binder_buffer *buffer;
353 	size_t total_alloc_size = 0;
354 	size_t num_buffers = 0;
355 
356 	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
357 		 n = rb_next(n)) {
358 		buffer = rb_entry(n, struct binder_buffer, rb_node);
359 		if (buffer->pid != pid)
360 			continue;
361 		if (!buffer->async_transaction)
362 			continue;
363 		total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
364 			+ sizeof(struct binder_buffer);
365 		num_buffers++;
366 	}
367 
368 	/*
369 	 * Warn if this pid has more than 50 transactions, or more than 50% of
370 	 * async space (which is 25% of total buffer size). Oneway spam is only
371 	 * detected when the threshold is exceeded.
372 	 */
373 	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
374 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
375 			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
376 			      alloc->pid, pid, num_buffers, total_alloc_size);
377 		if (!alloc->oneway_spam_detected) {
378 			alloc->oneway_spam_detected = true;
379 			return true;
380 		}
381 	}
382 	return false;
383 }
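/*
 * Worked example of the thresholds, assuming the maximum 4 MB mapping:
 * free_async_space starts at 2 MB (half the buffer). The check in
 * binder_alloc_new_buf_locked() arms detection once free_async_space
 * drops below buffer_size / 10 (~410 KB, i.e. under 20% of async
 * space), and this function then flags a pid holding more than 50
 * oneway buffers or more than buffer_size / 4 (1 MB, i.e. over 50% of
 * async space).
 */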
384 
385 static struct binder_buffer *binder_alloc_new_buf_locked(
386 				struct binder_alloc *alloc,
387 				size_t data_size,
388 				size_t offsets_size,
389 				size_t extra_buffers_size,
390 				int is_async,
391 				int pid)
392 {
393 	struct rb_node *n = alloc->free_buffers.rb_node;
394 	struct binder_buffer *buffer;
395 	size_t buffer_size;
396 	struct rb_node *best_fit = NULL;
397 	void __user *has_page_addr;
398 	void __user *end_page_addr;
399 	size_t size, data_offsets_size;
400 	int ret;
401 
402 	if (!binder_alloc_get_vma(alloc)) {
403 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
404 				   "%d: binder_alloc_buf, no vma\n",
405 				   alloc->pid);
406 		return ERR_PTR(-ESRCH);
407 	}
408 
409 	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
410 		ALIGN(offsets_size, sizeof(void *));
411 
412 	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
413 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
414 				"%d: got transaction with invalid size %zd-%zd\n",
415 				alloc->pid, data_size, offsets_size);
416 		return ERR_PTR(-EINVAL);
417 	}
418 	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
419 	if (size < data_offsets_size || size < extra_buffers_size) {
420 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
421 				"%d: got transaction with invalid extra_buffers_size %zd\n",
422 				alloc->pid, extra_buffers_size);
423 		return ERR_PTR(-EINVAL);
424 	}
425 	trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async);
426 	if (is_async &&
427 	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
428 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
429 			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
430 			      alloc->pid, size);
431 		return ERR_PTR(-ENOSPC);
432 	}
433 
434 	/* Pad 0-size buffers so they get assigned unique addresses */
435 	size = max(size, sizeof(void *));
436 
437 	while (n) {
438 		buffer = rb_entry(n, struct binder_buffer, rb_node);
439 		BUG_ON(!buffer->free);
440 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
441 
442 		if (size < buffer_size) {
443 			best_fit = n;
444 			n = n->rb_left;
445 		} else if (size > buffer_size)
446 			n = n->rb_right;
447 		else {
448 			best_fit = n;
449 			break;
450 		}
451 	}
452 	if (best_fit == NULL) {
453 		size_t allocated_buffers = 0;
454 		size_t largest_alloc_size = 0;
455 		size_t total_alloc_size = 0;
456 		size_t free_buffers = 0;
457 		size_t largest_free_size = 0;
458 		size_t total_free_size = 0;
459 
460 		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
461 		     n = rb_next(n)) {
462 			buffer = rb_entry(n, struct binder_buffer, rb_node);
463 			buffer_size = binder_alloc_buffer_size(alloc, buffer);
464 			allocated_buffers++;
465 			total_alloc_size += buffer_size;
466 			if (buffer_size > largest_alloc_size)
467 				largest_alloc_size = buffer_size;
468 		}
469 		for (n = rb_first(&alloc->free_buffers); n != NULL;
470 		     n = rb_next(n)) {
471 			buffer = rb_entry(n, struct binder_buffer, rb_node);
472 			buffer_size = binder_alloc_buffer_size(alloc, buffer);
473 			free_buffers++;
474 			total_free_size += buffer_size;
475 			if (buffer_size > largest_free_size)
476 				largest_free_size = buffer_size;
477 		}
478 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
479 				   "%d: binder_alloc_buf size %zd failed, no address space\n",
480 				   alloc->pid, size);
481 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
482 				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
483 				   total_alloc_size, allocated_buffers,
484 				   largest_alloc_size, total_free_size,
485 				   free_buffers, largest_free_size);
486 		return ERR_PTR(-ENOSPC);
487 	}
488 	if (n == NULL) {
489 		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
490 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
491 	}
492 
493 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
494 		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
495 		      alloc->pid, size, buffer, buffer_size);
496 
497 	has_page_addr = (void __user *)
498 		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
499 	WARN_ON(n && buffer_size != size);
500 	end_page_addr =
501 		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
502 	if (end_page_addr > has_page_addr)
503 		end_page_addr = has_page_addr;
504 	ret = binder_update_page_range(alloc, 1, (void __user *)
505 		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
506 	if (ret)
507 		return ERR_PTR(ret);
508 
509 	if (buffer_size != size) {
510 		struct binder_buffer *new_buffer;
511 
512 		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
513 		if (!new_buffer) {
514 			pr_err("%s: %d failed to alloc new buffer struct\n",
515 			       __func__, alloc->pid);
516 			goto err_alloc_buf_struct_failed;
517 		}
518 		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
519 		list_add(&new_buffer->entry, &buffer->entry);
520 		new_buffer->free = 1;
521 		binder_insert_free_buffer(alloc, new_buffer);
522 	}
523 
524 	rb_erase(best_fit, &alloc->free_buffers);
525 	buffer->free = 0;
526 	buffer->allow_user_free = 0;
527 	binder_insert_allocated_buffer_locked(alloc, buffer);
528 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
529 		     "%d: binder_alloc_buf size %zd got %pK\n",
530 		      alloc->pid, size, buffer);
531 	buffer->data_size = data_size;
532 	buffer->offsets_size = offsets_size;
533 	buffer->async_transaction = is_async;
534 	buffer->extra_buffers_size = extra_buffers_size;
535 	buffer->pid = pid;
536 	buffer->oneway_spam_suspect = false;
537 	if (is_async) {
538 		alloc->free_async_space -= size + sizeof(struct binder_buffer);
539 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
540 			     "%d: binder_alloc_buf size %zd async free %zd\n",
541 			      alloc->pid, size, alloc->free_async_space);
542 		if (alloc->free_async_space < alloc->buffer_size / 10) {
543 			/*
544 			 * Start detecting spammers once we have less than 20%
545 			 * of async space left (which is less than 10% of total
546 			 * buffer size).
547 			 */
548 			buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
549 		} else {
550 			alloc->oneway_spam_detected = false;
551 		}
552 	}
553 	return buffer;
554 
555 err_alloc_buf_struct_failed:
556 	binder_update_page_range(alloc, 0, (void __user *)
557 				 PAGE_ALIGN((uintptr_t)buffer->user_data),
558 				 end_page_addr);
559 	return ERR_PTR(-ENOMEM);
560 }
561 
562 /**
563  * binder_alloc_new_buf() - Allocate a new binder buffer
564  * @alloc:              binder_alloc for this proc
565  * @data_size:          size of user data buffer
566  * @offsets_size:       size of the buffer for user-specified offsets
567  * @extra_buffers_size: size of extra space for meta-data (eg, security context)
568  * @is_async:           buffer for async transaction
569  * @pid:                pid to attribute allocation to (used for debugging)
570  *
571  * Allocate a new buffer given the requested sizes. Returns
572  * the kernel version of the buffer pointer. The size allocated
573  * is the sum of the three given sizes (each rounded up to a
574  * pointer-sized boundary).
575  *
576  * Return:	The allocated buffer or %ERR_PTR(-errno) on error
577  */
578 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
579 					   size_t data_size,
580 					   size_t offsets_size,
581 					   size_t extra_buffers_size,
582 					   int is_async,
583 					   int pid)
584 {
585 	struct binder_buffer *buffer;
586 
587 	mutex_lock(&alloc->mutex);
588 	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
589 					     extra_buffers_size, is_async, pid);
590 	mutex_unlock(&alloc->mutex);
591 	return buffer;
592 }
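/*
 * A minimal usage sketch (error handling elided; the real caller is
 * binder_transaction() in binder.c, roughly):
 *
 *	buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 *				      tr->offsets_size, extra_buffers_size,
 *				      !reply && (t->flags & TF_ONE_WAY),
 *				      current->tgid);
 *	if (IS_ERR(buffer))
 *		fail the transaction with PTR_ERR(buffer);
 */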
593 
594 static void __user *buffer_start_page(struct binder_buffer *buffer)
595 {
596 	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
597 }
598 
599 static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
600 {
601 	return (void __user *)
602 		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
603 }
604 
605 static void binder_delete_free_buffer(struct binder_alloc *alloc,
606 				      struct binder_buffer *buffer)
607 {
608 	struct binder_buffer *prev, *next = NULL;
609 	bool to_free = true;
610 
611 	BUG_ON(alloc->buffers.next == &buffer->entry);
612 	prev = binder_buffer_prev(buffer);
613 	BUG_ON(!prev->free);
614 	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
615 		to_free = false;
616 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
617 				   "%d: merge free, buffer %pK share page with %pK\n",
618 				   alloc->pid, buffer->user_data,
619 				   prev->user_data);
620 	}
621 
622 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
623 		next = binder_buffer_next(buffer);
624 		if (buffer_start_page(next) == buffer_start_page(buffer)) {
625 			to_free = false;
626 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
627 					   "%d: merge free, buffer %pK share page with %pK\n",
628 					   alloc->pid,
629 					   buffer->user_data,
630 					   next->user_data);
631 		}
632 	}
633 
634 	if (PAGE_ALIGNED(buffer->user_data)) {
635 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
636 				   "%d: merge free, buffer start %pK is page aligned\n",
637 				   alloc->pid, buffer->user_data);
638 		to_free = false;
639 	}
640 
641 	if (to_free) {
642 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
643 				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
644 				   alloc->pid, buffer->user_data,
645 				   prev->user_data,
646 				   next ? next->user_data : NULL);
647 		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
648 					 buffer_start_page(buffer) + PAGE_SIZE);
649 	}
650 	list_del(&buffer->entry);
651 	kfree(buffer);
652 }
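/*
 * A free binder_buffer struct is always deleted here, but its backing
 * page is released only when no neighbour shares it: the checks above
 * keep the page if the previous buffer ends on it, if the next buffer
 * starts on it, or if the buffer itself starts page-aligned.
 */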
653 
654 static void binder_free_buf_locked(struct binder_alloc *alloc,
655 				   struct binder_buffer *buffer)
656 {
657 	size_t size, buffer_size;
658 
659 	buffer_size = binder_alloc_buffer_size(alloc, buffer);
660 
661 	size = ALIGN(buffer->data_size, sizeof(void *)) +
662 		ALIGN(buffer->offsets_size, sizeof(void *)) +
663 		ALIGN(buffer->extra_buffers_size, sizeof(void *));
664 
665 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
666 		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
667 		      alloc->pid, buffer, size, buffer_size);
668 
669 	BUG_ON(buffer->free);
670 	BUG_ON(size > buffer_size);
671 	BUG_ON(buffer->transaction != NULL);
672 	BUG_ON(buffer->user_data < alloc->buffer);
673 	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
674 
675 	if (buffer->async_transaction) {
676 		alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
677 
678 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
679 			     "%d: binder_free_buf size %zd async free %zd\n",
680 			      alloc->pid, size, alloc->free_async_space);
681 	}
682 
683 	binder_update_page_range(alloc, 0,
684 		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
685 		(void __user *)(((uintptr_t)
686 			  buffer->user_data + buffer_size) & PAGE_MASK));
687 
688 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
689 	buffer->free = 1;
690 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
691 		struct binder_buffer *next = binder_buffer_next(buffer);
692 
693 		if (next->free) {
694 			rb_erase(&next->rb_node, &alloc->free_buffers);
695 			binder_delete_free_buffer(alloc, next);
696 		}
697 	}
698 	if (alloc->buffers.next != &buffer->entry) {
699 		struct binder_buffer *prev = binder_buffer_prev(buffer);
700 
701 		if (prev->free) {
702 			binder_delete_free_buffer(alloc, buffer);
703 			rb_erase(&prev->rb_node, &alloc->free_buffers);
704 			buffer = prev;
705 		}
706 	}
707 	binder_insert_free_buffer(alloc, buffer);
708 }
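/*
 * Note the coalescing above: once the buffer is marked free, a free
 * successor and then a free predecessor are merged with it (the
 * redundant binder_buffer structs are deleted) before the surviving
 * buffer is reinserted into the free tree, so no two adjacent free
 * chunks ever coexist.
 */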
709 
710 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
711 				   struct binder_buffer *buffer);
712 /**
713  * binder_alloc_free_buf() - free a binder buffer
714  * @alloc:	binder_alloc for this proc
715  * @buffer:	kernel pointer to buffer
716  *
717  * Free the buffer allocated via binder_alloc_new_buf()
718  */
719 void binder_alloc_free_buf(struct binder_alloc *alloc,
720 			    struct binder_buffer *buffer)
721 {
722 	/*
723 	 * We could eliminate the call to binder_alloc_clear_buf()
724 	 * from binder_alloc_deferred_release() by moving this to
725 	 * binder_alloc_free_buf_locked(). However, that could
726 	 * increase contention for the alloc mutex if clear_on_free
727 	 * is used frequently for large buffers. The mutex is not
728 	 * needed for correctness here.
729 	 */
730 	if (buffer->clear_on_free) {
731 		binder_alloc_clear_buf(alloc, buffer);
732 		buffer->clear_on_free = false;
733 	}
734 	mutex_lock(&alloc->mutex);
735 	binder_free_buf_locked(alloc, buffer);
736 	mutex_unlock(&alloc->mutex);
737 }
738 
739 /**
740  * binder_alloc_mmap_handler() - map virtual address space for proc
741  * @alloc:	alloc structure for this proc
742  * @vma:	vma passed to mmap()
743  *
744  * Called by binder_mmap() to initialize the space specified in
745  * vma for allocating binder buffers
746  *
747  * Return:
748  *      0 = success
749  *      -EBUSY = address space already mapped
750  *      -ENOMEM = failed to map memory to given address space
751  */
752 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
753 			      struct vm_area_struct *vma)
754 {
755 	int ret;
756 	const char *failure_string;
757 	struct binder_buffer *buffer;
758 
759 	mutex_lock(&binder_alloc_mmap_lock);
760 	if (alloc->buffer_size) {
761 		ret = -EBUSY;
762 		failure_string = "already mapped";
763 		goto err_already_mapped;
764 	}
765 	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
766 				   SZ_4M);
767 	mutex_unlock(&binder_alloc_mmap_lock);
768 
769 	alloc->buffer = (void __user *)vma->vm_start;
770 
771 	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
772 			       sizeof(alloc->pages[0]),
773 			       GFP_KERNEL);
774 	if (alloc->pages == NULL) {
775 		ret = -ENOMEM;
776 		failure_string = "alloc page array";
777 		goto err_alloc_pages_failed;
778 	}
779 
780 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
781 	if (!buffer) {
782 		ret = -ENOMEM;
783 		failure_string = "alloc buffer struct";
784 		goto err_alloc_buf_struct_failed;
785 	}
786 
787 	buffer->user_data = alloc->buffer;
788 	list_add(&buffer->entry, &alloc->buffers);
789 	buffer->free = 1;
790 	binder_insert_free_buffer(alloc, buffer);
791 	alloc->free_async_space = alloc->buffer_size / 2;
792 	binder_alloc_set_vma(alloc, vma);
793 	mmgrab(alloc->vma_vm_mm);
794 
795 	return 0;
796 
797 err_alloc_buf_struct_failed:
798 	kfree(alloc->pages);
799 	alloc->pages = NULL;
800 err_alloc_pages_failed:
801 	alloc->buffer = NULL;
802 	mutex_lock(&binder_alloc_mmap_lock);
803 	alloc->buffer_size = 0;
804 err_already_mapped:
805 	mutex_unlock(&binder_alloc_mmap_lock);
806 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
807 			   "%s: %d %lx-%lx %s failed %d\n", __func__,
808 			   alloc->pid, vma->vm_start, vma->vm_end,
809 			   failure_string, ret);
810 	return ret;
811 }
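/*
 * The mapping is capped at SZ_4M no matter what length userspace passed
 * to mmap(); a typical libbinder client asks for roughly 1 MB. The cap
 * bounds both alloc->pages (one struct binder_lru_page per page) and
 * alloc->free_async_space, which starts at half of buffer_size.
 */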
812 
813 
814 void binder_alloc_deferred_release(struct binder_alloc *alloc)
815 {
816 	struct rb_node *n;
817 	int buffers, page_count;
818 	struct binder_buffer *buffer;
819 
820 	buffers = 0;
821 	mutex_lock(&alloc->mutex);
822 	BUG_ON(alloc->vma);
823 
824 	while ((n = rb_first(&alloc->allocated_buffers))) {
825 		buffer = rb_entry(n, struct binder_buffer, rb_node);
826 
827 		/* Transaction should already have been freed */
828 		BUG_ON(buffer->transaction);
829 
830 		if (buffer->clear_on_free) {
831 			binder_alloc_clear_buf(alloc, buffer);
832 			buffer->clear_on_free = false;
833 		}
834 		binder_free_buf_locked(alloc, buffer);
835 		buffers++;
836 	}
837 
838 	while (!list_empty(&alloc->buffers)) {
839 		buffer = list_first_entry(&alloc->buffers,
840 					  struct binder_buffer, entry);
841 		WARN_ON(!buffer->free);
842 
843 		list_del(&buffer->entry);
844 		WARN_ON_ONCE(!list_empty(&alloc->buffers));
845 		kfree(buffer);
846 	}
847 
848 	page_count = 0;
849 	if (alloc->pages) {
850 		int i;
851 
852 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
853 			void __user *page_addr;
854 			bool on_lru;
855 
856 			if (!alloc->pages[i].page_ptr)
857 				continue;
858 
859 			on_lru = list_lru_del(&binder_alloc_lru,
860 					      &alloc->pages[i].lru);
861 			page_addr = alloc->buffer + i * PAGE_SIZE;
862 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
863 				     "%s: %d: page %d at %pK %s\n",
864 				     __func__, alloc->pid, i, page_addr,
865 				     on_lru ? "on lru" : "active");
866 			__free_page(alloc->pages[i].page_ptr);
867 			page_count++;
868 		}
869 		kfree(alloc->pages);
870 	}
871 	mutex_unlock(&alloc->mutex);
872 	if (alloc->vma_vm_mm)
873 		mmdrop(alloc->vma_vm_mm);
874 
875 	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
876 		     "%s: %d buffers %d, pages %d\n",
877 		     __func__, alloc->pid, buffers, page_count);
878 }
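/*
 * The mmdrop() above pairs with the mmgrab() taken in
 * binder_alloc_mmap_handler(), releasing the pin on the mm_struct once
 * every buffer and page belonging to the dead process has been torn
 * down.
 */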
879 
880 static void print_binder_buffer(struct seq_file *m, const char *prefix,
881 				struct binder_buffer *buffer)
882 {
883 	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
884 		   prefix, buffer->debug_id, buffer->user_data,
885 		   buffer->data_size, buffer->offsets_size,
886 		   buffer->extra_buffers_size,
887 		   buffer->transaction ? "active" : "delivered");
888 }
889 
890 /**
891  * binder_alloc_print_allocated() - print buffer info
892  * @m:     seq_file for output via seq_printf()
893  * @alloc: binder_alloc for this proc
894  *
895  * Prints information about every buffer associated with
896  * the binder_alloc state to the given seq_file
897  */
898 void binder_alloc_print_allocated(struct seq_file *m,
899 				  struct binder_alloc *alloc)
900 {
901 	struct rb_node *n;
902 
903 	mutex_lock(&alloc->mutex);
904 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
905 		print_binder_buffer(m, "  buffer",
906 				    rb_entry(n, struct binder_buffer, rb_node));
907 	mutex_unlock(&alloc->mutex);
908 }
909 
910 /**
911  * binder_alloc_print_pages() - print page usage
912  * @m:     seq_file for output via seq_printf()
913  * @alloc: binder_alloc for this proc
914  */
915 void binder_alloc_print_pages(struct seq_file *m,
916 			      struct binder_alloc *alloc)
917 {
918 	struct binder_lru_page *page;
919 	int i;
920 	int active = 0;
921 	int lru = 0;
922 	int free = 0;
923 
924 	mutex_lock(&alloc->mutex);
925 	/*
926 	 * Make sure the binder_alloc is fully initialized, otherwise we might
927 	 * read inconsistent state.
928 	 */
929 	if (binder_alloc_get_vma(alloc) != NULL) {
930 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
931 			page = &alloc->pages[i];
932 			if (!page->page_ptr)
933 				free++;
934 			else if (list_empty(&page->lru))
935 				active++;
936 			else
937 				lru++;
938 		}
939 	}
940 	mutex_unlock(&alloc->mutex);
941 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
942 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
943 }
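/*
 * Sample output shape (values illustrative): "pages: 3:1:252" means 3
 * pages are mapped and in use, 1 is parked on the lru awaiting reclaim,
 * and 252 were never allocated; the high watermark line reports the
 * largest page index ever populated, plus one.
 */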
944 
945 /**
946  * binder_alloc_get_allocated_count() - return count of buffers
947  * @alloc: binder_alloc for this proc
948  *
949  * Return: count of allocated buffers
950  */
951 int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
952 {
953 	struct rb_node *n;
954 	int count = 0;
955 
956 	mutex_lock(&alloc->mutex);
957 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
958 		count++;
959 	mutex_unlock(&alloc->mutex);
960 	return count;
961 }
962 
963 
964 /**
965  * binder_alloc_vma_close() - invalidate address space
966  * @alloc: binder_alloc for this proc
967  *
968  * Called from binder_vma_close() when releasing address space.
969  * Clears alloc->vma to prevent new incoming transactions from
970  * allocating more buffers.
971  */
972 void binder_alloc_vma_close(struct binder_alloc *alloc)
973 {
974 	binder_alloc_set_vma(alloc, NULL);
975 }
976 
977 /**
978  * binder_alloc_free_page() - shrinker callback to free pages
979  * @item:   item to free
 * @lru:    list_lru_one that @item is currently on
980  * @lock:   lock protecting the item
981  * @cb_arg: callback argument
982  *
983  * Called from list_lru_walk() in binder_shrink_scan() to free
984  * up pages when the system is under memory pressure.
985  */
986 enum lru_status binder_alloc_free_page(struct list_head *item,
987 				       struct list_lru_one *lru,
988 				       spinlock_t *lock,
989 				       void *cb_arg)
990 	__must_hold(lock)
991 {
992 	struct mm_struct *mm = NULL;
993 	struct binder_lru_page *page = container_of(item,
994 						    struct binder_lru_page,
995 						    lru);
996 	struct binder_alloc *alloc;
997 	uintptr_t page_addr;
998 	size_t index;
999 	struct vm_area_struct *vma;
1000 
1001 	alloc = page->alloc;
1002 	if (!mutex_trylock(&alloc->mutex))
1003 		goto err_get_alloc_mutex_failed;
1004 
1005 	if (!page->page_ptr)
1006 		goto err_page_already_freed;
1007 
1008 	index = page - alloc->pages;
1009 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
1010 
1011 	mm = alloc->vma_vm_mm;
1012 	if (!mmget_not_zero(mm))
1013 		goto err_mmget;
1014 	if (!mmap_read_trylock(mm))
1015 		goto err_mmap_read_lock_failed;
1016 	vma = binder_alloc_get_vma(alloc);
1017 
1018 	list_lru_isolate(lru, item);
1019 	spin_unlock(lock);
1020 
1021 	if (vma) {
1022 		trace_binder_unmap_user_start(alloc, index);
1023 
1024 		zap_page_range(vma, page_addr, PAGE_SIZE);
1025 
1026 		trace_binder_unmap_user_end(alloc, index);
1027 	}
1028 	mmap_read_unlock(mm);
1029 	mmput_async(mm);
1030 
1031 	trace_binder_unmap_kernel_start(alloc, index);
1032 
1033 	__free_page(page->page_ptr);
1034 	page->page_ptr = NULL;
1035 
1036 	trace_binder_unmap_kernel_end(alloc, index);
1037 
1038 	spin_lock(lock);
1039 	mutex_unlock(&alloc->mutex);
1040 	return LRU_REMOVED_RETRY;
1041 
1042 err_mmap_read_lock_failed:
1043 	mmput_async(mm);
1044 err_mmget:
1045 err_page_already_freed:
1046 	mutex_unlock(&alloc->mutex);
1047 err_get_alloc_mutex_failed:
1048 	return LRU_SKIP;
1049 }
1050 
1051 static unsigned long
1052 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1053 {
1054 	unsigned long ret = list_lru_count(&binder_alloc_lru);
1055 	return ret;
1056 }
1057 
1058 static unsigned long
1059 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1060 {
1061 	unsigned long ret;
1062 
1063 	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
1064 			    NULL, sc->nr_to_scan);
1065 	return ret;
1066 }
1067 
1068 static struct shrinker binder_shrinker = {
1069 	.count_objects = binder_shrink_count,
1070 	.scan_objects = binder_shrink_scan,
1071 	.seeks = DEFAULT_SEEKS,
1072 };
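/*
 * With the shrinker registered, memory pressure drives reclaim:
 * binder_shrink_count() reports how many pages sit on binder_alloc_lru,
 * and binder_shrink_scan() pushes up to sc->nr_to_scan of them through
 * binder_alloc_free_page(), which unmaps each page from its owning
 * process and frees it.
 */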
1073 
1074 /**
1075  * binder_alloc_init() - called by binder_open() for per-proc initialization
1076  * @alloc: binder_alloc for this proc
1077  *
1078  * Called from binder_open() to initialize binder_alloc fields for
1079  * new binder proc
1080  */
1081 void binder_alloc_init(struct binder_alloc *alloc)
1082 {
1083 	alloc->pid = current->group_leader->pid;
1084 	mutex_init(&alloc->mutex);
1085 	INIT_LIST_HEAD(&alloc->buffers);
1086 }
1087 
1088 int binder_alloc_shrinker_init(void)
1089 {
1090 	int ret = list_lru_init(&binder_alloc_lru);
1091 
1092 	if (ret == 0) {
1093 		ret = register_shrinker(&binder_shrinker);
1094 		if (ret)
1095 			list_lru_destroy(&binder_alloc_lru);
1096 	}
1097 	return ret;
1098 }
1099 
1100 /**
1101  * check_buffer() - verify that buffer/offset is safe to access
1102  * @alloc: binder_alloc for this proc
1103  * @buffer: binder buffer to be accessed
1104  * @offset: offset into @buffer data
1105  * @bytes: bytes to access from offset
1106  *
1107  * Check that the @offset/@bytes are within the size of the given
1108  * @buffer and that the buffer is currently active and not freeable.
1109  * Offsets must also be multiples of sizeof(u32). The kernel is
1110  * allowed to touch the buffer in two cases:
1111  *
1112  * 1) when the buffer is being created:
1113  *     (buffer->free == 0 && buffer->allow_user_free == 0)
1114  * 2) when the buffer is being torn down:
1115  *     (buffer->free == 0 && buffer->transaction == NULL).
1116  *
1117  * Return: true if the buffer is safe to access
1118  */
1119 static inline bool check_buffer(struct binder_alloc *alloc,
1120 				struct binder_buffer *buffer,
1121 				binder_size_t offset, size_t bytes)
1122 {
1123 	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1124 
1125 	return buffer_size >= bytes &&
1126 		offset <= buffer_size - bytes &&
1127 		IS_ALIGNED(offset, sizeof(u32)) &&
1128 		!buffer->free &&
1129 		(!buffer->allow_user_free || !buffer->transaction);
1130 }
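/*
 * The bounds test above is written to avoid overflow: "offset <=
 * buffer_size - bytes" is evaluated only when "buffer_size >= bytes"
 * already holds, so the subtraction cannot wrap, whereas the naive
 * "offset + bytes <= buffer_size" could be defeated by wraparound.
 */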
1131 
1132 /**
1133  * binder_alloc_get_page() - get kernel pointer for given buffer offset
1134  * @alloc: binder_alloc for this proc
1135  * @buffer: binder buffer to be accessed
1136  * @buffer_offset: offset into @buffer data
1137  * @pgoffp: address to copy final page offset to
1138  *
1139  * Lookup the struct page corresponding to the address
1140  * at @buffer_offset into @buffer->user_data. If @pgoffp is not
1141  * NULL, the byte-offset into the page is written there.
1142  *
1143  * The caller is responsible for ensuring that the offset points
1144  * to a valid address within the @buffer and that @buffer is
1145  * not freeable by the user. Since it can't be freed, we are
1146  * guaranteed that the corresponding elements of @alloc->pages[]
1147  * cannot change.
1148  *
1149  * Return: struct page
1150  */
1151 static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
1152 					  struct binder_buffer *buffer,
1153 					  binder_size_t buffer_offset,
1154 					  pgoff_t *pgoffp)
1155 {
1156 	binder_size_t buffer_space_offset = buffer_offset +
1157 		(buffer->user_data - alloc->buffer);
1158 	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
1159 	size_t index = buffer_space_offset >> PAGE_SHIFT;
1160 	struct binder_lru_page *lru_page;
1161 
1162 	lru_page = &alloc->pages[index];
1163 	*pgoffp = pgoff;
1164 	return lru_page->page_ptr;
1165 }
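/*
 * For example, with PAGE_SIZE == 4096, a buffer whose user_data sits
 * 0x1800 bytes into the mapping and a buffer_offset of 0x900 give
 * buffer_space_offset = 0x2100, hence index = 2 (the third page) and
 * pgoff = 0x100 within that page.
 */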
1166 
1167 /**
1168  * binder_alloc_clear_buf() - zero out buffer
1169  * @alloc: binder_alloc for this proc
1170  * @buffer: binder buffer to be cleared
1171  *
1172  * memset the given buffer to 0
1173  */
1174 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
1175 				   struct binder_buffer *buffer)
1176 {
1177 	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
1178 	binder_size_t buffer_offset = 0;
1179 
1180 	while (bytes) {
1181 		unsigned long size;
1182 		struct page *page;
1183 		pgoff_t pgoff;
1184 		void *kptr;
1185 
1186 		page = binder_alloc_get_page(alloc, buffer,
1187 					     buffer_offset, &pgoff);
1188 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1189 		kptr = kmap(page) + pgoff;
1190 		memset(kptr, 0, size);
1191 		kunmap(page);
1192 		bytes -= size;
1193 		buffer_offset += size;
1194 	}
1195 }
1196 
1197 /**
1198  * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1199  * @alloc: binder_alloc for this proc
1200  * @buffer: binder buffer to be accessed
1201  * @buffer_offset: offset into @buffer data
1202  * @from: userspace pointer to source buffer
1203  * @bytes: bytes to copy
1204  *
1205  * Copy bytes from source userspace to target buffer.
1206  *
1207  * Return: bytes remaining to be copied
1208  */
1209 unsigned long
1210 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1211 				 struct binder_buffer *buffer,
1212 				 binder_size_t buffer_offset,
1213 				 const void __user *from,
1214 				 size_t bytes)
1215 {
1216 	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1217 		return bytes;
1218 
1219 	while (bytes) {
1220 		unsigned long size;
1221 		unsigned long ret;
1222 		struct page *page;
1223 		pgoff_t pgoff;
1224 		void *kptr;
1225 
1226 		page = binder_alloc_get_page(alloc, buffer,
1227 					     buffer_offset, &pgoff);
1228 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1229 		kptr = kmap(page) + pgoff;
1230 		ret = copy_from_user(kptr, from, size);
1231 		kunmap(page);
1232 		if (ret)
1233 			return bytes - size + ret;
1234 		bytes -= size;
1235 		from += size;
1236 		buffer_offset += size;
1237 	}
1238 	return 0;
1239 }
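/*
 * Like copy_from_user(), the return value is the number of bytes NOT
 * copied, so 0 means success; callers needing an errno typically map
 * any non-zero result to -EFAULT.
 */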
1240 
1241 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1242 				       bool to_buffer,
1243 				       struct binder_buffer *buffer,
1244 				       binder_size_t buffer_offset,
1245 				       void *ptr,
1246 				       size_t bytes)
1247 {
1248 	/* All copies must be 32-bit aligned and 32-bit size */
1249 	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1250 		return -EINVAL;
1251 
1252 	while (bytes) {
1253 		unsigned long size;
1254 		struct page *page;
1255 		pgoff_t pgoff;
1256 		void *tmpptr;
1257 		void *base_ptr;
1258 
1259 		page = binder_alloc_get_page(alloc, buffer,
1260 					     buffer_offset, &pgoff);
1261 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1262 		base_ptr = kmap_atomic(page);
1263 		tmpptr = base_ptr + pgoff;
1264 		if (to_buffer)
1265 			memcpy(tmpptr, ptr, size);
1266 		else
1267 			memcpy(ptr, tmpptr, size);
1268 		/*
1269 		 * kunmap_atomic() takes care of flushing the cache
1270 		 * if this device has VIVT cache arch
1271 		 */
1272 		kunmap_atomic(base_ptr);
1273 		bytes -= size;
1274 		pgoff = 0;
1275 		ptr = ptr + size;
1276 		buffer_offset += size;
1277 	}
1278 	return 0;
1279 }
1280 
1281 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1282 				struct binder_buffer *buffer,
1283 				binder_size_t buffer_offset,
1284 				void *src,
1285 				size_t bytes)
1286 {
1287 	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1288 					   src, bytes);
1289 }
1290 
1291 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1292 				  void *dest,
1293 				  struct binder_buffer *buffer,
1294 				  binder_size_t buffer_offset,
1295 				  size_t bytes)
1296 {
1297 	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1298 					   dest, bytes);
1299 }
1300 
1301