// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"
#include <trace/hooks/binder.h>

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)
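/*
 * Debug output is rate-limited and gated by the debug_mask module parameter,
 * which is writable at runtime through sysfs (the exact path under
 * /sys/module/ depends on how the binder driver is built into the kernel).
 */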

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

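/*
 * A buffer's size is never stored explicitly: it runs from its own user_data
 * start to the start of the next buffer on alloc->buffers, or to the end of
 * the mapped area for the last buffer.
 */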
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void __user *uptr;

	uptr = (void __user *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (uptr < buffer->user_data)
			n = n->rb_left;
		else if (uptr > buffer->user_data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate the userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer. Searches the rb tree for a buffer
 * whose user data pointer matches.
 *
 * Return: Pointer to the buffer, NULL if no matching buffer is found, or
 * an ERR_PTR if the buffer cannot currently be freed by userspace (still
 * in use by the kernel or already freed).
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

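/*
 * binder_update_page_range() serves both directions: with allocate == 1 it
 * populates and maps every page backing [start, end) into the target vma;
 * with allocate == 0 it moves those pages onto the global LRU so the
 * shrinker can reclaim them later under memory pressure.
 */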
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void __user *start, void __user *end)
{
	void __user *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		mmap_write_lock(mm);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
	}
	if (mm) {
		mmap_write_unlock(mm);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		if (page_addr == start)
			break;
		continue;

err_vm_insert_page_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		if (page_addr == start)
			break;
	}
err_no_vma:
	if (mm) {
		mmap_write_unlock(mm);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}


static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If alloc->vma is seen to be non-NULL, the buffer data structures
	 * are completely set up; this pairs with the smp_rmb() in
	 * binder_alloc_get_vma(). We also want to guarantee the new
	 * alloc->vma_vm_mm is always visible if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}

static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
	/*
	 * Find the number and total size of buffers allocated by the current
	 * caller; the idea is that once we cross the threshold, whoever is
	 * responsible for the low async space is likely to try to send
	 * another async txn, and at some point we'll catch them in the act.
	 * This is more efficient than keeping a map per pid.
	 */
	struct rb_node *n;
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	size_t num_buffers = 0;

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
	     n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
			+ sizeof(struct binder_buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
			      alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}

static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async,
				int pid)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void __user *has_page_addr;
	void __user *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid size %zd-%zd\n",
				alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid extra_buffers_size %zd\n",
				alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async);
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

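	/*
	 * Best-fit search: free_buffers is ordered by chunk size, so descend
	 * left whenever the request fits to find the smallest free chunk that
	 * is large enough; an exact size match ends the search immediately.
	 */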
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	has_page_addr = (void __user *)
		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1, (void __user *)
		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

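	/*
	 * If the chosen free chunk is larger than requested, carve off the
	 * remainder as a new free buffer starting at user_data + size and
	 * return it to the free tree so the split-off space stays usable.
	 */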
	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n",
		      alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = pid;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
		if (alloc->free_async_space < alloc->buffer_size / 10) {
			/*
			 * Start detecting spammers once we have less than 20%
			 * of async space left (which is less than 10% of total
			 * buffer size).
			 */
			buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
		} else {
			alloc->oneway_spam_detected = false;
		}
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 * @pid:                pid to attribute allocation to (used for debugging)
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary), e.g. a 20-byte data buffer with an
 * 8-byte offsets buffer occupies 24 + 8 = 32 bytes on a 64-bit kernel.
 *
 * Return: The allocated buffer or an ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async,
					   int pid)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async, pid);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}

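/*
 * Drop the metadata of a free buffer that is being merged away. The only
 * backing page this may hand back to the LRU is the partial page containing
 * buffer->user_data, and it is released only when neither the previous nor
 * the next buffer still shares that page and the buffer does not start on a
 * page boundary.
 */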
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
		(void __user *)(((uintptr_t)
			buffer->user_data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer);
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_alloc_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->buffer = (void __user *)vma->vm_start;

	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	alloc->buffer = NULL;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}


void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void __user *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK %s\n",
				     __func__, alloc->pid, i, page_addr,
				     on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_get_vma(alloc) != NULL) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = &alloc->pages[i];
			if (!page->page_ptr)
				free++;
			else if (list_empty(&page->lru))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru the item belongs to
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
	__must_hold(lock)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	mm = alloc->vma_vm_mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!mmap_read_trylock(mm))
		goto err_mmap_read_lock_failed;
	vma = binder_alloc_get_vma(alloc);

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);
	}
	mmap_read_unlock(mm);
	mmput_async(mm);

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

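/*
 * The shrinker ties binder page reclaim into the kernel's memory-pressure
 * machinery: count_objects reports how many pages sit on the global LRU and
 * scan_objects walks that list, calling binder_alloc_free_page() to unmap
 * and free them.
 */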
static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible for ensuring that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
binder_alloc_get_page(struct binder_alloc * alloc,struct binder_buffer * buffer,binder_size_t buffer_offset,pgoff_t * pgoffp)1151*4882a593Smuzhiyun static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
1152*4882a593Smuzhiyun struct binder_buffer *buffer,
1153*4882a593Smuzhiyun binder_size_t buffer_offset,
1154*4882a593Smuzhiyun pgoff_t *pgoffp)
1155*4882a593Smuzhiyun {
1156*4882a593Smuzhiyun binder_size_t buffer_space_offset = buffer_offset +
1157*4882a593Smuzhiyun (buffer->user_data - alloc->buffer);
1158*4882a593Smuzhiyun pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
1159*4882a593Smuzhiyun size_t index = buffer_space_offset >> PAGE_SHIFT;
1160*4882a593Smuzhiyun struct binder_lru_page *lru_page;
1161*4882a593Smuzhiyun
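	/*
	 * The buffer space is backed by the alloc->pages[] array: index
	 * selects the backing page and pgoff is the offset within it.
	 */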
	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

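	/* Zero the buffer one backing page at a time. */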
	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		memset(kptr, 0, size);
		kunmap(page);
		bytes -= size;
		buffer_offset += size;
	}
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

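	/*
	 * Copy page by page. Like copy_from_user(), return the number of
	 * bytes that could not be copied: 0 on success, or the remaining
	 * byte count if a fault occurs part-way through.
	 */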
	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap(page);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
					bool to_buffer,
					struct binder_buffer *buffer,
					binder_size_t buffer_offset,
					void *ptr,
					size_t bytes)
{
	/* All copies must be 32-bit aligned and a multiple of 32 bits in size */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

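	/*
	 * Copy in page-sized chunks, mapping each backing page with a
	 * short-lived atomic mapping so highmem pages can be reached
	 * without sleeping.
	 */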
	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *tmpptr;
		void *base_ptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		base_ptr = kmap_atomic(page);
		tmpptr = base_ptr + pgoff;
		if (to_buffer)
			memcpy(tmpptr, ptr, size);
		else
			memcpy(ptr, tmpptr, size);
		/*
		 * kunmap_atomic() takes care of flushing the cache
		 * on architectures with a VIVT cache
		 */
		kunmap_atomic(base_ptr);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
	return 0;
}

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
					   src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
					   dest, bytes);
}
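
/*
 * Example usage (illustrative sketch only; the real callers live in
 * binder.c): fixed-size objects are typically moved in and out of a
 * transaction buffer with the copy helpers above, e.g.
 *
 *	struct flat_binder_object fbo;
 *
 *	if (binder_alloc_copy_from_buffer(alloc, &fbo, buffer,
 *					  object_offset, sizeof(fbo)))
 *		return -EINVAL;
 *
 * where object_offset is a u32-aligned offset supplied by the caller.
 */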