/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Google, Inc.
 */

#ifndef _LINUX_BINDER_ALLOC_H
#define _LINUX_BINDER_ALLOC_H

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>

extern struct list_lru binder_alloc_lru;
struct binder_transaction;

/**
 * struct binder_buffer - buffer used for binder transactions
 * @entry:              entry in alloc->buffers
 * @rb_node:            node for allocated_buffers/free_buffers rb trees
 * @free:               %true if buffer is free
 * @clear_on_free:      %true if buffer must be zeroed after use
 * @allow_user_free:    %true if user is allowed to free buffer
 * @async_transaction:  %true if buffer is in use for an async txn
 * @oneway_spam_suspect: %true if the total async allocation size just
 *                      exceeded the spam detection threshold
 * @debug_id:           unique ID for debugging
 * @transaction:        pointer to associated struct binder_transaction
 * @target_node:        struct binder_node associated with this buffer
 * @data_size:          size of @transaction data
 * @offsets_size:       size of array of offsets
 * @extra_buffers_size: size of space for other objects (like sg lists)
 * @user_data:          user pointer to base of buffer space
 * @pid:                pid to attribute the buffer to (caller)
 *
 * Bookkeeping structure for binder transaction buffers
 */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned clear_on_free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned oneway_spam_suspect:1;
	unsigned debug_id:27;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	void __user *user_data;
	int pid;
};
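
/*
 * A minimal sketch, not part of the driver API: the three size fields
 * describe regions laid out one after another at @user_data. The
 * hypothetical helper below simply sums them, ignoring the pointer-size
 * alignment that binder_alloc.c applies between the regions.
 */
static inline size_t binder_buffer_payload_size(struct binder_buffer *buffer)
{
	/* transaction data, then the offsets array, then extra objects */
	return buffer->data_size + buffer->offsets_size +
		buffer->extra_buffers_size;
}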

/**
 * struct binder_lru_page - page object used for binder shrinker
 * @lru:      entry in binder_alloc_lru
 * @page_ptr: pointer to physical page in mmap'd space
 * @alloc:    binder_alloc for a proc
 */
struct binder_lru_page {
	struct list_head lru;
	struct page *page_ptr;
	struct binder_alloc *alloc;
};
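
/*
 * Illustrative sketch: the shrinker walk passes the embedded @lru node to
 * its callback, and the owning page is recovered with container_of(), as
 * binder_alloc_free_page() does in binder_alloc.c:
 *
 *	struct binder_lru_page *page =
 *		container_of(item, struct binder_lru_page, lru);
 */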

/**
 * struct binder_alloc - per-binder proc state for binder allocator
 * @mutex:              protects binder_alloc fields
 * @vma:                vm_area_struct passed to mmap_handler
 *                      (invariant after mmap)
 * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
 * @buffer:             base of per-proc address space mapped via mmap
 * @buffers:            list of all buffers for this proc
 * @free_buffers:       rb tree of buffers available for allocation
 *                      sorted by size
 * @allocated_buffers:  rb tree of allocated buffers sorted by address
 * @free_async_space:   VA space available for async buffers. This is
 *                      initialized at mmap time to 1/2 the full VA space
 * @pages:              array of binder_lru_page
 * @buffer_size:        size of address space specified via mmap
 * @pid:                pid for associated binder_proc (invariant after init)
 * @pages_high:         high watermark of offset in @pages
 * @oneway_spam_detected: %true if oneway spam detection fired; clear the
 *                      flag once the async buffer has returned to a
 *                      healthy state
 *
 * Bookkeeping structure for per-proc address space management for binder
 * buffers. It is normally initialized during binder_init() and binder_mmap()
 * calls. The address space is used for both user-visible buffers and for
 * struct binder_buffer objects used to track the user buffers
 */
struct binder_alloc {
	struct mutex mutex;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	void __user *buffer;
	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;
	struct binder_lru_page *pages;
	size_t buffer_size;
	uint32_t buffer_free;
	int pid;
	size_t pages_high;
	bool oneway_spam_detected;
};
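
/*
 * Illustrative sketch, assuming the indexing used in binder_alloc.c: each
 * entry of @pages tracks one page of the mapped region, so a page address
 * maps to its bookkeeping entry by its offset from @buffer:
 *
 *	index = (page_addr - alloc->buffer) / PAGE_SIZE;
 *	lru_page = &alloc->pages[index];
 */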

#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
void binder_selftest_alloc(struct binder_alloc *alloc);
#else
static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock, void *cb_arg);
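
/*
 * Illustrative: binder_alloc_free_page() is the isolate callback for the
 * shrinker's LRU walk; a scan pass over binder_alloc_lru is, roughly
 * (where sc is the shrink_control handed to the scan callback):
 *
 *	freed = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
 *			      NULL, sc->nr_to_scan);
 */
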
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async,
						  int pid);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
			     uintptr_t user_ptr);
extern void binder_alloc_free_buf(struct binder_alloc *alloc,
				  struct binder_buffer *buffer);
extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
				     struct vm_area_struct *vma);
extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
extern void binder_alloc_print_allocated(struct seq_file *m,
					 struct binder_alloc *alloc);
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc);
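
/*
 * Typical buffer lifecycle, as a simplified sketch of what binder.c does
 * (locals and error handling here are illustrative):
 *
 *	buffer = binder_alloc_new_buf(alloc, data_size, offsets_size,
 *				      extra_buffers_size, is_async, pid);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	...copy the transaction payload in, hand the buffer to the target...
 *	binder_alloc_free_buf(alloc, buffer);
 */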

/**
 * binder_alloc_get_free_async_space() - get free space available for async
 * @alloc: binder_alloc for this proc
 *
 * Return: the bytes remaining in the address space for async transactions
 */
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
	size_t free_async_space;

	mutex_lock(&alloc->mutex);
	free_async_space = alloc->free_async_space;
	mutex_unlock(&alloc->mutex);
	return free_async_space;
}

unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes);

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes);

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes);
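
/*
 * Illustrative: the copy helpers move data in and out of a buffer one page
 * at a time, so callers never need a contiguous kernel mapping of the
 * buffer. Reading an object back out for translation looks roughly like:
 *
 *	struct flat_binder_object object;
 *
 *	if (binder_alloc_copy_from_buffer(alloc, &object, buffer,
 *					  object_offset, sizeof(object)))
 *		return -EINVAL;
 */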

#endif /* _LINUX_BINDER_ALLOC_H */