xref: /OK3568_Linux_fs/kernel/drivers/android/binder_internal.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun 
3*4882a593Smuzhiyun #ifndef _LINUX_BINDER_INTERNAL_H
4*4882a593Smuzhiyun #define _LINUX_BINDER_INTERNAL_H
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/export.h>
7*4882a593Smuzhiyun #include <linux/fs.h>
8*4882a593Smuzhiyun #include <linux/list.h>
9*4882a593Smuzhiyun #include <linux/miscdevice.h>
10*4882a593Smuzhiyun #include <linux/mutex.h>
11*4882a593Smuzhiyun #include <linux/refcount.h>
12*4882a593Smuzhiyun #include <linux/stddef.h>
13*4882a593Smuzhiyun #include <linux/types.h>
14*4882a593Smuzhiyun #include <linux/uidgid.h>
15*4882a593Smuzhiyun #include <uapi/linux/android/binderfs.h>
16*4882a593Smuzhiyun #include "binder_alloc.h"
17*4882a593Smuzhiyun 
/**
 * struct binder_context - per-device binder context state
 * @binder_context_mgr_node: binder_node of the registered context manager
 *                           (protected by @context_mgr_node_lock)
 * @context_mgr_node_lock:   mutex protecting @binder_context_mgr_node
 * @binder_context_mgr_uid:  uid associated with the context manager
 *                           registration
 * @name:                    name of the binder device this context belongs to
 */
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;
	kuid_t binder_context_mgr_uid;
	const char *name;
};
24*4882a593Smuzhiyun 
/**
 * struct binder_device - information about a binder device node
 * @hlist:          list of binder devices (only used for devices requested via
 *                  CONFIG_ANDROID_BINDER_DEVICES)
 * @miscdev:        information about a binder character device node
 * @context:        binder context information
 * @binderfs_inode: This is the inode of the root dentry of the super block
 *                  belonging to a binderfs mount.
 * @ref:            reference count for this device
 */
struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
	struct inode *binderfs_inode;
	refcount_t ref;
};
41*4882a593Smuzhiyun 
/**
 * struct binderfs_mount_opts - mount options for binderfs
 * @max: maximum number of allocatable binderfs binder devices
 * @stats_mode: enable binder stats in binderfs.
 */
struct binderfs_mount_opts {
	int max;
	int stats_mode;
};
51*4882a593Smuzhiyun 
/**
 * struct binderfs_info - information about a binderfs mount
 * @ipc_ns:         The ipc namespace the binderfs mount belongs to.
 * @control_dentry: This records the dentry of this binderfs mount
 *                  binder-control device.
 * @root_uid:       uid that needs to be used when a new binder device is
 *                  created.
 * @root_gid:       gid that needs to be used when a new binder device is
 *                  created.
 * @mount_opts:     The mount options in use.
 * @device_count:   The current number of allocated binder devices.
 * @proc_log_dir:   Pointer to the directory dentry containing process-specific
 *                  logs.
 */
struct binderfs_info {
	struct ipc_namespace *ipc_ns;
	struct dentry *control_dentry;
	kuid_t root_uid;
	kgid_t root_gid;
	struct binderfs_mount_opts mount_opts;
	int device_count;
	struct dentry *proc_log_dir;
};
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun extern const struct file_operations binder_fops;
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun extern char *binder_devices_param;
79*4882a593Smuzhiyun 
#ifdef CONFIG_ANDROID_BINDERFS
extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
					   const struct file_operations *fops,
					   void *data);
extern void binderfs_remove_file(struct dentry *dentry);
#else
/*
 * No-op stubs used when binderfs support is compiled out, so callers
 * need no #ifdef guards of their own.
 */
static inline bool is_binderfs_device(const struct inode *inode)
{
	return false;
}
static inline struct dentry *binderfs_create_file(struct dentry *dir,
					   const char *name,
					   const struct file_operations *fops,
					   void *data)
{
	return NULL;
}
static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif
100*4882a593Smuzhiyun 
#ifdef CONFIG_ANDROID_BINDERFS
extern int __init init_binderfs(void);
#else
/* Stub: report success when binderfs support is compiled out. */
static inline int __init init_binderfs(void)
{
	return 0;
}
#endif
109*4882a593Smuzhiyun 
/**
 * struct binder_debugfs_entry - describes one binder debugfs file
 * @name: file name of the debugfs entry
 * @mode: permission mode for the file
 * @fops: file operations backing the file
 * @data: private data associated with the entry
 */
struct binder_debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	void *data;
};
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun extern const struct binder_debugfs_entry binder_debugfs_entries[];
118*4882a593Smuzhiyun 
/*
 * Iterate over binder_debugfs_entries[]; the array must be terminated
 * by an entry whose @name is NULL.
 */
#define binder_for_each_debugfs_entry(entry)	\
	for ((entry) = binder_debugfs_entries;	\
	     (entry)->name;			\
	     (entry)++)
123*4882a593Smuzhiyun 
/*
 * Object categories tracked in binder_stats.obj_created/obj_deleted;
 * BINDER_STAT_COUNT is the array size, not a real category.
 */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};
134*4882a593Smuzhiyun 
/**
 * struct binder_stats - binder statistics counters
 * @br:          per-command counters for BR_* commands, indexed by
 *               _IOC_NR() of the command (sized by the highest BR command)
 * @bc:          per-command counters for BC_* commands, indexed by
 *               _IOC_NR() of the command (sized by the highest BC command)
 * @obj_created: object creation counts, indexed by enum binder_stat_types
 * @obj_deleted: object deletion counts, indexed by enum binder_stat_types
 *
 * All counters are atomics, so no lock is needed to update them.
 */
struct binder_stats {
	atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};
141*4882a593Smuzhiyun 
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	/* enum is declared inline so the type values live with the struct */
	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
163*4882a593Smuzhiyun 
/**
 * struct binder_error - error to be reported via a worklist
 * @work: worklist element (type BINDER_WORK_RETURN_ERROR)
 * @cmd:  error command code to deliver
 *        (NOTE(review): presumably a BR_* return command — confirm in binder.c)
 */
struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
168*4882a593Smuzhiyun 
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
271*4882a593Smuzhiyun 
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	/**
	 * @cookie: userspace cookie identifying this death notification
	 */
	binder_uintptr_t cookie;
};
281*4882a593Smuzhiyun 
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
300*4882a593Smuzhiyun 
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
331*4882a593Smuzhiyun 
/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy:           scheduler policy
 * @prio:                   [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};
347*4882a593Smuzhiyun 
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @outstanding_txns:     number of transactions to be transmitted before
 *                        processes in freeze_wait are woken up
 *                        (protected by @inner_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @is_frozen:            process is frozen and unable to service
 *                        binder transactions
 *                        (protected by @inner_lock)
 * @sync_recv:            process received sync transactions since last frozen
 *                        bit 0: received sync transaction after being frozen
 *                        bit 1: new pending sync transaction during freezing
 *                        (protected by @inner_lock)
 * @async_recv:           process received async transactions since last frozen
 *                        (protected by @inner_lock)
 * @freeze_wait:          waitqueue of processes waiting for all outstanding
 *                        transactions to be processed
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 * @oneway_spam_detection_enabled: process enabled oneway spam detection
 *                        or not
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	int outstanding_txns;
	bool is_dead;
	bool is_frozen;
	bool sync_recv;
	bool async_recv;
	wait_queue_head_t freeze_wait;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
	bool oneway_spam_detection_enabled;
};
453*4882a593Smuzhiyun 
/**
 * struct binder_proc_ext - binder process bookkeeping
 * @proc:                 the wrapped binder_proc (kept as the first member
 *                        so the binder_proc layout is preserved)
 * @cred:                 struct cred associated with the `struct file`
 *                        in binder_open()
 *                        (invariant after initialized)
 *
 * Extended binder_proc -- needed to add the "cred" field without
 * changing the KMI for binder_proc.
 */
struct binder_proc_ext {
	struct binder_proc proc;
	const struct cred *cred;
};
468*4882a593Smuzhiyun 
binder_get_cred(struct binder_proc * proc)469*4882a593Smuzhiyun static inline const struct cred *binder_get_cred(struct binder_proc *proc)
470*4882a593Smuzhiyun {
471*4882a593Smuzhiyun 	struct binder_proc_ext *eproc;
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	eproc = container_of(proc, struct binder_proc_ext, proc);
474*4882a593Smuzhiyun 	return eproc->cred;
475*4882a593Smuzhiyun }
476*4882a593Smuzhiyun 
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};
532*4882a593Smuzhiyun 
/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};
549*4882a593Smuzhiyun 
/*
 * struct binder_transaction - bookkeeping for one in-flight transaction
 * between a source thread (@from) and a target proc/thread (@to_proc,
 * @to_thread). @from_parent/@to_parent link transactions into per-thread
 * stacks (see binder_thread.transaction_stack). Locking for the peer
 * pointers is documented on @lock below.
 */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int	code;
	unsigned int	flags;
	struct binder_priority	priority;
	struct binder_priority	saved_priority;
	bool    set_priority_called;
	kuid_t	sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
	ANDROID_VENDOR_DATA(1);
	ANDROID_OEM_DATA_ARRAY(1, 2);
};
580*4882a593Smuzhiyun 
/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun #endif /* _LINUX_BINDER_INTERNAL_H */
602