xref: /OK3568_Linux_fs/kernel/include/uapi/linux/android/binder.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2008 Google, Inc.
 *
 * Based on, but no longer compatible with, the original
 * OpenBinder.org binder driver interface, which is:
 *
 * Copyright (c) 2005 Palmsource, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _UAPI_LINUX_BINDER_H
#define _UAPI_LINUX_BINDER_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define B_PACK_CHARS(c1, c2, c3, c4) \
	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85

enum {
	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
	BINDER_TYPE_FDA		= B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
	BINDER_TYPE_PTR		= B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
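
/*
 * Illustrative note, not part of the UAPI: B_PACK_CHARS() packs four bytes
 * into one 32-bit object type tag, most significant byte first, e.g.
 *
 *	BINDER_TYPE_BINDER == ('s' << 24) | ('b' << 16) | ('*' << 8) | 0x85
 *	                   == 0x73622a85
 */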

/**
 * enum flat_binder_object_shifts: shift values for flat_binder_object_flags
 * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
 *
 */
enum flat_binder_object_shifts {
	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
};

/**
 * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
 */
enum flat_binder_object_flags {
	/**
	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
	 *
	 * These bits can be used to set the minimum scheduler priority
	 * at which transactions into this node should run. Valid values
	 * in these bits depend on the scheduler policy encoded in
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
	 *
	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19]
	 * For SCHED_FIFO/SCHED_RR, the value can run between [1..99]
	 */
	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
	/**
	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
	 */
	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,

	/**
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
	 *
	 * These two bits can be used to set the min scheduling policy at which
	 * transactions on this node should run. These match the UAPI
	 * scheduler policy values, eg:
	 * 00b: SCHED_NORMAL
	 * 01b: SCHED_FIFO
	 * 10b: SCHED_RR
	 * 11b: SCHED_BATCH
	 */
	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,

	/**
	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
	 *
	 * Only when set, calls into this node will inherit a real-time
	 * scheduling policy from the caller (for synchronous transactions).
	 */
	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,

	/**
	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
	 *
	 * Only when set, causes senders to include their security
	 * context
	 */
	FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
};
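
/*
 * Illustrative sketch, not part of the UAPI: a process that wants incoming
 * transactions on one of its nodes to run with at least SCHED_FIFO priority
 * 10, and that accepts file descriptors, might compose the node flags as
 * below (SCHED_FIFO is 01b in the UAPI scheduler policy encoding):
 *
 *	__u32 flags = FLAT_BINDER_FLAG_ACCEPTS_FDS |
 *		      (1U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) |
 *		      (10 & FLAT_BINDER_FLAG_PRIORITY_MASK);
 */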

#ifdef BINDER_IPC_32BIT
typedef __u32 binder_size_t;
typedef __u32 binder_uintptr_t;
#else
typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif

/**
 * struct binder_object_header - header shared by all binder metadata objects.
 * @type:	type of the object
 */
struct binder_object_header {
	__u32        type;
};

/*
 * This is the flattened representation of a Binder object for transfer
 * between processes.  The 'offsets' supplied as part of a binder transaction
 * contains offsets into the data where these structures occur.  The Binder
 * driver takes care of re-writing the structure type and data as it moves
 * between processes.
 */
struct flat_binder_object {
	struct binder_object_header	hdr;
	__u32				flags;

	/* 8 bytes of data. */
	union {
		binder_uintptr_t	binder;	/* local object */
		__u32			handle;	/* remote object */
	};

	/* extra data associated with local object */
	binder_uintptr_t	cookie;
};
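
/*
 * Illustrative sketch, not part of the UAPI: sending a local binder node in
 * a transaction. "local_object_ptr" and "local_cookie" are hypothetical
 * sender-side values; the driver rewrites the object to BINDER_TYPE_HANDLE
 * with a handle valid in the receiving process.
 *
 *	struct flat_binder_object obj;
 *
 *	memset(&obj, 0, sizeof(obj));
 *	obj.hdr.type = BINDER_TYPE_BINDER;
 *	obj.flags    = FLAT_BINDER_FLAG_ACCEPTS_FDS;
 *	obj.binder   = (binder_uintptr_t)local_object_ptr;
 *	obj.cookie   = (binder_uintptr_t)local_cookie;
 */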

/**
 * struct binder_fd_object - describes a file descriptor to be fixed up.
 * @hdr:	common header structure
 * @pad_flags:	padding to remain compatible with old userspace code
 * @pad_binder:	padding to remain compatible with old userspace code
 * @fd:		file descriptor
 * @cookie:	opaque data, used by user-space
 */
struct binder_fd_object {
	struct binder_object_header	hdr;
	__u32				pad_flags;
	union {
		binder_uintptr_t	pad_binder;
		__u32			fd;
	};

	binder_uintptr_t		cookie;
};

/* struct binder_buffer_object - object describing a userspace buffer
 * @hdr:		common header structure
 * @flags:		one or more BINDER_BUFFER_* flags
 * @buffer:		address of the buffer
 * @length:		length of the buffer
 * @parent:		index in offset array pointing to parent buffer
 * @parent_offset:	offset in @parent pointing to this buffer
 *
 * A binder_buffer object represents an object that the
 * binder kernel driver can copy verbatim to the target
 * address space. A buffer itself may be pointed to from
 * within another buffer, meaning that the pointer inside
 * that other buffer needs to be fixed up as well. This
 * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
 * flag in @flags, by setting @parent to the index
 * in the offset array pointing to the parent binder_buffer_object,
 * and by setting @parent_offset to the offset in the parent buffer
 * at which the pointer to this buffer is located.
 */
struct binder_buffer_object {
	struct binder_object_header	hdr;
	__u32				flags;
	binder_uintptr_t		buffer;
	binder_size_t			length;
	binder_size_t			parent;
	binder_size_t			parent_offset;
};

enum {
	BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
};
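
/*
 * Illustrative sketch, not part of the UAPI, following the description
 * above: a child buffer whose address is stored at byte offset 16 inside a
 * parent buffer, where the parent binder_buffer_object is assumed to sit at
 * index 0 of the transaction's offsets array. "child_data" and "child_len"
 * are hypothetical caller values.
 *
 *	struct binder_buffer_object child;
 *
 *	memset(&child, 0, sizeof(child));
 *	child.hdr.type      = BINDER_TYPE_PTR;
 *	child.flags         = BINDER_BUFFER_FLAG_HAS_PARENT;
 *	child.buffer        = (binder_uintptr_t)child_data;
 *	child.length        = child_len;
 *	child.parent        = 0;
 *	child.parent_offset = 16;
 */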

/* struct binder_fd_array_object - object describing an array of fds in a buffer
 * @hdr:		common header structure
 * @pad:		padding to ensure correct alignment
 * @num_fds:		number of file descriptors in the buffer
 * @parent:		index in offset array to buffer holding the fd array
 * @parent_offset:	start offset of fd array in the buffer
 *
 * A binder_fd_array object represents an array of file
 * descriptors embedded in a binder_buffer_object. It is
 * different from a regular binder_buffer_object because it
 * describes a list of file descriptors to fix up, not an opaque
 * blob of memory, and hence the kernel needs to treat it differently.
 *
 * An example of how this would be used is with Android's
 * native_handle_t object, which is a struct with a list of integers
 * and a list of file descriptors. The native_handle_t struct itself
 * will be represented by a struct binder_buffer_object, whereas the
 * embedded list of file descriptors is represented by a
 * struct binder_fd_array_object with that binder_buffer_object as
 * a parent.
 */
struct binder_fd_array_object {
	struct binder_object_header	hdr;
	__u32				pad;
	binder_size_t			num_fds;
	binder_size_t			parent;
	binder_size_t			parent_offset;
};
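
/*
 * Illustrative sketch, not part of the UAPI: describing two file descriptors
 * embedded at byte offset 8 of the buffer object found at index 1 of the
 * offsets array (the index and offset are made up, mirroring the
 * native_handle_t example above).
 *
 *	struct binder_fd_array_object fda;
 *
 *	memset(&fda, 0, sizeof(fda));
 *	fda.hdr.type      = BINDER_TYPE_FDA;
 *	fda.num_fds       = 2;
 *	fda.parent        = 1;
 *	fda.parent_offset = 8;
 */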

/*
 * On 64-bit platforms where user code may run in 32-bits the driver must
 * translate the buffer (and local binder) addresses appropriately.
 */

struct binder_write_read {
	binder_size_t		write_size;	/* bytes to write */
	binder_size_t		write_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	write_buffer;
	binder_size_t		read_size;	/* bytes to read */
	binder_size_t		read_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	read_buffer;
};
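
/*
 * Illustrative sketch, not part of the UAPI: submitting a command buffer and
 * draining returned commands in a single BINDER_WRITE_READ call. "fd" is an
 * open binder descriptor, and "wbuf"/"wlen" and "rbuf"/"rlen" are
 * hypothetical caller-owned buffers. On return, write_consumed and
 * read_consumed report how many bytes the driver consumed and produced.
 *
 *	struct binder_write_read bwr;
 *	int ret;
 *
 *	memset(&bwr, 0, sizeof(bwr));
 *	bwr.write_buffer = (binder_uintptr_t)wbuf;
 *	bwr.write_size   = wlen;
 *	bwr.read_buffer  = (binder_uintptr_t)rbuf;
 *	bwr.read_size    = rlen;
 *
 *	ret = ioctl(fd, BINDER_WRITE_READ, &bwr);
 */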

/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
	/* driver protocol version -- increment with incompatible change */
	__s32       protocol_version;
};

/* This is the current protocol version. */
#ifdef BINDER_IPC_32BIT
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#else
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif

/*
 * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
 * Set ptr to NULL for the first call to get the info for the first node, and
 * then repeat the call passing the previously returned value to get the next
 * nodes.  ptr will be 0 when there are no more nodes.
 */
struct binder_node_debug_info {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	__u32            has_strong_ref;
	__u32            has_weak_ref;
};
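
/*
 * Illustrative sketch, not part of the UAPI: walking the calling process's
 * nodes as described above. "fd" is an open binder descriptor and
 * handle_node() is a hypothetical caller-side callback.
 *
 *	struct binder_node_debug_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	for (;;) {
 *		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		if (info.ptr == 0)
 *			break;
 *		handle_node(&info);
 *	}
 */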

struct binder_node_info_for_ref {
	__u32            handle;
	__u32            strong_count;
	__u32            weak_count;
	__u32            reserved1;
	__u32            reserved2;
	__u32            reserved3;
};

struct binder_freeze_info {
	__u32            pid;
	__u32            enable;
	__u32            timeout_ms;
};

struct binder_frozen_status_info {
	__u32            pid;

	/* process received sync transactions since last frozen
	 * bit 0: received sync transaction after being frozen
	 * bit 1: new pending sync transaction during freezing
	 */
	__u32            sync_recv;

	/* process received async transactions since last frozen */
	__u32            async_recv;
};
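
/*
 * Illustrative sketch, not part of the UAPI: freezing a target process's
 * binder interface and later asking whether it saw transactions while
 * frozen. "fd" is an open binder descriptor, "target_pid" is a hypothetical
 * pid chosen by the caller, and handle_missed_sync_txn() stands in for the
 * caller's recovery path.
 *
 *	struct binder_freeze_info freeze = {
 *		.pid        = target_pid,
 *		.enable     = 1,
 *		.timeout_ms = 100,
 *	};
 *	struct binder_frozen_status_info status = { .pid = target_pid };
 *
 *	ioctl(fd, BINDER_FREEZE, &freeze);
 *	ioctl(fd, BINDER_GET_FROZEN_INFO, &status);
 *	if (status.sync_recv & 1)
 *		handle_missed_sync_txn();
 */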

#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF	_IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT	_IOW('b', 13, struct flat_binder_object)
#define BINDER_FREEZE			_IOW('b', 14, struct binder_freeze_info)
#define BINDER_GET_FROZEN_INFO		_IOWR('b', 15, struct binder_frozen_status_info)
#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION	_IOW('b', 16, __u32)
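
/*
 * Illustrative sketch, not part of the UAPI: typical device setup. A client
 * opens the binder device node (commonly "/dev/binder"), checks that the
 * driver speaks the same protocol version, and tells it how many extra
 * looper threads it is willing to spawn.
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *	__u32 max_threads = 15;
 *
 *	if (fd < 0)
 *		return -1;
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */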

/*
 * NOTE: Two special error codes you should check for when calling
 * in to the driver are:
 *
 * EINTR -- The operation has been interrupted.  This should be
 * handled by retrying the ioctl() until a different error code
 * is returned.
 *
 * ECONNREFUSED -- The driver is no longer accepting operations
 * from your process.  That is, the process is being destroyed.
 * You should handle this by exiting from your process.  Note
 * that once this error code is returned, all further calls to
 * the driver from any thread will return this same code.
 */
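
/*
 * Illustrative sketch, not part of the UAPI: the retry pattern described in
 * the note above, wrapping an ioctl so that EINTR is retried and
 * ECONNREFUSED is treated as fatal. exit_process_cleanly() stands in for
 * whatever shutdown path the caller uses.
 *
 *	int ret;
 *
 *	do {
 *		ret = ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	} while (ret < 0 && errno == EINTR);
 *
 *	if (ret < 0 && errno == ECONNREFUSED)
 *		exit_process_cleanly();
 */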

enum transaction_flags {
	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
	TF_CLEAR_BUF	= 0x20,	/* clear buffer on txn complete */
	TF_UPDATE_TXN	= 0x40,	/* update the outdated pending async txn */
};

struct binder_transaction_data {
	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
	 * identifying the target and contents of the transaction.
	 */
	union {
		/* target descriptor of command transaction */
		__u32	handle;
		/* target descriptor of return transaction */
		binder_uintptr_t ptr;
	} target;
	binder_uintptr_t	cookie;	/* target object cookie */
	__u32		code;		/* transaction command */

	/* General information about the transaction. */
	__u32	        flags;
	pid_t		sender_pid;
	uid_t		sender_euid;
	binder_size_t	data_size;	/* number of bytes of data */
	binder_size_t	offsets_size;	/* number of bytes of offsets */

	/* If this transaction is inline, the data immediately
	 * follows here; otherwise, it ends with a pointer to
	 * the data buffer.
	 */
	union {
		struct {
			/* transaction data */
			binder_uintptr_t	buffer;
			/* offsets from buffer to flat_binder_object structs */
			binder_uintptr_t	offsets;
		} ptr;
		__u8	buf[8];
	} data;
};
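
/*
 * Illustrative sketch, not part of the UAPI: filling a transaction for a
 * BC_TRANSACTION command aimed at handle 0 (conventionally the context
 * manager). "data", "data_len", "offsets" and "offsets_count" are
 * hypothetical caller-owned values; the code value is application-defined.
 *
 *	struct binder_transaction_data tr;
 *
 *	memset(&tr, 0, sizeof(tr));
 *	tr.target.handle    = 0;
 *	tr.code             = 1;
 *	tr.flags            = TF_ACCEPT_FDS;
 *	tr.data_size        = data_len;
 *	tr.offsets_size     = offsets_count * sizeof(binder_size_t);
 *	tr.data.ptr.buffer  = (binder_uintptr_t)data;
 *	tr.data.ptr.offsets = (binder_uintptr_t)offsets;
 */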

struct binder_transaction_data_secctx {
	struct binder_transaction_data transaction_data;
	binder_uintptr_t secctx;
};

struct binder_transaction_data_sg {
	struct binder_transaction_data transaction_data;
	binder_size_t buffers_size;
};

struct binder_ptr_cookie {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

struct binder_handle_cookie {
	__u32 handle;
	binder_uintptr_t cookie;
} __packed;

struct binder_pri_desc {
	__s32 priority;
	__u32 desc;
};

struct binder_pri_ptr_cookie {
	__s32 priority;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

enum binder_driver_return_protocol {
	BR_ERROR = _IOR('r', 0, __s32),
	/*
	 * int: error code
	 */

	BR_OK = _IO('r', 1),
	/* No parameters! */

	BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
				      struct binder_transaction_data_secctx),
	/*
	 * binder_transaction_data_secctx: the received command.
	 */
	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the received command.
	 */

	BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
	/*
	 * not currently supported
	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
	 * Else the remote object has acquired a primary reference.
	 */

	BR_DEAD_REPLY = _IO('r', 5),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
	 */

	BR_TRANSACTION_COMPLETE = _IO('r', 6),
	/*
	 * No parameters... always refers to the last transaction requested
	 * (including replies).  Note that this will be sent even for
	 * asynchronous transactions.
	 */

	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
	/*
	 * void *:	ptr to binder
	 * void *: cookie for binder
	 */

	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
	/*
	 * not currently supported
	 * int:	priority
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_NOOP = _IO('r', 12),
	/*
	 * No parameters.  Do nothing and examine the next command.  It exists
	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
	 */

	BR_SPAWN_LOOPER = _IO('r', 13),
	/*
	 * No parameters.  The driver has determined that a process has no
	 * threads waiting to service incoming transactions.  When a process
	 * receives this command, it must spawn a new service thread and
	 * register it via bcENTER_LOOPER.
	 */

	BR_FINISHED = _IO('r', 14),
	/*
	 * not currently supported
	 * stop threadpool thread
	 */

	BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
	/*
	 * void *: cookie
	 */
	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BR_FAILED_REPLY = _IO('r', 17),
	/*
	 * The last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
	 */

	BR_FROZEN_REPLY = _IO('r', 18),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is frozen.  No parameters.
	 */

	BR_ONEWAY_SPAM_SUSPECT = _IO('r', 19),
	/*
	 * Current process sent too many oneway calls to target, and the last
	 * asynchronous transaction makes the allocated async buffer size exceed
	 * detection threshold.  No parameters.
	 */
};
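
/*
 * Illustrative sketch, not part of the UAPI: draining the read buffer filled
 * in by BINDER_WRITE_READ. Each entry is a 32-bit BR_* command followed by
 * its payload; the payload size can be recovered with _IOC_SIZE() from
 * <linux/ioctl.h>. "rbuf" and "bwr" are the hypothetical buffers from the
 * earlier write/read sketch, and dispatch_transaction() stands in for the
 * caller's handler.
 *
 *	char *ptr = rbuf;
 *	char *end = rbuf + bwr.read_consumed;
 *
 *	while (ptr < end) {
 *		__u32 cmd = *(__u32 *)ptr;
 *
 *		ptr += sizeof(__u32);
 *		switch (cmd) {
 *		case BR_NOOP:
 *		case BR_TRANSACTION_COMPLETE:
 *			break;
 *		case BR_TRANSACTION: {
 *			struct binder_transaction_data tr;
 *
 *			memcpy(&tr, ptr, sizeof(tr));
 *			dispatch_transaction(&tr);
 *			break;
 *		}
 *		default:
 *			break;
 *		}
 *		ptr += _IOC_SIZE(cmd);
 *	}
 */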

enum binder_driver_command_protocol {
	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the sent command.
	 */

	BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
	/*
	 * not currently supported
	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
	 * Else you have acquired a primary reference on the object.
	 */

	BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
	/*
	 * void *: ptr to transaction data received on a read
	 */

	BC_INCREFS = _IOW('c', 4, __u32),
	BC_ACQUIRE = _IOW('c', 5, __u32),
	BC_RELEASE = _IOW('c', 6, __u32),
	BC_DECREFS = _IOW('c', 7, __u32),
	/*
	 * int:	descriptor
	 */

	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
	/*
	 * not currently supported
	 * int: priority
	 * int: descriptor
	 */

	BC_REGISTER_LOOPER = _IO('c', 11),
	/*
	 * No parameters.
	 * Register a spawned looper thread with the device.
	 */

	BC_ENTER_LOOPER = _IO('c', 12),
	BC_EXIT_LOOPER = _IO('c', 13),
	/*
	 * No parameters.
	 * These two commands are sent as an application-level thread
	 * enters and exits the binder loop, respectively.  They are
	 * used so the binder can have an accurate count of the number
	 * of looping threads it has available.
	 */

	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
						struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
						struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
	BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
	/*
	 * binder_transaction_data_sg: the sent command.
	 */
};
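
/*
 * Illustrative sketch, not part of the UAPI: requesting a death notification
 * for a remote handle. The command and its binder_handle_cookie payload are
 * packed back to back in the BINDER_WRITE_READ write buffer; when the remote
 * node dies the driver returns BR_DEAD_BINDER carrying the same cookie,
 * which is acknowledged with BC_DEAD_BINDER_DONE. "handle" and
 * "death_record" are hypothetical caller values.
 *
 *	struct {
 *		__u32				cmd;
 *		struct binder_handle_cookie	payload;
 *	} __attribute__((packed)) req;
 *
 *	req.cmd            = BC_REQUEST_DEATH_NOTIFICATION;
 *	req.payload.handle = handle;
 *	req.payload.cookie = (binder_uintptr_t)death_record;
 */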

#endif /* _UAPI_LINUX_BINDER_H */