#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H

#include <linux/types.h>

struct inode;
struct mm_struct;
struct task_struct;
union ktime;

/* Second argument to futex syscall */

#define FUTEX_WAIT		0
#define FUTEX_WAKE		1
#define FUTEX_FD		2
#define FUTEX_REQUEUE		3
#define FUTEX_CMP_REQUEUE	4
#define FUTEX_WAKE_OP		5
#define FUTEX_LOCK_PI		6
#define FUTEX_UNLOCK_PI		7
#define FUTEX_TRYLOCK_PI	8
#define FUTEX_WAIT_BITSET	9
#define FUTEX_WAKE_BITSET	10

/*
 * Bit 7 of the op selects process-private futexes (no cross-process
 * hashing needed); FUTEX_CMD_MASK strips it to recover the base op.
 */
#define FUTEX_PRIVATE_FLAG	128
#define FUTEX_CMD_MASK		(~FUTEX_PRIVATE_FLAG)

#define FUTEX_WAIT_PRIVATE	(FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_PRIVATE	(FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
#define FUTEX_REQUEUE_PRIVATE	(FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG)
#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_OP_PRIVATE	(FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
#define FUTEX_LOCK_PI_PRIVATE	(FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_UNLOCK_PI_PRIVATE	(FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
/*
 * FIX: these previously OR'ed the undefined names FUTEX_WAIT_BITS /
 * FUTEX_WAKE_BITS, which broke any translation unit using them. The
 * intended base ops are FUTEX_WAIT_BITSET / FUTEX_WAKE_BITSET.
 */
#define FUTEX_WAIT_BITSET_PRIVATE	(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_BITSET_PRIVATE	(FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG)

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 */

/*
 * Per-lock list entry - embedded in user-space locks, somewhere close
 * to the futex field. (Note: user-space uses a double-linked list to
 * achieve O(1) list add and remove, but the kernel only needs to know
 * about the forward link)
 *
 * NOTE: this structure is part of the syscall ABI, and must not be
 * changed.
 */
struct robust_list {
	struct robust_list *next;
};

/*
 * Per-thread list head:
 *
 * NOTE: this structure is part of the syscall ABI, and must only be
 * changed if the change is first communicated with the glibc folks.
 * (When an incompatible change is done, we'll increase the structure
 * size, which glibc will detect)
 */
struct robust_list_head {
	/*
	 * The head of the list. Points back to itself if empty:
	 */
	struct robust_list list;

	/*
	 * This relative offset is set by user-space, it gives the kernel
	 * the relative position of the futex field to examine. This way
	 * we keep userspace flexible, to freely shape its data-structure,
	 * without hardcoding any particular offset into the kernel:
	 */
	long futex_offset;

	/*
	 * The death of the thread may race with userspace setting
	 * up a lock's links. So to handle this race, userspace first
	 * sets this field to the address of the to-be-taken lock,
	 * then does the lock acquire, and then adds itself to the
	 * list, and then clears this field. Hence the kernel will
	 * always have full knowledge of all locks that the thread
	 * _might_ have taken. We check the owner TID in any case,
	 * so only truly owned locks will be handled.
	 */
	struct robust_list *list_op_pending;
};

/*
 * Are there any waiters for this robust futex:
 */
#define FUTEX_WAITERS		0x80000000

/*
 * The kernel signals via this bit that a thread holding a futex
 * has exited without unlocking the futex. The kernel also does
 * a FUTEX_WAKE on such futexes, after setting the bit, to wake
 * up any possible waiters:
 */
#define FUTEX_OWNER_DIED	0x40000000

/*
 * The rest of the robust-futex field is for the TID:
 */
#define FUTEX_TID_MASK		0x3fffffff

/*
 * This limit protects against a deliberately circular list.
 * (Not worth introducing an rlimit for it)
 */
#define ROBUST_LIST_LIMIT	2048

/*
 * bitset with all bits set for the FUTEX_xxx_BITSET OPs to request a
 * match of any bit.
 */
#define FUTEX_BITSET_MATCH_ANY	0xffffffff


#define FUTEX_OP_SET		0	/* *(int *)UADDR2 = OPARG; */
#define FUTEX_OP_ADD		1	/* *(int *)UADDR2 += OPARG; */
#define FUTEX_OP_OR		2	/* *(int *)UADDR2 |= OPARG; */
#define FUTEX_OP_ANDN		3	/* *(int *)UADDR2 &= ~OPARG; */
#define FUTEX_OP_XOR		4	/* *(int *)UADDR2 ^= OPARG; */

#define FUTEX_OP_OPARG_SHIFT	8	/* Use (1 << OPARG) instead of OPARG. */

#define FUTEX_OP_CMP_EQ		0	/* if (oldval == CMPARG) wake */
#define FUTEX_OP_CMP_NE		1	/* if (oldval != CMPARG) wake */
#define FUTEX_OP_CMP_LT		2	/* if (oldval < CMPARG) wake */
#define FUTEX_OP_CMP_LE		3	/* if (oldval <= CMPARG) wake */
#define FUTEX_OP_CMP_GT		4	/* if (oldval > CMPARG) wake */
#define FUTEX_OP_CMP_GE		5	/* if (oldval >= CMPARG) wake */

/* FUTEX_WAKE_OP will perform atomically
   int oldval = *(int *)UADDR2;
   *(int *)UADDR2 = oldval OP OPARG;
   if (oldval CMP CMPARG)
     wake UADDR2;  */

/*
 * Pack op/cmp selectors and their 12-bit arguments into the single
 * 32-bit val3 word consumed by FUTEX_WAKE_OP. Arguments are now
 * parenthesized so expressions like FUTEX_OP(a | b, ...) expand with
 * the intended precedence.
 */
#define FUTEX_OP(op, oparg, cmp, cmparg) \
  ((((op) & 0xf) << 28) | (((cmp) & 0xf) << 24)		\
   | (((oparg) & 0xfff) << 12) | ((cmparg) & 0xfff))

#endif