/* libc-internal interface for mutex locks.  NPTL version.
   Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef _BITS_LIBC_LOCK_H
#define _BITS_LIBC_LOCK_H 1

#include <pthread.h>
#define __need_NULL
#include <stddef.h>

/* Fortunately Linux now has a means to do locking which is realtime
   safe without the aid of the thread library.  We also need no fancy
   options like error-checking mutexes etc.  We only need simple
   locks, maybe recursive.  This can be easily and cheaply implemented
   using futexes.  We will use them everywhere except in ld.so since
   ld.so might be used on old kernels with a different libc.so.  */
#ifdef _LIBC
# include <lowlevellock.h>
# include <tls.h>
# include <pthread-functions.h>
#endif

/* Mutex type.  */
#if defined _LIBC || defined _IO_MTSAFE_IO
# if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
typedef pthread_mutex_t __libc_lock_t;
typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
# else
typedef int __libc_lock_t;
typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
# endif
typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
# ifdef __USE_UNIX98
typedef pthread_rwlock_t __libc_rwlock_t;
# else
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
# endif
#else
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
#endif

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;

/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of libc.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
#define __libc_lock_define_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
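
/* A minimal usage sketch (hypothetical names):

     __libc_lock_define (static, foo_lock)
     __libc_rwlock_define (extern, foo_rwlock)

   expand, inside libc, to

     static __libc_lock_t foo_lock;
     extern __libc_rwlock_t foo_rwlock;

   foo_lock still needs __libc_lock_init before first use, unless it is
   defined with __libc_lock_define_initialized below.  */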

/* Define an initialized lock variable NAME with storage class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  (Except on PA-RISC and other odd architectures, where
   initialized locks must be set to one due to the lack of normal
   atomic operations.) */

#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
# endif
#else
# if __LT_SPINLOCK_INIT == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
# endif
#endif

#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;

/* Define an initialized recursive lock variable NAME with storage
   class CLASS.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
# else
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# endif
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  { LLL_LOCK_INITIALIZER, 0, NULL }
#else
# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#endif

#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

#define __rtld_lock_initialize(NAME) \
  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)

/* If we check for a weakly referenced symbol and then perform a
   normal jump to it, the code generated for some platforms in case of
   PIC is unnecessarily slow.  What would happen is that the function
   is first referenced as data and then it is called indirectly
   through the PLT.  We can make this a direct jump.  */
#ifdef __PIC__
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
                    _fn != NULL ? (*_fn) ARGS : ELSE; }))
#else
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
#endif
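
/* For example, assuming __pthread_mutex_lock is only weakly referenced,

     __libc_maybe_call (__pthread_mutex_lock, (&m), 0)

   calls the function through a local pointer when libpthread provides a
   definition and otherwise evaluates to the ELSE value 0, so programs
   not linked against libpthread skip the call entirely.  (Hypothetical
   variable m; sketch only.)  */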

/* Call thread functions through the function pointer table.  */
#if defined SHARED && !defined NOT_IN_libc
# define PTFAVAIL(NAME) __libc_pthread_functions_init
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  PTHFCT_CALL (ptr_##FUNC, ARGS)
#else
# define PTFAVAIL(NAME) (NAME != NULL)
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  __libc_maybe_call (FUNC, ARGS, ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  FUNC ARGS
#endif
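
/* Dispatch sketch, assuming a shared libc: once libpthread has filled
   in __libc_pthread_functions,

     __libc_ptf_call (__pthread_setspecific, (key, value), 0)

   goes through the matching ptr_##FUNC slot of the table; before that
   it simply yields the ELSE value 0.  Outside shared libc it falls
   back to __libc_maybe_call above.  (Hypothetical key/value; sketch
   only.)  */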


/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
#else
# define __libc_lock_init(NAME) \
  __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#if defined SHARED && !defined NOT_IN_libc
/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
   inefficient.  */
# define __libc_rwlock_init(NAME) \
  (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
#else
# define __libc_rwlock_init(NAME) \
  __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
#endif
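
/* Initialization sketch (hypothetical names):

     __libc_lock_define (static, foo_lock)
     __libc_rwlock_define (static, foo_rwlock)

     static void
     foo_init (void)
     {
       __libc_lock_init (foo_lock);
       __libc_rwlock_init (foo_rwlock);
     }

   Inside libc this stores LLL_LOCK_INITIALIZER into the lock and
   zero-fills the rwlock; elsewhere it calls the pthread initializers
   when they are available.  */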

/* Same as last but this time we initialize a recursive mutex.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init_recursive(NAME) \
  ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
#else
# define __libc_lock_init_recursive(NAME) \
  do { \
    if (__pthread_mutex_init != NULL) \
      { \
        pthread_mutexattr_t __attr; \
        __pthread_mutexattr_init (&__attr); \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
        __pthread_mutex_init (&(NAME).mutex, &__attr); \
        __pthread_mutexattr_destroy (&__attr); \
      } \
  } while (0)
#endif

#define __rtld_lock_init_recursive(NAME) \
  do { \
    if (__pthread_mutex_init != NULL) \
      { \
        pthread_mutexattr_t __attr; \
        __pthread_mutexattr_init (&__attr); \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
        __pthread_mutex_init (&(NAME).mutex, &__attr); \
        __pthread_mutexattr_destroy (&__attr); \
      } \
  } while (0)

/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini(NAME) ((void) 0)
#else
# define __libc_lock_fini(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#if defined SHARED && !defined NOT_IN_libc
# define __libc_rwlock_fini(NAME) ((void) 0)
#else
# define __libc_rwlock_fini(NAME) \
  __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
#endif

/* Finalize recursive named lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini_recursive(NAME) ((void) 0)
#else
# define __libc_lock_fini_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif

/* Lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock(NAME) \
  ({ lll_lock (NAME, LLL_PRIVATE); 0; })
#else
# define __libc_lock_lock(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
#define __libc_rwlock_rdlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
#define __libc_rwlock_wrlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)

/* Lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock_recursive(NAME) \
  do { \
    void *self = THREAD_SELF; \
    if ((NAME).owner != self) \
      { \
        lll_lock ((NAME).lock, LLL_PRIVATE); \
        (NAME).owner = self; \
      } \
    ++(NAME).cnt; \
  } while (0)
#else
# define __libc_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
#endif

/* Try to lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock(NAME) \
  lll_trylock (NAME)
#else
# define __libc_lock_trylock(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
#define __libc_rwlock_tryrdlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
#define __libc_rwlock_trywrlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)
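
/* Trylock sketch (hypothetical names): a caller that must not block can
   probe the lock and take a slow path when it is busy,

     if (__libc_lock_trylock (foo_lock) == 0)
       {
         foo_fast_path ();
         __libc_lock_unlock (foo_lock);
       }
     else
       foo_deferred ();

   A nonzero return means the lock was held; inside libc this is
   lll_trylock, which never enters the kernel.  */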

/* Try to lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock_recursive(NAME) \
  ({ \
    int result = 0; \
    void *self = THREAD_SELF; \
    if ((NAME).owner != self) \
      { \
        if (lll_trylock ((NAME).lock) == 0) \
          { \
            (NAME).owner = self; \
            (NAME).cnt = 1; \
          } \
        else \
          result = EBUSY; \
      } \
    else \
      ++(NAME).cnt; \
    result; \
  })
#else
# define __libc_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif

#define __rtld_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)

/* Unlock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
  lll_unlock (NAME, LLL_PRIVATE)
#else
# define __libc_lock_unlock(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
#define __libc_rwlock_unlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
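
/* Locking sketch (hypothetical names): the usual pattern pairs the lock
   and unlock macros around the critical region,

     __libc_lock_lock (foo_lock);
     foo_shared_state = value;
     __libc_lock_unlock (foo_lock);

   and likewise __libc_rwlock_rdlock or __libc_rwlock_wrlock with
   __libc_rwlock_unlock for reader/writer protected data.  */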

/* Unlock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* We do no error checking here.  */
# define __libc_lock_unlock_recursive(NAME) \
  do { \
    if (--(NAME).cnt == 0) \
      { \
        (NAME).owner = NULL; \
        lll_unlock ((NAME).lock, LLL_PRIVATE); \
      } \
  } while (0)
#else
# define __libc_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
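
/* Recursive locking sketch (hypothetical names): the owning thread may
   re-acquire the lock, and every acquisition needs a matching release,

     __libc_lock_lock_recursive (foo_rec_lock);
     __libc_lock_lock_recursive (foo_rec_lock);
     __libc_lock_unlock_recursive (foo_rec_lock);
     __libc_lock_unlock_recursive (foo_rec_lock);

   The inline libc variant above tracks the owner via THREAD_SELF and a
   count, and releases the underlying futex lock only when the count
   drops back to zero.  */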

#if defined _LIBC && defined SHARED
# define __rtld_lock_default_lock_recursive(lock) \
  ++((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_default_unlock_recursive(lock) \
  --((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_lock_recursive(NAME) \
  GL(dl_rtld_lock_recursive) (&(NAME).mutex)

# define __rtld_lock_unlock_recursive(NAME) \
  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
#else
# define __rtld_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)

# define __rtld_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
#endif

/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call handler iff the first call.  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do { \
    if (PTFAVAIL (__pthread_once)) \
      __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL), \
                                               INIT_FUNCTION)); \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
      INIT_FUNCTION (); \
      (ONCE_CONTROL) |= 2; \
    } \
  } while (0)
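
/* One-time initialization sketch (hypothetical names):

     static void *foo_table;
     static void foo_setup (void) { foo_table = foo_build_table (); }
     __libc_once_define (static, foo_once);

     void *
     foo_get (void)
     {
       __libc_once (foo_once, foo_setup);
       return foo_table;
     }

   When __pthread_once is available it is used; otherwise the control
   variable is checked and marked directly, which is safe in a
   single-threaded process.  */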


/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
                                   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
                                  int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
                                         void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
                                          int execute);

/* Start critical region with cleanup.  */
#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
  { struct _pthread_cleanup_buffer _buffer; \
    int _avail; \
    if (DOIT) { \
      _avail = PTFAVAIL (_pthread_cleanup_push_defer); \
      if (_avail) { \
        __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT, \
                                                              ARG)); \
      } else { \
        _buffer.__routine = (FCT); \
        _buffer.__arg = (ARG); \
      } \
    } else { \
      _avail = 0; \
    }

/* End critical region with cleanup.  */
#define __libc_cleanup_region_end(DOIT) \
    if (_avail) { \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT) \
      _buffer.__routine (_buffer.__arg); \
  }

/* Sometimes we have to exit the block in the middle.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) { \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT) \
      _buffer.__routine (_buffer.__arg)
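
/* Cleanup-region sketch (hypothetical names; foo_unlock_fn takes a
   void * argument): a blocking operation brackets its lock with a
   cleanup region so the lock is released if the thread is cancelled
   inside it,

     __libc_cleanup_region_start (1, foo_unlock_fn, &foo);
     foo_lock_fn (&foo);
     foo_do_work (&foo);
     foo_unlock_fn (&foo);
     __libc_cleanup_region_end (0);

   The handler runs only on cancellation; on the normal path the code
   unlocks itself and passes 0 to region_end so the handler is
   skipped.  */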


/* Normal cleanup handling, based on C cleanup attribute.  */
__extern_inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
  if (f->__do_it)
    f->__cancel_routine (f->__cancel_arg);
}

#define __libc_cleanup_push(fct, arg) \
  do { \
    struct __pthread_cleanup_frame __clframe \
      __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \
      = { .__cancel_routine = (fct), .__cancel_arg = (arg), \
          .__do_it = 1 };

#define __libc_cleanup_pop(execute) \
    __clframe.__do_it = (execute); \
  } while (0)
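
/* The attribute-based variant is used the same way (hypothetical
   names):

     __libc_cleanup_push (foo_release, &foo_resource);
     foo_use (&foo_resource);
     __libc_cleanup_pop (0);

   push opens a block containing a __cleanup__-attributed frame and pop
   closes it; the argument to pop selects whether the routine also runs
   on normal exit (here 0: it runs only if the block is left early,
   e.g. by cancellation while __do_it is still set).  */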


/* Create thread-specific key.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  __libc_ptf_call (__pthread_getspecific, (KEY), NULL)

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
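
/* Thread-specific data sketch (hypothetical names and FOO_BUFSIZE):

     static __libc_key_t foo_key;
     static void foo_free (void *p) { free (p); }

     static void *
     foo_get_buffer (void)
     {
       void *buf = __libc_getspecific (foo_key);
       if (buf == NULL)
         {
           buf = malloc (FOO_BUFSIZE);
           __libc_setspecific (foo_key, buf);
         }
       return buf;
     }

   with __libc_key_create (&foo_key, foo_free) run once beforehand,
   e.g. via __libc_once.  Without libpthread the key calls simply yield
   the ELSE values above (1, NULL and 0 respectively).  */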


/* Register handlers to execute before and after `fork'.  Note that the
   last parameter is NULL.  The handlers registered by the libc are
   never removed so this is OK.  */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
  __register_atfork (PREPARE, PARENT, CHILD, NULL)
extern int __register_atfork (void (*__prepare) (void),
                              void (*__parent) (void),
                              void (*__child) (void),
                              void *__dso_handle);
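
/* Fork-handler sketch (hypothetical names): a subsystem protecting its
   state across fork registers its handlers once,

     __libc_atfork (foo_prepare, foo_parent, foo_child);

   where foo_prepare typically takes the subsystem's locks and
   foo_parent/foo_child release or reinitialize them in the parent and
   child.  The NULL __dso_handle means the handlers are never
   unregistered, which is fine for libc-internal users.  */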

/* Functions that are used by this file and are internal to the GNU C
   library.  */

extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
                                 __const pthread_mutexattr_t *__mutex_attr);

extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);

extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);

extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
                                        int __kind);

#ifdef __USE_UNIX98
extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
                                  __const pthread_rwlockattr_t *__attr);

extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
#endif

extern int __pthread_key_create (pthread_key_t *__key,
                                 void (*__destr_function) (void *));

extern int __pthread_setspecific (pthread_key_t __key,
                                  __const void *__pointer);

extern void *__pthread_getspecific (pthread_key_t __key);

extern int __pthread_once (pthread_once_t *__once_control,
                           void (*__init_routine) (void));

extern int __pthread_atfork (void (*__prepare) (void),
                             void (*__parent) (void),
                             void (*__child) (void));



/* Make the pthread functions weak so that we can elide them from
   single-threaded processes.  */
#ifndef __NO_WEAK_PTHREAD_ALIASES
# ifdef weak_extern
#  if _LIBC
#   include <bp-sym.h>
#  else
#   define BP_SYM(sym) sym
#  endif
weak_extern (BP_SYM (__pthread_mutex_init))
weak_extern (BP_SYM (__pthread_mutex_destroy))
weak_extern (BP_SYM (__pthread_mutex_lock))
weak_extern (BP_SYM (__pthread_mutex_trylock))
weak_extern (BP_SYM (__pthread_mutex_unlock))
weak_extern (BP_SYM (__pthread_mutexattr_init))
weak_extern (BP_SYM (__pthread_mutexattr_destroy))
weak_extern (BP_SYM (__pthread_mutexattr_settype))
weak_extern (BP_SYM (__pthread_rwlock_init))
weak_extern (BP_SYM (__pthread_rwlock_destroy))
weak_extern (BP_SYM (__pthread_rwlock_rdlock))
weak_extern (BP_SYM (__pthread_rwlock_tryrdlock))
weak_extern (BP_SYM (__pthread_rwlock_wrlock))
weak_extern (BP_SYM (__pthread_rwlock_trywrlock))
weak_extern (BP_SYM (__pthread_rwlock_unlock))
weak_extern (BP_SYM (__pthread_key_create))
weak_extern (BP_SYM (__pthread_setspecific))
weak_extern (BP_SYM (__pthread_getspecific))
weak_extern (BP_SYM (__pthread_once))
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
weak_extern (BP_SYM (_pthread_cleanup_push_defer))
weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
weak_extern (BP_SYM (pthread_setcancelstate))
# else
#  pragma weak __pthread_mutex_init
#  pragma weak __pthread_mutex_destroy
#  pragma weak __pthread_mutex_lock
#  pragma weak __pthread_mutex_trylock
#  pragma weak __pthread_mutex_unlock
#  pragma weak __pthread_mutexattr_init
#  pragma weak __pthread_mutexattr_destroy
#  pragma weak __pthread_mutexattr_settype
#  pragma weak __pthread_rwlock_destroy
#  pragma weak __pthread_rwlock_rdlock
#  pragma weak __pthread_rwlock_tryrdlock
#  pragma weak __pthread_rwlock_wrlock
#  pragma weak __pthread_rwlock_trywrlock
#  pragma weak __pthread_rwlock_unlock
#  pragma weak __pthread_key_create
#  pragma weak __pthread_setspecific
#  pragma weak __pthread_getspecific
#  pragma weak __pthread_once
#  pragma weak __pthread_initialize
#  pragma weak __pthread_atfork
#  pragma weak _pthread_cleanup_push_defer
#  pragma weak _pthread_cleanup_pop_restore
#  pragma weak pthread_setcancelstate
# endif
#endif

#endif  /* bits/libc-lock.h */