/* ---------- To make a malloc.h, start cutting here ------------ */

/*
  A version of malloc/free/realloc written by Doug Lea and released to the
  public domain.  Send questions/comments/complaints/performance data
  to dl@cs.oswego.edu

* VERSION 2.6.6  Sun Mar  5 19:10:03 2000  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
	   ftp://g.oswego.edu/pub/misc/malloc.c
	 Check before installing!

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator. For a high-level description, see
     http://g.oswego.edu/dl/html/malloc.html

* Synopsis of public routines

  (Much fuller descriptions are contained in the program documentation below;
  a short usage sketch follows this list.)

  malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
  free(Void_t* p);
     Release the chunk of memory pointed to by p, or no effect if p is null.
  realloc(Void_t* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available. The returned pointer may or may not be
     the same as p. If p is null, equivalent to malloc.  Unless the
     #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
     size argument of zero (re)allocates a minimum-sized chunk.
  memalign(size_t alignment, size_t n);
     Return a pointer to a newly allocated chunk of n bytes, aligned
     in accord with the alignment argument, which must be a power of
     two.
  valloc(size_t n);
     Equivalent to memalign(pagesize, n), where pagesize is the page
     size of the system (or as near to this as can be figured out from
     all the includes/defines below.)
  pvalloc(size_t n);
     Equivalent to valloc(minimum-page-that-holds(n)), that is,
     round up n to nearest pagesize.
  calloc(size_t unit, size_t quantity);
     Returns a pointer to quantity * unit bytes, with all locations
     set to zero.
  cfree(Void_t* p);
     Equivalent to free(p).
  malloc_trim(size_t pad);
     Release all but pad bytes of freed top-most memory back
     to the system. Return 1 if successful, else 0.
  malloc_usable_size(Void_t* p);
     Report the number of usable allocated bytes associated with allocated
     chunk p. This may or may not report more bytes than were requested,
     due to alignment and minimum size constraints.
  malloc_stats();
     Prints brief summary statistics on stderr.
  mallinfo()
     Returns (by copy) a struct containing various summary statistics.
  mallopt(int parameter_number, int parameter_value)
     Changes one of the tunable parameters described below. Returns
     1 if successful in changing the parameter, else 0.
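
  As a quick orientation, the following sketch exercises several of the
  routines above. It is illustrative only: the sizes and the function name
  demo_usage are arbitrary, it assumes the default un-prefixed names, and
  memalign is declared by this allocator's malloc.h rather than <stdlib.h>.

      #include <stdlib.h>
      #include <string.h>

      int demo_usage(void)
      {
          char *buf = malloc(100);            // at least 100 usable bytes, or null
          if (buf == NULL)
              return -1;
          memset(buf, 0, 100);

          char *bigger = realloc(buf, 400);   // may move; contents preserved
          if (bigger == NULL) {               // on failure the old block is untouched
              free(buf);
              return -1;
          }
          buf = bigger;

          void *aligned = memalign(64, 256);  // 256 bytes on a 64-byte boundary
          free(aligned);                      // free(NULL) is a no-op
          free(buf);
          return 0;
      }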

* Vital statistics:

  Alignment:                            8-byte
       8 byte alignment is currently hardwired into the design.  This
       seems to suffice for all current machines and C compilers.

  Assumed pointer representation:       4 or 8 bytes
       Code for 8-byte pointers is untested by me but is reported to work
       reliably by Wolfram Gloger, who contributed most of the
       changes supporting this.

  Assumed size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.

  Minimum overhead per allocated chunk: 4 or 8 bytes
       Each malloced chunk has a hidden overhead of 4 bytes holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
			  8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field
       and 8 (16) bytes for free list pointers. Thus, the minimum
       allocatable size is 16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

  Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
			  8-byte size_t: 2^63 - 16 bytes

       It is assumed that (possibly signed) size_t bit values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. To be conservative, values that would appear
       as negative numbers are avoided.
       Requests for sizes with a negative sign bit when the request
       size is treated as a long will return null.

  Maximum overhead wastage per allocated chunk: normally 15 bytes

       Alignment demands, plus the minimum allocatable size restriction
       make the normal worst-case wastage 15 bytes (i.e., up to 15
       more bytes will be allocated than were requested in malloc), with
       two exceptions:
	 1. Because requests for zero bytes allocate non-zero space,
	    the worst case wastage for a request of zero bytes is 24 bytes.
	 2. For requests >= mmap_threshold that are serviced via
	    mmap(), the worst case wastage is 8 bytes plus the remainder
	    from a system page (the minimal mmap unit); typically 4096 bytes.

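  The rounding behind these numbers can be written out explicitly. The helper
  below only illustrates the arithmetic for the common case of 4-byte sizes
  and 8-byte alignment; it is not the macro this file actually uses, and the
  EXAMPLE_* names are invented for the sketch:

      #define EXAMPLE_ALIGN     8UL    // hardwired 8-byte alignment
      #define EXAMPLE_OVERHEAD  4UL    // hidden size/status word per chunk
      #define EXAMPLE_MINSIZE   16UL   // smallest chunk handed out

      static unsigned long example_chunk_size(unsigned long request)
      {
          unsigned long sz = request + EXAMPLE_OVERHEAD;
          sz = (sz + EXAMPLE_ALIGN - 1) & ~(EXAMPLE_ALIGN - 1);  // round up to 8
          return sz < EXAMPLE_MINSIZE ? EXAMPLE_MINSIZE : sz;
      }

      // example_chunk_size(1)   == 16   -> the 15-byte worst case above
      // example_chunk_size(100) == 104  -> only the hidden 4-byte overhead
      // example_chunk_size(0)   == 16   (24 with 8-byte pointers, as noted)
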
* Limitations

    Here are some features that are NOT currently supported

    * No user-definable hooks for callbacks and the like.
    * No automated mechanism for fully checking that all accesses
      to malloced memory stay within their bounds.
    * No support for compaction.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below. It has been tested most extensively on Solaris and
    Linux. It is also reported to work on WIN32 platforms.
    People have also reported adapting this malloc for use in
    stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  Among other
    consequences, it uses a lot of macros.  Because of this, to be at
    all usable, this code should be compiled using an optimizing compiler
    (for example gcc -O2) that can simplify expressions and control
    paths.

  __STD_C                  (default: derived from C compiler defines)
     Nonzero if using ANSI-standard C compiler, a C++ compiler, or
     a C compiler sufficiently close to ANSI to get away with it.
  DEBUG                    (default: NOT defined)
     Define to enable debugging. Adds fairly extensive assertion-based
     checking to help track down memory errors, but noticeably slows down
     execution.
  REALLOC_ZERO_BYTES_FREES (default: NOT defined)
     Define this if you think that realloc(p, 0) should be equivalent
     to free(p). Otherwise, since malloc returns a unique pointer for
     malloc(0), so does realloc(p, 0).
  HAVE_MEMCPY               (default: defined)
     Define if you are not otherwise using ANSI STD C, but still
     have memcpy and memset in your C library and want to use them.
     Otherwise, simple internal versions are supplied.
  USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
     Define as 1 if you want the C library versions of memset and
     memcpy called in realloc and calloc (otherwise macro versions are used).
     At least on some platforms, the simple macro versions usually
     outperform libc versions.
  HAVE_MMAP                 (default: defined as 1)
     Define to non-zero to optionally make malloc() use mmap() to
     allocate very large blocks.
  HAVE_MREMAP                 (default: defined as 0 unless Linux libc set)
     Define to non-zero to optionally make realloc() use mremap() to
     reallocate very large blocks.
  malloc_getpagesize        (default: derived from system #includes)
     Either a constant or routine call returning the system page size.
  HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
     Optionally define if you are on a system with a /usr/include/malloc.h
     that declares struct mallinfo. It is not at all necessary to
     define this even if you do, but will ensure consistency.
  INTERNAL_SIZE_T           (default: size_t)
     Define to a 32-bit type (probably `unsigned int') if you are on a
     64-bit machine, yet do not want or need to allow malloc requests of
     greater than 2^31 to be handled. This saves space, especially for
     very small chunks.
  INTERNAL_LINUX_C_LIB      (default: NOT defined)
     Defined only when compiled as part of Linux libc.
     Also note that there is some odd internal name-mangling via defines
     (for example, internally, `malloc' is named `mALLOc') needed
     when compiling in this case. These look funny but don't otherwise
     affect anything.
  WIN32                     (default: undefined)
     Define this on MS win (95, nt) platforms to compile in sbrk emulation.
  LACKS_UNISTD_H            (default: undefined if not WIN32)
     Define this if your system does not have a <unistd.h>.
  LACKS_SYS_PARAM_H         (default: undefined if not WIN32)
     Define this if your system does not have a <sys/param.h>.
  MORECORE                  (default: sbrk)
     The name of the routine to call to obtain more memory from the system.
  MORECORE_FAILURE          (default: -1)
     The value returned upon failure of MORECORE.
  MORECORE_CLEARS           (default 1)
     true (1) if the routine mapped to MORECORE zeroes out memory (which
     holds for sbrk).
  DEFAULT_TRIM_THRESHOLD
  DEFAULT_TOP_PAD
  DEFAULT_MMAP_THRESHOLD
  DEFAULT_MMAP_MAX
     Default values of tunable parameters (described in detail below)
     controlling interaction with host system routines (sbrk, mmap, etc).
     These values may also be changed dynamically via mallopt(). The
     preset defaults are those that give best performance for typical
     programs/systems.
  USE_DL_PREFIX             (default: undefined)
     Prefix all public routines with the string 'dl'.  Useful to
     quickly avoid procedure declaration conflicts and linker symbol
     conflicts with existing memory allocation routines.
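
  As a concrete (and purely illustrative) example, a small stand-alone or
  embedded build with no mmap, a custom core-memory routine, and dl-prefixed
  entry points might define something along these lines before compiling this
  file; my_sbrk and the particular values are placeholders, not requirements:

      #define USE_DL_PREFIX                // export dlmalloc/dlfree/... names
      #define HAVE_MMAP 0                  // no mmap() on the target
      #define MORECORE my_sbrk             // hypothetical routine supplying core memory
      #define MORECORE_FAILURE (-1)        // value my_sbrk returns when exhausted
      #define MORECORE_CLEARS 0            // my_sbrk does not zero what it returns
      #define malloc_getpagesize (4096)    // fixed page size on the target
      #define LACKS_UNISTD_H
      #define LACKS_SYS_PARAM_H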


*/




/* Preliminaries */

#ifndef __STD_C
#ifdef __STDC__
#define __STD_C     1
#else
#if __cplusplus
#define __STD_C     1
#else
#define __STD_C     0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#ifndef Void_t
#if (__STD_C || defined(WIN32))
#define Void_t      void
#else
#define Void_t      char
#endif
#endif /*Void_t*/

#if __STD_C
#include <stddef.h>   /* for size_t */
#else
#include <sys/types.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>    /* needed for malloc_stats */


/*
  Compile-time options
*/


/*
    Debugging:

    Because freed chunks may be overwritten with link fields, this
    malloc will often die when freed memory is overwritten by user
    programs.  This can be very effective (albeit in an annoying way)
    in helping track down dangling pointers.

    If you compile with -DDEBUG, a number of assertion checks are
    enabled that will catch more memory errors. You probably won't be
    able to make much sense of the actual assertion errors, but they
    should help you locate incorrectly overwritten memory.  The
    checking is fairly extensive, and will slow down execution
    noticeably. Calling malloc_stats or mallinfo with DEBUG set will
    attempt to check every non-mmapped allocated and free chunk in the
    course of computing the summaries. (By nature, mmapped regions
    cannot be checked very much automatically.)

    Setting DEBUG may also be helpful if you are trying to modify
    this code. The assertions in the check routines spell out in more
    detail the assumptions and invariants underlying the algorithms.

*/

#if DEBUG
#include <assert.h>
#else
#define assert(x) ((void)0)
#endif


/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif

/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  Some people think it should. Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/


/*   #define REALLOC_ZERO_BYTES_FREES */
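
/*
  To make the effect of this option concrete, a small sketch (not compiled
  as part of this file; it assumes the default un-prefixed names):

      void zero_resize_demo(void)
      {
          void *p = malloc(10);
          void *q = realloc(p, 0);

          // REALLOC_ZERO_BYTES_FREES defined:  q == NULL and p is already freed.
          // default (not defined):             q is a minimum-sized chunk that
          //                                    must itself be free()d eventually.
          if (q != NULL)
              free(q);
      }
*/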


/*
  WIN32 causes an emulation of sbrk to be compiled in.
  mmap-based options are not currently supported in WIN32.
*/

/* #define WIN32 */
#ifdef WIN32
#define MORECORE wsbrk
#define HAVE_MMAP 0

#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H

/*
  Include 'windows.h' to get the necessary declarations for the
  Microsoft Visual C++ data structures and routines used in the 'sbrk'
  emulation.

  Define WIN32_LEAN_AND_MEAN so that only the essential Microsoft
  Visual C++ header files are included.
*/
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif


/*
  HAVE_MEMCPY should be defined if you are not otherwise using
  ANSI STD C, but still have memcpy and memset in your C library
  and want to use them in calloc and realloc. Otherwise simple
  macro versions are defined here.

  USE_MEMCPY should be defined as 1 if you actually want to
  have memset and memcpy called. People report that the macro
  versions are often enough faster than libc versions on many
  systems that it is better to use them.

*/

#define HAVE_MEMCPY

#ifndef USE_MEMCPY
#ifdef HAVE_MEMCPY
#define USE_MEMCPY 1
#else
#define USE_MEMCPY 0
#endif
#endif

#if (__STD_C || defined(HAVE_MEMCPY))

#if __STD_C
void* memset(void*, int, size_t);
void* memcpy(void*, const void*, size_t);
#else
#ifdef WIN32
/* On Win32 platforms, 'memset()' and 'memcpy()' are already declared in */
/* 'windows.h' */
#else
Void_t* memset();
Void_t* memcpy();
#endif
#endif
#endif

#if USE_MEMCPY

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
                                     *mz++ = 0;                               \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
                                     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
                                     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else memcpy(dest, src, mcsz);                                             \
} while(0)

#else /* !USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)

#endif
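
/*
  The !USE_MEMCPY variants above are an instance of Duff's device: the switch
  jumps into the middle of an unrolled loop to handle the leftover words, and
  the enclosing loop then processes full groups of eight. A stand-alone sketch
  of the same idea (word_copy is a hypothetical helper, not used elsewhere in
  this file):

      static void word_copy(long *dst, const long *src, unsigned long nwords)
      {
          unsigned long groups = (nwords + 7) / 8;   // number of passes, rounded up
          if (nwords == 0)
              return;
          switch (nwords % 8) {                      // jump into the unrolled body
              case 0: do { *dst++ = *src++;
              case 7:      *dst++ = *src++;
              case 6:      *dst++ = *src++;
              case 5:      *dst++ = *src++;
              case 4:      *dst++ = *src++;
              case 3:      *dst++ = *src++;
              case 2:      *dst++ = *src++;
              case 1:      *dst++ = *src++;
                      } while (--groups > 0);
          }
      }
*/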


/*
  Define HAVE_MMAP to optionally make malloc() use mmap() to
  allocate very large blocks.  These will be returned to the
  operating system immediately after a free().
*/

#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.  This is currently only possible on Linux with
  kernel versions newer than 1.3.77.
*/

#ifndef HAVE_MREMAP
#ifdef INTERNAL_LINUX_C_LIB
#define HAVE_MREMAP 1
#else
#define HAVE_MREMAP 0
#endif
#endif

#if HAVE_MMAP

#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#endif /* HAVE_MMAP */

/*
  Access to system page size. To the extent possible, this malloc
  manages memory from the system in page-size units.

  The following mechanics for getpagesize were adapted from
  bsd/gnu getpagesize.h
*/

#ifndef LACKS_UNISTD_H
#  include <unistd.h>
#endif

#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      ifdef WIN32
#        define malloc_getpagesize (4096) /* TBD: Use 'GetSystemInfo' instead */
#      else
#        ifndef LACKS_SYS_PARAM_H
#          include <sys/param.h>
#        endif
#        ifdef EXEC_PAGESIZE
#          define malloc_getpagesize EXEC_PAGESIZE
#        else
#          ifdef NBPG
#            ifndef CLSIZE
#              define malloc_getpagesize NBPG
#            else
#              define malloc_getpagesize (NBPG * CLSIZE)
#            endif
#          else
#            ifdef NBPC
#              define malloc_getpagesize NBPC
#            else
#              ifdef PAGESIZE
#                define malloc_getpagesize PAGESIZE
#              else
#                define malloc_getpagesize (4096) /* just guess */
#              endif
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif
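
/*
  The cascade above only selects a compile-time expression for the page size.
  On a typical modern POSIX system it reduces to sysconf(_SC_PAGE_SIZE). A
  run-time sketch of the same fallback idea, assuming <unistd.h> is available
  (page_size_guess is a hypothetical helper, not part of this malloc):

      static unsigned long page_size_guess(void)
      {
      #ifdef _SC_PAGE_SIZE
          long sz = sysconf(_SC_PAGE_SIZE);   // preferred: ask the system
          if (sz > 0)
              return (unsigned long)sz;
      #endif
          return 4096;                        // last-resort guess, as above
      }
*/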


/*

  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing the same kind of
  information you can get from malloc_stats. It should work on
  any SVID/XPG compliant system that has a /usr/include/malloc.h
  defining struct mallinfo. (If you'd like to install such a thing
  yourself, cut out the preliminary declarations as described above
  and below and save them in a malloc.h file. But there's no
  compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields, most of which are not even meaningful in this
  version of malloc. Some of these fields are instead filled by
  mallinfo() with other numbers that might possibly be of interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
  version is declared below.  These must be precisely the same for
  mallinfo() to work.

*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#if HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else

/* SVID2/XPG mallinfo structure */

struct mallinfo {
  int arena;    /* total space allocated from system */
  int ordblks;  /* number of non-inuse chunks */
  int smblks;   /* unused -- always zero */
  int hblks;    /* number of mmapped regions */
  int hblkhd;   /* total space in mmapped regions */
  int usmblks;  /* unused -- always zero */
  int fsmblks;  /* unused -- always zero */
  int uordblks; /* total allocated space */
  int fordblks; /* total non-inuse space */
  int keepcost; /* top-most, releasable (via malloc_trim) space */
};

/* SVID2/XPG mallopt options */

#define M_MXFAST  1    /* UNUSED in this malloc */
#define M_NLBLKS  2    /* UNUSED in this malloc */
#define M_GRAIN   3    /* UNUSED in this malloc */
#define M_KEEP    4    /* UNUSED in this malloc */

#endif

/* mallopt options that actually do something */

#define M_TRIM_THRESHOLD    -1
#define M_TOP_PAD           -2
#define M_MMAP_THRESHOLD    -3
#define M_MMAP_MAX          -4
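
/*
  These are the parameter numbers accepted by mallopt(). A tuning sketch
  (the values are illustrative; mallopt returns 1 on success, 0 otherwise):

      void tune_allocator(void)
      {
          mallopt(M_TRIM_THRESHOLD, 64 * 1024);    // trim sooner than the 128K default
          mallopt(M_TOP_PAD,        16 * 1024);    // pad each sbrk request a little
          mallopt(M_MMAP_THRESHOLD, 256 * 1024);   // mmap() only quite large requests
          mallopt(M_MMAP_MAX,       0);            // or disable mmap use entirely
      }
*/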


#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
    M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
      to keep before releasing via malloc_trim in free().

      Automatic trimming is mainly useful in long-lived programs.
      Because trimming via sbrk can be slow on some systems, and can
      sometimes be wasteful (in cases where programs immediately
      afterward allocate more large chunks) the value should be high
      enough so that your overall system performance would improve by
      releasing.

      The trim threshold and the mmap control parameters (see below)
      can be traded off with one another. Trimming and mmapping are
      two different ways of releasing unused memory back to the
      system. Between these two, it is often possible to keep
      system-level demands of a long-lived program down to a bare
      minimum. For example, in one test suite of sessions measuring
      the XF86 X server on Linux, using a trim threshold of 128K and a
      mmap threshold of 192K led to near-minimal long term resource
      consumption.

      If you are using this malloc in a long-lived program, it should
      pay to experiment with these values.  As a rough guide, you
      might set to a value close to the average size of a process
      (program) running on your system.  Releasing this much memory
      would allow such a process to run in memory.  Generally, it's
      worth it to tune for trimming rather than memory mapping when a
      program undergoes phases where several large chunks are
      allocated and released in ways that can reuse each other's
      storage, perhaps mixed with phases where there are no such
      chunks at all.  And in well-behaved long-lived programs,
      controlling release of large blocks via trimming versus mapping
      is usually faster.

      However, in most programs, these parameters serve mainly as
      protection against the system-level effects of carrying around
      massive amounts of unneeded memory. Since frequent calls to
      sbrk, mmap, and munmap otherwise degrade performance, the default
      parameters are set to relatively high values that serve only as
      safeguards.

      The default trim value is high enough to cause trimming only in
      fairly extreme (by current memory consumption standards) cases.
      It must be greater than page size to have any useful effect.  To
      disable trimming completely, you can set to (unsigned long)(-1);


*/


#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
    M_TOP_PAD is the amount of extra `padding' space to allocate or
      retain whenever sbrk is called. It is used in two ways internally:

      * When sbrk is called to extend the top of the arena to satisfy
	a new malloc request, this much padding is added to the sbrk
	request.

      * When malloc_trim is called automatically from free(),
	it is used as the `pad' argument.

      In both cases, the actual amount of padding is rounded
      so that the end of the arena is always a system page boundary.

      The main reason for using padding is to avoid calling sbrk so
      often. Having even a small pad greatly reduces the likelihood
      that nearly every malloc request during program start-up (or
      after trimming) will invoke sbrk, which needlessly wastes
      time.

      Automatic rounding-up to page-size units is normally sufficient
      to avoid measurable overhead, so the default is 0.  However, in
      systems where sbrk is relatively slow, it can pay to increase
      this value, at the expense of carrying around more memory than
      the program needs.

*/


#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
#endif

/*

    M_MMAP_THRESHOLD is the request size threshold for using mmap()
      to service a request. Requests of at least this size that cannot
      be allocated using already-existing space will be serviced via mmap.
      (If enough normal freed space already exists it is used instead.)

      Using mmap segregates relatively large chunks of memory so that
      they can be individually obtained and released from the host
      system. A request serviced through mmap is never reused by any
      other request (at least not directly; the system may just so
      happen to remap successive requests to the same locations).

      Segregating space in this way has the benefit that mmapped space
      can ALWAYS be individually released back to the system, which
      helps keep the system level memory demands of a long-lived
      program low. Mapped memory can never become `locked' between
      other chunks, as can happen with normally allocated chunks, which
      means that even trimming via malloc_trim would not release them.

      However, it has the disadvantages that:

	 1. The space cannot be reclaimed, consolidated, and then
	    used to service later requests, as happens with normal chunks.
	 2. It can lead to more wastage because of mmap page alignment
	    requirements
	 3. It causes malloc performance to be more dependent on host
	    system memory management support routines which may vary in
	    implementation quality and may impose arbitrary
	    limitations. Generally, servicing a request via normal
	    malloc steps is faster than going through a system's mmap.

      All together, these considerations should lead you to use mmap
      only for relatively large requests.


*/


#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX       (64)
#else
#define DEFAULT_MMAP_MAX       (0)
#endif
#endif

/*
    M_MMAP_MAX is the maximum number of requests to simultaneously
      service using mmap. This parameter exists because:

	 1. Some systems have a limited number of internal tables for
	    use by mmap.
	 2. In most systems, overreliance on mmap can degrade overall
	    performance.
	 3. If a program allocates many large regions, it is probably
	    better off using normal sbrk-based allocation routines that
	    can reclaim and reallocate normal heap memory. Using a
	    small value allows transition into this mode after the
	    first few allocations.

      Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
      the default value is 0, and attempts to set it to non-zero values
      in mallopt will fail.
*/


/*
    USE_DL_PREFIX will prefix all public routines with the string 'dl'.
      Useful to quickly avoid procedure declaration conflicts and linker
      symbol conflicts with existing memory allocation routines.

*/

/* #define USE_DL_PREFIX */


/*

  Special defines for linux libc

  Except when compiled using these special defines for Linux libc
  using weak aliases, this malloc is NOT designed to work in
  multithreaded applications.  No semaphores or other concurrency
  control are provided to ensure that multiple malloc or free calls
  don't run at the same time, which could be disastrous. A single
  semaphore could be used across malloc, realloc, and free (which is
  essentially the effect of the linux weak alias approach). It would
  be hard to obtain finer granularity.

*/


#ifdef INTERNAL_LINUX_C_LIB

#if __STD_C

Void_t * __default_morecore_init (ptrdiff_t);
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;

#else

Void_t * __default_morecore_init ();
Void_t *(*__morecore)() = __default_morecore_init;

#endif

#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0
#define MORECORE_CLEARS 1

#else /* INTERNAL_LINUX_C_LIB */

#if __STD_C
extern Void_t*     sbrk(ptrdiff_t);
#else
extern Void_t*     sbrk();
#endif

#ifndef MORECORE
#define MORECORE sbrk
#endif

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE -1
#endif

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

#endif /* INTERNAL_LINUX_C_LIB */

#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)

#define cALLOc		__libc_calloc
#define fREe		__libc_free
#define mALLOc		__libc_malloc
#define mEMALIGn	__libc_memalign
#define rEALLOc		__libc_realloc
#define vALLOc		__libc_valloc
#define pvALLOc		__libc_pvalloc
#define mALLINFo	__libc_mallinfo
#define mALLOPt		__libc_mallopt

#pragma weak calloc = __libc_calloc
#pragma weak free = __libc_free
#pragma weak cfree = __libc_free
#pragma weak malloc = __libc_malloc
#pragma weak memalign = __libc_memalign
#pragma weak realloc = __libc_realloc
#pragma weak valloc = __libc_valloc
#pragma weak pvalloc = __libc_pvalloc
#pragma weak mallinfo = __libc_mallinfo
#pragma weak mallopt = __libc_mallopt

#else

#ifdef USE_DL_PREFIX
#define cALLOc		dlcalloc
#define fREe		dlfree
#define mALLOc		dlmalloc
#define mEMALIGn	dlmemalign
#define rEALLOc		dlrealloc
#define vALLOc		dlvalloc
#define pvALLOc		dlpvalloc
#define mALLINFo	dlmallinfo
#define mALLOPt		dlmallopt
#else /* USE_DL_PREFIX */
#define cALLOc		calloc
#define fREe		free
#define mALLOc		malloc
#define mEMALIGn	memalign
#define rEALLOc		realloc
#define vALLOc		valloc
#define pvALLOc		pvalloc
#define mALLINFo	mallinfo
#define mALLOPt		mallopt
#endif /* USE_DL_PREFIX */

#endif
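
/*
  With USE_DL_PREFIX defined, callers link against the dl-prefixed names and
  the platform C library's allocator is left untouched, so both can coexist.
  A sketch (assuming a build with -DUSE_DL_PREFIX):

      void *a = dlmalloc(128);   // served by this file
      void *b = malloc(128);     // served by the host C library
      dlfree(a);
      free(b);
*/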

/* Public routines */

#if __STD_C

Void_t* mALLOc(size_t);
void    fREe(Void_t*);
Void_t* rEALLOc(Void_t*, size_t);
Void_t* mEMALIGn(size_t, size_t);
Void_t* vALLOc(size_t);
Void_t* pvALLOc(size_t);
Void_t* cALLOc(size_t, size_t);
void    cfree(Void_t*);
int     malloc_trim(size_t);
size_t  malloc_usable_size(Void_t*);
void    malloc_stats();
int     mALLOPt(int, int);
struct mallinfo mALLINFo(void);
#else
Void_t* mALLOc();
void    fREe();
Void_t* rEALLOc();
Void_t* mEMALIGn();
Void_t* vALLOc();
Void_t* pvALLOc();
Void_t* cALLOc();
void    cfree();
int     malloc_trim();
size_t  malloc_usable_size();
void    malloc_stats();
int     mALLOPt();
struct mallinfo mALLINFo();
#endif


#ifdef __cplusplus
};  /* end of extern "C" */
#endif

/* ---------- To make a malloc.h, end cutting here ------------ */


/*
  Emulation of sbrk for WIN32
  All code within the ifdef WIN32 is untested by me.

  Thanks to Martin Fong and others for supplying this.
*/


#ifdef WIN32

#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)

struct GmListElement;
typedef struct GmListElement GmListElement;

struct GmListElement
{
	GmListElement* next;
	void* base;
};

static GmListElement* head = 0;
static unsigned int gNextAddress = 0;
static unsigned int gAddressBase = 0;
static unsigned int gAllocatedSize = 0;

static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	assert (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

void gcleanup ()
{
	BOOL rval;
	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
							gNextAddress - gAddressBase,
							MEM_DECOMMIT);
	assert (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		assert (rval);
		LocalFree (head);
		head = next;
	}
}

static
void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	if (size >= TOP_MEMORY) return NULL;

	while ((unsigned long)start_address + size < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
			return start_address;
		else
		{
			/* Requested region is not available so see if the */
			/* next region is available.  Set 'start_address' */
			/* to the next region and call 'VirtualQuery()' */
			/* again. */

			start_address = (char*)info.BaseAddress + info.RegionSize;

			/* Make sure we start looking for the next region */
			/* on the *next* 64K boundary.  Otherwise, even if */
			/* the new region is free according to */
			/* 'VirtualQuery()', the subsequent call to */
			/* 'VirtualAlloc()' (which follows the call to */
			/* this routine in 'wsbrk()') will round *down* */
			/* the requested address to a 64K boundary which */
			/* we already know is an address in the */
			/* unavailable region.  Thus, the subsequent call */
			/* to 'VirtualAlloc()' will fail and bring us back */
			/* here, causing us to go into an infinite loop. */

			start_address =
				(void *) AlignPage64K((unsigned long) start_address);
		}
	}
	return NULL;

}


void* wsbrk (long size)
{
	void* tmp;
	if (size > 0)
	{
		if (gAddressBase == 0)
		{
			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
			gNextAddress = gAddressBase =
				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
											MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
gAllocatedSize))
		{
			long new_size = max (NEXT_SIZE, AlignPage (size));
			void* new_address = (void*)(gAddressBase+gAllocatedSize);
			do
			{
				new_address = findRegion (new_address, new_size);

				if (new_address == 0)
					return (void*)-1;

				gAddressBase = gNextAddress =
					(unsigned int)VirtualAlloc (new_address, new_size,
												MEM_RESERVE, PAGE_NOACCESS);
				/* repeat in case of race condition */
				/* The region that we found has been snagged */
				/* by another thread */
			}
			while (gAddressBase == 0);

			assert (new_address == (void*)gAddressBase);

			gAllocatedSize = new_size;

			if (!makeGmListElement ((void*)gAddressBase))
				return (void*)-1;
		}
		if ((size + gNextAddress) > AlignPage (gNextAddress))
		{
			void* res;
			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
								(size + gNextAddress -
								 AlignPage (gNextAddress)),
1092*4882a593Smuzhiyun								MEM_COMMIT, PAGE_READWRITE);
1093*4882a593Smuzhiyun			if (res == 0)
1094*4882a593Smuzhiyun				return (void*)-1;
1095*4882a593Smuzhiyun		}
1096*4882a593Smuzhiyun		tmp = (void*)gNextAddress;
1097*4882a593Smuzhiyun		gNextAddress = (unsigned int)tmp + size;
1098*4882a593Smuzhiyun		return tmp;
1099*4882a593Smuzhiyun	}
1100*4882a593Smuzhiyun	else if (size < 0)
1101*4882a593Smuzhiyun	{
1102*4882a593Smuzhiyun		unsigned int alignedGoal = AlignPage (gNextAddress + size);
1103*4882a593Smuzhiyun		/* Trim by releasing the virtual memory */
1104*4882a593Smuzhiyun		if (alignedGoal >= gAddressBase)
1105*4882a593Smuzhiyun		{
1106*4882a593Smuzhiyun			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
1107*4882a593Smuzhiyun						 MEM_DECOMMIT);
1108*4882a593Smuzhiyun			gNextAddress = gNextAddress + size;
1109*4882a593Smuzhiyun			return (void*)gNextAddress;
1110*4882a593Smuzhiyun		}
1111*4882a593Smuzhiyun		else
1112*4882a593Smuzhiyun		{
1113*4882a593Smuzhiyun			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
1114*4882a593Smuzhiyun						 MEM_DECOMMIT);
1115*4882a593Smuzhiyun			gNextAddress = gAddressBase;
1116*4882a593Smuzhiyun			return (void*)-1;
1117*4882a593Smuzhiyun		}
1118*4882a593Smuzhiyun	}
1119*4882a593Smuzhiyun	else
1120*4882a593Smuzhiyun	{
1121*4882a593Smuzhiyun		return (void*)gNextAddress;
1122*4882a593Smuzhiyun	}
1123*4882a593Smuzhiyun}
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun#endif
1126*4882a593Smuzhiyun
1127*4882a593Smuzhiyun
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun/*
1130*4882a593Smuzhiyun  Type declarations
1131*4882a593Smuzhiyun*/
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyunstruct malloc_chunk
1135*4882a593Smuzhiyun{
1136*4882a593Smuzhiyun  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1137*4882a593Smuzhiyun  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
1138*4882a593Smuzhiyun  struct malloc_chunk* fd;   /* double links -- used only if free. */
1139*4882a593Smuzhiyun  struct malloc_chunk* bk;
1140*4882a593Smuzhiyun};
1141*4882a593Smuzhiyun
1142*4882a593Smuzhiyuntypedef struct malloc_chunk* mchunkptr;
1143*4882a593Smuzhiyun
1144*4882a593Smuzhiyun/*
1145*4882a593Smuzhiyun
1146*4882a593Smuzhiyun   malloc_chunk details:
1147*4882a593Smuzhiyun
1148*4882a593Smuzhiyun    (The following includes lightly edited explanations by Colin Plumb.)
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyun    Chunks of memory are maintained using a `boundary tag' method as
1151*4882a593Smuzhiyun    described in e.g., Knuth or Standish.  (See the paper by Paul
1152*4882a593Smuzhiyun    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1153*4882a593Smuzhiyun    survey of such techniques.)  Sizes of free chunks are stored both
1154*4882a593Smuzhiyun    in the front of each chunk and at the end.  This makes
1155*4882a593Smuzhiyun    consolidating fragmented chunks into bigger chunks very fast.  The
1156*4882a593Smuzhiyun    size fields also hold bits representing whether chunks are free or
1157*4882a593Smuzhiyun    in use.
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun    An allocated chunk looks like this:
1160*4882a593Smuzhiyun
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1163*4882a593Smuzhiyun	    |             Size of previous chunk, if allocated            | |
1164*4882a593Smuzhiyun	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1165*4882a593Smuzhiyun	    |             Size of chunk, in bytes                         |P|
1166*4882a593Smuzhiyun      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1167*4882a593Smuzhiyun	    |             User data starts here...                          .
1168*4882a593Smuzhiyun	    .                                                               .
1169*4882a593Smuzhiyun	    .             (malloc_usable_space() bytes)                     .
1170*4882a593Smuzhiyun	    .                                                               |
1171*4882a593Smuzhiyunnextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1172*4882a593Smuzhiyun	    |             Size of chunk                                     |
1173*4882a593Smuzhiyun	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun
1176*4882a593Smuzhiyun    Where "chunk" is the front of the chunk for the purpose of most of
1177*4882a593Smuzhiyun    the malloc code, but "mem" is the pointer that is returned to the
1178*4882a593Smuzhiyun    user.  "Nextchunk" is the beginning of the next contiguous chunk.
1179*4882a593Smuzhiyun
1180*4882a593Smuzhiyun    Chunks always begin on even word boundaries, so the mem portion
1181*4882a593Smuzhiyun    (which is returned to the user) is also on an even word boundary, and
1182*4882a593Smuzhiyun    thus double-word aligned.
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun    Free chunks are stored in circular doubly-linked lists, and look like this:
1185*4882a593Smuzhiyun
1186*4882a593Smuzhiyun    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1187*4882a593Smuzhiyun	    |             Size of previous chunk                            |
1188*4882a593Smuzhiyun	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1189*4882a593Smuzhiyun    `head:' |             Size of chunk, in bytes                         |P|
1190*4882a593Smuzhiyun      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1191*4882a593Smuzhiyun	    |             Forward pointer to next chunk in list             |
1192*4882a593Smuzhiyun	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1193*4882a593Smuzhiyun	    |             Back pointer to previous chunk in list            |
1194*4882a593Smuzhiyun	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1195*4882a593Smuzhiyun	    |             Unused space (may be 0 bytes long)                .
1196*4882a593Smuzhiyun	    .                                                               .
1197*4882a593Smuzhiyun	    .                                                               |
1198*4882a593Smuzhiyunnextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1199*4882a593Smuzhiyun    `foot:' |             Size of chunk, in bytes                           |
1200*4882a593Smuzhiyun	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1201*4882a593Smuzhiyun
1202*4882a593Smuzhiyun    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1203*4882a593Smuzhiyun    chunk size (which is always a multiple of two words), is an in-use
1204*4882a593Smuzhiyun    bit for the *previous* chunk.  If that bit is *clear*, then the
1205*4882a593Smuzhiyun    word before the current chunk size contains the previous chunk
1206*4882a593Smuzhiyun    size, and can be used to find the front of the previous chunk.
1207*4882a593Smuzhiyun    (The very first chunk allocated always has this bit set,
1208*4882a593Smuzhiyun    preventing access to non-existent (or non-owned) memory.)
1209*4882a593Smuzhiyun
1210*4882a593Smuzhiyun    Note that the `foot' of the current chunk is actually represented
1211*4882a593Smuzhiyun    as the prev_size of the NEXT chunk. (This makes it easier to
1212*4882a593Smuzhiyun    deal with alignments etc).
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun    The two exceptions to all this are
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun     1. The special chunk `top', which doesn't bother using the
1217*4882a593Smuzhiyun	trailing size field since there is no
1218*4882a593Smuzhiyun	next contiguous chunk that would have to index off it. (After
1219*4882a593Smuzhiyun	initialization, `top' is forced to always exist.  If it would
1220*4882a593Smuzhiyun	become less than MINSIZE bytes long, it is replenished via
1221*4882a593Smuzhiyun	malloc_extend_top.)
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun     2. Chunks allocated via mmap, which have the second-lowest-order
1224*4882a593Smuzhiyun	bit (IS_MMAPPED) set in their size fields.  Because they are
1225*4882a593Smuzhiyun	never merged or traversed from any other chunk, they have no
1226*4882a593Smuzhiyun	foot size or inuse information.
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun    Available chunks are kept in any of several places (all declared below):
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun    * `av': An array of chunks serving as bin headers for consolidated
1231*4882a593Smuzhiyun       chunks. Each bin is doubly linked.  The bins are approximately
1232*4882a593Smuzhiyun       proportionally (log) spaced.  There are a lot of these bins
1233*4882a593Smuzhiyun       (128). This may look excessive, but works very well in
1234*4882a593Smuzhiyun       practice.  All procedures maintain the invariant that no
1235*4882a593Smuzhiyun       consolidated chunk physically borders another one. Chunks in
1236*4882a593Smuzhiyun       bins are kept in size order, with ties going to the
1237*4882a593Smuzhiyun       approximately least recently used chunk.
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun       The chunks in each bin are maintained in decreasing sorted order by
1240*4882a593Smuzhiyun       size.  This is irrelevant for the small bins, which all contain
1241*4882a593Smuzhiyun       the same-sized chunks, but facilitates best-fit allocation for
1242*4882a593Smuzhiyun       larger chunks. (These lists are just sequential. Keeping them in
1243*4882a593Smuzhiyun       order almost never requires enough traversal to warrant using
1244*4882a593Smuzhiyun       fancier ordered data structures.)  Chunks of the same size are
1245*4882a593Smuzhiyun       linked with the most recently freed at the front, and allocations
1246*4882a593Smuzhiyun       are taken from the back.  This results in LRU or FIFO allocation
1247*4882a593Smuzhiyun       order, which tends to give each chunk an equal opportunity to be
1248*4882a593Smuzhiyun       consolidated with adjacent freed chunks, resulting in larger free
1249*4882a593Smuzhiyun       chunks and less fragmentation.
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun    * `top': The top-most available chunk (i.e., the one bordering the
1252*4882a593Smuzhiyun       end of available memory) is treated specially. It is never
1253*4882a593Smuzhiyun       included in any bin, is used only if no other chunk is
1254*4882a593Smuzhiyun       available, and is released back to the system if it is very
1255*4882a593Smuzhiyun       large (see M_TRIM_THRESHOLD).
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun    * `last_remainder': A bin holding only the remainder of the
1258*4882a593Smuzhiyun       most recently split (non-top) chunk. This bin is checked
1259*4882a593Smuzhiyun       before other non-fitting chunks, so as to provide better
1260*4882a593Smuzhiyun       locality for runs of sequentially allocated chunks.
1261*4882a593Smuzhiyun
1262*4882a593Smuzhiyun    *  Implicitly, through the host system's memory mapping tables.
1263*4882a593Smuzhiyun       If supported, requests greater than a threshold are usually
1264*4882a593Smuzhiyun       serviced via calls to mmap, and then later released via munmap.
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun*/
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun/*  sizes, alignments */
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
1275*4882a593Smuzhiyun#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
1276*4882a593Smuzhiyun#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
1277*4882a593Smuzhiyun#define MINSIZE                (sizeof(struct malloc_chunk))
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun/* conversion from malloc headers to user pointers, and back */
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1282*4882a593Smuzhiyun#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun/* pad request bytes into a usable size */
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun#define request2size(req) \
1287*4882a593Smuzhiyun (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
1288*4882a593Smuzhiyun  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
1289*4882a593Smuzhiyun   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
1290*4882a593Smuzhiyun
1291*4882a593Smuzhiyun/* Check if m has acceptable alignment */
1292*4882a593Smuzhiyun
1293*4882a593Smuzhiyun#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
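/*
  Worked example (illustrative; assumes a 32-bit configuration where
  SIZE_SZ == 4, MALLOC_ALIGNMENT == 8, MALLOC_ALIGN_MASK == 7 and
  MINSIZE == 16):

    request2size(1)  ->  1 + 4 + 7 = 12 < MINSIZE + 7, so result = 16
    request2size(13) -> (13 + 4 + 7) & ~7                         = 24
    request2size(24) -> (24 + 4 + 7) & ~7                         = 32

  i.e. every request gains SIZE_SZ bytes of header overhead and is rounded
  up to a multiple of MALLOC_ALIGNMENT, with MINSIZE as a floor.
*/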
1294*4882a593Smuzhiyun
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun/*
1299*4882a593Smuzhiyun  Physical chunk operations
1300*4882a593Smuzhiyun*/
1301*4882a593Smuzhiyun
1302*4882a593Smuzhiyun
1303*4882a593Smuzhiyun/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun#define PREV_INUSE 0x1
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1308*4882a593Smuzhiyun
1309*4882a593Smuzhiyun#define IS_MMAPPED 0x2
1310*4882a593Smuzhiyun
1311*4882a593Smuzhiyun/* Bits to mask off when extracting size */
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
1314*4882a593Smuzhiyun
1315*4882a593Smuzhiyun
1316*4882a593Smuzhiyun/* Ptr to next physical malloc_chunk. */
1317*4882a593Smuzhiyun
1318*4882a593Smuzhiyun#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun/* Ptr to previous physical malloc_chunk */
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun#define prev_chunk(p)\
1323*4882a593Smuzhiyun   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1324*4882a593Smuzhiyun
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun/* Treat space at ptr + offset as a chunk */
1327*4882a593Smuzhiyun
1328*4882a593Smuzhiyun#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
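/*
  Example (illustrative): if p->size == 25, i.e. 24 | PREV_INUSE, then

    next_chunk(p)          == (mchunkptr)((char*)p + 24)
    chunk_at_offset(p, 24) gives the same address but strips no flag bits,
                           so callers must pass an already-clean size
    prev_chunk(p)          reads p->prev_size, which is meaningful only
                           when the previous chunk is free (P bit clear)
*/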
1329*4882a593Smuzhiyun
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun/*
1334*4882a593Smuzhiyun  Dealing with use bits
1335*4882a593Smuzhiyun*/
1336*4882a593Smuzhiyun
1337*4882a593Smuzhiyun/* extract p's inuse bit */
1338*4882a593Smuzhiyun
1339*4882a593Smuzhiyun#define inuse(p)\
1340*4882a593Smuzhiyun((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun/* extract inuse bit of previous chunk */
1343*4882a593Smuzhiyun
1344*4882a593Smuzhiyun#define prev_inuse(p)  ((p)->size & PREV_INUSE)
1345*4882a593Smuzhiyun
1346*4882a593Smuzhiyun/* check for mmap()'ed chunk */
1347*4882a593Smuzhiyun
1348*4882a593Smuzhiyun#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun/* set/clear chunk as in use without otherwise disturbing */
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun#define set_inuse(p)\
1353*4882a593Smuzhiyun((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
1354*4882a593Smuzhiyun
1355*4882a593Smuzhiyun#define clear_inuse(p)\
1356*4882a593Smuzhiyun((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun/* check/set/clear inuse bits in known places */
1359*4882a593Smuzhiyun
1360*4882a593Smuzhiyun#define inuse_bit_at_offset(p, s)\
1361*4882a593Smuzhiyun (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1362*4882a593Smuzhiyun
1363*4882a593Smuzhiyun#define set_inuse_bit_at_offset(p, s)\
1364*4882a593Smuzhiyun (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1365*4882a593Smuzhiyun
1366*4882a593Smuzhiyun#define clear_inuse_bit_at_offset(p, s)\
1367*4882a593Smuzhiyun (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1368*4882a593Smuzhiyun
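/*
  Note (illustrative): a chunk's own in-use status is not stored in its own
  header.  inuse(p) walks forward by p's size and reads the PREV_INUSE bit
  of the *next* chunk, so for adjacent chunks p and q = next_chunk(p):

    inuse(p)       == (q->size & PREV_INUSE)
    set_inuse(p)   sets   PREV_INUSE in q->size
    clear_inuse(p) clears PREV_INUSE in q->size
*/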
1369*4882a593Smuzhiyun
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun/*
1373*4882a593Smuzhiyun  Dealing with size fields
1374*4882a593Smuzhiyun*/
1375*4882a593Smuzhiyun
1376*4882a593Smuzhiyun/* Get size, ignoring use bits */
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun#define chunksize(p)          ((p)->size & ~(SIZE_BITS))
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun/* Set size at head, without disturbing its use bit */
1381*4882a593Smuzhiyun
1382*4882a593Smuzhiyun#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))
1383*4882a593Smuzhiyun
1384*4882a593Smuzhiyun/* Set size/use ignoring previous bits in header */
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun#define set_head(p, s)        ((p)->size = (s))
1387*4882a593Smuzhiyun
1388*4882a593Smuzhiyun/* Set size at footer (only when chunk is not in use) */
1389*4882a593Smuzhiyun
1390*4882a593Smuzhiyun#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
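/*
  Example (illustrative): when a 32-byte chunk p (including overhead) is
  freed and its previous neighbour is still in use, free-list code does
  the equivalent of

    set_head(p, 32 | PREV_INUSE);
    set_foot(p, 32);

  after which chunksize(p) == 32 and chunk_at_offset(p, 32)->prev_size == 32.
*/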
1391*4882a593Smuzhiyun
1392*4882a593Smuzhiyun
1393*4882a593Smuzhiyun
1394*4882a593Smuzhiyun
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun/*
1397*4882a593Smuzhiyun   Bins
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun    The bins, `av_' are an array of pairs of pointers serving as the
1400*4882a593Smuzhiyun    heads of (initially empty) doubly-linked lists of chunks, laid out
1401*4882a593Smuzhiyun    in a way so that each pair can be treated as if it were in a
1402*4882a593Smuzhiyun    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
1403*4882a593Smuzhiyun    and chunks are the same).
1404*4882a593Smuzhiyun
1405*4882a593Smuzhiyun    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1406*4882a593Smuzhiyun    8 bytes apart. Larger bins are approximately logarithmically
1407*4882a593Smuzhiyun    spaced. (See the table below.) The `av_' array is never mentioned
1408*4882a593Smuzhiyun    directly in the code, but instead via bin access macros.
1409*4882a593Smuzhiyun
1410*4882a593Smuzhiyun    Bin layout:
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun    64 bins of size       8
1413*4882a593Smuzhiyun    32 bins of size      64
1414*4882a593Smuzhiyun    16 bins of size     512
1415*4882a593Smuzhiyun     8 bins of size    4096
1416*4882a593Smuzhiyun     4 bins of size   32768
1417*4882a593Smuzhiyun     2 bins of size  262144
1418*4882a593Smuzhiyun     1 bin  of size what's left
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun    There is actually a little bit of slop in the numbers in bin_index
1421*4882a593Smuzhiyun    for the sake of speed. This makes no difference elsewhere.
1422*4882a593Smuzhiyun
1423*4882a593Smuzhiyun    The special chunks `top' and `last_remainder' get their own bins,
1424*4882a593Smuzhiyun    (this is implemented via yet more trickery with the av_ array),
1425*4882a593Smuzhiyun    although `top' is never properly linked to its bin since it is
1426*4882a593Smuzhiyun    always handled specially.
1427*4882a593Smuzhiyun
1428*4882a593Smuzhiyun*/
1429*4882a593Smuzhiyun
1430*4882a593Smuzhiyun#define NAV             128   /* number of bins */
1431*4882a593Smuzhiyun
1432*4882a593Smuzhiyuntypedef struct malloc_chunk* mbinptr;
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun/* access macros */
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
1437*4882a593Smuzhiyun#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
1438*4882a593Smuzhiyun#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
1439*4882a593Smuzhiyun
1440*4882a593Smuzhiyun/*
1441*4882a593Smuzhiyun   The first 2 bins are never indexed. The corresponding av_ cells are instead
1442*4882a593Smuzhiyun   used for bookkeeping. This is not to save space, but to simplify
1443*4882a593Smuzhiyun   indexing, maintain locality, and avoid some initialization tests.
1444*4882a593Smuzhiyun*/
1445*4882a593Smuzhiyun
1446*4882a593Smuzhiyun#define top            (bin_at(0)->fd)   /* The topmost chunk */
1447*4882a593Smuzhiyun#define last_remainder (bin_at(1))       /* remainder from last split */
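/*
  Layout note (illustrative): fd lies 2*SIZE_SZ bytes into a malloc_chunk,
  so bin_at(i) is positioned exactly so that

    bin_at(i)->fd  overlays  av_[2*i + 2]
    bin_at(i)->bk  overlays  av_[2*i + 3]

  A bin header is therefore just a pair of list pointers; its nominal
  prev_size/size fields are never used as real sizes (bin 0's size cell
  is reused as the `binblocks' bitvector below).
*/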
1448*4882a593Smuzhiyun
1449*4882a593Smuzhiyun
1450*4882a593Smuzhiyun/*
1451*4882a593Smuzhiyun   Because top initially points to its own bin with initial
1452*4882a593Smuzhiyun   zero size, thus forcing extension on the first malloc request,
1453*4882a593Smuzhiyun   we avoid having any special code in malloc to check whether
1454*4882a593Smuzhiyun   it even exists yet. But we still need to check in malloc_extend_top.
1455*4882a593Smuzhiyun*/
1456*4882a593Smuzhiyun
1457*4882a593Smuzhiyun#define initial_top    ((mchunkptr)(bin_at(0)))
1458*4882a593Smuzhiyun
1459*4882a593Smuzhiyun/* Helper macro to initialize bins */
1460*4882a593Smuzhiyun
1461*4882a593Smuzhiyun#define IAV(i)  bin_at(i), bin_at(i)
1462*4882a593Smuzhiyun
1463*4882a593Smuzhiyunstatic mbinptr av_[NAV * 2 + 2] = {
1464*4882a593Smuzhiyun 0, 0,
1465*4882a593Smuzhiyun IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
1466*4882a593Smuzhiyun IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
1467*4882a593Smuzhiyun IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
1468*4882a593Smuzhiyun IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
1469*4882a593Smuzhiyun IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
1470*4882a593Smuzhiyun IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
1471*4882a593Smuzhiyun IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
1472*4882a593Smuzhiyun IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
1473*4882a593Smuzhiyun IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
1474*4882a593Smuzhiyun IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
1475*4882a593Smuzhiyun IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
1476*4882a593Smuzhiyun IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
1477*4882a593Smuzhiyun IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
1478*4882a593Smuzhiyun IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
1479*4882a593Smuzhiyun IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
1480*4882a593Smuzhiyun IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
1481*4882a593Smuzhiyun};
1482*4882a593Smuzhiyun
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun
1485*4882a593Smuzhiyun/* field-extraction macros */
1486*4882a593Smuzhiyun
1487*4882a593Smuzhiyun#define first(b) ((b)->fd)
1488*4882a593Smuzhiyun#define last(b)  ((b)->bk)
1489*4882a593Smuzhiyun
1490*4882a593Smuzhiyun/*
1491*4882a593Smuzhiyun  Indexing into bins
1492*4882a593Smuzhiyun*/
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun#define bin_index(sz)                                                          \
1495*4882a593Smuzhiyun(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
1496*4882a593Smuzhiyun ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
1497*4882a593Smuzhiyun ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
1498*4882a593Smuzhiyun ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
1499*4882a593Smuzhiyun ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
1500*4882a593Smuzhiyun ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
1501*4882a593Smuzhiyun					  126)
1502*4882a593Smuzhiyun/*
1503*4882a593Smuzhiyun  bins for chunks < 512 are all spaced 8 bytes apart, and hold
1504*4882a593Smuzhiyun  identically sized chunks. This is exploited in malloc.
1505*4882a593Smuzhiyun*/
1506*4882a593Smuzhiyun
1507*4882a593Smuzhiyun#define MAX_SMALLBIN         63
1508*4882a593Smuzhiyun#define MAX_SMALLBIN_SIZE   512
1509*4882a593Smuzhiyun#define SMALLBIN_WIDTH        8
1510*4882a593Smuzhiyun
1511*4882a593Smuzhiyun#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)
1512*4882a593Smuzhiyun
1513*4882a593Smuzhiyun/*
1514*4882a593Smuzhiyun   Requests are `small' if both the corresponding and the next bin are small
1515*4882a593Smuzhiyun*/
1516*4882a593Smuzhiyun
1517*4882a593Smuzhiyun#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
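/*
  Sample index values (illustrative; obtained by plugging sizes into
  bin_index above):

    size    48 ->  48 >> 3            =   6   (small bin, 8-byte spacing)
    size   600 ->  56 + (600 >> 6)    =  65
    size  5000 ->  91 + (5000 >> 9)   = 100
    size 40000 -> 110 + (40000 >> 12) = 119

  For sizes below MAX_SMALLBIN_SIZE, bin_index and smallbin_index agree,
  which is why malloc can use the cheaper smallbin_index for small requests.
*/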
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun
1521*4882a593Smuzhiyun/*
1522*4882a593Smuzhiyun    To help compensate for the large number of bins, a one-level index
1523*4882a593Smuzhiyun    structure is used for bin-by-bin searching.  `binblocks' is a
1524*4882a593Smuzhiyun    one-word bitvector recording whether groups of BINBLOCKWIDTH bins
1525*4882a593Smuzhiyun    have any (possibly) non-empty bins, so they can be skipped over
1526*4882a593Smuzhiyun    all at once during traversals. The bits are NOT always
1527*4882a593Smuzhiyun    cleared as soon as all bins in a block are empty, but instead only
1528*4882a593Smuzhiyun    when all are noticed to be empty during traversal in malloc.
1529*4882a593Smuzhiyun*/
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun#define BINBLOCKWIDTH     4   /* bins per block */
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun#define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */
1534*4882a593Smuzhiyun
1535*4882a593Smuzhiyun/* bin<->block macros */
1536*4882a593Smuzhiyun
1537*4882a593Smuzhiyun#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
1538*4882a593Smuzhiyun#define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
1539*4882a593Smuzhiyun#define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
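/*
  Example (illustrative): with BINBLOCKWIDTH == 4, bins 4..7 all share bit 1
  of binblocks:

    idx2binblock(6)   -> 1 << (6 / 4) = 0x2
    mark_binblock(6)  -> binblocks |=  0x2
    clear_binblock(6) -> binblocks &= ~0x2

  so the bin scan in malloc can test one bit to skip a whole block of four
  (possibly) empty bins.
*/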
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun/*  Other static bookkeeping data */
1546*4882a593Smuzhiyun
1547*4882a593Smuzhiyun/* variables holding tunable values */
1548*4882a593Smuzhiyun
1549*4882a593Smuzhiyunstatic unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
1550*4882a593Smuzhiyunstatic unsigned long top_pad          = DEFAULT_TOP_PAD;
1551*4882a593Smuzhiyunstatic unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
1552*4882a593Smuzhiyunstatic unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;
1553*4882a593Smuzhiyun
1554*4882a593Smuzhiyun/* The first value returned from sbrk */
1555*4882a593Smuzhiyunstatic char* sbrk_base = (char*)(-1);
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyun/* The maximum memory obtained from system via sbrk */
1558*4882a593Smuzhiyunstatic unsigned long max_sbrked_mem = 0;
1559*4882a593Smuzhiyun
1560*4882a593Smuzhiyun/* The maximum via either sbrk or mmap */
1561*4882a593Smuzhiyunstatic unsigned long max_total_mem = 0;
1562*4882a593Smuzhiyun
1563*4882a593Smuzhiyun/* internal working copy of mallinfo */
1564*4882a593Smuzhiyunstatic struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun/* The total memory obtained from system via sbrk */
1567*4882a593Smuzhiyun#define sbrked_mem  (current_mallinfo.arena)
1568*4882a593Smuzhiyun
1569*4882a593Smuzhiyun/* Tracking mmaps */
1570*4882a593Smuzhiyun
1571*4882a593Smuzhiyunstatic unsigned int n_mmaps = 0;
1572*4882a593Smuzhiyunstatic unsigned int max_n_mmaps = 0;
1573*4882a593Smuzhiyunstatic unsigned long mmapped_mem = 0;
1574*4882a593Smuzhiyunstatic unsigned long max_mmapped_mem = 0;
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun
1577*4882a593Smuzhiyun
1578*4882a593Smuzhiyun/*
1579*4882a593Smuzhiyun  Debugging support
1580*4882a593Smuzhiyun*/
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun#if DEBUG
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun
1585*4882a593Smuzhiyun/*
1586*4882a593Smuzhiyun  These routines make a number of assertions about the states
1587*4882a593Smuzhiyun  of data structures that should be true at all times. If any
1588*4882a593Smuzhiyun  are not true, it's very likely that a user program has somehow
1589*4882a593Smuzhiyun  trashed memory. (It's also possible that there is a coding error
1590*4882a593Smuzhiyun  in malloc. In which case, please report it!)
1591*4882a593Smuzhiyun*/
1592*4882a593Smuzhiyun
1593*4882a593Smuzhiyun#if __STD_C
1594*4882a593Smuzhiyunstatic void do_check_chunk(mchunkptr p)
1595*4882a593Smuzhiyun#else
1596*4882a593Smuzhiyunstatic void do_check_chunk(p) mchunkptr p;
1597*4882a593Smuzhiyun#endif
1598*4882a593Smuzhiyun{
1599*4882a593Smuzhiyun  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1600*4882a593Smuzhiyun
1601*4882a593Smuzhiyun  /* No checkable chunk is mmapped */
1602*4882a593Smuzhiyun  assert(!chunk_is_mmapped(p));
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun  /* Check for legal address ... */
1605*4882a593Smuzhiyun  assert((char*)p >= sbrk_base);
1606*4882a593Smuzhiyun  if (p != top)
1607*4882a593Smuzhiyun    assert((char*)p + sz <= (char*)top);
1608*4882a593Smuzhiyun  else
1609*4882a593Smuzhiyun    assert((char*)p + sz <= sbrk_base + sbrked_mem);
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun}
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun#if __STD_C
1615*4882a593Smuzhiyunstatic void do_check_free_chunk(mchunkptr p)
1616*4882a593Smuzhiyun#else
1617*4882a593Smuzhiyunstatic void do_check_free_chunk(p) mchunkptr p;
1618*4882a593Smuzhiyun#endif
1619*4882a593Smuzhiyun{
1620*4882a593Smuzhiyun  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1621*4882a593Smuzhiyun  mchunkptr next = chunk_at_offset(p, sz);
1622*4882a593Smuzhiyun
1623*4882a593Smuzhiyun  do_check_chunk(p);
1624*4882a593Smuzhiyun
1625*4882a593Smuzhiyun  /* Check whether it claims to be free ... */
1626*4882a593Smuzhiyun  assert(!inuse(p));
1627*4882a593Smuzhiyun
1628*4882a593Smuzhiyun  /* Unless a special marker, must have OK fields */
1629*4882a593Smuzhiyun  if ((long)sz >= (long)MINSIZE)
1630*4882a593Smuzhiyun  {
1631*4882a593Smuzhiyun    assert((sz & MALLOC_ALIGN_MASK) == 0);
1632*4882a593Smuzhiyun    assert(aligned_OK(chunk2mem(p)));
1633*4882a593Smuzhiyun    /* ... matching footer field */
1634*4882a593Smuzhiyun    assert(next->prev_size == sz);
1635*4882a593Smuzhiyun    /* ... and is fully consolidated */
1636*4882a593Smuzhiyun    assert(prev_inuse(p));
1637*4882a593Smuzhiyun    assert (next == top || inuse(next));
1638*4882a593Smuzhiyun
1639*4882a593Smuzhiyun    /* ... and has minimally sane links */
1640*4882a593Smuzhiyun    assert(p->fd->bk == p);
1641*4882a593Smuzhiyun    assert(p->bk->fd == p);
1642*4882a593Smuzhiyun  }
1643*4882a593Smuzhiyun  else /* markers are always of size SIZE_SZ */
1644*4882a593Smuzhiyun    assert(sz == SIZE_SZ);
1645*4882a593Smuzhiyun}
1646*4882a593Smuzhiyun
1647*4882a593Smuzhiyun#if __STD_C
1648*4882a593Smuzhiyunstatic void do_check_inuse_chunk(mchunkptr p)
1649*4882a593Smuzhiyun#else
1650*4882a593Smuzhiyunstatic void do_check_inuse_chunk(p) mchunkptr p;
1651*4882a593Smuzhiyun#endif
1652*4882a593Smuzhiyun{
1653*4882a593Smuzhiyun  mchunkptr next = next_chunk(p);
1654*4882a593Smuzhiyun  do_check_chunk(p);
1655*4882a593Smuzhiyun
1656*4882a593Smuzhiyun  /* Check whether it claims to be in use ... */
1657*4882a593Smuzhiyun  assert(inuse(p));
1658*4882a593Smuzhiyun
1659*4882a593Smuzhiyun  /* ... and is surrounded by OK chunks.
1660*4882a593Smuzhiyun    Since more can be checked for free chunks than for inuse ones, when an
1661*4882a593Smuzhiyun    inuse chunk borders a free one and debug is on, it is worth checking it too.
1662*4882a593Smuzhiyun  */
1663*4882a593Smuzhiyun  if (!prev_inuse(p))
1664*4882a593Smuzhiyun  {
1665*4882a593Smuzhiyun    mchunkptr prv = prev_chunk(p);
1666*4882a593Smuzhiyun    assert(next_chunk(prv) == p);
1667*4882a593Smuzhiyun    do_check_free_chunk(prv);
1668*4882a593Smuzhiyun  }
1669*4882a593Smuzhiyun  if (next == top)
1670*4882a593Smuzhiyun  {
1671*4882a593Smuzhiyun    assert(prev_inuse(next));
1672*4882a593Smuzhiyun    assert(chunksize(next) >= MINSIZE);
1673*4882a593Smuzhiyun  }
1674*4882a593Smuzhiyun  else if (!inuse(next))
1675*4882a593Smuzhiyun    do_check_free_chunk(next);
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyun}
1678*4882a593Smuzhiyun
1679*4882a593Smuzhiyun#if __STD_C
1680*4882a593Smuzhiyunstatic void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
1681*4882a593Smuzhiyun#else
1682*4882a593Smuzhiyunstatic void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
1683*4882a593Smuzhiyun#endif
1684*4882a593Smuzhiyun{
1685*4882a593Smuzhiyun  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
1686*4882a593Smuzhiyun  long room = sz - s;
1687*4882a593Smuzhiyun
1688*4882a593Smuzhiyun  do_check_inuse_chunk(p);
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun  /* Legal size ... */
1691*4882a593Smuzhiyun  assert((long)sz >= (long)MINSIZE);
1692*4882a593Smuzhiyun  assert((sz & MALLOC_ALIGN_MASK) == 0);
1693*4882a593Smuzhiyun  assert(room >= 0);
1694*4882a593Smuzhiyun  assert(room < (long)MINSIZE);
1695*4882a593Smuzhiyun
1696*4882a593Smuzhiyun  /* ... and alignment */
1697*4882a593Smuzhiyun  assert(aligned_OK(chunk2mem(p)));
1698*4882a593Smuzhiyun
1699*4882a593Smuzhiyun
1700*4882a593Smuzhiyun  /* ... and was allocated at front of an available chunk */
1701*4882a593Smuzhiyun  assert(prev_inuse(p));
1702*4882a593Smuzhiyun
1703*4882a593Smuzhiyun}
1704*4882a593Smuzhiyun
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun#define check_free_chunk(P)  do_check_free_chunk(P)
1707*4882a593Smuzhiyun#define check_inuse_chunk(P) do_check_inuse_chunk(P)
1708*4882a593Smuzhiyun#define check_chunk(P) do_check_chunk(P)
1709*4882a593Smuzhiyun#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
1710*4882a593Smuzhiyun#else
1711*4882a593Smuzhiyun#define check_free_chunk(P)
1712*4882a593Smuzhiyun#define check_inuse_chunk(P)
1713*4882a593Smuzhiyun#define check_chunk(P)
1714*4882a593Smuzhiyun#define check_malloced_chunk(P,N)
1715*4882a593Smuzhiyun#endif
1716*4882a593Smuzhiyun
1717*4882a593Smuzhiyun
1718*4882a593Smuzhiyun
1719*4882a593Smuzhiyun/*
1720*4882a593Smuzhiyun  Macro-based internal utilities
1721*4882a593Smuzhiyun*/
1722*4882a593Smuzhiyun
1723*4882a593Smuzhiyun
1724*4882a593Smuzhiyun/*
1725*4882a593Smuzhiyun  Linking chunks in bin lists.
1726*4882a593Smuzhiyun  Call these only with variables, not arbitrary expressions, as arguments.
1727*4882a593Smuzhiyun*/
1728*4882a593Smuzhiyun
1729*4882a593Smuzhiyun/*
1730*4882a593Smuzhiyun  Place chunk p of size s in its bin, in size order,
1731*4882a593Smuzhiyun  putting it ahead of others of same size.
1732*4882a593Smuzhiyun*/
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun
1735*4882a593Smuzhiyun#define frontlink(P, S, IDX, BK, FD)                                          \
1736*4882a593Smuzhiyun{                                                                             \
1737*4882a593Smuzhiyun  if (S < MAX_SMALLBIN_SIZE)                                                  \
1738*4882a593Smuzhiyun  {                                                                           \
1739*4882a593Smuzhiyun    IDX = smallbin_index(S);                                                  \
1740*4882a593Smuzhiyun    mark_binblock(IDX);                                                       \
1741*4882a593Smuzhiyun    BK = bin_at(IDX);                                                         \
1742*4882a593Smuzhiyun    FD = BK->fd;                                                              \
1743*4882a593Smuzhiyun    P->bk = BK;                                                               \
1744*4882a593Smuzhiyun    P->fd = FD;                                                               \
1745*4882a593Smuzhiyun    FD->bk = BK->fd = P;                                                      \
1746*4882a593Smuzhiyun  }                                                                           \
1747*4882a593Smuzhiyun  else                                                                        \
1748*4882a593Smuzhiyun  {                                                                           \
1749*4882a593Smuzhiyun    IDX = bin_index(S);                                                       \
1750*4882a593Smuzhiyun    BK = bin_at(IDX);                                                         \
1751*4882a593Smuzhiyun    FD = BK->fd;                                                              \
1752*4882a593Smuzhiyun    if (FD == BK) mark_binblock(IDX);                                         \
1753*4882a593Smuzhiyun    else                                                                      \
1754*4882a593Smuzhiyun    {                                                                         \
1755*4882a593Smuzhiyun      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
1756*4882a593Smuzhiyun      BK = FD->bk;                                                            \
1757*4882a593Smuzhiyun    }                                                                         \
1758*4882a593Smuzhiyun    P->bk = BK;                                                               \
1759*4882a593Smuzhiyun    P->fd = FD;                                                               \
1760*4882a593Smuzhiyun    FD->bk = BK->fd = P;                                                      \
1761*4882a593Smuzhiyun  }                                                                           \
1762*4882a593Smuzhiyun}
1763*4882a593Smuzhiyun
1764*4882a593Smuzhiyun
1765*4882a593Smuzhiyun/* take a chunk off a list */
1766*4882a593Smuzhiyun
1767*4882a593Smuzhiyun#define unlink(P, BK, FD)                                                     \
1768*4882a593Smuzhiyun{                                                                             \
1769*4882a593Smuzhiyun  BK = P->bk;                                                                 \
1770*4882a593Smuzhiyun  FD = P->fd;                                                                 \
1771*4882a593Smuzhiyun  FD->bk = BK;                                                                \
1772*4882a593Smuzhiyun  BK->fd = FD;                                                                \
1773*4882a593Smuzhiyun}
1774*4882a593Smuzhiyun
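/*
  Illustrative effect of unlink: for a free chunk P sitting between BK and
  FD on a circular bin list (BK <-> P <-> FD), unlink(P, BK, FD) leaves

    BK <-> FD

  P's own fd/bk pointers are left untouched; callers are expected to relink
  or reuse P right away, so the stale pointers are never followed.
*/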
1775*4882a593Smuzhiyun/* Place p as the last remainder */
1776*4882a593Smuzhiyun
1777*4882a593Smuzhiyun#define link_last_remainder(P)                                                \
1778*4882a593Smuzhiyun{                                                                             \
1779*4882a593Smuzhiyun  last_remainder->fd = last_remainder->bk =  P;                               \
1780*4882a593Smuzhiyun  P->fd = P->bk = last_remainder;                                             \
1781*4882a593Smuzhiyun}
1782*4882a593Smuzhiyun
1783*4882a593Smuzhiyun/* Clear the last_remainder bin */
1784*4882a593Smuzhiyun
1785*4882a593Smuzhiyun#define clear_last_remainder \
1786*4882a593Smuzhiyun  (last_remainder->fd = last_remainder->bk = last_remainder)
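/*
  Usage note (illustrative): when malloc splits a chunk, the unused tail is
  recorded with link_last_remainder(remainder).  A later request that fits
  is served from it before the regular bins are searched (step 2 of the
  algorithm described below), and clear_last_remainder empties the bin once
  the remainder is consumed or moved to an ordinary bin.
*/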
1787*4882a593Smuzhiyun
1788*4882a593Smuzhiyun
1789*4882a593Smuzhiyun
1790*4882a593Smuzhiyun
1791*4882a593Smuzhiyun
1792*4882a593Smuzhiyun/* Routines dealing with mmap(). */
1793*4882a593Smuzhiyun
1794*4882a593Smuzhiyun#if HAVE_MMAP
1795*4882a593Smuzhiyun
1796*4882a593Smuzhiyun#if __STD_C
1797*4882a593Smuzhiyunstatic mchunkptr mmap_chunk(size_t size)
1798*4882a593Smuzhiyun#else
1799*4882a593Smuzhiyunstatic mchunkptr mmap_chunk(size) size_t size;
1800*4882a593Smuzhiyun#endif
1801*4882a593Smuzhiyun{
1802*4882a593Smuzhiyun  size_t page_mask = malloc_getpagesize - 1;
1803*4882a593Smuzhiyun  mchunkptr p;
1804*4882a593Smuzhiyun
1805*4882a593Smuzhiyun#ifndef MAP_ANONYMOUS
1806*4882a593Smuzhiyun  static int fd = -1;
1807*4882a593Smuzhiyun#endif
1808*4882a593Smuzhiyun
1809*4882a593Smuzhiyun  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */
1810*4882a593Smuzhiyun
1811*4882a593Smuzhiyun  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
1812*4882a593Smuzhiyun   * there is no following chunk whose prev_size field could be used.
1813*4882a593Smuzhiyun   */
1814*4882a593Smuzhiyun  size = (size + SIZE_SZ + page_mask) & ~page_mask;
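  /* Worked example (illustrative, assuming SIZE_SZ == 4 and a 4096-byte
   * page): a 5000-byte request becomes (5000 + 4 + 4095) & ~4095 == 8192,
   * i.e. two pages, the extra SIZE_SZ covering the prev_size field that a
   * following chunk would otherwise have provided.
   */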
1815*4882a593Smuzhiyun
1816*4882a593Smuzhiyun#ifdef MAP_ANONYMOUS
1817*4882a593Smuzhiyun  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
1818*4882a593Smuzhiyun		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1819*4882a593Smuzhiyun#else /* !MAP_ANONYMOUS */
1820*4882a593Smuzhiyun  if (fd < 0)
1821*4882a593Smuzhiyun  {
1822*4882a593Smuzhiyun    fd = open("/dev/zero", O_RDWR);
1823*4882a593Smuzhiyun    if(fd < 0) return 0;
1824*4882a593Smuzhiyun  }
1825*4882a593Smuzhiyun  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
1826*4882a593Smuzhiyun#endif
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun  if(p == (mchunkptr)-1) return 0;
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun  n_mmaps++;
1831*4882a593Smuzhiyun  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;
1832*4882a593Smuzhiyun
1833*4882a593Smuzhiyun  /* We demand that the address eight bytes into a page be 8-byte aligned. */
1834*4882a593Smuzhiyun  assert(aligned_OK(chunk2mem(p)));
1835*4882a593Smuzhiyun
1836*4882a593Smuzhiyun  /* The offset to the start of the mmapped region is stored
1837*4882a593Smuzhiyun   * in the prev_size field of the chunk; normally it is zero,
1838*4882a593Smuzhiyun   * but that can be changed in memalign().
1839*4882a593Smuzhiyun   */
1840*4882a593Smuzhiyun  p->prev_size = 0;
1841*4882a593Smuzhiyun  set_head(p, size|IS_MMAPPED);
1842*4882a593Smuzhiyun
1843*4882a593Smuzhiyun  mmapped_mem += size;
1844*4882a593Smuzhiyun  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
1845*4882a593Smuzhiyun    max_mmapped_mem = mmapped_mem;
1846*4882a593Smuzhiyun  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1847*4882a593Smuzhiyun    max_total_mem = mmapped_mem + sbrked_mem;
1848*4882a593Smuzhiyun  return p;
1849*4882a593Smuzhiyun}
1850*4882a593Smuzhiyun
1851*4882a593Smuzhiyun#if __STD_C
1852*4882a593Smuzhiyunstatic void munmap_chunk(mchunkptr p)
1853*4882a593Smuzhiyun#else
1854*4882a593Smuzhiyunstatic void munmap_chunk(p) mchunkptr p;
1855*4882a593Smuzhiyun#endif
1856*4882a593Smuzhiyun{
1857*4882a593Smuzhiyun  INTERNAL_SIZE_T size = chunksize(p);
1858*4882a593Smuzhiyun  int ret;
1859*4882a593Smuzhiyun
1860*4882a593Smuzhiyun  assert (chunk_is_mmapped(p));
1861*4882a593Smuzhiyun  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
1862*4882a593Smuzhiyun  assert((n_mmaps > 0));
1863*4882a593Smuzhiyun  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);
1864*4882a593Smuzhiyun
1865*4882a593Smuzhiyun  n_mmaps--;
1866*4882a593Smuzhiyun  mmapped_mem -= (size + p->prev_size);
1867*4882a593Smuzhiyun
1868*4882a593Smuzhiyun  ret = munmap((char *)p - p->prev_size, size + p->prev_size);
1869*4882a593Smuzhiyun
1870*4882a593Smuzhiyun  /* munmap returns non-zero on failure */
1871*4882a593Smuzhiyun  assert(ret == 0);
1872*4882a593Smuzhiyun}
1873*4882a593Smuzhiyun
1874*4882a593Smuzhiyun#if HAVE_MREMAP
1875*4882a593Smuzhiyun
1876*4882a593Smuzhiyun#if __STD_C
1877*4882a593Smuzhiyunstatic mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
1878*4882a593Smuzhiyun#else
1879*4882a593Smuzhiyunstatic mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
1880*4882a593Smuzhiyun#endif
1881*4882a593Smuzhiyun{
1882*4882a593Smuzhiyun  size_t page_mask = malloc_getpagesize - 1;
1883*4882a593Smuzhiyun  INTERNAL_SIZE_T offset = p->prev_size;
1884*4882a593Smuzhiyun  INTERNAL_SIZE_T size = chunksize(p);
1885*4882a593Smuzhiyun  char *cp;
1886*4882a593Smuzhiyun
1887*4882a593Smuzhiyun  assert (chunk_is_mmapped(p));
1888*4882a593Smuzhiyun  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
1889*4882a593Smuzhiyun  assert((n_mmaps > 0));
1890*4882a593Smuzhiyun  assert(((size + offset) & (malloc_getpagesize-1)) == 0);
1891*4882a593Smuzhiyun
1892*4882a593Smuzhiyun  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
1893*4882a593Smuzhiyun  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
1894*4882a593Smuzhiyun
1895*4882a593Smuzhiyun  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);
1896*4882a593Smuzhiyun
1897*4882a593Smuzhiyun  if (cp == (char *)-1) return 0;
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun  p = (mchunkptr)(cp + offset);
1900*4882a593Smuzhiyun
1901*4882a593Smuzhiyun  assert(aligned_OK(chunk2mem(p)));
1902*4882a593Smuzhiyun
1903*4882a593Smuzhiyun  assert((p->prev_size == offset));
1904*4882a593Smuzhiyun  set_head(p, (new_size - offset)|IS_MMAPPED);
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun  mmapped_mem -= size + offset;
1907*4882a593Smuzhiyun  mmapped_mem += new_size;
1908*4882a593Smuzhiyun  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
1909*4882a593Smuzhiyun    max_mmapped_mem = mmapped_mem;
1910*4882a593Smuzhiyun  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1911*4882a593Smuzhiyun    max_total_mem = mmapped_mem + sbrked_mem;
1912*4882a593Smuzhiyun  return p;
1913*4882a593Smuzhiyun}
1914*4882a593Smuzhiyun
1915*4882a593Smuzhiyun#endif /* HAVE_MREMAP */
1916*4882a593Smuzhiyun
1917*4882a593Smuzhiyun#endif /* HAVE_MMAP */
1918*4882a593Smuzhiyun
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun
1921*4882a593Smuzhiyun
1922*4882a593Smuzhiyun/*
1923*4882a593Smuzhiyun  Extend the top-most chunk by obtaining memory from system.
1924*4882a593Smuzhiyun  Main interface to sbrk (but see also malloc_trim).
1925*4882a593Smuzhiyun*/
1926*4882a593Smuzhiyun
1927*4882a593Smuzhiyun#if __STD_C
1928*4882a593Smuzhiyunstatic void malloc_extend_top(INTERNAL_SIZE_T nb)
1929*4882a593Smuzhiyun#else
1930*4882a593Smuzhiyunstatic void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
1931*4882a593Smuzhiyun#endif
1932*4882a593Smuzhiyun{
1933*4882a593Smuzhiyun  char*     brk;                  /* return value from sbrk */
1934*4882a593Smuzhiyun  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
1935*4882a593Smuzhiyun  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
1936*4882a593Smuzhiyun  char*     new_brk;              /* return of 2nd sbrk call */
1937*4882a593Smuzhiyun  INTERNAL_SIZE_T top_size;       /* new size of top chunk */
1938*4882a593Smuzhiyun
1939*4882a593Smuzhiyun  mchunkptr old_top     = top;  /* Record state of old top */
1940*4882a593Smuzhiyun  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
1941*4882a593Smuzhiyun  char*     old_end      = (char*)(chunk_at_offset(old_top, old_top_size));
1942*4882a593Smuzhiyun
1943*4882a593Smuzhiyun  /* Pad request with top_pad plus minimal overhead */
1944*4882a593Smuzhiyun
1945*4882a593Smuzhiyun  INTERNAL_SIZE_T    sbrk_size     = nb + top_pad + MINSIZE;
1946*4882a593Smuzhiyun  unsigned long pagesz    = malloc_getpagesize;
1947*4882a593Smuzhiyun
1948*4882a593Smuzhiyun  /* If not the first time through, round to preserve page boundary */
1949*4882a593Smuzhiyun  /* Otherwise, we need to correct to a page size below anyway. */
1950*4882a593Smuzhiyun  /* (We also correct below if an intervening foreign sbrk call.) */
1951*4882a593Smuzhiyun  /* (We also correct below if there was an intervening foreign sbrk call.) */
1952*4882a593Smuzhiyun  if (sbrk_base != (char*)(-1))
1953*4882a593Smuzhiyun    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
1954*4882a593Smuzhiyun
1955*4882a593Smuzhiyun  brk = (char*)(MORECORE (sbrk_size));
1956*4882a593Smuzhiyun
1957*4882a593Smuzhiyun  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
1958*4882a593Smuzhiyun  if (brk == (char*)(MORECORE_FAILURE) ||
1959*4882a593Smuzhiyun      (brk < old_end && old_top != initial_top))
1960*4882a593Smuzhiyun    return;
1961*4882a593Smuzhiyun
1962*4882a593Smuzhiyun  sbrked_mem += sbrk_size;
1963*4882a593Smuzhiyun
1964*4882a593Smuzhiyun  if (brk == old_end) /* can just add bytes to current top */
1965*4882a593Smuzhiyun  {
1966*4882a593Smuzhiyun    top_size = sbrk_size + old_top_size;
1967*4882a593Smuzhiyun    set_head(top, top_size | PREV_INUSE);
1968*4882a593Smuzhiyun  }
1969*4882a593Smuzhiyun  else
1970*4882a593Smuzhiyun  {
1971*4882a593Smuzhiyun    if (sbrk_base == (char*)(-1))  /* First time through. Record base */
1972*4882a593Smuzhiyun      sbrk_base = brk;
1973*4882a593Smuzhiyun    else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
1974*4882a593Smuzhiyun      sbrked_mem += brk - (char*)old_end;
1975*4882a593Smuzhiyun
1976*4882a593Smuzhiyun    /* Guarantee alignment of first new chunk made from this space */
1977*4882a593Smuzhiyun    front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
1978*4882a593Smuzhiyun    if (front_misalign > 0)
1979*4882a593Smuzhiyun    {
1980*4882a593Smuzhiyun      correction = (MALLOC_ALIGNMENT) - front_misalign;
1981*4882a593Smuzhiyun      brk += correction;
1982*4882a593Smuzhiyun    }
1983*4882a593Smuzhiyun    else
1984*4882a593Smuzhiyun      correction = 0;
1985*4882a593Smuzhiyun
1986*4882a593Smuzhiyun    /* Guarantee the next brk will be at a page boundary */
1987*4882a593Smuzhiyun
1988*4882a593Smuzhiyun    correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
1989*4882a593Smuzhiyun		   ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));
1990*4882a593Smuzhiyun
1991*4882a593Smuzhiyun    /* Allocate correction */
1992*4882a593Smuzhiyun    new_brk = (char*)(MORECORE (correction));
1993*4882a593Smuzhiyun    if (new_brk == (char*)(MORECORE_FAILURE)) return;
1994*4882a593Smuzhiyun
1995*4882a593Smuzhiyun    sbrked_mem += correction;
1996*4882a593Smuzhiyun
1997*4882a593Smuzhiyun    top = (mchunkptr)brk;
1998*4882a593Smuzhiyun    top_size = new_brk - brk + correction;
1999*4882a593Smuzhiyun    set_head(top, top_size | PREV_INUSE);
2000*4882a593Smuzhiyun
2001*4882a593Smuzhiyun    if (old_top != initial_top)
2002*4882a593Smuzhiyun    {
2003*4882a593Smuzhiyun
2004*4882a593Smuzhiyun      /* There must have been an intervening foreign sbrk call. */
2005*4882a593Smuzhiyun      /* A double fencepost is necessary to prevent consolidation */
2006*4882a593Smuzhiyun
2007*4882a593Smuzhiyun      /* If not enough space to do this, then user did something very wrong */
2008*4882a593Smuzhiyun      if (old_top_size < MINSIZE)
2009*4882a593Smuzhiyun      {
2010*4882a593Smuzhiyun	set_head(top, PREV_INUSE); /* will force null return from malloc */
2011*4882a593Smuzhiyun	return;
2012*4882a593Smuzhiyun      }
2013*4882a593Smuzhiyun
2014*4882a593Smuzhiyun      /* Also keep size a multiple of MALLOC_ALIGNMENT */
2015*4882a593Smuzhiyun      old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2016*4882a593Smuzhiyun      set_head_size(old_top, old_top_size);
2017*4882a593Smuzhiyun      chunk_at_offset(old_top, old_top_size          )->size =
2018*4882a593Smuzhiyun	SIZE_SZ|PREV_INUSE;
2019*4882a593Smuzhiyun      chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
2020*4882a593Smuzhiyun	SIZE_SZ|PREV_INUSE;
2021*4882a593Smuzhiyun      /* If possible, release the rest. */
2022*4882a593Smuzhiyun      if (old_top_size >= MINSIZE)
2023*4882a593Smuzhiyun	fREe(chunk2mem(old_top));
2024*4882a593Smuzhiyun    }
2025*4882a593Smuzhiyun  }
2026*4882a593Smuzhiyun
2027*4882a593Smuzhiyun  if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
2028*4882a593Smuzhiyun    max_sbrked_mem = sbrked_mem;
2029*4882a593Smuzhiyun  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
2030*4882a593Smuzhiyun    max_total_mem = mmapped_mem + sbrked_mem;
2031*4882a593Smuzhiyun
2032*4882a593Smuzhiyun  /* We always land on a page boundary */
2033*4882a593Smuzhiyun  assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
2034*4882a593Smuzhiyun}
2035*4882a593Smuzhiyun
2036*4882a593Smuzhiyun
2037*4882a593Smuzhiyun
2038*4882a593Smuzhiyun
2039*4882a593Smuzhiyun/* Main public routines */
2040*4882a593Smuzhiyun
2041*4882a593Smuzhiyun
2042*4882a593Smuzhiyun/*
2043*4882a593Smuzhiyun  Malloc Algorithm:
2044*4882a593Smuzhiyun
2045*4882a593Smuzhiyun    The requested size is first converted into a usable form, `nb'.
2046*4882a593Smuzhiyun    This currently means to add 4 bytes overhead plus possibly more to
2047*4882a593Smuzhiyun    obtain 8-byte alignment and/or to obtain a size of at least
2048*4882a593Smuzhiyun    MINSIZE (currently 16 bytes), the smallest allocatable size.
2049*4882a593Smuzhiyun    (All fits are considered `exact' if they are within MINSIZE bytes.)
2050*4882a593Smuzhiyun
2051*4882a593Smuzhiyun    From there, the first of the following steps that succeeds is taken:
2052*4882a593Smuzhiyun
2053*4882a593Smuzhiyun      1. The bin corresponding to the request size is scanned, and if
2054*4882a593Smuzhiyun	 a chunk of exactly the right size is found, it is taken.
2055*4882a593Smuzhiyun
2056*4882a593Smuzhiyun      2. The most recently remaindered chunk is used if it is big
2057*4882a593Smuzhiyun	 enough.  This is a form of (roving) first fit, used only in
2058*4882a593Smuzhiyun	 the absence of exact fits. Runs of consecutive requests use
2059*4882a593Smuzhiyun	 the remainder of the chunk used for the previous such request
2060*4882a593Smuzhiyun	 whenever possible. This limited use of a first-fit style
2061*4882a593Smuzhiyun	 allocation strategy tends to give contiguous chunks
2062*4882a593Smuzhiyun	 coextensive lifetimes, which improves locality and can reduce
2063*4882a593Smuzhiyun	 fragmentation in the long run.
2064*4882a593Smuzhiyun
2065*4882a593Smuzhiyun      3. Other bins are scanned in increasing size order, using a
2066*4882a593Smuzhiyun	 chunk big enough to fulfill the request, and splitting off
2067*4882a593Smuzhiyun	 any remainder.  This search is strictly by best-fit; i.e.,
2068*4882a593Smuzhiyun	 the smallest (with ties going to approximately the least
2069*4882a593Smuzhiyun	 recently used) chunk that fits is selected.
2070*4882a593Smuzhiyun
2071*4882a593Smuzhiyun      4. If large enough, the chunk bordering the end of memory
2072*4882a593Smuzhiyun	 (`top') is split off. (This use of `top' is in accord with
2073*4882a593Smuzhiyun	 the best-fit search rule.  In effect, `top' is treated as
2074*4882a593Smuzhiyun	 larger (and thus less well fitting) than any other available
2075*4882a593Smuzhiyun	 chunk since it can be extended to be as large as necessary
2076*4882a593Smuzhiyun	 (up to system limitations).
2077*4882a593Smuzhiyun
2078*4882a593Smuzhiyun      5. If the request size meets the mmap threshold and the
2079*4882a593Smuzhiyun	 system supports mmap, and there are few enough currently
2080*4882a593Smuzhiyun	 allocated mmapped regions, and a call to mmap succeeds,
2081*4882a593Smuzhiyun	 the request is allocated via direct memory mapping.
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun      6. Otherwise, the top of memory is extended by
2084*4882a593Smuzhiyun	 obtaining more space from the system (normally using sbrk,
2085*4882a593Smuzhiyun	 but definable to anything else via the MORECORE macro).
2086*4882a593Smuzhiyun	 Memory is gathered from the system (in system page-sized
2087*4882a593Smuzhiyun	 units) in a way that allows chunks obtained across different
2088*4882a593Smuzhiyun	 sbrk calls to be consolidated, but does not require
2089*4882a593Smuzhiyun	 contiguous memory. Thus, it should be safe to intersperse
2090*4882a593Smuzhiyun	 mallocs with other sbrk calls.
2091*4882a593Smuzhiyun
2092*4882a593Smuzhiyun
2093*4882a593Smuzhiyun      All allocations are made from the `lowest' part of any found
2094*4882a593Smuzhiyun      chunk. (The implementation invariant is that prev_inuse is
2095*4882a593Smuzhiyun      always true of any allocated chunk; i.e., that each allocated
2096*4882a593Smuzhiyun      chunk borders either a previously allocated and still in-use chunk,
2097*4882a593Smuzhiyun      or the base of its memory arena.)
2098*4882a593Smuzhiyun
2099*4882a593Smuzhiyun*/
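
/*
  Illustrative sketch of the size conversion described above; it is not
  part of the allocator.  The arithmetic assumes the figures quoted in
  the text (4 bytes of overhead, 8-byte alignment, MINSIZE == 16), i.e.
  a 32-bit INTERNAL_SIZE_T; other configurations pad differently.

      request2size(1)   -> 16    padded up to MINSIZE
      request2size(13)  -> 24    13 + 4 = 17, rounded up to 24
      request2size(20)  -> 24    20 + 4 = 24, already 8-byte aligned
      request2size(25)  -> 32    25 + 4 = 29, rounded up to 32

  For requests large enough to clear MINSIZE this is equivalent to:

      nb = (bytes + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
*/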
2100*4882a593Smuzhiyun
2101*4882a593Smuzhiyun#if __STD_C
2102*4882a593SmuzhiyunVoid_t* mALLOc(size_t bytes)
2103*4882a593Smuzhiyun#else
2104*4882a593SmuzhiyunVoid_t* mALLOc(bytes) size_t bytes;
2105*4882a593Smuzhiyun#endif
2106*4882a593Smuzhiyun{
2107*4882a593Smuzhiyun  mchunkptr victim;                  /* inspected/selected chunk */
2108*4882a593Smuzhiyun  INTERNAL_SIZE_T victim_size;       /* its size */
2109*4882a593Smuzhiyun  int       idx;                     /* index for bin traversal */
2110*4882a593Smuzhiyun  mbinptr   bin;                     /* associated bin */
2111*4882a593Smuzhiyun  mchunkptr remainder;               /* remainder from a split */
2112*4882a593Smuzhiyun  long      remainder_size;          /* its size */
2113*4882a593Smuzhiyun  int       remainder_index;         /* its bin index */
2114*4882a593Smuzhiyun  unsigned long block;               /* block traverser bit */
2115*4882a593Smuzhiyun  int       startidx;                /* first bin of a traversed block */
2116*4882a593Smuzhiyun  mchunkptr fwd;                     /* misc temp for linking */
2117*4882a593Smuzhiyun  mchunkptr bck;                     /* misc temp for linking */
2118*4882a593Smuzhiyun  mbinptr q;                         /* misc temp */
2119*4882a593Smuzhiyun
2120*4882a593Smuzhiyun  INTERNAL_SIZE_T nb;
2121*4882a593Smuzhiyun
2122*4882a593Smuzhiyun  if ((long)bytes < 0) return 0;
2123*4882a593Smuzhiyun
2124*4882a593Smuzhiyun  nb = request2size(bytes);  /* padded request size; */
2125*4882a593Smuzhiyun
2126*4882a593Smuzhiyun  /* Check for exact match in a bin */
2127*4882a593Smuzhiyun
2128*4882a593Smuzhiyun  if (is_small_request(nb))  /* Faster version for small requests */
2129*4882a593Smuzhiyun  {
2130*4882a593Smuzhiyun    idx = smallbin_index(nb);
2131*4882a593Smuzhiyun
2132*4882a593Smuzhiyun    /* No traversal or size check necessary for small bins.  */
2133*4882a593Smuzhiyun
2134*4882a593Smuzhiyun    q = bin_at(idx);
2135*4882a593Smuzhiyun    victim = last(q);
2136*4882a593Smuzhiyun
2137*4882a593Smuzhiyun    /* Also scan the next one, since it would have a remainder < MINSIZE */
2138*4882a593Smuzhiyun    if (victim == q)
2139*4882a593Smuzhiyun    {
2140*4882a593Smuzhiyun      q = next_bin(q);
2141*4882a593Smuzhiyun      victim = last(q);
2142*4882a593Smuzhiyun    }
2143*4882a593Smuzhiyun    if (victim != q)
2144*4882a593Smuzhiyun    {
2145*4882a593Smuzhiyun      victim_size = chunksize(victim);
2146*4882a593Smuzhiyun      unlink(victim, bck, fwd);
2147*4882a593Smuzhiyun      set_inuse_bit_at_offset(victim, victim_size);
2148*4882a593Smuzhiyun      check_malloced_chunk(victim, nb);
2149*4882a593Smuzhiyun      return chunk2mem(victim);
2150*4882a593Smuzhiyun    }
2151*4882a593Smuzhiyun
2152*4882a593Smuzhiyun    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
2153*4882a593Smuzhiyun
2154*4882a593Smuzhiyun  }
2155*4882a593Smuzhiyun  else
2156*4882a593Smuzhiyun  {
2157*4882a593Smuzhiyun    idx = bin_index(nb);
2158*4882a593Smuzhiyun    bin = bin_at(idx);
2159*4882a593Smuzhiyun
2160*4882a593Smuzhiyun    for (victim = last(bin); victim != bin; victim = victim->bk)
2161*4882a593Smuzhiyun    {
2162*4882a593Smuzhiyun      victim_size = chunksize(victim);
2163*4882a593Smuzhiyun      remainder_size = victim_size - nb;
2164*4882a593Smuzhiyun
2165*4882a593Smuzhiyun      if (remainder_size >= (long)MINSIZE) /* too big */
2166*4882a593Smuzhiyun      {
2167*4882a593Smuzhiyun	--idx; /* adjust to rescan below after checking last remainder */
2168*4882a593Smuzhiyun	break;
2169*4882a593Smuzhiyun      }
2170*4882a593Smuzhiyun
2171*4882a593Smuzhiyun      else if (remainder_size >= 0) /* exact fit */
2172*4882a593Smuzhiyun      {
2173*4882a593Smuzhiyun	unlink(victim, bck, fwd);
2174*4882a593Smuzhiyun	set_inuse_bit_at_offset(victim, victim_size);
2175*4882a593Smuzhiyun	check_malloced_chunk(victim, nb);
2176*4882a593Smuzhiyun	return chunk2mem(victim);
2177*4882a593Smuzhiyun      }
2178*4882a593Smuzhiyun    }
2179*4882a593Smuzhiyun
2180*4882a593Smuzhiyun    ++idx;
2181*4882a593Smuzhiyun
2182*4882a593Smuzhiyun  }
2183*4882a593Smuzhiyun
2184*4882a593Smuzhiyun  /* Try to use the last split-off remainder */
2185*4882a593Smuzhiyun
2186*4882a593Smuzhiyun  if ( (victim = last_remainder->fd) != last_remainder)
2187*4882a593Smuzhiyun  {
2188*4882a593Smuzhiyun    victim_size = chunksize(victim);
2189*4882a593Smuzhiyun    remainder_size = victim_size - nb;
2190*4882a593Smuzhiyun
2191*4882a593Smuzhiyun    if (remainder_size >= (long)MINSIZE) /* re-split */
2192*4882a593Smuzhiyun    {
2193*4882a593Smuzhiyun      remainder = chunk_at_offset(victim, nb);
2194*4882a593Smuzhiyun      set_head(victim, nb | PREV_INUSE);
2195*4882a593Smuzhiyun      link_last_remainder(remainder);
2196*4882a593Smuzhiyun      set_head(remainder, remainder_size | PREV_INUSE);
2197*4882a593Smuzhiyun      set_foot(remainder, remainder_size);
2198*4882a593Smuzhiyun      check_malloced_chunk(victim, nb);
2199*4882a593Smuzhiyun      return chunk2mem(victim);
2200*4882a593Smuzhiyun    }
2201*4882a593Smuzhiyun
2202*4882a593Smuzhiyun    clear_last_remainder;
2203*4882a593Smuzhiyun
2204*4882a593Smuzhiyun    if (remainder_size >= 0)  /* exhaust */
2205*4882a593Smuzhiyun    {
2206*4882a593Smuzhiyun      set_inuse_bit_at_offset(victim, victim_size);
2207*4882a593Smuzhiyun      check_malloced_chunk(victim, nb);
2208*4882a593Smuzhiyun      return chunk2mem(victim);
2209*4882a593Smuzhiyun    }
2210*4882a593Smuzhiyun
2211*4882a593Smuzhiyun    /* Else place in bin */
2212*4882a593Smuzhiyun
2213*4882a593Smuzhiyun    frontlink(victim, victim_size, remainder_index, bck, fwd);
2214*4882a593Smuzhiyun  }
2215*4882a593Smuzhiyun
2216*4882a593Smuzhiyun  /*
2217*4882a593Smuzhiyun     If there are any possibly nonempty big-enough blocks,
2218*4882a593Smuzhiyun     search for best fitting chunk by scanning bins in blockwidth units.
2219*4882a593Smuzhiyun  */
2220*4882a593Smuzhiyun
2221*4882a593Smuzhiyun  if ( (block = idx2binblock(idx)) <= binblocks)
2222*4882a593Smuzhiyun  {
2223*4882a593Smuzhiyun
2224*4882a593Smuzhiyun    /* Get to the first marked block */
2225*4882a593Smuzhiyun
2226*4882a593Smuzhiyun    if ( (block & binblocks) == 0)
2227*4882a593Smuzhiyun    {
2228*4882a593Smuzhiyun      /* force to an even block boundary */
2229*4882a593Smuzhiyun      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
2230*4882a593Smuzhiyun      block <<= 1;
2231*4882a593Smuzhiyun      while ((block & binblocks) == 0)
2232*4882a593Smuzhiyun      {
2233*4882a593Smuzhiyun	idx += BINBLOCKWIDTH;
2234*4882a593Smuzhiyun	block <<= 1;
2235*4882a593Smuzhiyun      }
2236*4882a593Smuzhiyun    }
2237*4882a593Smuzhiyun
2238*4882a593Smuzhiyun    /* For each possibly nonempty block ... */
2239*4882a593Smuzhiyun    for (;;)
2240*4882a593Smuzhiyun    {
2241*4882a593Smuzhiyun      startidx = idx;          /* (track incomplete blocks) */
2242*4882a593Smuzhiyun      q = bin = bin_at(idx);
2243*4882a593Smuzhiyun
2244*4882a593Smuzhiyun      /* For each bin in this block ... */
2245*4882a593Smuzhiyun      do
2246*4882a593Smuzhiyun      {
2247*4882a593Smuzhiyun	/* Find and use first big enough chunk ... */
2248*4882a593Smuzhiyun
2249*4882a593Smuzhiyun	for (victim = last(bin); victim != bin; victim = victim->bk)
2250*4882a593Smuzhiyun	{
2251*4882a593Smuzhiyun	  victim_size = chunksize(victim);
2252*4882a593Smuzhiyun	  remainder_size = victim_size - nb;
2253*4882a593Smuzhiyun
2254*4882a593Smuzhiyun	  if (remainder_size >= (long)MINSIZE) /* split */
2255*4882a593Smuzhiyun	  {
2256*4882a593Smuzhiyun	    remainder = chunk_at_offset(victim, nb);
2257*4882a593Smuzhiyun	    set_head(victim, nb | PREV_INUSE);
2258*4882a593Smuzhiyun	    unlink(victim, bck, fwd);
2259*4882a593Smuzhiyun	    link_last_remainder(remainder);
2260*4882a593Smuzhiyun	    set_head(remainder, remainder_size | PREV_INUSE);
2261*4882a593Smuzhiyun	    set_foot(remainder, remainder_size);
2262*4882a593Smuzhiyun	    check_malloced_chunk(victim, nb);
2263*4882a593Smuzhiyun	    return chunk2mem(victim);
2264*4882a593Smuzhiyun	  }
2265*4882a593Smuzhiyun
2266*4882a593Smuzhiyun	  else if (remainder_size >= 0)  /* take */
2267*4882a593Smuzhiyun	  {
2268*4882a593Smuzhiyun	    set_inuse_bit_at_offset(victim, victim_size);
2269*4882a593Smuzhiyun	    unlink(victim, bck, fwd);
2270*4882a593Smuzhiyun	    check_malloced_chunk(victim, nb);
2271*4882a593Smuzhiyun	    return chunk2mem(victim);
2272*4882a593Smuzhiyun	  }
2273*4882a593Smuzhiyun
2274*4882a593Smuzhiyun	}
2275*4882a593Smuzhiyun
2276*4882a593Smuzhiyun       bin = next_bin(bin);
2277*4882a593Smuzhiyun
2278*4882a593Smuzhiyun      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
2279*4882a593Smuzhiyun
2280*4882a593Smuzhiyun      /* Clear out the block bit. */
2281*4882a593Smuzhiyun
2282*4882a593Smuzhiyun      do   /* Possibly backtrack to try to clear a partial block */
2283*4882a593Smuzhiyun      {
2284*4882a593Smuzhiyun	if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
2285*4882a593Smuzhiyun	{
2286*4882a593Smuzhiyun	  binblocks &= ~block;
2287*4882a593Smuzhiyun	  break;
2288*4882a593Smuzhiyun	}
2289*4882a593Smuzhiyun	--startidx;
2290*4882a593Smuzhiyun       q = prev_bin(q);
2291*4882a593Smuzhiyun      } while (first(q) == q);
2292*4882a593Smuzhiyun
2293*4882a593Smuzhiyun      /* Get to the next possibly nonempty block */
2294*4882a593Smuzhiyun
2295*4882a593Smuzhiyun      if ( (block <<= 1) <= binblocks && (block != 0) )
2296*4882a593Smuzhiyun      {
2297*4882a593Smuzhiyun	while ((block & binblocks) == 0)
2298*4882a593Smuzhiyun	{
2299*4882a593Smuzhiyun	  idx += BINBLOCKWIDTH;
2300*4882a593Smuzhiyun	  block <<= 1;
2301*4882a593Smuzhiyun	}
2302*4882a593Smuzhiyun      }
2303*4882a593Smuzhiyun      else
2304*4882a593Smuzhiyun	break;
2305*4882a593Smuzhiyun    }
2306*4882a593Smuzhiyun  }
2307*4882a593Smuzhiyun
2308*4882a593Smuzhiyun
2309*4882a593Smuzhiyun  /* Try to use top chunk */
2310*4882a593Smuzhiyun
2311*4882a593Smuzhiyun  /* Require that there be a remainder, ensuring top always exists  */
2312*4882a593Smuzhiyun  if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
2313*4882a593Smuzhiyun  {
2314*4882a593Smuzhiyun
2315*4882a593Smuzhiyun#if HAVE_MMAP
2316*4882a593Smuzhiyun    /* If big and would otherwise need to extend, try to use mmap instead */
2317*4882a593Smuzhiyun    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
2318*4882a593Smuzhiyun	(victim = mmap_chunk(nb)) != 0)
2319*4882a593Smuzhiyun      return chunk2mem(victim);
2320*4882a593Smuzhiyun#endif
2321*4882a593Smuzhiyun
2322*4882a593Smuzhiyun    /* Try to extend */
2323*4882a593Smuzhiyun    malloc_extend_top(nb);
2324*4882a593Smuzhiyun    if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
2325*4882a593Smuzhiyun      return 0; /* propagate failure */
2326*4882a593Smuzhiyun  }
2327*4882a593Smuzhiyun
2328*4882a593Smuzhiyun  victim = top;
2329*4882a593Smuzhiyun  set_head(victim, nb | PREV_INUSE);
2330*4882a593Smuzhiyun  top = chunk_at_offset(victim, nb);
2331*4882a593Smuzhiyun  set_head(top, remainder_size | PREV_INUSE);
2332*4882a593Smuzhiyun  check_malloced_chunk(victim, nb);
2333*4882a593Smuzhiyun  return chunk2mem(victim);
2334*4882a593Smuzhiyun
2335*4882a593Smuzhiyun}
2336*4882a593Smuzhiyun
2337*4882a593Smuzhiyun
2338*4882a593Smuzhiyun
2339*4882a593Smuzhiyun
2340*4882a593Smuzhiyun/*
2341*4882a593Smuzhiyun
2342*4882a593Smuzhiyun  free() algorithm :
2343*4882a593Smuzhiyun
2344*4882a593Smuzhiyun    cases:
2345*4882a593Smuzhiyun
2346*4882a593Smuzhiyun       1. free(0) has no effect.
2347*4882a593Smuzhiyun
2348*4882a593Smuzhiyun       2. If the chunk was allocated via mmap, it is released via munmap().
2349*4882a593Smuzhiyun
2350*4882a593Smuzhiyun       3. If a returned chunk borders the current high end of memory,
2351*4882a593Smuzhiyun	  it is consolidated into the top, and if the total unused
2352*4882a593Smuzhiyun	  topmost memory exceeds the trim threshold, malloc_trim is
2353*4882a593Smuzhiyun	  called.
2354*4882a593Smuzhiyun
2355*4882a593Smuzhiyun       4. Other chunks are consolidated as they arrive, and
2356*4882a593Smuzhiyun	  placed in corresponding bins. (This includes the case of
2357*4882a593Smuzhiyun	  consolidating with the current `last_remainder').
2358*4882a593Smuzhiyun
2359*4882a593Smuzhiyun*/
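
/*
  Illustrative usage sketch of the cases above, from the caller's point
  of view (the standard names malloc/free map onto the mALLOc/fREe
  spellings used in this file); the chunk layout is hypothetical:

      void *a = malloc(100);
      void *b = malloc(100);      layout after both calls: [a][b][top]
      free(a);                    case 4: b is still in use, so nothing
                                  adjacent to a is free; a is placed in
                                  its bin
      free(b);                    case 3: b borders top, so it is merged
                                  into top, pulling in the already-free a;
                                  if the resulting top exceeds
                                  trim_threshold, malloc_trim is called
      free(0);                    case 1: no effect
*/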
2360*4882a593Smuzhiyun
2361*4882a593Smuzhiyun
2362*4882a593Smuzhiyun#if __STD_C
2363*4882a593Smuzhiyunvoid fREe(Void_t* mem)
2364*4882a593Smuzhiyun#else
2365*4882a593Smuzhiyunvoid fREe(mem) Void_t* mem;
2366*4882a593Smuzhiyun#endif
2367*4882a593Smuzhiyun{
2368*4882a593Smuzhiyun  mchunkptr p;         /* chunk corresponding to mem */
2369*4882a593Smuzhiyun  INTERNAL_SIZE_T hd;  /* its head field */
2370*4882a593Smuzhiyun  INTERNAL_SIZE_T sz;  /* its size */
2371*4882a593Smuzhiyun  int       idx;       /* its bin index */
2372*4882a593Smuzhiyun  mchunkptr next;      /* next contiguous chunk */
2373*4882a593Smuzhiyun  INTERNAL_SIZE_T nextsz; /* its size */
2374*4882a593Smuzhiyun  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
2375*4882a593Smuzhiyun  mchunkptr bck;       /* misc temp for linking */
2376*4882a593Smuzhiyun  mchunkptr fwd;       /* misc temp for linking */
2377*4882a593Smuzhiyun  int       islr;      /* track whether merging with last_remainder */
2378*4882a593Smuzhiyun
2379*4882a593Smuzhiyun  if (mem == 0)                              /* free(0) has no effect */
2380*4882a593Smuzhiyun    return;
2381*4882a593Smuzhiyun
2382*4882a593Smuzhiyun  p = mem2chunk(mem);
2383*4882a593Smuzhiyun  hd = p->size;
2384*4882a593Smuzhiyun
2385*4882a593Smuzhiyun#if HAVE_MMAP
2386*4882a593Smuzhiyun  if (hd & IS_MMAPPED)                       /* release mmapped memory. */
2387*4882a593Smuzhiyun  {
2388*4882a593Smuzhiyun    munmap_chunk(p);
2389*4882a593Smuzhiyun    return;
2390*4882a593Smuzhiyun  }
2391*4882a593Smuzhiyun#endif
2392*4882a593Smuzhiyun
2393*4882a593Smuzhiyun  check_inuse_chunk(p);
2394*4882a593Smuzhiyun
2395*4882a593Smuzhiyun  sz = hd & ~PREV_INUSE;
2396*4882a593Smuzhiyun  next = chunk_at_offset(p, sz);
2397*4882a593Smuzhiyun  nextsz = chunksize(next);
2398*4882a593Smuzhiyun
2399*4882a593Smuzhiyun  if (next == top)                            /* merge with top */
2400*4882a593Smuzhiyun  {
2401*4882a593Smuzhiyun    sz += nextsz;
2402*4882a593Smuzhiyun
2403*4882a593Smuzhiyun    if (!(hd & PREV_INUSE))                    /* consolidate backward */
2404*4882a593Smuzhiyun    {
2405*4882a593Smuzhiyun      prevsz = p->prev_size;
2406*4882a593Smuzhiyun      p = chunk_at_offset(p, -((long) prevsz));
2407*4882a593Smuzhiyun      sz += prevsz;
2408*4882a593Smuzhiyun      unlink(p, bck, fwd);
2409*4882a593Smuzhiyun    }
2410*4882a593Smuzhiyun
2411*4882a593Smuzhiyun    set_head(p, sz | PREV_INUSE);
2412*4882a593Smuzhiyun    top = p;
2413*4882a593Smuzhiyun    if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
2414*4882a593Smuzhiyun      malloc_trim(top_pad);
2415*4882a593Smuzhiyun    return;
2416*4882a593Smuzhiyun  }
2417*4882a593Smuzhiyun
2418*4882a593Smuzhiyun  set_head(next, nextsz);                    /* clear inuse bit */
2419*4882a593Smuzhiyun
2420*4882a593Smuzhiyun  islr = 0;
2421*4882a593Smuzhiyun
2422*4882a593Smuzhiyun  if (!(hd & PREV_INUSE))                    /* consolidate backward */
2423*4882a593Smuzhiyun  {
2424*4882a593Smuzhiyun    prevsz = p->prev_size;
2425*4882a593Smuzhiyun    p = chunk_at_offset(p, -((long) prevsz));
2426*4882a593Smuzhiyun    sz += prevsz;
2427*4882a593Smuzhiyun
2428*4882a593Smuzhiyun    if (p->fd == last_remainder)             /* keep as last_remainder */
2429*4882a593Smuzhiyun      islr = 1;
2430*4882a593Smuzhiyun    else
2431*4882a593Smuzhiyun      unlink(p, bck, fwd);
2432*4882a593Smuzhiyun  }
2433*4882a593Smuzhiyun
2434*4882a593Smuzhiyun  if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
2435*4882a593Smuzhiyun  {
2436*4882a593Smuzhiyun    sz += nextsz;
2437*4882a593Smuzhiyun
2438*4882a593Smuzhiyun    if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
2439*4882a593Smuzhiyun    {
2440*4882a593Smuzhiyun      islr = 1;
2441*4882a593Smuzhiyun      link_last_remainder(p);
2442*4882a593Smuzhiyun    }
2443*4882a593Smuzhiyun    else
2444*4882a593Smuzhiyun      unlink(next, bck, fwd);
2445*4882a593Smuzhiyun  }
2446*4882a593Smuzhiyun
2447*4882a593Smuzhiyun
2448*4882a593Smuzhiyun  set_head(p, sz | PREV_INUSE);
2449*4882a593Smuzhiyun  set_foot(p, sz);
2450*4882a593Smuzhiyun  if (!islr)
2451*4882a593Smuzhiyun    frontlink(p, sz, idx, bck, fwd);
2452*4882a593Smuzhiyun}
2453*4882a593Smuzhiyun
2454*4882a593Smuzhiyun
2455*4882a593Smuzhiyun
2456*4882a593Smuzhiyun
2457*4882a593Smuzhiyun
2458*4882a593Smuzhiyun/*
2459*4882a593Smuzhiyun
2460*4882a593Smuzhiyun  Realloc algorithm:
2461*4882a593Smuzhiyun
2462*4882a593Smuzhiyun    Chunks that were obtained via mmap cannot be extended or shrunk
2463*4882a593Smuzhiyun    unless HAVE_MREMAP is defined, in which case mremap is used.
2464*4882a593Smuzhiyun    Otherwise, if their reallocation is for additional space, they are
2465*4882a593Smuzhiyun    copied.  If for less, they are just left alone.
2466*4882a593Smuzhiyun
2467*4882a593Smuzhiyun    Otherwise, if the reallocation is for additional space, and the
2468*4882a593Smuzhiyun    chunk can be extended, it is, else a malloc-copy-free sequence is
2469*4882a593Smuzhiyun    taken.  There are several different ways that a chunk could be
2470*4882a593Smuzhiyun    extended. All are tried:
2471*4882a593Smuzhiyun
2472*4882a593Smuzhiyun       * Extending forward into following adjacent free chunk.
2473*4882a593Smuzhiyun       * Shifting backwards, joining preceding adjacent space.
2474*4882a593Smuzhiyun       * Both shifting backwards and extending forward.
2475*4882a593Smuzhiyun       * Extending into newly sbrked space.
2476*4882a593Smuzhiyun
2477*4882a593Smuzhiyun    Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
2478*4882a593Smuzhiyun    size argument of zero (re)allocates a minimum-sized chunk.
2479*4882a593Smuzhiyun
2480*4882a593Smuzhiyun    If the reallocation is for less space, and the new request is for
2481*4882a593Smuzhiyun    a `small' (<512 bytes) size, then the newly unused space is lopped
2482*4882a593Smuzhiyun    off and freed.
2483*4882a593Smuzhiyun
2484*4882a593Smuzhiyun    The old unix realloc convention of allowing the last-free'd chunk
2485*4882a593Smuzhiyun    to be used as an argument to realloc is no longer supported.
2486*4882a593Smuzhiyun    I don't know of any programs still relying on this feature,
2487*4882a593Smuzhiyun    and allowing it would also allow too many other incorrect
2488*4882a593Smuzhiyun    usages of realloc to be sensible.
2489*4882a593Smuzhiyun
2490*4882a593Smuzhiyun
2491*4882a593Smuzhiyun*/
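
/*
  Illustrative usage sketch (caller's view, hypothetical sizes):

      char *p = malloc(100);
      char *q = realloc(p, 200);   grow: extended in place if a free
                                   neighbour (or top) supplies the room,
                                   otherwise malloc + copy + free
      if (q != 0) p = q;           on failure the old block is untouched
      q = realloc(p, 40);          shrink to a `small' size: the unused
                                   tail is split off and freed
      if (q != 0) p = q;
      free(p);
      p = realloc(0, 50);          same as malloc(50)
      free(p);
*/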
2492*4882a593Smuzhiyun
2493*4882a593Smuzhiyun
2494*4882a593Smuzhiyun#if __STD_C
2495*4882a593SmuzhiyunVoid_t* rEALLOc(Void_t* oldmem, size_t bytes)
2496*4882a593Smuzhiyun#else
2497*4882a593SmuzhiyunVoid_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
2498*4882a593Smuzhiyun#endif
2499*4882a593Smuzhiyun{
2500*4882a593Smuzhiyun  INTERNAL_SIZE_T    nb;      /* padded request size */
2501*4882a593Smuzhiyun
2502*4882a593Smuzhiyun  mchunkptr oldp;             /* chunk corresponding to oldmem */
2503*4882a593Smuzhiyun  INTERNAL_SIZE_T    oldsize; /* its size */
2504*4882a593Smuzhiyun
2505*4882a593Smuzhiyun  mchunkptr newp;             /* chunk to return */
2506*4882a593Smuzhiyun  INTERNAL_SIZE_T    newsize; /* its size */
2507*4882a593Smuzhiyun  Void_t*   newmem;           /* corresponding user mem */
2508*4882a593Smuzhiyun
2509*4882a593Smuzhiyun  mchunkptr next;             /* next contiguous chunk after oldp */
2510*4882a593Smuzhiyun  INTERNAL_SIZE_T  nextsize;  /* its size */
2511*4882a593Smuzhiyun
2512*4882a593Smuzhiyun  mchunkptr prev;             /* previous contiguous chunk before oldp */
2513*4882a593Smuzhiyun  INTERNAL_SIZE_T  prevsize;  /* its size */
2514*4882a593Smuzhiyun
2515*4882a593Smuzhiyun  mchunkptr remainder;        /* holds split off extra space from newp */
2516*4882a593Smuzhiyun  INTERNAL_SIZE_T  remainder_size;   /* its size */
2517*4882a593Smuzhiyun
2518*4882a593Smuzhiyun  mchunkptr bck;              /* misc temp for linking */
2519*4882a593Smuzhiyun  mchunkptr fwd;              /* misc temp for linking */
2520*4882a593Smuzhiyun
2521*4882a593Smuzhiyun#ifdef REALLOC_ZERO_BYTES_FREES
2522*4882a593Smuzhiyun  if (bytes == 0) { fREe(oldmem); return 0; }
2523*4882a593Smuzhiyun#endif
2524*4882a593Smuzhiyun
2525*4882a593Smuzhiyun  if ((long)bytes < 0) return 0;
2526*4882a593Smuzhiyun
2527*4882a593Smuzhiyun  /* realloc of null is supposed to be same as malloc */
2528*4882a593Smuzhiyun  if (oldmem == 0) return mALLOc(bytes);
2529*4882a593Smuzhiyun
2530*4882a593Smuzhiyun  newp    = oldp    = mem2chunk(oldmem);
2531*4882a593Smuzhiyun  newsize = oldsize = chunksize(oldp);
2532*4882a593Smuzhiyun
2533*4882a593Smuzhiyun
2534*4882a593Smuzhiyun  nb = request2size(bytes);
2535*4882a593Smuzhiyun
2536*4882a593Smuzhiyun#if HAVE_MMAP
2537*4882a593Smuzhiyun  if (chunk_is_mmapped(oldp))
2538*4882a593Smuzhiyun  {
2539*4882a593Smuzhiyun#if HAVE_MREMAP
2540*4882a593Smuzhiyun    newp = mremap_chunk(oldp, nb);
2541*4882a593Smuzhiyun    if(newp) return chunk2mem(newp);
2542*4882a593Smuzhiyun#endif
2543*4882a593Smuzhiyun    /* Note the extra SIZE_SZ overhead. */
2544*4882a593Smuzhiyun    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
2545*4882a593Smuzhiyun    /* Must alloc, copy, free. */
2546*4882a593Smuzhiyun    newmem = mALLOc(bytes);
2547*4882a593Smuzhiyun    if (newmem == 0) return 0; /* propagate failure */
2548*4882a593Smuzhiyun    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
2549*4882a593Smuzhiyun    munmap_chunk(oldp);
2550*4882a593Smuzhiyun    return newmem;
2551*4882a593Smuzhiyun  }
2552*4882a593Smuzhiyun#endif
2553*4882a593Smuzhiyun
2554*4882a593Smuzhiyun  check_inuse_chunk(oldp);
2555*4882a593Smuzhiyun
2556*4882a593Smuzhiyun  if ((long)(oldsize) < (long)(nb))
2557*4882a593Smuzhiyun  {
2558*4882a593Smuzhiyun
2559*4882a593Smuzhiyun    /* Try expanding forward */
2560*4882a593Smuzhiyun
2561*4882a593Smuzhiyun    next = chunk_at_offset(oldp, oldsize);
2562*4882a593Smuzhiyun    if (next == top || !inuse(next))
2563*4882a593Smuzhiyun    {
2564*4882a593Smuzhiyun      nextsize = chunksize(next);
2565*4882a593Smuzhiyun
2566*4882a593Smuzhiyun      /* Forward into top only if a remainder */
2567*4882a593Smuzhiyun      if (next == top)
2568*4882a593Smuzhiyun      {
2569*4882a593Smuzhiyun	if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
2570*4882a593Smuzhiyun	{
2571*4882a593Smuzhiyun	  newsize += nextsize;
2572*4882a593Smuzhiyun	  top = chunk_at_offset(oldp, nb);
2573*4882a593Smuzhiyun	  set_head(top, (newsize - nb) | PREV_INUSE);
2574*4882a593Smuzhiyun	  set_head_size(oldp, nb);
2575*4882a593Smuzhiyun	  return chunk2mem(oldp);
2576*4882a593Smuzhiyun	}
2577*4882a593Smuzhiyun      }
2578*4882a593Smuzhiyun
2579*4882a593Smuzhiyun      /* Forward into next chunk */
2580*4882a593Smuzhiyun      else if (((long)(nextsize + newsize) >= (long)(nb)))
2581*4882a593Smuzhiyun      {
2582*4882a593Smuzhiyun	unlink(next, bck, fwd);
2583*4882a593Smuzhiyun	newsize  += nextsize;
2584*4882a593Smuzhiyun	goto split;
2585*4882a593Smuzhiyun      }
2586*4882a593Smuzhiyun    }
2587*4882a593Smuzhiyun    else
2588*4882a593Smuzhiyun    {
2589*4882a593Smuzhiyun      next = 0;
2590*4882a593Smuzhiyun      nextsize = 0;
2591*4882a593Smuzhiyun    }
2592*4882a593Smuzhiyun
2593*4882a593Smuzhiyun    /* Try shifting backwards. */
2594*4882a593Smuzhiyun
2595*4882a593Smuzhiyun    if (!prev_inuse(oldp))
2596*4882a593Smuzhiyun    {
2597*4882a593Smuzhiyun      prev = prev_chunk(oldp);
2598*4882a593Smuzhiyun      prevsize = chunksize(prev);
2599*4882a593Smuzhiyun
2600*4882a593Smuzhiyun      /* try forward + backward first to save a later consolidation */
2601*4882a593Smuzhiyun
2602*4882a593Smuzhiyun      if (next != 0)
2603*4882a593Smuzhiyun      {
2604*4882a593Smuzhiyun	/* into top */
2605*4882a593Smuzhiyun	if (next == top)
2606*4882a593Smuzhiyun	{
2607*4882a593Smuzhiyun	  if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
2608*4882a593Smuzhiyun	  {
2609*4882a593Smuzhiyun	    unlink(prev, bck, fwd);
2610*4882a593Smuzhiyun	    newp = prev;
2611*4882a593Smuzhiyun	    newsize += prevsize + nextsize;
2612*4882a593Smuzhiyun	    newmem = chunk2mem(newp);
2613*4882a593Smuzhiyun	    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2614*4882a593Smuzhiyun	    top = chunk_at_offset(newp, nb);
2615*4882a593Smuzhiyun	    set_head(top, (newsize - nb) | PREV_INUSE);
2616*4882a593Smuzhiyun	    set_head_size(newp, nb);
2617*4882a593Smuzhiyun	    return newmem;
2618*4882a593Smuzhiyun	  }
2619*4882a593Smuzhiyun	}
2620*4882a593Smuzhiyun
2621*4882a593Smuzhiyun	/* into next chunk */
2622*4882a593Smuzhiyun	else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
2623*4882a593Smuzhiyun	{
2624*4882a593Smuzhiyun	  unlink(next, bck, fwd);
2625*4882a593Smuzhiyun	  unlink(prev, bck, fwd);
2626*4882a593Smuzhiyun	  newp = prev;
2627*4882a593Smuzhiyun	  newsize += nextsize + prevsize;
2628*4882a593Smuzhiyun	  newmem = chunk2mem(newp);
2629*4882a593Smuzhiyun	  MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2630*4882a593Smuzhiyun	  goto split;
2631*4882a593Smuzhiyun	}
2632*4882a593Smuzhiyun      }
2633*4882a593Smuzhiyun
2634*4882a593Smuzhiyun      /* backward only */
2635*4882a593Smuzhiyun      if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
2636*4882a593Smuzhiyun      {
2637*4882a593Smuzhiyun	unlink(prev, bck, fwd);
2638*4882a593Smuzhiyun	newp = prev;
2639*4882a593Smuzhiyun	newsize += prevsize;
2640*4882a593Smuzhiyun	newmem = chunk2mem(newp);
2641*4882a593Smuzhiyun	MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2642*4882a593Smuzhiyun	goto split;
2643*4882a593Smuzhiyun      }
2644*4882a593Smuzhiyun    }
2645*4882a593Smuzhiyun
2646*4882a593Smuzhiyun    /* Must allocate */
2647*4882a593Smuzhiyun
2648*4882a593Smuzhiyun    newmem = mALLOc (bytes);
2649*4882a593Smuzhiyun
2650*4882a593Smuzhiyun    if (newmem == 0)  /* propagate failure */
2651*4882a593Smuzhiyun      return 0;
2652*4882a593Smuzhiyun
2653*4882a593Smuzhiyun    /* Avoid copy if newp is next chunk after oldp. */
2654*4882a593Smuzhiyun    /* (This can only happen when new chunk is sbrk'ed.) */
2655*4882a593Smuzhiyun
2656*4882a593Smuzhiyun    if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
2657*4882a593Smuzhiyun    {
2658*4882a593Smuzhiyun      newsize += chunksize(newp);
2659*4882a593Smuzhiyun      newp = oldp;
2660*4882a593Smuzhiyun      goto split;
2661*4882a593Smuzhiyun    }
2662*4882a593Smuzhiyun
2663*4882a593Smuzhiyun    /* Otherwise copy, free, and exit */
2664*4882a593Smuzhiyun    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
2665*4882a593Smuzhiyun    fREe(oldmem);
2666*4882a593Smuzhiyun    return newmem;
2667*4882a593Smuzhiyun  }
2668*4882a593Smuzhiyun
2669*4882a593Smuzhiyun
2670*4882a593Smuzhiyun split:  /* split off extra room in old or expanded chunk */
2671*4882a593Smuzhiyun
2672*4882a593Smuzhiyun  if (newsize - nb >= MINSIZE) /* split off remainder */
2673*4882a593Smuzhiyun  {
2674*4882a593Smuzhiyun    remainder = chunk_at_offset(newp, nb);
2675*4882a593Smuzhiyun    remainder_size = newsize - nb;
2676*4882a593Smuzhiyun    set_head_size(newp, nb);
2677*4882a593Smuzhiyun    set_head(remainder, remainder_size | PREV_INUSE);
2678*4882a593Smuzhiyun    set_inuse_bit_at_offset(remainder, remainder_size);
2679*4882a593Smuzhiyun    fREe(chunk2mem(remainder)); /* let free() deal with it */
2680*4882a593Smuzhiyun  }
2681*4882a593Smuzhiyun  else
2682*4882a593Smuzhiyun  {
2683*4882a593Smuzhiyun    set_head_size(newp, newsize);
2684*4882a593Smuzhiyun    set_inuse_bit_at_offset(newp, newsize);
2685*4882a593Smuzhiyun  }
2686*4882a593Smuzhiyun
2687*4882a593Smuzhiyun  check_inuse_chunk(newp);
2688*4882a593Smuzhiyun  return chunk2mem(newp);
2689*4882a593Smuzhiyun}
2690*4882a593Smuzhiyun
2691*4882a593Smuzhiyun
2692*4882a593Smuzhiyun
2693*4882a593Smuzhiyun
2694*4882a593Smuzhiyun/*
2695*4882a593Smuzhiyun
2696*4882a593Smuzhiyun  memalign algorithm:
2697*4882a593Smuzhiyun
2698*4882a593Smuzhiyun    memalign requests more than enough space from malloc, finds a spot
2699*4882a593Smuzhiyun    within that chunk that meets the alignment request, and then
2700*4882a593Smuzhiyun    possibly frees the leading and trailing space.
2701*4882a593Smuzhiyun
2702*4882a593Smuzhiyun    The alignment argument must be a power of two. This property is not
2703*4882a593Smuzhiyun    checked by memalign, so misuse may result in random runtime errors.
2704*4882a593Smuzhiyun
2705*4882a593Smuzhiyun    8-byte alignment is guaranteed by normal malloc calls, so don't
2706*4882a593Smuzhiyun    bother calling memalign with an argument of 8 or less.
2707*4882a593Smuzhiyun
2708*4882a593Smuzhiyun    Overreliance on memalign is a sure way to fragment space.
2709*4882a593Smuzhiyun
2710*4882a593Smuzhiyun*/
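
/*
  Illustrative usage sketch (hypothetical values): request 100 bytes on
  a 64-byte boundary.  As noted above, the alignment must be a power of
  two and is not checked here.

      void *p = memalign(64, 100);
      if (p != 0)
        assert(((unsigned long)p & 63) == 0);
      free(p);
*/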
2711*4882a593Smuzhiyun
2712*4882a593Smuzhiyun
2713*4882a593Smuzhiyun#if __STD_C
2714*4882a593SmuzhiyunVoid_t* mEMALIGn(size_t alignment, size_t bytes)
2715*4882a593Smuzhiyun#else
2716*4882a593SmuzhiyunVoid_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
2717*4882a593Smuzhiyun#endif
2718*4882a593Smuzhiyun{
2719*4882a593Smuzhiyun  INTERNAL_SIZE_T    nb;      /* padded  request size */
2720*4882a593Smuzhiyun  char*     m;                /* memory returned by malloc call */
2721*4882a593Smuzhiyun  mchunkptr p;                /* corresponding chunk */
2722*4882a593Smuzhiyun  char*     brk;              /* alignment point within p */
2723*4882a593Smuzhiyun  mchunkptr newp;             /* chunk to return */
2724*4882a593Smuzhiyun  INTERNAL_SIZE_T  newsize;   /* its size */
2725*4882a593Smuzhiyun  INTERNAL_SIZE_T  leadsize;  /* leading space before alignment point */
2726*4882a593Smuzhiyun  mchunkptr remainder;        /* spare room at end to split off */
2727*4882a593Smuzhiyun  long      remainder_size;   /* its size */
2728*4882a593Smuzhiyun
2729*4882a593Smuzhiyun  if ((long)bytes < 0) return 0;
2730*4882a593Smuzhiyun
2731*4882a593Smuzhiyun  /* If need less alignment than we give anyway, just relay to malloc */
2732*4882a593Smuzhiyun
2733*4882a593Smuzhiyun  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
2734*4882a593Smuzhiyun
2735*4882a593Smuzhiyun  /* Otherwise, ensure that it is at least a minimum chunk size */
2736*4882a593Smuzhiyun
2737*4882a593Smuzhiyun  if (alignment <  MINSIZE) alignment = MINSIZE;
2738*4882a593Smuzhiyun
2739*4882a593Smuzhiyun  /* Call malloc with worst case padding to hit alignment. */
2740*4882a593Smuzhiyun
2741*4882a593Smuzhiyun  nb = request2size(bytes);
2742*4882a593Smuzhiyun  m  = (char*)(mALLOc(nb + alignment + MINSIZE));
2743*4882a593Smuzhiyun
2744*4882a593Smuzhiyun  if (m == 0) return 0; /* propagate failure */
2745*4882a593Smuzhiyun
2746*4882a593Smuzhiyun  p = mem2chunk(m);
2747*4882a593Smuzhiyun
2748*4882a593Smuzhiyun  if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
2749*4882a593Smuzhiyun  {
2750*4882a593Smuzhiyun#if HAVE_MMAP
2751*4882a593Smuzhiyun    if(chunk_is_mmapped(p))
2752*4882a593Smuzhiyun      return chunk2mem(p); /* nothing more to do */
2753*4882a593Smuzhiyun#endif
2754*4882a593Smuzhiyun  }
2755*4882a593Smuzhiyun  else /* misaligned */
2756*4882a593Smuzhiyun  {
2757*4882a593Smuzhiyun    /*
2758*4882a593Smuzhiyun      Find an aligned spot inside chunk.
2759*4882a593Smuzhiyun      Since we need to give back leading space in a chunk of at
2760*4882a593Smuzhiyun      least MINSIZE, if the first calculation places us at
2761*4882a593Smuzhiyun      a spot with less than MINSIZE leader, we can move to the
2762*4882a593Smuzhiyun      next aligned spot -- we've allocated enough total room so that
2763*4882a593Smuzhiyun      this is always possible.
2764*4882a593Smuzhiyun    */
2765*4882a593Smuzhiyun
2766*4882a593Smuzhiyun    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
2767*4882a593Smuzhiyun    if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;
2768*4882a593Smuzhiyun
2769*4882a593Smuzhiyun    newp = (mchunkptr)brk;
2770*4882a593Smuzhiyun    leadsize = brk - (char*)(p);
2771*4882a593Smuzhiyun    newsize = chunksize(p) - leadsize;
2772*4882a593Smuzhiyun
2773*4882a593Smuzhiyun#if HAVE_MMAP
2774*4882a593Smuzhiyun    if(chunk_is_mmapped(p))
2775*4882a593Smuzhiyun    {
2776*4882a593Smuzhiyun      newp->prev_size = p->prev_size + leadsize;
2777*4882a593Smuzhiyun      set_head(newp, newsize|IS_MMAPPED);
2778*4882a593Smuzhiyun      return chunk2mem(newp);
2779*4882a593Smuzhiyun    }
2780*4882a593Smuzhiyun#endif
2781*4882a593Smuzhiyun
2782*4882a593Smuzhiyun    /* give back leader, use the rest */
2783*4882a593Smuzhiyun
2784*4882a593Smuzhiyun    set_head(newp, newsize | PREV_INUSE);
2785*4882a593Smuzhiyun    set_inuse_bit_at_offset(newp, newsize);
2786*4882a593Smuzhiyun    set_head_size(p, leadsize);
2787*4882a593Smuzhiyun    fREe(chunk2mem(p));
2788*4882a593Smuzhiyun    p = newp;
2789*4882a593Smuzhiyun
2790*4882a593Smuzhiyun    assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
2791*4882a593Smuzhiyun  }
2792*4882a593Smuzhiyun
2793*4882a593Smuzhiyun  /* Also give back spare room at the end */
2794*4882a593Smuzhiyun
2795*4882a593Smuzhiyun  remainder_size = chunksize(p) - nb;
2796*4882a593Smuzhiyun
2797*4882a593Smuzhiyun  if (remainder_size >= (long)MINSIZE)
2798*4882a593Smuzhiyun  {
2799*4882a593Smuzhiyun    remainder = chunk_at_offset(p, nb);
2800*4882a593Smuzhiyun    set_head(remainder, remainder_size | PREV_INUSE);
2801*4882a593Smuzhiyun    set_head_size(p, nb);
2802*4882a593Smuzhiyun    fREe(chunk2mem(remainder));
2803*4882a593Smuzhiyun  }
2804*4882a593Smuzhiyun
2805*4882a593Smuzhiyun  check_inuse_chunk(p);
2806*4882a593Smuzhiyun  return chunk2mem(p);
2807*4882a593Smuzhiyun
2808*4882a593Smuzhiyun}
2809*4882a593Smuzhiyun
2810*4882a593Smuzhiyun
2811*4882a593Smuzhiyun
2812*4882a593Smuzhiyun
2813*4882a593Smuzhiyun/*
2814*4882a593Smuzhiyun    valloc just invokes memalign with alignment argument equal
2815*4882a593Smuzhiyun    to the page size of the system (or as near to this as can
2816*4882a593Smuzhiyun    be figured out from all the includes/defines above.)
2817*4882a593Smuzhiyun*/
2818*4882a593Smuzhiyun
2819*4882a593Smuzhiyun#if __STD_C
2820*4882a593SmuzhiyunVoid_t* vALLOc(size_t bytes)
2821*4882a593Smuzhiyun#else
2822*4882a593SmuzhiyunVoid_t* vALLOc(bytes) size_t bytes;
2823*4882a593Smuzhiyun#endif
2824*4882a593Smuzhiyun{
2825*4882a593Smuzhiyun  return mEMALIGn (malloc_getpagesize, bytes);
2826*4882a593Smuzhiyun}
2827*4882a593Smuzhiyun
2828*4882a593Smuzhiyun/*
2829*4882a593Smuzhiyun  pvalloc just invokes valloc for the nearest pagesize
2830*4882a593Smuzhiyun  that will accommodate the request
2831*4882a593Smuzhiyun*/
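
/*
  Worked example of the rounding below, assuming a hypothetical
  4096-byte page: pvalloc(5000) rounds 5000 up to
  (5000 + 4095) & ~4095 = 8192, i.e. it behaves like memalign(4096, 8192).
*/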
2832*4882a593Smuzhiyun
2833*4882a593Smuzhiyun
2834*4882a593Smuzhiyun#if __STD_C
2835*4882a593SmuzhiyunVoid_t* pvALLOc(size_t bytes)
2836*4882a593Smuzhiyun#else
2837*4882a593SmuzhiyunVoid_t* pvALLOc(bytes) size_t bytes;
2838*4882a593Smuzhiyun#endif
2839*4882a593Smuzhiyun{
2840*4882a593Smuzhiyun  size_t pagesize = malloc_getpagesize;
2841*4882a593Smuzhiyun  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
2842*4882a593Smuzhiyun}
2843*4882a593Smuzhiyun
2844*4882a593Smuzhiyun/*
2845*4882a593Smuzhiyun
2846*4882a593Smuzhiyun  calloc calls malloc, then zeroes out the allocated chunk.
2847*4882a593Smuzhiyun
2848*4882a593Smuzhiyun*/
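
/*
  Illustrative usage sketch (caller's view): calloc(n, size) is
  essentially malloc(n * size) followed by zeroing the usable bytes
  (clearing is skipped for memory known to be fresh, e.g. mmapped or
  newly sbrked space when MORECORE_CLEARS).

      long *v = calloc(16, sizeof(long));    16 zeroed longs, or 0
      free(v);

  The product n * size is formed directly below, so callers must take
  care that it does not overflow.
*/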
2849*4882a593Smuzhiyun
2850*4882a593Smuzhiyun#if __STD_C
2851*4882a593SmuzhiyunVoid_t* cALLOc(size_t n, size_t elem_size)
2852*4882a593Smuzhiyun#else
2853*4882a593SmuzhiyunVoid_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
2854*4882a593Smuzhiyun#endif
2855*4882a593Smuzhiyun{
2856*4882a593Smuzhiyun  mchunkptr p;
2857*4882a593Smuzhiyun  INTERNAL_SIZE_T csz;
2858*4882a593Smuzhiyun
2859*4882a593Smuzhiyun  INTERNAL_SIZE_T sz = n * elem_size;
2860*4882a593Smuzhiyun
2861*4882a593Smuzhiyun
2862*4882a593Smuzhiyun  /* check if expand_top called, in which case don't need to clear */
2863*4882a593Smuzhiyun#if MORECORE_CLEARS
2864*4882a593Smuzhiyun  mchunkptr oldtop = top;
2865*4882a593Smuzhiyun  INTERNAL_SIZE_T oldtopsize = chunksize(top);
2866*4882a593Smuzhiyun#endif
2867*4882a593Smuzhiyun  Void_t* mem = mALLOc (sz);
2868*4882a593Smuzhiyun
2869*4882a593Smuzhiyun  if ((long)n < 0) return 0;
2870*4882a593Smuzhiyun
2871*4882a593Smuzhiyun  if (mem == 0)
2872*4882a593Smuzhiyun    return 0;
2873*4882a593Smuzhiyun  else
2874*4882a593Smuzhiyun  {
2875*4882a593Smuzhiyun    p = mem2chunk(mem);
2876*4882a593Smuzhiyun
2877*4882a593Smuzhiyun    /* Two optional cases in which clearing not necessary */
2878*4882a593Smuzhiyun
2879*4882a593Smuzhiyun
2880*4882a593Smuzhiyun#if HAVE_MMAP
2881*4882a593Smuzhiyun    if (chunk_is_mmapped(p)) return mem;
2882*4882a593Smuzhiyun#endif
2883*4882a593Smuzhiyun
2884*4882a593Smuzhiyun    csz = chunksize(p);
2885*4882a593Smuzhiyun
2886*4882a593Smuzhiyun#if MORECORE_CLEARS
2887*4882a593Smuzhiyun    if (p == oldtop && csz > oldtopsize)
2888*4882a593Smuzhiyun    {
2889*4882a593Smuzhiyun      /* clear only the bytes from non-freshly-sbrked memory */
2890*4882a593Smuzhiyun      csz = oldtopsize;
2891*4882a593Smuzhiyun    }
2892*4882a593Smuzhiyun#endif
2893*4882a593Smuzhiyun
2894*4882a593Smuzhiyun    MALLOC_ZERO(mem, csz - SIZE_SZ);
2895*4882a593Smuzhiyun    return mem;
2896*4882a593Smuzhiyun  }
2897*4882a593Smuzhiyun}
2898*4882a593Smuzhiyun
2899*4882a593Smuzhiyun/*
2900*4882a593Smuzhiyun
2901*4882a593Smuzhiyun  cfree just calls free. It is needed/defined on some systems
2902*4882a593Smuzhiyun  that pair it with calloc, presumably for odd historical reasons.
2903*4882a593Smuzhiyun
2904*4882a593Smuzhiyun*/
2905*4882a593Smuzhiyun
2906*4882a593Smuzhiyun#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
2907*4882a593Smuzhiyun#if __STD_C
2908*4882a593Smuzhiyunvoid cfree(Void_t *mem)
2909*4882a593Smuzhiyun#else
2910*4882a593Smuzhiyunvoid cfree(mem) Void_t *mem;
2911*4882a593Smuzhiyun#endif
2912*4882a593Smuzhiyun{
2913*4882a593Smuzhiyun  fREe(mem);
2914*4882a593Smuzhiyun}
2915*4882a593Smuzhiyun#endif
2916*4882a593Smuzhiyun
2917*4882a593Smuzhiyun
2918*4882a593Smuzhiyun
2919*4882a593Smuzhiyun/*
2920*4882a593Smuzhiyun
2921*4882a593Smuzhiyun    Malloc_trim gives memory back to the system (via negative
2922*4882a593Smuzhiyun    arguments to sbrk) if there is unused memory at the `high' end of
2923*4882a593Smuzhiyun    the malloc pool. You can call this after freeing large blocks of
2924*4882a593Smuzhiyun    memory to potentially reduce the system-level memory requirements
2925*4882a593Smuzhiyun    of a program. However, it cannot guarantee to reduce memory. Under
2926*4882a593Smuzhiyun    some allocation patterns, some large free blocks of memory will be
2927*4882a593Smuzhiyun    locked between two used chunks, so they cannot be given back to
2928*4882a593Smuzhiyun    the system.
2929*4882a593Smuzhiyun
2930*4882a593Smuzhiyun    The `pad' argument to malloc_trim represents the amount of free
2931*4882a593Smuzhiyun    trailing space to leave untrimmed. If this argument is zero,
2932*4882a593Smuzhiyun    only the minimum amount of memory to maintain internal data
2933*4882a593Smuzhiyun    structures will be left (one page or less). Non-zero arguments
2934*4882a593Smuzhiyun    can be supplied to maintain enough trailing space to service
2935*4882a593Smuzhiyun    future expected allocations without having to re-obtain memory
2936*4882a593Smuzhiyun    from the system.
2937*4882a593Smuzhiyun
2938*4882a593Smuzhiyun    Malloc_trim returns 1 if it actually released any memory, else 0.
2939*4882a593Smuzhiyun
2940*4882a593Smuzhiyun*/
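
/*
  Worked example of the `extra' computation below, with hypothetical
  numbers (pagesz = 4096, pad = 0, MINSIZE = 16, top_size = 20000):

      extra = ((20000 - 0 - 16 + 4095) / 4096 - 1) * 4096
            = (24079 / 4096 - 1) * 4096
            = (5 - 1) * 4096
            = 16384

  Four whole pages are given back via sbrk(-16384), leaving 3616 bytes
  (less than a page, plus any pad requested) to back the top chunk.
*/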
2941*4882a593Smuzhiyun
2942*4882a593Smuzhiyun#if __STD_C
2943*4882a593Smuzhiyunint malloc_trim(size_t pad)
2944*4882a593Smuzhiyun#else
2945*4882a593Smuzhiyunint malloc_trim(pad) size_t pad;
2946*4882a593Smuzhiyun#endif
2947*4882a593Smuzhiyun{
2948*4882a593Smuzhiyun  long  top_size;        /* Amount of top-most memory */
2949*4882a593Smuzhiyun  long  extra;           /* Amount to release */
2950*4882a593Smuzhiyun  char* current_brk;     /* address returned by pre-check sbrk call */
2951*4882a593Smuzhiyun  char* new_brk;         /* address returned by negative sbrk call */
2952*4882a593Smuzhiyun
2953*4882a593Smuzhiyun  unsigned long pagesz = malloc_getpagesize;
2954*4882a593Smuzhiyun
2955*4882a593Smuzhiyun  top_size = chunksize(top);
2956*4882a593Smuzhiyun  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
2957*4882a593Smuzhiyun
2958*4882a593Smuzhiyun  if (extra < (long)pagesz)  /* Not enough memory to release */
2959*4882a593Smuzhiyun    return 0;
2960*4882a593Smuzhiyun
2961*4882a593Smuzhiyun  else
2962*4882a593Smuzhiyun  {
2963*4882a593Smuzhiyun    /* Test to make sure no one else called sbrk */
2964*4882a593Smuzhiyun    current_brk = (char*)(MORECORE (0));
2965*4882a593Smuzhiyun    if (current_brk != (char*)(top) + top_size)
2966*4882a593Smuzhiyun      return 0;     /* Apparently we don't own memory; must fail */
2967*4882a593Smuzhiyun
2968*4882a593Smuzhiyun    else
2969*4882a593Smuzhiyun    {
2970*4882a593Smuzhiyun      new_brk = (char*)(MORECORE (-extra));
2971*4882a593Smuzhiyun
2972*4882a593Smuzhiyun      if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
2973*4882a593Smuzhiyun      {
2974*4882a593Smuzhiyun	/* Try to figure out what we have */
2975*4882a593Smuzhiyun	current_brk = (char*)(MORECORE (0));
2976*4882a593Smuzhiyun	top_size = current_brk - (char*)top;
2977*4882a593Smuzhiyun	if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
2978*4882a593Smuzhiyun	{
2979*4882a593Smuzhiyun	  sbrked_mem = current_brk - sbrk_base;
2980*4882a593Smuzhiyun	  set_head(top, top_size | PREV_INUSE);
2981*4882a593Smuzhiyun	}
2982*4882a593Smuzhiyun	check_chunk(top);
2983*4882a593Smuzhiyun	return 0;
2984*4882a593Smuzhiyun      }
2985*4882a593Smuzhiyun
2986*4882a593Smuzhiyun      else
2987*4882a593Smuzhiyun      {
2988*4882a593Smuzhiyun	/* Success. Adjust top accordingly. */
2989*4882a593Smuzhiyun	set_head(top, (top_size - extra) | PREV_INUSE);
2990*4882a593Smuzhiyun	sbrked_mem -= extra;
2991*4882a593Smuzhiyun	check_chunk(top);
2992*4882a593Smuzhiyun	return 1;
2993*4882a593Smuzhiyun      }
2994*4882a593Smuzhiyun    }
2995*4882a593Smuzhiyun  }
2996*4882a593Smuzhiyun}
2997*4882a593Smuzhiyun
2998*4882a593Smuzhiyun
2999*4882a593Smuzhiyun
3000*4882a593Smuzhiyun/*
3001*4882a593Smuzhiyun  malloc_usable_size:
3002*4882a593Smuzhiyun
3003*4882a593Smuzhiyun    This routine tells you how many bytes you can actually use in an
3004*4882a593Smuzhiyun    allocated chunk, which may be more than you requested (although
3005*4882a593Smuzhiyun    often not). You can use this many bytes without worrying about
3006*4882a593Smuzhiyun    overwriting other allocated objects. Not a particularly great
3007*4882a593Smuzhiyun    programming practice, but still sometimes useful.
3008*4882a593Smuzhiyun
3009*4882a593Smuzhiyun*/
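
/*
  Illustrative usage sketch: the usable size is at least the number of
  bytes requested and often slightly larger because of padding; the
  exact value depends on alignment and SIZE_SZ.

      void  *p = malloc(100);
      size_t u = malloc_usable_size(p);    u >= 100 when p != 0
      free(p);
*/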
3010*4882a593Smuzhiyun
3011*4882a593Smuzhiyun#if __STD_C
3012*4882a593Smuzhiyunsize_t malloc_usable_size(Void_t* mem)
3013*4882a593Smuzhiyun#else
3014*4882a593Smuzhiyunsize_t malloc_usable_size(mem) Void_t* mem;
3015*4882a593Smuzhiyun#endif
3016*4882a593Smuzhiyun{
3017*4882a593Smuzhiyun  mchunkptr p;
3018*4882a593Smuzhiyun  if (mem == 0)
3019*4882a593Smuzhiyun    return 0;
3020*4882a593Smuzhiyun  else
3021*4882a593Smuzhiyun  {
3022*4882a593Smuzhiyun    p = mem2chunk(mem);
3023*4882a593Smuzhiyun    if(!chunk_is_mmapped(p))
3024*4882a593Smuzhiyun    {
3025*4882a593Smuzhiyun      if (!inuse(p)) return 0;
3026*4882a593Smuzhiyun      check_inuse_chunk(p);
3027*4882a593Smuzhiyun      return chunksize(p) - SIZE_SZ;
3028*4882a593Smuzhiyun    }
3029*4882a593Smuzhiyun    return chunksize(p) - 2*SIZE_SZ;
3030*4882a593Smuzhiyun  }
3031*4882a593Smuzhiyun}
3032*4882a593Smuzhiyun
3033*4882a593Smuzhiyun
3034*4882a593Smuzhiyun
3035*4882a593Smuzhiyun
3036*4882a593Smuzhiyun/* Utility to update current_mallinfo for malloc_stats and mallinfo() */
3037*4882a593Smuzhiyun
3038*4882a593Smuzhiyunstatic void malloc_update_mallinfo()
3039*4882a593Smuzhiyun{
3040*4882a593Smuzhiyun  int i;
3041*4882a593Smuzhiyun  mbinptr b;
3042*4882a593Smuzhiyun  mchunkptr p;
3043*4882a593Smuzhiyun#if DEBUG
3044*4882a593Smuzhiyun  mchunkptr q;
3045*4882a593Smuzhiyun#endif
3046*4882a593Smuzhiyun
3047*4882a593Smuzhiyun  INTERNAL_SIZE_T avail = chunksize(top);
3048*4882a593Smuzhiyun  int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
3049*4882a593Smuzhiyun
3050*4882a593Smuzhiyun  for (i = 1; i < NAV; ++i)
3051*4882a593Smuzhiyun  {
3052*4882a593Smuzhiyun    b = bin_at(i);
3053*4882a593Smuzhiyun    for (p = last(b); p != b; p = p->bk)
3054*4882a593Smuzhiyun    {
3055*4882a593Smuzhiyun#if DEBUG
3056*4882a593Smuzhiyun      check_free_chunk(p);
3057*4882a593Smuzhiyun      for (q = next_chunk(p);
3058*4882a593Smuzhiyun	   q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
3059*4882a593Smuzhiyun	   q = next_chunk(q))
3060*4882a593Smuzhiyun	check_inuse_chunk(q);
3061*4882a593Smuzhiyun#endif
3062*4882a593Smuzhiyun      avail += chunksize(p);
3063*4882a593Smuzhiyun      navail++;
3064*4882a593Smuzhiyun    }
3065*4882a593Smuzhiyun  }
3066*4882a593Smuzhiyun
3067*4882a593Smuzhiyun  current_mallinfo.ordblks = navail;
3068*4882a593Smuzhiyun  current_mallinfo.uordblks = sbrked_mem - avail;
3069*4882a593Smuzhiyun  current_mallinfo.fordblks = avail;
3070*4882a593Smuzhiyun  current_mallinfo.hblks = n_mmaps;
3071*4882a593Smuzhiyun  current_mallinfo.hblkhd = mmapped_mem;
3072*4882a593Smuzhiyun  current_mallinfo.keepcost = chunksize(top);
3073*4882a593Smuzhiyun
3074*4882a593Smuzhiyun}
3075*4882a593Smuzhiyun
3076*4882a593Smuzhiyun
3077*4882a593Smuzhiyun
3078*4882a593Smuzhiyun/*
3079*4882a593Smuzhiyun
3080*4882a593Smuzhiyun  malloc_stats:
3081*4882a593Smuzhiyun
3082*4882a593Smuzhiyun    Prints on stderr the amount of space obtained from the system (both
3083*4882a593Smuzhiyun    via sbrk and mmap), the maximum amount (which may be more than
3084*4882a593Smuzhiyun    current if malloc_trim and/or munmap got called), the maximum
3085*4882a593Smuzhiyun    number of simultaneous mmap regions used, and the current number
3086*4882a593Smuzhiyun    of bytes allocated via malloc (or realloc, etc) but not yet
3087*4882a593Smuzhiyun    freed. (Note that this is the number of bytes allocated, not the
3088*4882a593Smuzhiyun    number requested. It will be larger than the number requested
3089*4882a593Smuzhiyun    because of alignment and bookkeeping overhead.)
3090*4882a593Smuzhiyun
3091*4882a593Smuzhiyun*/
3092*4882a593Smuzhiyun
3093*4882a593Smuzhiyunvoid malloc_stats()
3094*4882a593Smuzhiyun{
3095*4882a593Smuzhiyun  malloc_update_mallinfo();
3096*4882a593Smuzhiyun  fprintf(stderr, "max system bytes = %10u\n",
3097*4882a593Smuzhiyun	  (unsigned int)(max_total_mem));
3098*4882a593Smuzhiyun  fprintf(stderr, "system bytes     = %10u\n",
3099*4882a593Smuzhiyun	  (unsigned int)(sbrked_mem + mmapped_mem));
3100*4882a593Smuzhiyun  fprintf(stderr, "in use bytes     = %10u\n",
3101*4882a593Smuzhiyun	  (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
3102*4882a593Smuzhiyun#if HAVE_MMAP
3103*4882a593Smuzhiyun  fprintf(stderr, "max mmap regions = %10u\n",
3104*4882a593Smuzhiyun	  (unsigned int)max_n_mmaps);
3105*4882a593Smuzhiyun#endif
3106*4882a593Smuzhiyun}
3107*4882a593Smuzhiyun
3108*4882a593Smuzhiyun/*
3109*4882a593Smuzhiyun  mallinfo returns a copy of updated current mallinfo.
3110*4882a593Smuzhiyun*/
3111*4882a593Smuzhiyun
3112*4882a593Smuzhiyunstruct mallinfo mALLINFo()
3113*4882a593Smuzhiyun{
3114*4882a593Smuzhiyun  malloc_update_mallinfo();
3115*4882a593Smuzhiyun  return current_mallinfo;
3116*4882a593Smuzhiyun}
3117*4882a593Smuzhiyun
3118*4882a593Smuzhiyun
3119*4882a593Smuzhiyun
3120*4882a593Smuzhiyun
3121*4882a593Smuzhiyun/*
3122*4882a593Smuzhiyun  mallopt:
3123*4882a593Smuzhiyun
3124*4882a593Smuzhiyun    mallopt is the general SVID/XPG interface to tunable parameters.
3125*4882a593Smuzhiyun    The format is to provide a (parameter-number, parameter-value) pair.
3126*4882a593Smuzhiyun    mallopt then sets the corresponding parameter to the argument
3127*4882a593Smuzhiyun    value if it can (i.e., so long as the value is meaningful),
3128*4882a593Smuzhiyun    and returns 1 if successful else 0.
3129*4882a593Smuzhiyun
3130*4882a593Smuzhiyun    See descriptions of tunable parameters above.
3131*4882a593Smuzhiyun
3132*4882a593Smuzhiyun*/
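
/*
  Illustrative usage sketch (hypothetical values):

      mallopt(M_TRIM_THRESHOLD, 256*1024);  trim when > 256K is unused at top
      mallopt(M_TOP_PAD, 64*1024);          ask for 64K of slack when extending
      mallopt(M_MMAP_MAX, 0);               disable mmap-backed allocation

  Each call returns 1 on success and 0 for an unrecognized parameter
  (or, when mmap support is compiled out, for a nonzero M_MMAP_MAX).
*/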
3133*4882a593Smuzhiyun
3134*4882a593Smuzhiyun#if __STD_C
3135*4882a593Smuzhiyunint mALLOPt(int param_number, int value)
3136*4882a593Smuzhiyun#else
3137*4882a593Smuzhiyunint mALLOPt(param_number, value) int param_number; int value;
3138*4882a593Smuzhiyun#endif
3139*4882a593Smuzhiyun{
3140*4882a593Smuzhiyun  switch(param_number)
3141*4882a593Smuzhiyun  {
3142*4882a593Smuzhiyun    case M_TRIM_THRESHOLD:
3143*4882a593Smuzhiyun      trim_threshold = value; return 1;
3144*4882a593Smuzhiyun    case M_TOP_PAD:
3145*4882a593Smuzhiyun      top_pad = value; return 1;
3146*4882a593Smuzhiyun    case M_MMAP_THRESHOLD:
3147*4882a593Smuzhiyun      mmap_threshold = value; return 1;
3148*4882a593Smuzhiyun    case M_MMAP_MAX:
3149*4882a593Smuzhiyun#if HAVE_MMAP
3150*4882a593Smuzhiyun      n_mmaps_max = value; return 1;
3151*4882a593Smuzhiyun#else
3152*4882a593Smuzhiyun      if (value != 0) return 0; else  n_mmaps_max = value; return 1;
3153*4882a593Smuzhiyun#endif
3154*4882a593Smuzhiyun
3155*4882a593Smuzhiyun    default:
3156*4882a593Smuzhiyun      return 0;
3157*4882a593Smuzhiyun  }
3158*4882a593Smuzhiyun}
3159*4882a593Smuzhiyun
3160*4882a593Smuzhiyun/*
3161*4882a593Smuzhiyun
3162*4882a593SmuzhiyunHistory:
3163*4882a593Smuzhiyun
3164*4882a593Smuzhiyun    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
3165*4882a593Smuzhiyun      * return null for negative arguments
3166*4882a593Smuzhiyun      * Added Several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
3167*4882a593Smuzhiyun	 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
3168*4882a593Smuzhiyun	  (e.g. WIN32 platforms)
3169*4882a593Smuzhiyun	 * Clean up header file inclusion for WIN32 platforms
3170*4882a593Smuzhiyun	 * Clean up code to avoid Microsoft Visual C++ compiler complaints
3171*4882a593Smuzhiyun	 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
3172*4882a593Smuzhiyun	   memory allocation routines
3173*4882a593Smuzhiyun	 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
3174*4882a593Smuzhiyun	 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
3175*4882a593Smuzhiyun	   usage of 'assert' in non-WIN32 code
3176*4882a593Smuzhiyun	 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
3177*4882a593Smuzhiyun	   avoid infinite loop
3178*4882a593Smuzhiyun      * Always call 'fREe()' rather than 'free()'
3179*4882a593Smuzhiyun
3180*4882a593Smuzhiyun    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
3181*4882a593Smuzhiyun      * Fixed ordering problem with boundary-stamping
3182*4882a593Smuzhiyun
3183*4882a593Smuzhiyun    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
3184*4882a593Smuzhiyun      * Added pvalloc, as recommended by H.J. Liu
3185*4882a593Smuzhiyun      * Added 64bit pointer support mainly from Wolfram Gloger
3186*4882a593Smuzhiyun      * Added anonymously donated WIN32 sbrk emulation
3187*4882a593Smuzhiyun      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
3188*4882a593Smuzhiyun      * malloc_extend_top: fix mask error that caused wastage after
3189*4882a593Smuzhiyun	foreign sbrks
3190*4882a593Smuzhiyun      * Add linux mremap support code from HJ Liu
3191*4882a593Smuzhiyun
3192*4882a593Smuzhiyun    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
3193*4882a593Smuzhiyun      * Integrated most documentation with the code.
3194*4882a593Smuzhiyun      * Add support for mmap, with help from
3195*4882a593Smuzhiyun	Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
3196*4882a593Smuzhiyun      * Use last_remainder in more cases.
3197*4882a593Smuzhiyun      * Pack bins using idea from  colin@nyx10.cs.du.edu
3198*4882a593Smuzhiyun      * Use ordered bins instead of best-fit threshold
3199*4882a593Smuzhiyun      * Eliminate block-local decls to simplify tracing and debugging.
3200*4882a593Smuzhiyun      * Support another case of realloc via move into top
3201*4882a593Smuzhiyun      * Fix error occurring when initial sbrk_base not word-aligned.
3202*4882a593Smuzhiyun      * Rely on page size for units instead of SBRK_UNIT to
3203*4882a593Smuzhiyun	avoid surprises about sbrk alignment conventions.
3204*4882a593Smuzhiyun      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
3205*4882a593Smuzhiyun	(raymond@es.ele.tue.nl) for the suggestion.
3206*4882a593Smuzhiyun      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
3207*4882a593Smuzhiyun      * More precautions for cases where other routines call sbrk,
3208*4882a593Smuzhiyun	courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
3209*4882a593Smuzhiyun      * Added macros etc., allowing use in linux libc from
3210*4882a593Smuzhiyun	H.J. Lu (hjl@gnu.ai.mit.edu)
3211*4882a593Smuzhiyun      * Inverted this history list
3212*4882a593Smuzhiyun
3213*4882a593Smuzhiyun    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
3214*4882a593Smuzhiyun      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
3215*4882a593Smuzhiyun      * Removed all preallocation code since under current scheme
3216*4882a593Smuzhiyun	the work required to undo bad preallocations exceeds
3217*4882a593Smuzhiyun	the work saved in good cases for most test programs.
3218*4882a593Smuzhiyun      * No longer use return list or unconsolidated bins since
3219*4882a593Smuzhiyun	no scheme using them consistently outperforms those that don't
3220*4882a593Smuzhiyun	given above changes.
3221*4882a593Smuzhiyun      * Use best fit for very large chunks to prevent some worst-cases.
3222*4882a593Smuzhiyun      * Added some support for debugging
3223*4882a593Smuzhiyun
3224*4882a593Smuzhiyun    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
3225*4882a593Smuzhiyun      * Removed footers when chunks are in use. Thanks to
3226*4882a593Smuzhiyun	Paul Wilson (wilson@cs.texas.edu) for the suggestion.
3227*4882a593Smuzhiyun
3228*4882a593Smuzhiyun    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
3229*4882a593Smuzhiyun      * Added malloc_trim, with help from Wolfram Gloger
3230*4882a593Smuzhiyun	(wmglo@Dent.MED.Uni-Muenchen.DE).
3231*4882a593Smuzhiyun
3232*4882a593Smuzhiyun    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
3233*4882a593Smuzhiyun
3234*4882a593Smuzhiyun    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
3235*4882a593Smuzhiyun      * realloc: try to expand in both directions
3236*4882a593Smuzhiyun      * malloc: swap order of clean-bin strategy;
3237*4882a593Smuzhiyun      * realloc: only conditionally expand backwards
3238*4882a593Smuzhiyun      * Try not to scavenge used bins
3239*4882a593Smuzhiyun      * Use bin counts as a guide to preallocation
3240*4882a593Smuzhiyun      * Occasionally bin return list chunks in first scan
3241*4882a593Smuzhiyun      * Add a few optimizations from colin@nyx10.cs.du.edu
3242*4882a593Smuzhiyun
3243*4882a593Smuzhiyun    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
3244*4882a593Smuzhiyun      * faster bin computation & slightly different binning
3245*4882a593Smuzhiyun      * merged all consolidations to one part of malloc proper
3246*4882a593Smuzhiyun	 (eliminating old malloc_find_space & malloc_clean_bin)
3247*4882a593Smuzhiyun      * Scan 2 returns chunks (not just 1)
3248*4882a593Smuzhiyun      * Propagate failure in realloc if malloc returns 0
3249*4882a593Smuzhiyun      * Add stuff to allow compilation on non-ANSI compilers
3250*4882a593Smuzhiyun	  from kpv@research.att.com
3251*4882a593Smuzhiyun
3252*4882a593Smuzhiyun    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
3253*4882a593Smuzhiyun      * removed potential for odd address access in prev_chunk
3254*4882a593Smuzhiyun      * removed dependency on getpagesize.h
3255*4882a593Smuzhiyun      * misc cosmetics and a bit more internal documentation
3256*4882a593Smuzhiyun      * anticosmetics: mangled names in macros to evade debugger strangeness
3257*4882a593Smuzhiyun      * tested on sparc, hp-700, dec-mips, rs6000
3258*4882a593Smuzhiyun	  with gcc & native cc (hp, dec only) allowing
3259*4882a593Smuzhiyun	  Detlefs & Zorn comparison study (in SIGPLAN Notices.)
3260*4882a593Smuzhiyun
3261*4882a593Smuzhiyun    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
3262*4882a593Smuzhiyun      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
3263*4882a593Smuzhiyun	 structure of old version,  but most details differ.)
3264*4882a593Smuzhiyun
3265*4882a593Smuzhiyun*/
3266