1 //<MStar Software>
2 //******************************************************************************
3 // MStar Software
4 // Copyright (c) 2010 - 2012 MStar Semiconductor, Inc. All rights reserved.
5 // All software, firmware and related documentation herein ("MStar Software") are
6 // intellectual property of MStar Semiconductor, Inc. ("MStar") and protected by
7 // law, including, but not limited to, copyright law and international treaties.
8 // Any use, modification, reproduction, retransmission, or republication of all
9 // or part of MStar Software is expressly prohibited, unless prior written
10 // permission has been granted by MStar.
11 //
12 // By accessing, browsing and/or using MStar Software, you acknowledge that you
13 // have read, understood, and agree, to be bound by below terms ("Terms") and to
14 // comply with all applicable laws and regulations:
15 //
16 // 1. MStar shall retain any and all right, ownership and interest to MStar
17 //    Software and any modification/derivatives thereof.
18 //    No right, ownership, or interest to MStar Software and any
19 //    modification/derivatives thereof is transferred to you under Terms.
20 //
21 // 2. You understand that MStar Software might include, incorporate or be
22 //    supplied together with third party's software and the use of MStar
23 //    Software may require additional licenses from third parties.
24 //    Therefore, you hereby agree it is your sole responsibility to separately
25 //    obtain any and all third party right and license necessary for your use of
26 //    such third party's software.
27 //
28 // 3. MStar Software and any modification/derivatives thereof shall be deemed as
29 //    MStar's confidential information and you agree to keep MStar's
30 //    confidential information in strictest confidence and not disclose to any
31 //    third party.
32 //
33 // 4. MStar Software is provided on an "AS IS" basis without warranties of any
34 //    kind. Any warranties are hereby expressly disclaimed by MStar, including
35 //    without limitation, any warranties of merchantability, non-infringement of
36 //    intellectual property rights, fitness for a particular purpose, error free
37 //    and in conformity with any international standard.  You agree to waive any
38 //    claim against MStar for any loss, damage, cost or expense that you may
39 //    incur related to your use of MStar Software.
40 //    In no event shall MStar be liable for any direct, indirect, incidental or
41 //    consequential damages, including without limitation, lost of profit or
42 //    revenues, lost or damage of data, and unauthorized system use.
43 //    You agree that this Section 4 shall still apply without being affected
44 //    even if MStar Software has been modified by MStar in accordance with your
45 //    request or instruction for your use, except otherwise agreed by both
46 //    parties in writing.
47 //
48 // 5. If requested, MStar may from time to time provide technical supports or
49 //    services in relation with MStar Software to you for your use of
50 //    MStar Software in conjunction with your or your customer's product
51 //    ("Services").
52 //    You understand and agree that, except otherwise agreed by both parties in
53 //    writing, Services are provided on an "AS IS" basis and the warranty
54 //    disclaimer set forth in Section 4 above shall apply.
55 //
56 // 6. Nothing contained herein shall be construed as by implication, estoppels
57 //    or otherwise:
58 //    (a) conferring any license or right to use MStar name, trademark, service
59 //        mark, symbol or any other identification;
60 //    (b) obligating MStar or any of its affiliates to furnish any person,
61 //        including without limitation, you and your customers, any assistance
62 //        of any kind whatsoever, or any information; or
63 //    (c) conferring any license or right under any intellectual property right.
64 //
65 // 7. These terms shall be governed by and construed in accordance with the laws
66 //    of Taiwan, R.O.C., excluding its conflict of law rules.
67 //    Any and all dispute arising out hereof or related hereto shall be finally
68 //    settled by arbitration referred to the Chinese Arbitration Association,
69 //    Taipei in accordance with the ROC Arbitration Law and the Arbitration
70 //    Rules of the Association by three (3) arbitrators appointed in accordance
71 //    with the said Rules.
72 //    The place of arbitration shall be in Taipei, Taiwan and the language shall
73 //    be English.
74 //    The arbitration award shall be final and binding to both parties.
75 //
76 //******************************************************************************
77 //<MStar Software>
78 #if !defined (ANDROID)
79 /* dlmalloc configurations for MSTAR system. */
80 
81 #if defined(CHIP_T13) //T9, T13
82 #define MALLOC_ALIGNMENT ((size_t)16U)
83 #else
84 #define MALLOC_ALIGNMENT ((size_t)32U)
85 #endif
86 
87 #define NOT_USE_SYSALLOC 1
88 #define USE_LOCKS 1
89 #define MSPACES 1
90 #define ONLY_MSPACES 1
91 #define HAVE_MMAP 0
92 #define MORECORE_CANNOT_TRIM
93 #define DEFAULT_GRANULARITY 0
94 #define DEBUG 0
95 
96 /*
97   This is a version (aka dlmalloc) of malloc/free/realloc written by
98   Doug Lea and released to the public domain, as explained at
99   http://creativecommons.org/licenses/publicdomain.  Send questions,
100   comments, complaints, performance data, etc to dl@cs.oswego.edu
101 
102 * Version 2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)
103 
104    Note: There may be an updated version of this malloc obtainable at
105            ftp://gee.cs.oswego.edu/pub/misc/malloc.c
106          Check before installing!
107 
108 * Quickstart
109 
110   This library is all in one file to simplify the most common usage:
111   ftp it, compile it (-O3), and link it into another program. All of
112   the compile-time options default to reasonable values for use on
113   most platforms.  You might later want to step through various
114   compile-time and dynamic tuning options.
115 
116   For convenience, an include file for code using this malloc is at:
117      ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.4.h
118   You don't really need this .h file unless you call functions not
119   defined in your system include files.  The .h file contains only the
120   excerpts from this file needed for using this malloc on ANSI C/C++
121   systems, so long as you haven't changed compile-time options about
122   naming and tuning parameters.  If you do, then you can create your
123   own malloc.h that does include all settings by cutting at the point
124   indicated below. Note that you may already by default be using a C
125   library containing a malloc that is based on some version of this
126   malloc (for example in linux). You might still want to use the one
127   in this file to customize settings or to avoid overheads associated
128   with library versions.
129 
130 * Vital statistics:
131 
132   Supported pointer/size_t representation:       4 or 8 bytes
133        size_t MUST be an unsigned type of the same width as
134        pointers. (If you are using an ancient system that declares
135        size_t as a signed type, or need it to be a different width
136        than pointers, you can use a previous release of this malloc
137        (e.g. 2.7.2) supporting these.)
138 
139   Alignment:                                     8 bytes (default)
140        This suffices for nearly all current machines and C compilers.
141        However, you can define MALLOC_ALIGNMENT to be wider than this
142        if necessary (up to 128 bytes), at the expense of using more space.
143 
144   Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
145                                           8 or 16 bytes (if 8byte sizes)
146        Each malloced chunk has a hidden word of overhead holding size
147        and status information, and additional cross-check word
148        if FOOTERS is defined.
149 
150   Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
151                           8-byte ptrs:  32 bytes    (including overhead)
152 
153        Even a request for zero bytes (i.e., malloc(0)) returns a
154        pointer to something of the minimum allocatable size.
155        The maximum overhead wastage (i.e., number of extra bytes
156        allocated than were requested in malloc) is less than or equal
157        to the minimum size, except for requests >= mmap_threshold that
158        are serviced via mmap(), where the worst case wastage is about
159        32 bytes plus the remainder from a system page (the minimal
160        mmap unit); typically 4096 or 8192 bytes.
161 
162   Security: static-safe; optionally more or less
163        The "security" of malloc refers to the ability of malicious
164        code to accentuate the effects of errors (for example, freeing
165        space that is not currently malloc'ed or overwriting past the
166        ends of chunks) in code that calls malloc.  This malloc
167        guarantees not to modify any memory locations below the base of
168        heap, i.e., static variables, even in the presence of usage
169        errors.  The routines additionally detect most improper frees
170        and reallocs.  All this holds as long as the static bookkeeping
171        for malloc itself is not corrupted by some other means.  This
172        is only one aspect of security -- these checks do not, and
173        cannot, detect all possible programming errors.
174 
175        If FOOTERS is defined nonzero, then each allocated chunk
176        carries an additional check word to verify that it was malloced
177        from its space.  These check words are the same within each
178        execution of a program using malloc, but differ across
179        executions, so externally crafted fake chunks cannot be
180        freed. This improves security by rejecting frees/reallocs that
181        could corrupt heap memory, in addition to the checks preventing
182        writes to statics that are always on.  This may further improve
183        security at the expense of time and space overhead.  (Note that
184        FOOTERS may also be worth using with MSPACES.)
185 
186        By default detected errors cause the program to abort (calling
187        "abort()"). You can override this to instead proceed past
188        errors by defining PROCEED_ON_ERROR.  In this case, a bad free
189        has no effect, and a malloc that encounters a bad address
190        caused by user overwrites will ignore the bad address by
191        dropping pointers and indices to all known memory. This may
192        be appropriate for programs that should continue if at all
193        possible in the face of programming errors, although they may
194        run out of memory because dropped memory is never reclaimed.
195 
196        If you don't like either of these options, you can define
197        CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
198   else. And if you are sure that your program using malloc has
199        no errors or vulnerabilities, you can define INSECURE to 1,
200        which might (or might not) provide a small performance improvement.
201 
202   Thread-safety: NOT thread-safe unless USE_LOCKS defined
203        When USE_LOCKS is defined, each public call to malloc, free,
204        etc is surrounded with either a pthread mutex or a win32
205        spinlock (depending on WIN32). This is not especially fast, and
206        can be a major bottleneck.  It is designed only to provide
207        minimal protection in concurrent environments, and to provide a
208        basis for extensions.  If you are using malloc in a concurrent
209        program, consider instead using nedmalloc
210        (http://www.nedprod.com/programs/portable/nedmalloc/) or
211        ptmalloc (See http://www.malloc.de), which are derived
212        from versions of this malloc.
213 
214   System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
215        This malloc can use unix sbrk or any emulation (invoked using
216        the CALL_MORECORE macro) and/or mmap/munmap or any emulation
217        (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
218        memory.  On most unix systems, it tends to work best if both
219        MORECORE and MMAP are enabled.  On Win32, it uses emulations
220        based on VirtualAlloc. It also uses common C library functions
221        like memset.
222 
223   Compliance: I believe it is compliant with the Single Unix Specification
224        (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
225        others as well.
226 
227 * Overview of algorithms
228 
229   This is not the fastest, most space-conserving, most portable, or
230   most tunable malloc ever written. However it is among the fastest
231   while also being among the most space-conserving, portable and
232   tunable.  Consistent balance across these factors results in a good
233   general-purpose allocator for malloc-intensive programs.
234 
235   In most ways, this malloc is a best-fit allocator. Generally, it
236   chooses the best-fitting existing chunk for a request, with ties
237   broken in approximately least-recently-used order. (This strategy
238   normally maintains low fragmentation.) However, for requests less
239   than 256 bytes, it deviates from best-fit when there is not an
240   exactly fitting available chunk by preferring to use space adjacent
241   to that used for the previous small request, as well as by breaking
242   ties in approximately most-recently-used order. (These enhance
243   locality of series of small allocations.)  And for very large requests
244   (>= 256Kb by default), it relies on system memory mapping
245   facilities, if supported.  (This helps avoid carrying around and
246   possibly fragmenting memory used only for large chunks.)
247 
248   All operations (except malloc_stats and mallinfo) have execution
249   times that are bounded by a constant factor of the number of bits in
250   a size_t, not counting any clearing in calloc or copying in realloc,
251   or actions surrounding MORECORE and MMAP that have times
252   proportional to the number of non-contiguous regions returned by
253   system allocation routines, which is often just 1. In real-time
254   applications, you can optionally suppress segment traversals using
255   NO_SEGMENT_TRAVERSAL, which assures bounded execution even when
256   system allocators return non-contiguous spaces, at the typical
257   expense of carrying around more memory and increased fragmentation.
258 
259   The implementation is not very modular and seriously overuses
260   macros. Perhaps someday all C compilers will do as good a job
261   inlining modular code as can now be done by brute-force expansion,
262   but now, enough of them seem not to.
263 
264   Some compilers issue a lot of warnings about code that is
265   dead/unreachable only on some platforms, and also about intentional
266   uses of negation on unsigned types. All known cases of each can be
267   ignored.
268 
269   For a longer but out of date high-level description, see
270      http://gee.cs.oswego.edu/dl/html/malloc.html
271 
272 * MSPACES
273   If MSPACES is defined, then in addition to malloc, free, etc.,
274   this file also defines mspace_malloc, mspace_free, etc. These
275   are versions of malloc routines that take an "mspace" argument
276   obtained using mstar_create_mspace, to control all internal bookkeeping.
277   If ONLY_MSPACES is defined, only these versions are compiled.
278   So if you would like to use this allocator for only some allocations,
279   and your system malloc for others, you can compile with
280   ONLY_MSPACES and then do something like...
281     static mspace mymspace = mstar_create_mspace(0,0); // for example
282     #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
283 
284   (Note: If you only need one instance of an mspace, you can instead
285   use "USE_DL_PREFIX" to relabel the global malloc.)
286 
287   You can similarly create thread-local allocators by storing
288   mspaces as thread-locals. For example:
289     static __thread mspace tlms = 0;
290     void*  tlmalloc(size_t bytes) {
291       if (tlms == 0) tlms = mstar_create_mspace(0, 0);
292       return mspace_malloc(tlms, bytes);
293     }
294     void  tlfree(void* mem) { mspace_free(tlms, mem); }
295 
296   Unless FOOTERS is defined, each mspace is completely independent.
297   You cannot allocate from one and free to another (although
298   conformance is only weakly checked, so usage errors are not always
299   caught). If FOOTERS is defined, then each chunk carries around a tag
300   indicating its originating mspace, and frees are directed to their
301   originating spaces.
302 
303  -------------------------  Compile-time options ---------------------------
304 
305 Be careful in setting #define values for numerical constants of type
306 size_t. On some systems, literal values are not automatically extended
307 to size_t precision unless they are explicitly casted. You can also
308 use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.
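For example (an illustrative sketch, not taken from the original text; the
macro name is hypothetical): a bare literal such as 8*1024*1024 is evaluated
as int, so it is safer to keep the arithmetic in size_t, as the defaults
further below do:

  #define MY_TRIM_LIMIT ((size_t)8U * (size_t)1024U * (size_t)1024U)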
309 
310 WIN32                    default: defined if _WIN32 defined
311   Defining WIN32 sets up defaults for MS environment and compilers.
312   Otherwise defaults are for unix. Beware that there seem to be some
313   cases where this malloc might not be a pure drop-in replacement for
314   Win32 malloc: Random-looking failures from Win32 GDI APIs (e.g.,
315   SetDIBits()) may be due to bugs in some video driver implementations
316   when pixel buffers are malloc()ed, and the region spans more than
317   one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb)
318   default granularity, pixel buffers may straddle virtual allocation
319   regions more often than when using the Microsoft allocator.  You can
320   avoid this by using VirtualAlloc() and VirtualFree() for all pixel
321   buffers rather than using malloc().  If this is not possible,
322   recompile this malloc with a larger DEFAULT_GRANULARITY.
323 
324 MALLOC_ALIGNMENT         default: (size_t)8
325   Controls the minimum alignment for malloc'ed chunks.  It must be a
326   power of two and at least 8, even on machines for which smaller
327   alignments would suffice. It may be defined as larger than this
328   though. Note however that code and data structures are optimized for
329   the case of 8-byte alignment.
330 
331 MSPACES                  default: 0 (false)
332   If true, compile in support for independent allocation spaces.
333   This is only supported if HAVE_MMAP is true.
334 
335 ONLY_MSPACES             default: 0 (false)
336   If true, only compile in mspace versions, not regular versions.
337 
338 USE_LOCKS                default: 0 (false)
339   Causes each call to each public routine to be surrounded with
340   pthread or WIN32 mutex lock/unlock. (If set true, this can be
341   overridden on a per-mspace basis for mspace versions.) If set to a
342   non-zero value other than 1, locks are used, but their
343   implementation is left out, so lock functions must be supplied manually,
344   as described below.
345 
346 USE_SPIN_LOCKS           default: 1 iff USE_LOCKS and on x86 using gcc or MSC
347   If true, uses custom spin locks for locking. This is currently
348   supported only for x86 platforms using gcc or recent MS compilers.
349   Otherwise, posix locks or win32 critical sections are used.
350 
351 FOOTERS                  default: 0
352   If true, provide extra checking and dispatching by placing
353   information in the footers of allocated chunks. This adds
354   space and time overhead.
355 
356 INSECURE                 default: 0
357   If true, omit checks for usage errors and heap space overwrites.
358 
359 USE_DL_PREFIX            default: NOT defined
360   Causes compiler to prefix all public routines with the string 'dl'.
361   This can be useful when you only want to use this malloc in one part
362   of a program, using your regular system malloc elsewhere.
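  For example (an illustrative sketch): when built with USE_DL_PREFIX, the
  'dl' entry points coexist with the C library allocator:

    void* p = dlmalloc(128);   // this allocator
    void* q = malloc(128);     // C library allocator
    dlfree(p);
    free(q);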
363 
364 ABORT                    default: defined as abort()
365   Defines how to abort on failed checks.  On most systems, a failed
366   check cannot die with an "assert" or even print an informative
367   message, because the underlying print routines in turn call malloc,
368   which will fail again.  Generally, the best policy is to simply call
369   abort(). It's not very useful to do more than this because many
370   errors due to overwriting will show up as address faults (null, odd
371   addresses etc) rather than malloc-triggered checks, so will also
372   abort.  Also, most compilers know that abort() does not return, so
373   can better optimize code conditionally calling it.
374 
375 PROCEED_ON_ERROR           default: defined as 0 (false)
376   Controls whether detected bad addresses cause them to be bypassed
377   rather than aborting. If set, detected bad arguments to free and
378   realloc are ignored. And all bookkeeping information is zeroed out
379   upon a detected overwrite of freed heap space, thus losing the
380   ability to ever return it from malloc again, but enabling the
381   application to proceed. If PROCEED_ON_ERROR is defined, the
382   static variable malloc_corruption_error_count is compiled in
383   and can be examined to see if errors have occurred. This option
384   generates slower code than the default abort policy.
385 
386 DEBUG                    default: NOT defined
387   The DEBUG setting is mainly intended for people trying to modify
388   this code or diagnose problems when porting to new platforms.
389   However, it may also be able to better isolate user errors than just
390   using runtime checks.  The assertions in the check routines spell
391   out in more detail the assumptions and invariants underlying the
392   algorithms.  The checking is fairly extensive, and will slow down
393   execution noticeably. Calling malloc_stats or mallinfo with DEBUG
394   set will attempt to check every non-mmapped allocated and free chunk
395   in the course of computing the summaries.
396 
397 ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
398   Debugging assertion failures can be nearly impossible if your
399   version of the assert macro causes malloc to be called, which will
400   lead to a cascade of further failures, blowing the runtime stack.
401   ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
402   which will usually make debugging easier.
403 
404 MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
405   The action to take before "return 0" when malloc fails because
406   no memory is available.
407 
408 HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
409   True if this system supports sbrk or an emulation of it.
410 
411 MORECORE                  default: sbrk
412   The name of the sbrk-style system routine to call to obtain more
413   memory.  See below for guidance on writing custom MORECORE
414   functions. The type of the argument to sbrk/MORECORE varies across
415   systems.  It cannot be size_t, because it supports negative
416   arguments, so it is normally the signed type of the same width as
417   size_t (sometimes declared as "intptr_t").  It doesn't much matter
418   though. Internally, we only call it with arguments less than half
419   the max value of a size_t, which should work across all reasonable
420   possibilities, although sometimes generating compiler warnings.
421 
422 MORECORE_CONTIGUOUS       default: 1 (true) if HAVE_MORECORE
423   If true, take advantage of fact that consecutive calls to MORECORE
424   with positive arguments always return contiguous increasing
425   addresses.  This is true of unix sbrk. It does not hurt too much to
426   set it true anyway, since malloc copes with non-contiguities.
427   Setting it false when definitely non-contiguous saves time
428   and possibly wasted space it would take to discover this though.
429 
430 MORECORE_CANNOT_TRIM      default: NOT defined
431   True if MORECORE cannot release space back to the system when given
432   negative arguments. This is generally necessary only if you are
433   using a hand-crafted MORECORE function that cannot handle negative
434   arguments.
435 
436 NO_SEGMENT_TRAVERSAL       default: 0
437   If non-zero, suppresses traversals of memory segments
438   returned by either MORECORE or CALL_MMAP. This disables
439   merging of segments that are contiguous, and selectively
440   releasing them to the OS if unused, but bounds execution times.
441 
442 HAVE_MMAP                 default: 1 (true)
443   True if this system supports mmap or an emulation of it.  If so, and
444   HAVE_MORECORE is not true, MMAP is used for all system
445   allocation. If set and HAVE_MORECORE is true as well, MMAP is
446   primarily used to directly allocate very large blocks. It is also
447   used as a backup strategy in cases where MORECORE fails to provide
448   space from system. Note: A single call to MUNMAP is assumed to be
449   able to unmap memory that may have been allocated using multiple calls
450   to MMAP, so long as they are adjacent.
451 
452 HAVE_MREMAP               default: 1 on linux, else 0
453   If true realloc() uses mremap() to re-allocate large blocks and
454   extend or shrink allocation spaces.
455 
456 MMAP_CLEARS               default: 1 except on WINCE.
457   True if mmap clears memory so calloc doesn't need to. This is true
458   for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
459 
460 USE_BUILTIN_FFS            default: 0 (i.e., not used)
461   Causes malloc to use the builtin ffs() function to compute indices.
462   Some compilers may recognize and intrinsify ffs to be faster than the
463   supplied C version. Also, the case of x86 using gcc is special-cased
464   to an asm instruction, so is already as fast as it can be, and so
465   this setting has no effect. Similarly for Win32 under recent MS compilers.
466   (On most x86s, the asm version is only slightly faster than the C version.)
467 
468 malloc_getpagesize         default: derive from system includes, or 4096.
469   The system page size. To the extent possible, this malloc manages
470   memory from the system in page-size units.  This may be (and
471   usually is) a function rather than a constant. This is ignored
472   if WIN32, where page size is determined using getSystemInfo during
473   initialization.
474 
475 USE_DEV_RANDOM             default: 0 (i.e., not used)
476   Causes malloc to use /dev/random to initialize secure magic seed for
477   stamping footers. Otherwise, the current time is used.
478 
479 NO_MALLINFO                default: 0
480   If defined, don't compile "mallinfo". This can be a simple way
481   of dealing with mismatches between system declarations and
482   those in this file.
483 
484 MALLINFO_FIELD_TYPE        default: size_t
485   The type of the fields in the mallinfo struct. This was originally
486   defined as "int" in SVID etc, but is more usefully defined as
487   size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
488 
489 REALLOC_ZERO_BYTES_FREES    default: not defined
490   This should be set if a call to realloc with zero bytes should
491   be the same as a call to free. Some people think it should. Otherwise,
492   since this malloc returns a unique pointer for malloc(0), so does
493   realloc(p, 0).
494 
495 LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
496 LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
497 LACKS_STDLIB_H                default: NOT defined unless on WIN32
498   Define these if your system does not have these header files.
499   You might need to manually insert some of the declarations they provide.
500 
501 DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
502                                 system_info.dwAllocationGranularity in WIN32,
503                                 otherwise 64K.
504       Also settable using mallopt(M_GRANULARITY, x)
505   The unit for allocating and deallocating memory from the system.  On
506   most systems with contiguous MORECORE, there is no reason to
507   make this more than a page. However, systems with MMAP tend to
508   either require or encourage larger granularities.  You can increase
509   this value to prevent system allocation functions from being called so
510   often, especially if they are slow.  The value must be at least one
511   page and must be a power of two.  Setting to 0 causes initialization
512   to either page size or win32 region size.  (Note: In previous
513   versions of malloc, the equivalent of this option was called
514   "TOP_PAD")
515 
516 DEFAULT_TRIM_THRESHOLD    default: 2MB
517       Also settable using mallopt(M_TRIM_THRESHOLD, x)
518   The maximum amount of unused top-most memory to keep before
519   releasing via malloc_trim in free().  Automatic trimming is mainly
520   useful in long-lived programs using contiguous MORECORE.  Because
521   trimming via sbrk can be slow on some systems, and can sometimes be
522   wasteful (in cases where programs immediately afterward allocate
523   more large chunks) the value should be high enough so that your
524   overall system performance would improve by releasing this much
525   memory.  As a rough guide, you might set to a value close to the
526   average size of a process (program) running on your system.
527   Releasing this much memory would allow such a process to run in
528   memory.  Generally, it is worth tuning trim thresholds when a
529   program undergoes phases where several large chunks are allocated
530   and released in ways that can reuse each other's storage, perhaps
531   mixed with phases where there are no such chunks at all. The trim
532   value must be greater than page size to have any useful effect.  To
533   disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
534   some people use of mallocing a huge space and then freeing it at
535   program startup, in an attempt to reserve system memory, doesn't
536   have the intended effect under automatic trimming, since that memory
537   will immediately be returned to the system.
538 
539 DEFAULT_MMAP_THRESHOLD       default: 256K
540       Also settable using mallopt(M_MMAP_THRESHOLD, x)
541   The request size threshold for using MMAP to directly service a
542   request. Requests of at least this size that cannot be allocated
543   using already-existing space will be serviced via mmap.  (If enough
544   normal freed space already exists it is used instead.)  Using mmap
545   segregates relatively large chunks of memory so that they can be
546   individually obtained and released from the host system. A request
547   serviced through mmap is never reused by any other request (at least
548   not directly; the system may just so happen to remap successive
549   requests to the same locations).  Segregating space in this way has
550   the benefits that: Mmapped space can always be individually released
551   back to the system, which helps keep the system level memory demands
552   of a long-lived program low.  Also, mapped memory doesn't become
553   `locked' between other chunks, as can happen with normally allocated
554   chunks, which means that even trimming via malloc_trim would not
555   release them.  However, it has the disadvantage that the space
556   cannot be reclaimed, consolidated, and then used to service later
557   requests, as happens with normal chunks.  The advantages of mmap
558   nearly always outweigh disadvantages for "large" chunks, but the
559   value of "large" may vary across systems.  The default is an
560   empirically derived value that works well in most systems. You can
561   disable mmap by setting to MAX_SIZE_T.
562 
563 MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
564   The number of consolidated frees between checks to release
565   unused segments when freeing. When using non-contiguous segments,
566   especially with multiple mspaces, checking only for topmost space
567   doesn't always suffice to trigger trimming. To compensate for this,
568   free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
569   current number of segments, if greater) try to release unused
570   segments to the OS when freeing chunks that result in
571   consolidation. The best value for this parameter is a compromise
572   between slowing down frees with relatively costly checks that
573   rarely trigger versus holding on to unused memory. To effectively
574   disable, set to MAX_SIZE_T. This may lead to a very slight speed
575   improvement at the expense of carrying around more memory.
576 */
577 
578 /* Version identifier to allow people to support multiple versions */
579 #ifndef DLMALLOC_VERSION
580 #define DLMALLOC_VERSION 20804
581 #endif /* DLMALLOC_VERSION */
582 
583 #ifndef WIN32
584 #ifdef _WIN32
585 #define WIN32 1
586 #endif  /* _WIN32 */
587 #ifdef _WIN32_WCE
588 #define LACKS_FCNTL_H
589 #define WIN32 1
590 #endif /* _WIN32_WCE */
591 #endif  /* WIN32 */
592 #ifdef WIN32
593 #define WIN32_LEAN_AND_MEAN
594 #include <windows.h>
595 #define HAVE_MMAP 1
596 #define HAVE_MORECORE 0
597 #define LACKS_UNISTD_H
598 #define LACKS_SYS_PARAM_H
599 #define LACKS_SYS_MMAN_H
600 #define LACKS_STRING_H
601 #define LACKS_STRINGS_H
602 #define LACKS_SYS_TYPES_H
603 #define LACKS_ERRNO_H
604 #ifndef MALLOC_FAILURE_ACTION
605 #define MALLOC_FAILURE_ACTION
606 #endif /* MALLOC_FAILURE_ACTION */
607 #ifdef _WIN32_WCE /* WINCE reportedly does not clear */
608 #define MMAP_CLEARS 0
609 #else
610 #define MMAP_CLEARS 1
611 #endif /* _WIN32_WCE */
612 #endif  /* WIN32 */
613 
614 #if defined(DARWIN) || defined(_DARWIN)
615 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
616 #ifndef HAVE_MORECORE
617 #define HAVE_MORECORE 0
618 #define HAVE_MMAP 1
619 /* OSX allocators provide 16 byte alignment */
620 #ifndef MALLOC_ALIGNMENT
621 #define MALLOC_ALIGNMENT ((size_t)16U)
622 #endif
623 #endif  /* HAVE_MORECORE */
624 #endif  /* DARWIN */
625 
626 #ifndef LACKS_SYS_TYPES_H
627 #include <sys/types.h>  /* For size_t */
628 #endif  /* LACKS_SYS_TYPES_H */
629 
630 #if (defined(__GNUC__) && ((defined(__i386__) || defined(__x86_64__)))) || (defined(_MSC_VER) && _MSC_VER>=1310)
631 #define SPIN_LOCKS_AVAILABLE 1
632 #else
633 #define SPIN_LOCKS_AVAILABLE 0
634 #endif
635 
636 /* The maximum size_t value used here is one less than the all-bits-set value */
637 #define MAX_SIZE_T           ((~(size_t)0) - 1)
638 
639 #ifndef ONLY_MSPACES
640 #define ONLY_MSPACES 0     /* define to a value */
641 #else
642 #define ONLY_MSPACES 1
643 #endif  /* ONLY_MSPACES */
644 #ifndef MSPACES
645 #if ONLY_MSPACES
646 #define MSPACES 1
647 #else   /* ONLY_MSPACES */
648 #define MSPACES 0
649 #endif  /* ONLY_MSPACES */
650 #endif  /* MSPACES */
651 #ifndef MALLOC_ALIGNMENT
652 #define MALLOC_ALIGNMENT ((size_t)8U)
653 #endif  /* MALLOC_ALIGNMENT */
654 #ifndef FOOTERS
655 #define FOOTERS 0
656 #endif  /* FOOTERS */
657 #ifndef ABORT
658 #define ABORT  abort()
659 #endif  /* ABORT */
660 #ifndef ABORT_ON_ASSERT_FAILURE
661 #define ABORT_ON_ASSERT_FAILURE 1
662 #endif  /* ABORT_ON_ASSERT_FAILURE */
663 #ifndef PROCEED_ON_ERROR
664 #define PROCEED_ON_ERROR 0
665 #endif  /* PROCEED_ON_ERROR */
666 #ifndef USE_LOCKS
667 #define USE_LOCKS 0
668 #endif  /* USE_LOCKS */
669 #ifndef USE_SPIN_LOCKS
670 #if USE_LOCKS && SPIN_LOCKS_AVAILABLE
671 #define USE_SPIN_LOCKS 1
672 #else
673 #define USE_SPIN_LOCKS 0
674 #endif /* USE_LOCKS && SPIN_LOCKS_AVAILABLE. */
675 #endif /* USE_SPIN_LOCKS */
676 #ifndef INSECURE
677 #define INSECURE 0
678 #endif  /* INSECURE */
679 #ifndef HAVE_MMAP
680 #define HAVE_MMAP 1
681 #endif  /* HAVE_MMAP */
682 #ifndef MMAP_CLEARS
683 #define MMAP_CLEARS 1
684 #endif  /* MMAP_CLEARS */
685 #ifndef HAVE_MREMAP
686 #ifdef linux
687 #define HAVE_MREMAP 1
688 #else   /* linux */
689 #define HAVE_MREMAP 0
690 #endif  /* linux */
691 #endif  /* HAVE_MREMAP */
692 #ifndef MALLOC_FAILURE_ACTION
693 #define MALLOC_FAILURE_ACTION  errno = ENOMEM;
694 #endif  /* MALLOC_FAILURE_ACTION */
695 #ifndef HAVE_MORECORE
696 #if ONLY_MSPACES
697 #define HAVE_MORECORE 0
698 #else   /* ONLY_MSPACES */
699 #define HAVE_MORECORE 1
700 #endif  /* ONLY_MSPACES */
701 #endif  /* HAVE_MORECORE */
702 #if !HAVE_MORECORE
703 #define MORECORE_CONTIGUOUS 0
704 #else   /* !HAVE_MORECORE */
705 #define MORECORE_DEFAULT sbrk
706 #ifndef MORECORE_CONTIGUOUS
707 #define MORECORE_CONTIGUOUS 1
708 #endif  /* MORECORE_CONTIGUOUS */
709 #endif  /* HAVE_MORECORE */
710 #ifndef DEFAULT_GRANULARITY
711 #if (MORECORE_CONTIGUOUS || defined(WIN32))
712 #define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
713 #else   /* MORECORE_CONTIGUOUS */
714 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
715 #endif  /* MORECORE_CONTIGUOUS */
716 #endif  /* DEFAULT_GRANULARITY */
717 #ifndef DEFAULT_TRIM_THRESHOLD
718 #ifndef MORECORE_CANNOT_TRIM
719 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
720 #else   /* MORECORE_CANNOT_TRIM */
721 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
722 #endif  /* MORECORE_CANNOT_TRIM */
723 #endif  /* DEFAULT_TRIM_THRESHOLD */
724 #ifndef DEFAULT_MMAP_THRESHOLD
725 #if HAVE_MMAP
726 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
727 #else   /* HAVE_MMAP */
728 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
729 #endif  /* HAVE_MMAP */
730 #endif  /* DEFAULT_MMAP_THRESHOLD */
731 #ifndef MAX_RELEASE_CHECK_RATE
732 #if HAVE_MMAP
733 #define MAX_RELEASE_CHECK_RATE 4095
734 #else
735 #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
736 #endif /* HAVE_MMAP */
737 #endif /* MAX_RELEASE_CHECK_RATE */
738 #ifndef USE_BUILTIN_FFS
739 #define USE_BUILTIN_FFS 0
740 #endif  /* USE_BUILTIN_FFS */
741 #ifndef USE_DEV_RANDOM
742 #define USE_DEV_RANDOM 0
743 #endif  /* USE_DEV_RANDOM */
744 #ifndef NO_MALLINFO
745 #define NO_MALLINFO 0
746 #endif  /* NO_MALLINFO */
747 #ifndef MALLINFO_FIELD_TYPE
748 #define MALLINFO_FIELD_TYPE size_t
749 #endif  /* MALLINFO_FIELD_TYPE */
750 #ifndef NO_SEGMENT_TRAVERSAL
751 #define NO_SEGMENT_TRAVERSAL 0
752 #endif /* NO_SEGMENT_TRAVERSAL */
753 
754 /*
755   mallopt tuning options.  SVID/XPG defines four standard parameter
756   numbers for mallopt, normally defined in malloc.h.  None of these
757   are used in this malloc, so setting them has no effect. But this
758   malloc does support the following options.
759 */
760 
761 #define M_TRIM_THRESHOLD     (-1)
762 #define M_GRANULARITY        (-2)
763 #define M_MMAP_THRESHOLD     (-3)
764 
765 /* ------------------------ Mallinfo declarations ------------------------ */
766 
767 #if !NO_MALLINFO
768 /*
769   This version of malloc supports the standard SVID/XPG mallinfo
770   routine that returns a struct containing usage properties and
771   statistics. It should work on any system that has a
772   /usr/include/malloc.h defining struct mallinfo.  The main
773   declaration needed is the mallinfo struct that is returned (by-copy)
774   by mallinfo().  The mallinfo struct contains a bunch of fields that
775   are not even meaningful in this version of malloc.  These fields are
776   instead filled by mallinfo() with other numbers that might be of
777   interest.
778 
779   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
780   /usr/include/malloc.h file that includes a declaration of struct
781   mallinfo.  If so, it is included; else a compliant version is
782   declared below.  These must be precisely the same for mallinfo() to
783   work.  The original SVID version of this struct, defined on most
784   systems with mallinfo, declares all fields as ints. But some others
785   define as unsigned long. If your system defines the fields using a
786   type of different width than listed here, you MUST #include your
787   system version and #define HAVE_USR_INCLUDE_MALLOC_H.
788 */
789 
790 /* #define HAVE_USR_INCLUDE_MALLOC_H */
791 
792 #ifdef HAVE_USR_INCLUDE_MALLOC_H
793 #include "/usr/include/malloc.h"
794 #else /* HAVE_USR_INCLUDE_MALLOC_H */
795 #ifndef STRUCT_MALLINFO_DECLARED
796 #define STRUCT_MALLINFO_DECLARED 1
797 struct mallinfo {
798   MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
799   MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
800   MALLINFO_FIELD_TYPE smblks;   /* always 0 */
801   MALLINFO_FIELD_TYPE hblks;    /* always 0 */
802   MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
803   MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
804   MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
805   MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
806   MALLINFO_FIELD_TYPE fordblks; /* total free space */
807   MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
808 };
809 #endif /* STRUCT_MALLINFO_DECLARED */
810 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
811 #endif /* NO_MALLINFO */
812 
813 /*
814   Try to persuade compilers to inline. The most critical functions for
815   inlining are defined as macros, so these aren't used for them.
816 */
817 
818 #ifndef FORCEINLINE
819   #if defined(__GNUC__)
820 #define FORCEINLINE __inline __attribute__ ((always_inline))
821   #elif defined(_MSC_VER)
822     #define FORCEINLINE __forceinline
823   #endif
824 #endif
825 #ifndef NOINLINE
826   #if defined(__GNUC__)
827     #define NOINLINE __attribute__ ((noinline))
828   #elif defined(_MSC_VER)
829     #define NOINLINE __declspec(noinline)
830   #else
831     #define NOINLINE
832   #endif
833 #endif
834 
835 #ifdef __cplusplus
836 extern "C" {
837 #ifndef FORCEINLINE
838  #define FORCEINLINE inline
839 #endif
840 #endif /* __cplusplus */
841 #ifndef FORCEINLINE
842  #define FORCEINLINE
843 #endif
844 
845 #if !ONLY_MSPACES
846 
847 /* ------------------- Declarations of public routines ------------------- */
848 
849 #ifndef USE_DL_PREFIX
850 #define dlcalloc               calloc
851 #define dlfree                 free
852 #define dlmalloc               malloc
853 #define dlmemalign             memalign
854 #define dlrealloc              realloc
855 #define dlvalloc               valloc
856 #define dlpvalloc              pvalloc
857 #define dlmallinfo             mallinfo
858 #define dlmallopt              mallopt
859 #define dlmalloc_trim          malloc_trim
860 #define dlmalloc_stats         malloc_stats
861 #define dlmalloc_usable_size   malloc_usable_size
862 #define dlmalloc_footprint     malloc_footprint
863 #define dlmalloc_max_footprint malloc_max_footprint
864 #define dlindependent_calloc   independent_calloc
865 #define dlindependent_comalloc independent_comalloc
866 #endif /* USE_DL_PREFIX */
867 
868 
869 /*
870   malloc(size_t n)
871   Returns a pointer to a newly allocated chunk of at least n bytes, or
872   null if no space is available, in which case errno is set to ENOMEM
873   on ANSI C systems.
874 
875   If n is zero, malloc returns a minimum-sized chunk. (The minimum
876   size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
877   systems.)  Note that size_t is an unsigned type, so calls with
878   arguments that would be negative if signed are interpreted as
879   requests for huge amounts of space, which will often fail. The
880   maximum supported value of n differs across systems, but is in all
881   cases less than the maximum representable value of a size_t.
882 */
883 void* dlmalloc(size_t);
884 
885 /*
886   free(void* p)
887   Releases the chunk of memory pointed to by p, that had been previously
888   allocated using malloc or a related routine such as realloc.
889   It has no effect if p is null. If p was not malloced or already
890   freed, free(p) will by default cause the current program to abort.
891 */
892 void  dlfree(void*);
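
/*
  Usage sketch (illustrative, not part of the original documentation;
  grab_buffer is just a hypothetical caller):

    static int grab_buffer(void) {
      char* buf = (char*) dlmalloc(1024);
      if (buf == 0)
        return -1;             // no space; errno is ENOMEM on ANSI systems
      // ... use buf ...
      dlfree(buf);             // release; dlfree(0) would be a harmless no-op
      return 0;
    }
*/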
893 
894 /*
895   calloc(size_t n_elements, size_t element_size);
896   Returns a pointer to n_elements * element_size bytes, with all locations
897   set to zero.
898 */
899 void* dlcalloc(size_t, size_t);
900 
901 /*
902   realloc(void* p, size_t n)
903   Returns a pointer to a chunk of size n that contains the same data
904   as does chunk p up to the minimum of (n, p's size) bytes, or null
905   if no space is available.
906 
907   The returned pointer may or may not be the same as p. The algorithm
908   prefers extending p in most cases when possible, otherwise it
909   employs the equivalent of a malloc-copy-free sequence.
910 
911   If p is null, realloc is equivalent to malloc.
912 
913   If space is not available, realloc returns null, errno is set (if on
914   ANSI) and p is NOT freed.
915 
916   if n is for fewer bytes than already held by p, the newly unused
917   space is lopped off and freed if possible.  realloc with a size
918   argument of zero (re)allocates a minimum-sized chunk.
919 
920   The old unix realloc convention of allowing the last-free'd chunk
921   to be used as an argument to realloc is not supported.
922 */
923 
924 void* dlrealloc(void*, size_t);
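
/*
  A common pattern (an illustrative sketch; handle_oom is a hypothetical
  caller-supplied routine), relying on the fact that a failed realloc
  leaves the original block intact:

    void* bigger = dlrealloc(p, new_size);
    if (bigger != 0)
      p = bigger;              // success; the block may have moved
    else
      handle_oom();            // failure; p is still valid and NOT freed
*/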
925 
926 /*
927   memalign(size_t alignment, size_t n);
928   Returns a pointer to a newly allocated chunk of n bytes, aligned
929   in accord with the alignment argument.
930 
931   The alignment argument should be a power of two. If the argument is
932   not a power of two, the nearest greater power is used.
933   8-byte alignment is guaranteed by normal malloc calls, so don't
934   bother calling memalign with an argument of 8 or less.
935 
936   Overreliance on memalign is a sure way to fragment space.
937 */
938 void* dlmemalign(size_t, size_t);
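
/*
  Example (illustrative): request a 4096-byte block on a 64-byte boundary.

    void* p = dlmemalign(64, 4096);     // 64 is a power of two
    if (p != 0) {
      // ((size_t)p % 64) == 0 here
      dlfree(p);
    }
*/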
939 
940 /*
941   valloc(size_t n);
942   Equivalent to memalign(pagesize, n), where pagesize is the page
943   size of the system. If the pagesize is unknown, 4096 is used.
944 */
945 void* dlvalloc(size_t);
946 
947 /*
948   mallopt(int parameter_number, int parameter_value)
949   Sets tunable parameters. The format is to provide a
950   (parameter-number, parameter-value) pair.  mallopt then sets the
951   corresponding parameter to the argument value if it can (i.e., so
952   long as the value is meaningful), and returns 1 if successful else
953   0.  To work around the fact that mallopt is specified to use int,
954   not size_t parameters, the value -1 is specially treated as the
955   maximum unsigned size_t value.
956 
957   SVID/XPG/ANSI defines four standard param numbers for mallopt,
958   normally defined in malloc.h.  None of these are used in this malloc,
959   so setting them has no effect. But this malloc also supports other
960   options in mallopt. See below for details.  Briefly, supported
961   parameters are as follows (listed defaults are for "typical"
962   configurations).
963 
964   Symbol            param #  default    allowed param values
965   M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1 disables)
966   M_GRANULARITY        -2     page size   any power of 2 >= page size
967   M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
968 */
969 int dlmallopt(int, int);
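
/*
  Example (illustrative): raise the mmap threshold to 1MB so that only
  requests of at least that size are serviced directly via mmap.

    if (dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024) == 0) {
      // the value was not accepted
    }
*/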
970 
971 /*
972   malloc_footprint();
973   Returns the number of bytes obtained from the system.  The total
974   number of bytes allocated by malloc, realloc etc., is less than this
975   value. Unlike mallinfo, this function returns only a precomputed
976   result, so can be called frequently to monitor memory consumption.
977   Even if locks are otherwise defined, this function does not use them,
978   so results might not be up to date.
979 */
980 size_t dlmalloc_footprint(void);
981 
982 /*
983   malloc_max_footprint();
984   Returns the maximum number of bytes obtained from the system. This
985   value will be greater than current footprint if deallocated space
986   has been reclaimed by the system. The peak number of bytes allocated
987   by malloc, realloc etc., is less than this value. Unlike mallinfo,
988   this function returns only a precomputed result, so can be called
989   frequently to monitor memory consumption.  Even if locks are
990   otherwise defined, this function does not use them, so results might
991   not be up to date.
992 */
993 size_t dlmalloc_max_footprint(void);
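
/*
  Monitoring sketch (illustrative): both calls return precomputed values,
  so they can be polled without noticeably disturbing the allocator.

    size_t now  = dlmalloc_footprint();
    size_t peak = dlmalloc_max_footprint();
    // now <= peak; both count bytes obtained from the system, not bytes
    // currently handed out to callers.
*/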
994 
995 #if !NO_MALLINFO
996 /*
997   mallinfo()
998   Returns (by copy) a struct containing various summary statistics:
999 
1000   arena:     current total non-mmapped bytes allocated from system
1001   ordblks:   the number of free chunks
1002   smblks:    always zero.
1003   hblks:     current number of mmapped regions
1004   hblkhd:    total bytes held in mmapped regions
1005   usmblks:   the maximum total allocated space. This will be greater
1006                 than current total if trimming has occurred.
1007   fsmblks:   always zero
1008   uordblks:  current total allocated space (normal or mmapped)
1009   fordblks:  total free space
1010   keepcost:  the maximum number of bytes that could ideally be released
1011                back to system via malloc_trim. ("ideally" means that
1012                it ignores page restrictions etc.)
1013 
1014   Because these fields are ints, but internal bookkeeping may
1015   be kept as longs, the reported values may wrap around zero and
1016   thus be inaccurate.
1017 */
1018 struct mallinfo dlmallinfo(void);
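
/*
  Example (illustrative; assumes <stdio.h> and the size_t field type
  configured above):

    struct mallinfo mi = dlmallinfo();
    printf("in use: %zu  free: %zu  trimmable: %zu\n",
           (size_t)mi.uordblks, (size_t)mi.fordblks, (size_t)mi.keepcost);
*/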
1019 #endif /* NO_MALLINFO */
1020 
1021 /*
1022   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
1023 
1024   independent_calloc is similar to calloc, but instead of returning a
1025   single cleared space, it returns an array of pointers to n_elements
1026   independent elements that can hold contents of size elem_size, each
1027   of which starts out cleared, and can be independently freed,
1028   realloc'ed etc. The elements are guaranteed to be adjacently
1029   allocated (this is not guaranteed to occur with multiple callocs or
1030   mallocs), which may also improve cache locality in some
1031   applications.
1032 
1033   The "chunks" argument is optional (i.e., may be null, which is
1034   probably the most typical usage). If it is null, the returned array
1035   is itself dynamically allocated and should also be freed when it is
1036   no longer needed. Otherwise, the chunks array must be of at least
1037   n_elements in length. It is filled in with the pointers to the
1038   chunks.
1039 
1040   In either case, independent_calloc returns this pointer array, or
1041   null if the allocation failed.  If n_elements is zero and "chunks"
1042   is null, it returns a chunk representing an array with zero elements
1043   (which should be freed if not wanted).
1044 
1045   Each element must be individually freed when it is no longer
1046   needed. If you'd like to instead be able to free all at once, you
1047   should instead use regular calloc and assign pointers into this
1048   space to represent elements.  (In this case though, you cannot
1049   independently free elements.)
1050 
1051   independent_calloc simplifies and speeds up implementations of many
1052   kinds of pools.  It may also be useful when constructing large data
1053   structures that initially have a fixed number of fixed-sized nodes,
1054   but the number is not known at compile time, and some of the nodes
1055   may later need to be freed. For example:
1056 
1057   struct Node { int item; struct Node* next; };
1058 
1059   struct Node* build_list() {
1060     struct Node** pool;
1061     int n = read_number_of_nodes_needed();
1062     if (n <= 0) return 0;
1063     pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
1064     if (pool == 0) die();
1065     // organize into a linked list...
1066     struct Node* first = pool[0];
1067     for (int i = 0; i < n-1; ++i)
1068       pool[i]->next = pool[i+1];
1069     free(pool);     // Can now free the array (or not, if it is needed later)
1070     return first;
1071   }
1072 */
1073 void** dlindependent_calloc(size_t, size_t, void**);
1074 
1075 /*
1076   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
1077 
1078   independent_comalloc allocates, all at once, a set of n_elements
1079   chunks with sizes indicated in the "sizes" array.    It returns
1080   an array of pointers to these elements, each of which can be
1081   independently freed, realloc'ed etc. The elements are guaranteed to
1082   be adjacently allocated (this is not guaranteed to occur with
1083   multiple callocs or mallocs), which may also improve cache locality
1084   in some applications.
1085 
1086   The "chunks" argument is optional (i.e., may be null). If it is null
1087   the returned array is itself dynamically allocated and should also
1088   be freed when it is no longer needed. Otherwise, the chunks array
1089   must be of at least n_elements in length. It is filled in with the
1090   pointers to the chunks.
1091 
1092   In either case, independent_comalloc returns this pointer array, or
1093   null if the allocation failed.  If n_elements is zero and chunks is
1094   null, it returns a chunk representing an array with zero elements
1095   (which should be freed if not wanted).
1096 
1097   Each element must be individually freed when it is no longer
1098   needed. If you'd like to instead be able to free all at once, you
1099   should instead use a single regular malloc, and assign pointers at
1100   particular offsets in the aggregate space. (In this case though, you
1101   cannot independently free elements.)
1102 
1103   independent_comalloc differs from independent_calloc in that each
1104   element may have a different size, and also that it does not
1105   automatically clear elements.
1106 
1107   independent_comalloc can be used to speed up allocation in cases
1108   where several structs or objects must always be allocated at the
1109   same time.  For example:
1110 
1111   struct Head { ... }
1112   struct Foot { ... }
1113 
1114   void send_message(char* msg) {
1115     int msglen = strlen(msg);
1116     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1117     void* chunks[3];
1118     if (independent_comalloc(3, sizes, chunks) == 0)
1119       die();
1120     struct Head* head = (struct Head*)(chunks[0]);
1121     char*        body = (char*)(chunks[1]);
1122     struct Foot* foot = (struct Foot*)(chunks[2]);
1123     // ...
1124   }
1125 
1126   In general though, independent_comalloc is worth using only for
1127   larger values of n_elements. For small values, you probably won't
1128   detect enough difference from series of malloc calls to bother.
1129 
1130   Overuse of independent_comalloc can increase overall memory usage,
1131   since it cannot reuse existing noncontiguous small chunks that
1132   might be available for some of the elements.
1133 */
1134 void** dlindependent_comalloc(size_t, size_t*, void**);
1135 
1136 
1137 /*
1138   pvalloc(size_t n);
1139   Equivalent to valloc(minimum-page-that-holds(n)), that is,
1140   round up n to nearest pagesize.
1141  */
1142 void*  dlpvalloc(size_t);
1143 
1144 /*
1145   malloc_trim(size_t pad);
1146 
1147   If possible, gives memory back to the system (via negative arguments
1148   to sbrk) if there is unused memory at the `high' end of the malloc
1149   pool or in unused MMAP segments. You can call this after freeing
1150   large blocks of memory to potentially reduce the system-level memory
1151   requirements of a program. However, it cannot guarantee to reduce
1152   memory. Under some allocation patterns, some large free blocks of
1153   memory will be locked between two used chunks, so they cannot be
1154   given back to the system.
1155 
1156   The `pad' argument to malloc_trim represents the amount of free
1157   trailing space to leave untrimmed. If this argument is zero, only
1158   the minimum amount of memory to maintain internal data structures
1159   will be left. Non-zero arguments can be supplied to maintain enough
1160   trailing space to service future expected allocations without having
1161   to re-obtain memory from the system.
1162 
1163   Malloc_trim returns 1 if it actually released any memory, else 0.
1164 */
1165 int  dlmalloc_trim(size_t);
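
/*
  Example (illustrative): after freeing large blocks, return unused trailing
  memory to the system but keep about 1MB of headroom for future requests.

    if (dlmalloc_trim((size_t)1024U * 1024U)) {
      // some memory was actually released
    }
*/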
1166 
1167 /*
1168   malloc_stats();
1169   Prints on stderr the amount of space obtained from the system (both
1170   via sbrk and mmap), the maximum amount (which may be more than
1171   current if malloc_trim and/or munmap got called), and the current
1172   number of bytes allocated via malloc (or realloc, etc) but not yet
1173   freed. Note that this is the number of bytes allocated, not the
1174   number requested. It will be larger than the number requested
1175   because of alignment and bookkeeping overhead. Because it includes
1176   alignment wastage as being in use, this figure may be greater than
1177   zero even when no user-level chunks are allocated.
1178 
1179   The reported current and maximum system memory can be inaccurate if
1180   a program makes other calls to system memory allocation functions
1181   (normally sbrk) outside of malloc.
1182 
1183   malloc_stats prints only the most commonly interesting statistics.
1184   More information can be obtained by calling mallinfo.
1185 */
1186 void  dlmalloc_stats(void);
1187 
1188 #endif /* ONLY_MSPACES */
1189 
1190 /*
1191   malloc_usable_size(void* p);
1192 
1193   Returns the number of bytes you can actually use in
1194   an allocated chunk, which may be more than you requested (although
1195   often not) due to alignment and minimum size constraints.
1196   You can use this many bytes without worrying about
1197   overwriting other allocated objects. This is not a particularly great
1198   programming practice. malloc_usable_size can be more useful in
1199   debugging and assertions, for example:
1200 
1201   p = malloc(n);
1202   assert(malloc_usable_size(p) >= 256);
1203 */
1204 size_t dlmalloc_usable_size(void*);
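/*
  Illustrative sketch only (not from the original sources), assuming the
  dl-prefixed entry points declared earlier in this file; it shows the
  requested-versus-usable distinction described above.

    void* p = dlmalloc(100);                   // request 100 bytes
    if (p != 0) {
      size_t usable = dlmalloc_usable_size(p); // >= 100
      // The extra bytes come from alignment and minimum-chunk-size
      // padding and may safely be written.
      assert(usable >= 100);
      dlfree(p);
    }
*/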
1205 
1206 
1207 #if MSPACES
1208 
1209 /*
1210   mspace is an opaque type representing an independent
1211   region of space that supports mspace_malloc, etc.
1212 */
1213 typedef void* mspace;
1214 
1215 /*
1216   mstar_create_mspace creates and returns a new independent space with the
1217   given initial capacity, or, if 0, the default granularity size.  It
1218   returns null if there is no system memory available to create the
1219   space.  If argument locked is non-zero, the space uses a separate
1220   lock to control access. The capacity of the space will grow
1221   dynamically as needed to service mspace_malloc requests.  You can
1222   control the sizes of incremental increases of this space by
1223   compiling with a different DEFAULT_GRANULARITY or dynamically
1224   setting with mallopt(M_GRANULARITY, value).
1225 */
1226 mspace mstar_create_mspace(size_t capacity, int locked);
1227 
1228 /*
1229   destroy_mspace destroys the given space, and attempts to return all
1230   of its memory back to the system, returning the total number of
1231   bytes freed. After destruction, the results of access to all memory
1232   used by the space become undefined.
1233 */
1234 size_t mstar_destroy_mspace(mspace msp);
1235 
1236 /*
1237   create_mspace_with_base uses the memory supplied as the initial base
1238   of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1239   space is used for bookkeeping, so the capacity must be at least this
1240   large. (Otherwise 0 is returned.) When this initial space is
1241   exhausted, additional memory will be obtained from the system.
1242   Destroying this space will deallocate all additionally allocated
1243   space (if possible) but not the initial base.
1244 */
1245 mspace mstar_create_mspace_with_base(void* base, size_t capacity, int locked);
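/*
  Illustrative sketch only (not from the original sources) of carving an
  mspace out of a caller-supplied buffer.  The 64 KB arena size is an
  arbitrary example; it merely needs to exceed the bookkeeping overhead
  (less than 128*sizeof(size_t) bytes) noted above.

    static char arena[64 * 1024];

    void use_static_arena(void) {
      mspace ms = mstar_create_mspace_with_base(arena, sizeof(arena), 0);
      if (ms != 0) {
        void* p = mstar_mspace_malloc(ms, 256);
        // ... use p ...
        mstar_mspace_free(ms, p);
        // Returns any additionally obtained system memory, but never
        // the arena buffer itself.
        mstar_destroy_mspace(ms);
      }
    }
*/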
1246 
1247 /*
1248   mspace_track_large_chunks controls whether requests for large chunks
1249   are allocated in their own untracked mmapped regions, separate from
1250   others in this mspace. By default large chunks are not tracked,
1251   which reduces fragmentation. However, such chunks are not
1252   necessarily released to the system upon destroy_mspace.  Enabling
1253   tracking by setting to true may increase fragmentation, but avoids
1254   leakage when relying on destroy_mspace to release all memory
1255   allocated using this space.  The function returns the previous
1256   setting.
1257 */
1258 int mstar_mspace_track_large_chunks(mspace msp, int enable);
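/*
  Illustrative sketch only (not from the original sources): when an
  mspace is used as a disposable arena, enabling tracking first is
  intended so that mstar_destroy_mspace can also reclaim chunks large
  enough to have been served by mmap; the 4 MB request is an arbitrary
  example.

    mspace ms = mstar_create_mspace(0, 0);
    if (ms != 0) {
      (void)mstar_mspace_track_large_chunks(ms, 1);
      void* big = mstar_mspace_malloc(ms, 4 * 1024 * 1024);
      (void)big;                 // ... use the allocation ...
      mstar_destroy_mspace(ms);  // reclaims tracked large chunks too
    }
*/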
1259 
1260 
1261 /*
1262   mspace_malloc behaves as malloc, but operates within
1263   the given space.
1264 */
1265 void* mstar_mspace_malloc(mspace msp, size_t bytes);
1266 
1267 /*
1268   mspace_free behaves as free, but operates within
1269   the given space.
1270 
1271   If compiled with FOOTERS==1, mspace_free is not actually needed.
1272   free may be called instead of mspace_free because freed chunks from
1273   any space are handled by their originating spaces.
1274 */
1275 void mstar_mspace_free(mspace msp, void* mem);
1276 
1277 /*
1278   mstar_mspace_realloc behaves as realloc, but operates within
1279   the given space.
1280 
1281   If compiled with FOOTERS==1, mstar_mspace_realloc is not actually
1282   needed.  realloc may be called instead of mstar_mspace_realloc because
1283   realloced chunks from any space are handled by their originating
1284   spaces.
1285 */
1286 void* mstar_mspace_realloc(mspace msp, void* mem, size_t newsize);
1287 
1288 /*
1289   mspace_calloc behaves as calloc, but operates within
1290   the given space.
1291 */
1292 void* mstar_mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1293 
1294 /*
1295   mstar_mspace_memalign behaves as memalign, but operates within
1296   the given space.
1297 */
1298 void* mstar_mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1299 
1300 /*
1301   mstar_mspace_independent_calloc behaves as independent_calloc, but
1302   operates within the given space.
1303 */
1304 void** mstar_mspace_independent_calloc(mspace msp, size_t n_elements,
1305                                  size_t elem_size, void* chunks[]);
1306 
1307 /*
1308   mstar_mspace_independent_comalloc behaves as independent_comalloc, but
1309   operates within the given space.
1310 */
1311 void** mstar_mspace_independent_comalloc(mspace msp, size_t n_elements,
1312                                    size_t sizes[], void* chunks[]);
1313 
1314 /*
1315   mstar_mspace_footprint() returns the number of bytes obtained from the
1316   system for this space.
1317 */
1318 size_t mstar_mspace_footprint(mspace msp);
1319 
1320 /*
1321   mstar_mspace_max_footprint() returns the peak number of bytes obtained from the
1322   system for this space.
1323 */
1324 size_t mstar_mspace_max_footprint(mspace msp);
1325 
1326 
1327 #if !NO_MALLINFO
1328 /*
1329   mstar_mspace_mallinfo behaves as mallinfo, but reports properties of
1330   the given space.
1331 */
1332 struct mallinfo mstar_mspace_mallinfo(mspace msp);
1333 #endif /* NO_MALLINFO */
1334 
1335 /*
1336   mstar_mspace_usable_size(void* p) behaves the same as malloc_usable_size.
1337 */
1338 size_t mstar_mspace_usable_size(void* mem);
1339 
1340 /*
1341   mstar_mspace_malloc_stats behaves as malloc_stats, but reports
1342   properties of the given space.
1343 */
1344 void mstar_mspace_malloc_stats(mspace msp);
1345 
1346 /*
1347   mstar_mspace_trim behaves as malloc_trim, but
1348   operates within the given space.
1349 */
1350 int mstar_mspace_trim(mspace msp, size_t pad);
1351 
1352 /*
1353   An alias for mallopt.
1354 */
1355 int mstar_mspace_mallopt(int, int);
1356 
1357 #endif /* MSPACES */
1358 
1359 #ifdef __cplusplus
1360 };  /* end of extern "C" */
1361 #endif /* __cplusplus */
1362 
1363 /*
1364   ========================================================================
1365   To make a fully customizable malloc.h header file, cut everything
1366   above this line, put into file malloc.h, edit to suit, and #include it
1367   on the next line, as well as in programs that use this malloc.
1368   ========================================================================
1369 */
1370 
1371 /* #include "malloc.h" */
1372 
1373 /*------------------------------ internal #includes ---------------------- */
1374 
1375 #ifdef WIN32
1376 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
1377 #endif /* WIN32 */
1378 
1379 #include <stdio.h>       /* for printing in malloc_stats */
1380 
1381 #ifndef LACKS_ERRNO_H
1382 #include <errno.h>       /* for MALLOC_FAILURE_ACTION */
1383 #endif /* LACKS_ERRNO_H */
1384 #if FOOTERS || DEBUG
1385 #include <time.h>        /* for magic initialization */
1386 #endif /* FOOTERS */
1387 #ifndef LACKS_STDLIB_H
1388 #include <stdlib.h>      /* for abort() */
1389 #endif /* LACKS_STDLIB_H */
1390 #ifdef DEBUG
1391 #if ABORT_ON_ASSERT_FAILURE
1392 #undef assert
1393 #define assert(x) if(!(x)) ABORT
1394 #else /* ABORT_ON_ASSERT_FAILURE */
1395 #include <assert.h>
1396 #endif /* ABORT_ON_ASSERT_FAILURE */
1397 #else  /* DEBUG */
1398 #ifndef assert
1399 #define assert(x)
1400 #endif
1401 #define DEBUG 0
1402 #endif /* DEBUG */
1403 #ifndef LACKS_STRING_H
1404 #include <string.h>      /* for memset etc */
1405 #endif  /* LACKS_STRING_H */
1406 #if USE_BUILTIN_FFS
1407 #ifndef LACKS_STRINGS_H
1408 #include <strings.h>     /* for ffs */
1409 #endif /* LACKS_STRINGS_H */
1410 #endif /* USE_BUILTIN_FFS */
1411 #if HAVE_MMAP
1412 #ifndef LACKS_SYS_MMAN_H
1413 /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
1414 #if (defined(linux) && !defined(__USE_GNU))
1415 #define __USE_GNU 1
1416 #include <sys/mman.h>    /* for mmap */
1417 #undef __USE_GNU
1418 #else
1419 #include <sys/mman.h>    /* for mmap */
1420 #endif /* linux */
1421 #endif /* LACKS_SYS_MMAN_H */
1422 #ifndef LACKS_FCNTL_H
1423 #include <fcntl.h>
1424 #endif /* LACKS_FCNTL_H */
1425 #endif /* HAVE_MMAP */
1426 #ifndef LACKS_UNISTD_H
1427 #include <unistd.h>     /* for sbrk, sysconf */
1428 #else /* LACKS_UNISTD_H */
1429 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1430 extern void*     sbrk(ptrdiff_t);
1431 #endif /* FreeBSD etc */
1432 #endif /* LACKS_UNISTD_H */
1433 
1434 /* Declarations for locking */
1435 #if USE_LOCKS
1436 #ifndef WIN32
1437 #include <pthread.h>
1438 #if defined (__SVR4) && defined (__sun)  /* solaris */
1439 #include <thread.h>
1440 #endif /* solaris */
1441 #else
1442 #ifndef _M_AMD64
1443 /* These are already defined on AMD64 builds */
1444 #ifdef __cplusplus
1445 extern "C" {
1446 #endif /* __cplusplus */
1447 LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
1448 LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
1449 #ifdef __cplusplus
1450 }
1451 #endif /* __cplusplus */
1452 #endif /* _M_AMD64 */
1453 #pragma intrinsic (_InterlockedCompareExchange)
1454 #pragma intrinsic (_InterlockedExchange)
1455 #define interlockedcompareexchange _InterlockedCompareExchange
1456 #define interlockedexchange _InterlockedExchange
1457 #endif /* Win32 */
1458 #endif /* USE_LOCKS */
1459 
1460 /* Declarations for bit scanning on win32 */
1461 #if defined(_MSC_VER) && _MSC_VER>=1300
1462 #ifndef BitScanForward	/* Try to avoid pulling in WinNT.h */
1463 #ifdef __cplusplus
1464 extern "C" {
1465 #endif /* __cplusplus */
1466 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
1467 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
1468 #ifdef __cplusplus
1469 }
1470 #endif /* __cplusplus */
1471 
1472 #define BitScanForward _BitScanForward
1473 #define BitScanReverse _BitScanReverse
1474 #pragma intrinsic(_BitScanForward)
1475 #pragma intrinsic(_BitScanReverse)
1476 #endif /* BitScanForward */
1477 #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
1478 
1479 #ifndef WIN32
1480 #ifndef malloc_getpagesize
1481 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
1482 #    ifndef _SC_PAGE_SIZE
1483 #      define _SC_PAGE_SIZE _SC_PAGESIZE
1484 #    endif
1485 #  endif
1486 #  ifdef _SC_PAGE_SIZE
1487 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1488 #  else
1489 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1490        extern size_t getpagesize();
1491 #      define malloc_getpagesize getpagesize()
1492 #    else
1493 #      ifdef WIN32 /* use supplied emulation of getpagesize */
1494 #        define malloc_getpagesize getpagesize()
1495 #      else
1496 #        ifndef LACKS_SYS_PARAM_H
1497 #          include <sys/param.h>
1498 #        endif
1499 #        ifdef EXEC_PAGESIZE
1500 #          define malloc_getpagesize EXEC_PAGESIZE
1501 #        else
1502 #          ifdef NBPG
1503 #            ifndef CLSIZE
1504 #              define malloc_getpagesize NBPG
1505 #            else
1506 #              define malloc_getpagesize (NBPG * CLSIZE)
1507 #            endif
1508 #          else
1509 #            ifdef NBPC
1510 #              define malloc_getpagesize NBPC
1511 #            else
1512 #              ifdef PAGESIZE
1513 #                define malloc_getpagesize PAGESIZE
1514 #              else /* just guess */
1515 #                define malloc_getpagesize ((size_t)4096U)
1516 #              endif
1517 #            endif
1518 #          endif
1519 #        endif
1520 #      endif
1521 #    endif
1522 #  endif
1523 #endif
1524 #endif
1525 
1526 
1527 
1528 /* ------------------- size_t and alignment properties -------------------- */
1529 
1530 /* The byte and bit size of a size_t */
1531 #define SIZE_T_SIZE         (sizeof(size_t))
1532 #define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
1533 
1534 /* Some constants coerced to size_t */
1535 /* Annoying but necessary to avoid errors on some platforms */
1536 #define SIZE_T_ZERO         ((size_t)0)
1537 #define SIZE_T_ONE          ((size_t)1)
1538 #define SIZE_T_TWO          ((size_t)2)
1539 #define SIZE_T_FOUR         ((size_t)4)
1540 #define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
1541 #define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
1542 #define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1543 #define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
1544 
1545 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
1546 #define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
1547 
1548 /* True if address a has acceptable alignment */
1549 #define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
1550 
1551 /* the number of bytes to offset an address to align it */
1552 #define align_offset(A)\
1553  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1554   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
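/*
  Worked example of the two macros above (illustrative only, not from
  the original sources), assuming the common default MALLOC_ALIGNMENT of
  2*sizeof(size_t) == 8 on a 32-bit build, so CHUNK_ALIGN_MASK == 7:

    is_aligned((void*)0x1000)    -> 1   (0x1000 & 7 == 0)
    is_aligned((void*)0x1004)    -> 0
    align_offset((void*)0x1004)  -> 4   ((8 - (0x1004 & 7)) & 7)
    align_offset((void*)0x1008)  -> 0   (already aligned)
*/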
1555 
1556 /* -------------------------- MMAP preliminaries ------------------------- */
1557 
1558 /*
1559    If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
1560    checks to fail so compiler optimizer can delete code rather than
1561    using so many "#if"s.
1562 */
1563 
1564 
1565 /* MORECORE and MMAP must return MFAIL on failure */
1566 #define MFAIL                ((void*)(MAX_SIZE_T))
1567 #define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
1568 
1569 #if HAVE_MMAP
1570 
1571 #ifndef WIN32
1572 #define MUNMAP_DEFAULT(a, s)  munmap((a), (s))
1573 #define MMAP_PROT            (PROT_READ|PROT_WRITE)
1574 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1575 #define MAP_ANONYMOUS        MAP_ANON
1576 #endif /* MAP_ANON */
1577 #ifdef MAP_ANONYMOUS
1578 #define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
1579 #define MMAP_DEFAULT(s)       mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1580 #else /* MAP_ANONYMOUS */
1581 /*
1582    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
1583    is unlikely to be needed, but is supplied just in case.
1584 */
1585 #define MMAP_FLAGS           (MAP_PRIVATE)
1586 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1587 #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
1588            (dev_zero_fd = open("/dev/zero", O_RDWR), \
1589             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1590             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1591 #endif /* MAP_ANONYMOUS */
1592 
1593 #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
1594 
1595 #else /* WIN32 */
1596 
1597 /* Win32 MMAP via VirtualAlloc */
1598 static FORCEINLINE void* win32mmap(size_t size) {
1599   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
1600   return (ptr != 0)? ptr: MFAIL;
1601 }
1602 
1603 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1604 static FORCEINLINE void* win32direct_mmap(size_t size) {
1605   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
1606                            PAGE_READWRITE);
1607   return (ptr != 0)? ptr: MFAIL;
1608 }
1609 
1610 /* This function supports releasing coalesced segments */
1611 static FORCEINLINE int win32munmap(void* ptr, size_t size) {
1612   MEMORY_BASIC_INFORMATION minfo;
1613   char* cptr = (char*)ptr;
1614   while (size) {
1615     if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1616       return -1;
1617     if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1618         minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1619       return -1;
1620     if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1621       return -1;
1622     cptr += minfo.RegionSize;
1623     size -= minfo.RegionSize;
1624   }
1625   return 0;
1626 }
1627 
1628 #define MMAP_DEFAULT(s)             win32mmap(s)
1629 #define MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
1630 #define DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
1631 #endif /* WIN32 */
1632 #endif /* HAVE_MMAP */
1633 
1634 #if HAVE_MREMAP
1635 #ifndef WIN32
1636 #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1637 #endif /* WIN32 */
1638 #endif /* HAVE_MREMAP */
1639 
1640 
1641 /**
1642  * Define CALL_MORECORE
1643  */
1644 #if HAVE_MORECORE
1645     #ifdef MORECORE
1646         #define CALL_MORECORE(S)    MORECORE(S)
1647     #else  /* MORECORE */
1648         #define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
1649     #endif /* MORECORE */
1650 #else  /* HAVE_MORECORE */
1651     #define CALL_MORECORE(S)        MFAIL
1652 #endif /* HAVE_MORECORE */
1653 
1654 /**
1655  * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
1656  */
1657 #if HAVE_MMAP
1658     #define USE_MMAP_BIT            (SIZE_T_ONE)
1659 
1660     #ifdef MMAP
1661         #define CALL_MMAP(s)        MMAP(s)
1662     #else /* MMAP */
1663         #define CALL_MMAP(s)        MMAP_DEFAULT(s)
1664     #endif /* MMAP */
1665     #ifdef MUNMAP
1666         #define CALL_MUNMAP(a, s)   MUNMAP((a), (s))
1667     #else /* MUNMAP */
1668         #define CALL_MUNMAP(a, s)   MUNMAP_DEFAULT((a), (s))
1669     #endif /* MUNMAP */
1670     #ifdef DIRECT_MMAP
1671         #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
1672     #else /* DIRECT_MMAP */
1673         #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
1674     #endif /* DIRECT_MMAP */
1675 #else  /* HAVE_MMAP */
1676     #define USE_MMAP_BIT            (SIZE_T_ZERO)
1677 
1678     #define MMAP(s)                 MFAIL
1679     #define MUNMAP(a, s)            (-1)
1680     #define DIRECT_MMAP(s)          MFAIL
1681     #define CALL_DIRECT_MMAP(s)     DIRECT_MMAP(s)
1682     #define CALL_MMAP(s)            MMAP(s)
1683     #define CALL_MUNMAP(a, s)       MUNMAP((a), (s))
1684 #endif /* HAVE_MMAP */
1685 
1686 /**
1687  * Define CALL_MREMAP
1688  */
1689 #if HAVE_MMAP && HAVE_MREMAP
1690     #ifdef MREMAP
1691         #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
1692     #else /* MREMAP */
1693         #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
1694     #endif /* MREMAP */
1695 #else  /* HAVE_MMAP && HAVE_MREMAP */
1696     #define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
1697 #endif /* HAVE_MMAP && HAVE_MREMAP */
1698 
1699 /* mstate bit set if contiguous morecore disabled or failed */
1700 #define USE_NONCONTIGUOUS_BIT (4U)
1701 
1702 /* segment bit set in create_mspace_with_base */
1703 #define EXTERN_BIT            (8U)
1704 
1705 
1706 /* --------------------------- Lock preliminaries ------------------------ */
1707 
1708 /*
1709   When locks are defined, there is one global lock, plus
1710   one per-mspace lock.
1711 
1712   The global lock ensures that mparams.magic and other unique
1713   mparams values are initialized only once. It also protects
1714   sequences of calls to MORECORE.  In many cases sys_alloc requires
1715   two calls that should not be interleaved with calls by other
1716   threads.  This does not protect against direct calls to MORECORE
1717   by other threads not using this lock, so there is still code to
1718   cope as best we can with interference.
1719 
1720   Per-mspace locks surround calls to malloc, free, etc.  To enable use
1721   in layered extensions, per-mspace locks are reentrant.
1722 
1723   Because lock-protected regions generally have bounded times, it is
1724   OK to use the supplied simple spinlocks in the custom versions for
1725   x86. Spinlocks are likely to improve performance for lightly
1726   contended applications, but worsen performance under heavy
1727   contention.
1728 
1729   If USE_LOCKS is > 1, the definitions of lock routines here are
1730   bypassed, in which case you will need to define the type MLOCK_T,
1731   and at least INITIAL_LOCK, ACQUIRE_LOCK, RELEASE_LOCK and possibly
1732   TRY_LOCK (which is not used in this malloc, but commonly needed in
1733   extensions.)  You must also declare a
1734     static MLOCK_T malloc_global_mutex = { initialization values };.
1735 
1736 */
1737 
1738 #if USE_LOCKS == 1
1739 
1740 #if USE_SPIN_LOCKS && SPIN_LOCKS_AVAILABLE
1741 #ifndef WIN32
1742 
1743 /* Custom pthread-style spin locks on x86 and x64 for gcc */
1744 struct pthread_mlock_t {
1745   volatile unsigned int l;
1746   unsigned int c;
1747   pthread_t threadid;
1748 };
1749 #define MLOCK_T               struct pthread_mlock_t
1750 #define CURRENT_THREAD        pthread_self()
1751 #define INITIAL_LOCK(sl)      ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
1752 #define ACQUIRE_LOCK(sl)      pthread_acquire_lock(sl)
1753 #define RELEASE_LOCK(sl)      pthread_release_lock(sl)
1754 #define TRY_LOCK(sl)          pthread_try_lock(sl)
1755 #define SPINS_PER_YIELD       63
1756 
1757 static MLOCK_T malloc_global_mutex = { 0, 0, 0};
1758 
1759 static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
1760   int spins = 0;
1761   volatile unsigned int* lp = &sl->l;
1762   for (;;) {
1763     if (*lp != 0) {
1764       if (sl->threadid == CURRENT_THREAD) {
1765         ++sl->c;
1766         return 0;
1767       }
1768     }
1769     else {
1770       /* place args to cmpxchgl in locals to evade oddities in some gccs */
1771       int cmp = 0;
1772       int val = 1;
1773       int ret;
1774       __asm__ __volatile__  ("lock; cmpxchgl %1, %2"
1775                              : "=a" (ret)
1776                              : "r" (val), "m" (*(lp)), "0"(cmp)
1777                              : "memory", "cc");
1778       if (!ret) {
1779         assert(!sl->threadid);
1780         sl->threadid = CURRENT_THREAD;
1781         sl->c = 1;
1782         return 0;
1783       }
1784     }
1785     if ((++spins & SPINS_PER_YIELD) == 0) {
1786 #if defined (__SVR4) && defined (__sun) /* solaris */
1787       thr_yield();
1788 #else
1789 #if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
1790       sched_yield();
1791 #else  /* no-op yield on unknown systems */
1792       ;
1793 #endif /* __linux__ || __FreeBSD__ || __APPLE__ */
1794 #endif /* solaris */
1795     }
1796   }
1797 }
1798 
1799 static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
1800   volatile unsigned int* lp = &sl->l;
1801   assert(*lp != 0);
1802   assert(sl->threadid == CURRENT_THREAD);
1803   if (--sl->c == 0) {
1804     sl->threadid = 0;
1805     int prev = 0;
1806     int ret;
1807     __asm__ __volatile__ ("lock; xchgl %0, %1"
1808                           : "=r" (ret)
1809                           : "m" (*(lp)), "0"(prev)
1810                           : "memory");
1811   }
1812 }
1813 
1814 static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
1815   volatile unsigned int* lp = &sl->l;
1816   if (*lp != 0) {
1817     if (sl->threadid == CURRENT_THREAD) {
1818       ++sl->c;
1819       return 1;
1820     }
1821   }
1822   else {
1823     int cmp = 0;
1824     int val = 1;
1825     int ret;
1826     __asm__ __volatile__  ("lock; cmpxchgl %1, %2"
1827                            : "=a" (ret)
1828                            : "r" (val), "m" (*(lp)), "0"(cmp)
1829                            : "memory", "cc");
1830     if (!ret) {
1831       assert(!sl->threadid);
1832       sl->threadid = CURRENT_THREAD;
1833       sl->c = 1;
1834       return 1;
1835     }
1836   }
1837   return 0;
1838 }
1839 
1840 
1841 #else /* WIN32 */
1842 /* Custom win32-style spin locks on x86 and x64 for MSC */
1843 struct win32_mlock_t {
1844   volatile long l;
1845   unsigned int c;
1846   long threadid;
1847 };
1848 
1849 #define MLOCK_T               struct win32_mlock_t
1850 #define CURRENT_THREAD        GetCurrentThreadId()
1851 #define INITIAL_LOCK(sl)      ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
1852 #define ACQUIRE_LOCK(sl)      win32_acquire_lock(sl)
1853 #define RELEASE_LOCK(sl)      win32_release_lock(sl)
1854 #define TRY_LOCK(sl)          win32_try_lock(sl)
1855 #define SPINS_PER_YIELD       63
1856 
1857 static MLOCK_T malloc_global_mutex = { 0, 0, 0};
1858 
1859 static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
1860   int spins = 0;
1861   for (;;) {
1862     if (sl->l != 0) {
1863       if (sl->threadid == CURRENT_THREAD) {
1864         ++sl->c;
1865         return 0;
1866       }
1867     }
1868     else {
1869       if (!interlockedexchange(&sl->l, 1)) {
1870         assert(!sl->threadid);
1871         sl->threadid = CURRENT_THREAD;
1872         sl->c = 1;
1873         return 0;
1874       }
1875     }
1876     if ((++spins & SPINS_PER_YIELD) == 0)
1877       SleepEx(0, FALSE);
1878   }
1879 }
1880 
1881 static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
1882   assert(sl->threadid == CURRENT_THREAD);
1883   assert(sl->l != 0);
1884   if (--sl->c == 0) {
1885     sl->threadid = 0;
1886     interlockedexchange (&sl->l, 0);
1887   }
1888 }
1889 
1890 static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
1891   if (sl->l != 0) {
1892     if (sl->threadid == CURRENT_THREAD) {
1893       ++sl->c;
1894       return 1;
1895     }
1896   }
1897   else {
1898     if (!interlockedexchange(&sl->l, 1)){
1899       assert(!sl->threadid);
1900       sl->threadid = CURRENT_THREAD;
1901       sl->c = 1;
1902       return 1;
1903     }
1904   }
1905   return 0;
1906 }
1907 
1908 #endif /* WIN32 */
1909 #else /* USE_SPIN_LOCKS */
1910 
1911 #ifndef WIN32
1912 /* pthreads-based locks */
1913 
1914 #define MLOCK_T               pthread_mutex_t
1915 #define CURRENT_THREAD        pthread_self()
1916 #define INITIAL_LOCK(sl)      pthread_init_lock(sl)
1917 #define ACQUIRE_LOCK(sl)      pthread_mutex_lock(sl)
1918 #define RELEASE_LOCK(sl)      pthread_mutex_unlock(sl)
1919 #define TRY_LOCK(sl)          (!pthread_mutex_trylock(sl))
1920 
1921 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
1922 
1923 /* Cope with old-style linux recursive lock initialization by adding */
1924 /* skipped internal declaration from pthread.h */
1925 #ifdef linux
1926 #ifndef PTHREAD_MUTEX_RECURSIVE
1927 extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
1928 					   int __kind));
1929 #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
1930 #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
1931 #endif
1932 #endif
1933 
1934 static int pthread_init_lock (MLOCK_T *sl) {
1935   pthread_mutexattr_t attr;
1936   if (pthread_mutexattr_init(&attr)) return 1;
1937   if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
1938   if (pthread_mutex_init(sl, &attr)) return 1;
1939   if (pthread_mutexattr_destroy(&attr)) return 1;
1940   return 0;
1941 }
1942 
1943 #else /* WIN32 */
1944 /* Win32 critical sections */
1945 #define MLOCK_T               CRITICAL_SECTION
1946 #define CURRENT_THREAD        GetCurrentThreadId()
1947 #define INITIAL_LOCK(s)       (!InitializeCriticalSectionAndSpinCount((s), 0x80000000|4000))
1948 #define ACQUIRE_LOCK(s)       (EnterCriticalSection((s)), 0)
1949 #define RELEASE_LOCK(s)       LeaveCriticalSection((s))
1950 #define TRY_LOCK(s)           TryEnterCriticalSection((s))
1951 #define NEED_GLOBAL_LOCK_INIT
1952 
1953 static MLOCK_T malloc_global_mutex;
1954 static volatile long malloc_global_mutex_status;
1955 
1956 /* Use spin loop to initialize global lock */
1957 static void init_malloc_global_mutex() {
1958   for (;;) {
1959     long stat = malloc_global_mutex_status;
1960     if (stat > 0)
1961       return;
1962     /* transition to < 0 while initializing, then to > 0 */
1963     if (stat == 0 &&
1964         interlockedcompareexchange(&malloc_global_mutex_status, -1, 0) == 0) {
1965       InitializeCriticalSection(&malloc_global_mutex);
1966       interlockedexchange(&malloc_global_mutex_status,1);
1967       return;
1968     }
1969     SleepEx(0, FALSE);
1970   }
1971 }
1972 
1973 #endif /* WIN32 */
1974 #endif /* USE_SPIN_LOCKS */
1975 #endif /* USE_LOCKS == 1 */
1976 
1977 /* -----------------------  User-defined locks ------------------------ */
1978 
1979 #if USE_LOCKS > 1
1980 /* Define your own lock implementation here */
1981 /* #define INITIAL_LOCK(sl)  ... */
1982 /* #define ACQUIRE_LOCK(sl)  ... */
1983 /* #define RELEASE_LOCK(sl)  ... */
1984 /* #define TRY_LOCK(sl) ... */
1985 /* static MLOCK_T malloc_global_mutex = ... */
1986 #endif /* USE_LOCKS > 1 */
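/*
  Hedged sketch only (not from the original sources) of what a
  USE_LOCKS > 1 plug-in could look like, mirroring the pthreads-based
  fallback earlier in this file.  Note that the comment above requires
  per-mspace locks to be reentrant, so a real plug-in would initialize
  them with the PTHREAD_MUTEX_RECURSIVE attribute as pthread_init_lock
  does; the global mutex below uses the plain static initializer.

    #define MLOCK_T           pthread_mutex_t
    #define INITIAL_LOCK(sl)  pthread_mutex_init(sl, NULL)
    #define ACQUIRE_LOCK(sl)  pthread_mutex_lock(sl)
    #define RELEASE_LOCK(sl)  pthread_mutex_unlock(sl)
    #define TRY_LOCK(sl)      (!pthread_mutex_trylock(sl))
    static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
*/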
1987 
1988 /* -----------------------  Lock-based state ------------------------ */
1989 
1990 #if USE_LOCKS
1991 #define USE_LOCK_BIT               (2U)
1992 #else  /* USE_LOCKS */
1993 #define USE_LOCK_BIT               (0U)
1994 #define INITIAL_LOCK(l)
1995 #endif /* USE_LOCKS */
1996 
1997 #if USE_LOCKS
1998 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
1999 #define ACQUIRE_MALLOC_GLOBAL_LOCK()  ACQUIRE_LOCK(&malloc_global_mutex);
2000 #endif
2001 #ifndef RELEASE_MALLOC_GLOBAL_LOCK
2002 #define RELEASE_MALLOC_GLOBAL_LOCK()  RELEASE_LOCK(&malloc_global_mutex);
2003 #endif
2004 #else  /* USE_LOCKS */
2005 #define ACQUIRE_MALLOC_GLOBAL_LOCK()
2006 #define RELEASE_MALLOC_GLOBAL_LOCK()
2007 #endif /* USE_LOCKS */
2008 
2009 
2010 /* -----------------------  Chunk representations ------------------------ */
2011 
2012 /*
2013   (The following includes lightly edited explanations by Colin Plumb.)
2014 
2015   The malloc_chunk declaration below is misleading (but accurate and
2016   necessary).  It declares a "view" into memory allowing access to
2017   necessary fields at known offsets from a given base.
2018 
2019   Chunks of memory are maintained using a `boundary tag' method as
2020   originally described by Knuth.  (See the paper by Paul Wilson
2021   ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
2022   techniques.)  Sizes of free chunks are stored both in the front of
2023   each chunk and at the end.  This makes consolidating fragmented
2024   chunks into bigger chunks fast.  The head fields also hold bits
2025   representing whether chunks are free or in use.
2026 
2027   Here are some pictures to make it clearer.  They are "exploded" to
2028   show that the state of a chunk can be thought of as extending from
2029   the high 31 bits of the head field of its header through the
2030   prev_foot and PINUSE_BIT bit of the following chunk header.
2031 
2032   A chunk that's in use looks like:
2033 
2034    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2035            | Size of previous chunk (if P = 0)                             |
2036            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2037          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
2038          | Size of this chunk                                         1| +-+
2039    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2040          |                                                               |
2041          +-                                                             -+
2042          |                                                               |
2043          +-                                                             -+
2044          |                                                               :
2045          +-      size - sizeof(size_t) available payload bytes          -+
2046          :                                                               |
2047  chunk-> +-                                                             -+
2048          |                                                               |
2049          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2050        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
2051        | Size of next chunk (may or may not be in use)               | +-+
2052  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2053 
2054     And if it's free, it looks like this:
2055 
2056    chunk-> +-                                                             -+
2057            | User payload (must be in use, or we would have merged!)       |
2058            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2059          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
2060          | Size of this chunk                                         0| +-+
2061    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2062          | Next pointer                                                  |
2063          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2064          | Prev pointer                                                  |
2065          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2066          |                                                               :
2067          +-      size - sizeof(struct chunk) unused bytes               -+
2068          :                                                               |
2069  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2070          | Size of this chunk                                            |
2071          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2072        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
2073        | Size of next chunk (must be in use, or we would have merged)| +-+
2074  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2075        |                                                               :
2076        +- User payload                                                -+
2077        :                                                               |
2078        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2079                                                                      |0|
2080                                                                      +-+
2081   Note that since we always merge adjacent free chunks, the chunks
2082   adjacent to a free chunk must be in use.
2083 
2084   Given a pointer to a chunk (which can be derived trivially from the
2085   payload pointer) we can, in O(1) time, find out whether the adjacent
2086   chunks are free, and if so, unlink them from the lists that they
2087   are on and merge them with the current chunk.
2088 
2089   Chunks always begin on even word boundaries, so the mem portion
2090   (which is returned to the user) is also on an even word boundary, and
2091   thus at least double-word aligned.
2092 
2093   The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
2094   chunk size (which is always a multiple of two words), is an in-use
2095   bit for the *previous* chunk.  If that bit is *clear*, then the
2096   word before the current chunk size contains the previous chunk
2097   size, and can be used to find the front of the previous chunk.
2098   The very first chunk allocated always has this bit set, preventing
2099   access to non-existent (or non-owned) memory. If pinuse is set for
2100   any given chunk, then you CANNOT determine the size of the
2101   previous chunk, and might even get a memory addressing fault when
2102   trying to do so.
2103 
2104   The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
2105   the chunk size redundantly records whether the current chunk is
2106   inuse (unless the chunk is mmapped). This redundancy enables usage
2107   checks within free and realloc, and reduces indirection when freeing
2108   and consolidating chunks.
2109 
2110   Each freshly allocated chunk must have both cinuse and pinuse set.
2111   That is, each allocated chunk borders either a previously allocated
2112   and still in-use chunk, or the base of its memory arena. This is
2113     ensured by making all allocations from the `lowest' part of any
2114   found chunk.  Further, no free chunk physically borders another one,
2115   so each free chunk is known to be preceded and followed by either
2116   inuse chunks or the ends of memory.
2117 
2118   Note that the `foot' of the current chunk is actually represented
2119   as the prev_foot of the NEXT chunk. This makes it easier to
2120   deal with alignments etc but can be very confusing when trying
2121   to extend or adapt this code.
2122 
2123   The exceptions to all this are
2124 
2125      1. The special chunk `top' is the top-most available chunk (i.e.,
2126         the one bordering the end of available memory). It is treated
2127         specially.  Top is never included in any bin, is used only if
2128         no other chunk is available, and is released back to the
2129         system if it is very large (see M_TRIM_THRESHOLD).  In effect,
2130         the top chunk is treated as larger (and thus less well
2131         fitting) than any other available chunk.  The top chunk
2132         doesn't update its trailing size field since there is no next
2133         contiguous chunk that would have to index off it. However,
2134         space is still allocated for it (TOP_FOOT_SIZE) to enable
2135         separation or merging when space is extended.
2136 
2137      2. Chunks allocated via mmap have both cinuse and pinuse bits
2138         cleared in their head fields.  Because they are allocated
2139         one-by-one, each must carry its own prev_foot field, which is
2140         also used to hold the offset this chunk has within its mmapped
2141         region, which is needed to preserve alignment. Each mmapped
2142         chunk is trailed by the first two fields of a fake next-chunk
2143         for sake of usage checks.
2144 
2145 */
2146 
2147 struct malloc_chunk {
2148   size_t               prev_foot;  /* Size of previous chunk (if free).  */
2149   size_t               head;       /* Size and inuse bits. */
2150   struct malloc_chunk* fd;         /* double links -- used only if free. */
2151   struct malloc_chunk* bk;
2152 };
2153 
2154 typedef struct malloc_chunk  mchunk;
2155 typedef struct malloc_chunk* mchunkptr;
2156 typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
2157 typedef unsigned int bindex_t;         /* Described below */
2158 typedef unsigned int binmap_t;         /* Described below */
2159 typedef unsigned int flag_t;           /* The type of various bit flag sets */
2160 
2161 /* ------------------- Chunks sizes and alignments ----------------------- */
2162 
2163 #define MCHUNK_SIZE         (sizeof(mchunk))
2164 
2165 #if FOOTERS
2166 #define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
2167 #else /* FOOTERS */
2168 #define CHUNK_OVERHEAD      (SIZE_T_SIZE)
2169 #endif /* FOOTERS */
2170 
2171 /* MMapped chunks need a second word of overhead ... */
2172 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
2173 /* ... and additional padding for fake next-chunk at foot */
2174 #define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
2175 
2176 /* The smallest size we can malloc is an aligned minimal chunk */
2177 #define MIN_CHUNK_SIZE\
2178   ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
2179 
2180 /* conversion from malloc headers to user pointers, and back */
2181 #define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
2182 #define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
2183 /* chunk associated with aligned address A */
2184 #define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
2185 
2186 /* Bounds on request (not chunk) sizes. */
2187 #define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
2188 #define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
2189 
2190 /* pad request bytes into a usable size */
2191 #define pad_request(req) \
2192    (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
2193 
2194 /* pad request, checking for minimum (but not maximum) */
2195 #define request2size(req) \
2196   (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
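/*
  Worked example of the sizing macros above (illustrative only, not
  from the original sources), assuming a 32-bit build with FOOTERS == 0
  and 8-byte alignment, so SIZE_T_SIZE == 4, CHUNK_OVERHEAD == 4,
  CHUNK_ALIGN_MASK == 7 and MIN_CHUNK_SIZE == 16:

    MIN_REQUEST        == 16 - 4 - 1 == 11
    request2size(1)    == 16                 (below MIN_REQUEST)
    request2size(20)   == (20 + 4 + 7) & ~7 == 24
    request2size(24)   == (24 + 4 + 7) & ~7 == 32
*/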
2197 
2198 
2199 /* ------------------ Operations on head and foot fields ----------------- */
2200 
2201 /*
2202   The head field of a chunk is or'ed with PINUSE_BIT when previous
2203   adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
2204   use, unless mmapped, in which case both bits are cleared.
2205 
2206   FLAG4_BIT is not used by this malloc, but might be useful in extensions.
2207 */
2208 
2209 #define PINUSE_BIT          (SIZE_T_ONE)
2210 #define CINUSE_BIT          (SIZE_T_TWO)
2211 #define FLAG4_BIT           (SIZE_T_FOUR)
2212 #define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
2213 #define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
2214 
2215 /* Head value for fenceposts */
2216 #define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
2217 
2218 /* extraction of fields from head words */
2219 #define cinuse(p)           ((p)->head & CINUSE_BIT)
2220 #define pinuse(p)           ((p)->head & PINUSE_BIT)
2221 #define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
2222 #define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)
2223 
2224 #define chunksize(p)        ((p)->head & ~(FLAG_BITS))
2225 
2226 #define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
2227 
2228 /* Treat space at ptr +/- offset as a chunk */
2229 #define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
2230 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
2231 
2232 /* Ptr to next or previous physical malloc_chunk. */
2233 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
2234 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
2235 
2236 /* extract next chunk's pinuse bit */
2237 #define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
2238 
2239 /* Get/set size at footer */
2240 #define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
2241 #define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
2242 
2243 /* Set size, pinuse bit, and foot */
2244 #define set_size_and_pinuse_of_free_chunk(p, s)\
2245   ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
2246 
2247 /* Set size, pinuse bit, foot, and clear next pinuse */
2248 #define set_free_with_pinuse(p, s, n)\
2249   (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
2250 
2251 /* Get the internal overhead associated with chunk p */
2252 #define overhead_for(p)\
2253  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
2254 
2255 /* Return true if malloced space is not necessarily cleared */
2256 #if MMAP_CLEARS
2257 #define calloc_must_clear(p) (!is_mmapped(p))
2258 #else /* MMAP_CLEARS */
2259 #define calloc_must_clear(p) (1)
2260 #endif /* MMAP_CLEARS */
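/*
  Small hedged illustration of the header accessors above (not from the
  original sources).  Given a pointer handed back to the user, the
  owning chunk and its full size can be recovered:

    void*     mem = dlmalloc(100);
    mchunkptr p   = mem2chunk(mem);   // step back TWO_SIZE_T_SIZES
    size_t    sz  = chunksize(p);     // total chunk size, flag bits masked
    // For an allocated chunk, is_inuse(p) is nonzero, and roughly
    // sz - overhead_for(p) bytes of payload are usable.
*/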
2261 
2262 /* ---------------------- Overlaid data structures ----------------------- */
2263 
2264 /*
2265   When chunks are not in use, they are treated as nodes of either
2266   lists or trees.
2267 
2268   "Small"  chunks are stored in circular doubly-linked lists, and look
2269   like this:
2270 
2271     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2272             |             Size of previous chunk                            |
2273             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2274     `head:' |             Size of chunk, in bytes                         |P|
2275       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2276             |             Forward pointer to next chunk in list             |
2277             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2278             |             Back pointer to previous chunk in list            |
2279             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2280             |             Unused space (may be 0 bytes long)                .
2281             .                                                               .
2282             .                                                               |
2283 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2284     `foot:' |             Size of chunk, in bytes                           |
2285             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2286 
2287   Larger chunks are kept in a form of bitwise digital trees (aka
2288   tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
2289   free chunks greater than 256 bytes, their size doesn't impose any
2290   constraints on user chunk sizes.  Each node looks like:
2291 
2292     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2293             |             Size of previous chunk                            |
2294             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2295     `head:' |             Size of chunk, in bytes                         |P|
2296       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2297             |             Forward pointer to next chunk of same size        |
2298             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2299             |             Back pointer to previous chunk of same size       |
2300             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2301             |             Pointer to left child (child[0])                  |
2302             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2303             |             Pointer to right child (child[1])                 |
2304             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2305             |             Pointer to parent                                 |
2306             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2307             |             bin index of this chunk                           |
2308             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2309             |             Unused space                                      .
2310             .                                                               |
2311 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2312     `foot:' |             Size of chunk, in bytes                           |
2313             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2314 
2315   Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
2316   of the same size are arranged in a circularly-linked list, with only
2317   the oldest chunk (the next to be used, in our FIFO ordering)
2318   actually in the tree.  (Tree members are distinguished by a non-null
2319   parent pointer.)  If a chunk with the same size as an existing node
2320   is inserted, it is linked off the existing node using pointers that
2321   work in the same way as fd/bk pointers of small chunks.
2322 
2323   Each tree contains a power of 2 sized range of chunk sizes (the
2324   smallest is 0x100 <= x < 0x180), which is divided in half at each
2325   tree level, with the chunks in the smaller half of the range (0x100
2326   <= x < 0x140 for the top node) in the left subtree and the larger
2327   half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
2328   done by inspecting individual bits.
2329 
2330   Using these rules, each node's left subtree contains all smaller
2331   sizes than its right subtree.  However, the node at the root of each
2332   subtree has no particular ordering relationship to either.  (The
2333   dividing line between the subtree sizes is based on trie relation.)
2334   If we remove the last chunk of a given size from the interior of the
2335   tree, we need to replace it with a leaf node.  The tree ordering
2336   rules permit a node to be replaced by any leaf below it.
2337 
2338   The smallest chunk in a tree (a common operation in a best-fit
2339   allocator) can be found by walking a path to the leftmost leaf in
2340   the tree.  Unlike a usual binary tree, where we follow left child
2341   pointers until we reach a null, here we follow the right child
2342   pointer any time the left one is null, until we reach a leaf with
2343   both child pointers null. The smallest chunk in the tree will be
2344   somewhere along that path.
2345 
2346   The worst case number of steps to add, find, or remove a node is
2347   bounded by the number of bits differentiating chunks within
2348   bins. Under current bin calculations, this ranges from 6 up to 21
2349   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
2350   is of course much better.
2351 */
2352 
2353 struct malloc_tree_chunk {
2354   /* The first four fields must be compatible with malloc_chunk */
2355   size_t                    prev_foot;
2356   size_t                    head;
2357   struct malloc_tree_chunk* fd;
2358   struct malloc_tree_chunk* bk;
2359 
2360   struct malloc_tree_chunk* child[2];
2361   struct malloc_tree_chunk* parent;
2362   bindex_t                  index;
2363 };
2364 
2365 typedef struct malloc_tree_chunk  tchunk;
2366 typedef struct malloc_tree_chunk* tchunkptr;
2367 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
2368 
2369 /* A little helper macro for trees */
2370 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
2371 
2372 /* ----------------------------- Segments -------------------------------- */
2373 
2374 /*
2375   Each malloc space may include non-contiguous segments, held in a
2376   list headed by an embedded malloc_segment record representing the
2377   top-most space. Segments also include flags holding properties of
2378   the space. Large chunks that are directly allocated by mmap are not
2379   included in this list. They are instead independently created and
2380   destroyed without otherwise keeping track of them.
2381 
2382   Segment management mainly comes into play for spaces allocated by
2383   MMAP.  Any call to MMAP might or might not return memory that is
2384   adjacent to an existing segment.  MORECORE normally contiguously
2385   extends the current space, so this space is almost always adjacent,
2386   which is simpler and faster to deal with. (This is why MORECORE is
2387   used preferentially to MMAP when both are available -- see
2388   sys_alloc.)  When allocating using MMAP, we don't use any of the
2389   hinting mechanisms (inconsistently) supported in various
2390   implementations of unix mmap, or distinguish reserving from
2391   committing memory. Instead, we just ask for space, and exploit
2392   contiguity when we get it.  It is probably possible to do
2393   better than this on some systems, but no general scheme seems
2394   to be significantly better.
2395 
2396   Management entails a simpler variant of the consolidation scheme
2397   used for chunks to reduce fragmentation -- new adjacent memory is
2398   normally prepended or appended to an existing segment. However,
2399   there are limitations compared to chunk consolidation that mostly
2400   reflect the fact that segment processing is relatively infrequent
2401   (occurring only when getting memory from system) and that we
2402   don't expect to have huge numbers of segments:
2403 
2404   * Segments are not indexed, so traversal requires linear scans.  (It
2405     would be possible to index these, but is not worth the extra
2406     overhead and complexity for most programs on most platforms.)
2407   * New segments are only appended to old ones when holding top-most
2408     memory; if they cannot be prepended to others, they are held in
2409     different segments.
2410 
2411   Except for the top-most segment of an mstate, each segment record
2412   is kept at the tail of its segment. Segments are added by pushing
2413   segment records onto the list headed by &mstate.seg for the
2414   containing mstate.
2415 
2416   Segment flags control allocation/merge/deallocation policies:
2417   * If EXTERN_BIT set, then we did not allocate this segment,
2418     and so should not try to deallocate or merge with others.
2419     (This currently holds only for the initial segment passed
2420     into create_mspace_with_base.)
2421   * If USE_MMAP_BIT set, the segment may be merged with
2422     other surrounding mmapped segments and trimmed/de-allocated
2423     using munmap.
2424   * If neither bit is set, then the segment was obtained using
2425     MORECORE so can be merged with surrounding MORECORE'd segments
2426     and deallocated/trimmed using MORECORE with negative arguments.
2427 */
2428 
2429 struct malloc_segment {
2430   char*        base;             /* base address */
2431   size_t       size;             /* allocated size */
2432   struct malloc_segment* next;   /* ptr to next segment */
2433   flag_t       sflags;           /* mmap and extern flag */
2434 };
2435 
2436 #define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
2437 #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
2438 
2439 typedef struct malloc_segment  msegment;
2440 typedef struct malloc_segment* msegmentptr;
2441 
2442 /* ---------------------------- malloc_state ----------------------------- */
2443 
2444 /*
2445    A malloc_state holds all of the bookkeeping for a space.
2446    The main fields are:
2447 
2448   Top
2449     The topmost chunk of the currently active segment. Its size is
2450     cached in topsize.  The actual size of topmost space is
2451     topsize+TOP_FOOT_SIZE, which includes space reserved for adding
2452     fenceposts and segment records if necessary when getting more
2453     space from the system.  The size at which to autotrim top is
2454     cached from mparams in trim_check, except that it is disabled if
2455     an autotrim fails.
2456 
2457   Designated victim (dv)
2458     This is the preferred chunk for servicing small requests that
2459     don't have exact fits.  It is normally the chunk split off most
2460     recently to service another small request.  Its size is cached in
2461     dvsize. The link fields of this chunk are not maintained since it
2462     is not kept in a bin.
2463 
2464   SmallBins
2465     An array of bin headers for free chunks.  These bins hold chunks
2466     with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
2467     chunks of all the same size, spaced 8 bytes apart.  To simplify
2468     use in double-linked lists, each bin header acts as a malloc_chunk
2469     pointing to the real first node, if it exists (else pointing to
2470     itself).  This avoids special-casing for headers.  But to avoid
2471     waste, we allocate only the fd/bk pointers of bins, and then use
2472     repositioning tricks to treat these as the fields of a chunk.
2473 
2474   TreeBins
2475     Treebins are pointers to the roots of trees holding a range of
2476     sizes. There are 2 equally spaced treebins for each power of two
2477     from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
2478     larger.
2479 
2480   Bin maps
2481     There is one bit map for small bins ("smallmap") and one for
2482     treebins ("treemap").  Each bin sets its bit when non-empty, and
2483     clears the bit when empty.  Bit operations are then used to avoid
2484     bin-by-bin searching -- nearly all "search" is done without ever
2485     looking at bins that won't be selected.  The bit maps
2486     conservatively use 32 bits per map word, even on a 64-bit system.
2487     For a good description of some of the bit-based techniques used
2488     here, see Henry S. Warren Jr's book "Hacker's Delight" (and
2489     supplement at http://hackersdelight.org/). Many of these are
2490     intended to reduce the branchiness of paths through malloc etc, as
2491     well as to reduce the number of memory locations read or written.
2492 
2493   Segments
2494     A list of segments headed by an embedded malloc_segment record
2495     representing the initial space.
2496 
2497   Address check support
2498     The least_addr field is the least address ever obtained from
2499     MORECORE or MMAP. Attempted frees and reallocs of any address less
2500     than this are trapped (unless INSECURE is defined).
2501 
2502   Magic tag
2503     A cross-check field that should always hold the same value as mparams.magic.
2504 
2505   Flags
2506     Bits recording whether to use MMAP, locks, or contiguous MORECORE
2507 
2508   Statistics
2509     Each space keeps track of current and maximum system memory
2510     obtained via MORECORE or MMAP.
2511 
2512   Trim support
2513     Fields holding the amount of unused topmost memory that should trigger
2514     trimming, and a counter to force periodic scanning to release unused
2515     non-topmost segments.
2516 
2517   Locking
2518     If USE_LOCKS is defined, the "mutex" lock is acquired and released
2519     around every public call using this mspace.
2520 
2521   Extension support
2522     A void* pointer and a size_t field that can be used to help implement
2523     extensions to this malloc.
2524 */
2525 
2526 /* Bin types, widths and sizes */
2527 #define NSMALLBINS        (32U)
2528 #define NTREEBINS         (32U)
2529 #define SMALLBIN_SHIFT    (3U)
2530 #define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
2531 #define TREEBIN_SHIFT     (8U)
2532 #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
2533 #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
2534 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
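
/*
  Illustrative sketch (not part of the allocator): how the constants
  above divide requests, assuming the default TREEBIN_SHIFT of 8 and a
  32-bit build without FOOTERS, so that CHUNK_OVERHEAD == SIZE_T_SIZE
  == 4 and CHUNK_ALIGN_MASK == 7:

    MIN_LARGE_SIZE    == 1 << 8      == 256
    MAX_SMALL_SIZE    == 256 - 1     == 255
    MAX_SMALL_REQUEST == 255 - 7 - 4 == 244

  A user request of up to 244 bytes therefore pads to a chunk smaller
  than 256 bytes and is serviced from the smallbins; anything larger
  goes to the treebins (or to mmap once it crosses mparams.mmap_threshold).
*/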
2535 
2536 struct malloc_state {
2537   binmap_t   smallmap;
2538   binmap_t   treemap;
2539   size_t     dvsize;
2540   size_t     topsize;
2541   char*      least_addr;
2542   mchunkptr  dv;
2543   mchunkptr  top;
2544   size_t     trim_check;
2545   size_t     release_checks;
2546   size_t     magic;
2547   mchunkptr  smallbins[(NSMALLBINS+1)*2];
2548   tbinptr    treebins[NTREEBINS];
2549   size_t     footprint;
2550   size_t     max_footprint;
2551   flag_t     mflags;
2552 #if USE_LOCKS
2553   MLOCK_T    mutex;     /* locate lock among fields that rarely change */
2554 #endif /* USE_LOCKS */
2555   msegment   seg;
2556   void*      extp;      /* Unused but available for extensions */
2557   size_t     exts;
2558   char       dummy[7696];
2559 };
2560 
2561 typedef struct malloc_state*    mstate;
2562 
2563 /* ------------- Global malloc_state and malloc_params ------------------- */
2564 
2565 /*
2566   malloc_params holds global properties, including those that can be
2567   dynamically set using mallopt. There is a single instance, mparams,
2568   initialized in init_mparams. Note that the non-zeroness of "magic"
2569   also serves as an initialization flag.
2570 */
2571 
2572 struct malloc_params {
2573   volatile size_t magic;
2574   size_t page_size;
2575   size_t granularity;
2576   size_t mmap_threshold;
2577   size_t trim_threshold;
2578   flag_t default_mflags;
2579 };
2580 
2581 static struct malloc_params mparams;
2582 
2583 /* Ensure mparams initialized */
2584 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
2585 
2586 #if !ONLY_MSPACES
2587 
2588 /* The global malloc_state used for all non-"mspace" calls */
2589 static struct malloc_state _gm_;
2590 #define gm                 (&_gm_)
2591 #define is_global(M)       ((M) == &_gm_)
2592 
2593 #endif /* !ONLY_MSPACES */
2594 
2595 #define is_initialized(M)  ((M)->top != 0)
2596 
2597 /* -------------------------- system alloc setup ------------------------- */
2598 
2599 /* Operations on mflags */
2600 
2601 #define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
2602 #define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
2603 #define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
2604 
2605 #define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
2606 #define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
2607 #define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
2608 
2609 #define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
2610 #define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
2611 
2612 #define set_lock(M,L)\
2613  ((M)->mflags = (L)?\
2614   ((M)->mflags | USE_LOCK_BIT) :\
2615   ((M)->mflags & ~USE_LOCK_BIT))
2616 
2617 /* page-align a size */
2618 #define page_align(S)\
2619  (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
2620 
2621 /* granularity-align a size */
2622 #define granularity_align(S)\
2623   (((S) + (mparams.granularity - SIZE_T_ONE))\
2624    & ~(mparams.granularity - SIZE_T_ONE))
2625 
2626 
2627 /* For mmap, use granularity alignment on windows, else page-align */
2628 #ifdef WIN32
2629 #define mmap_align(S) granularity_align(S)
2630 #else
2631 #define mmap_align(S) page_align(S)
2632 #endif
2633 
2634 /* For sys_alloc, enough padding to ensure we can malloc the request on success */
2635 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
2636 
2637 #define is_page_aligned(S)\
2638    (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2639 #define is_granularity_aligned(S)\
2640    (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2641 
2642 /*  True if segment S holds address A */
2643 #define segment_holds(S, A)\
2644   ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2645 
2646 /* Return segment holding given address */
2647 static msegmentptr segment_holding(mstate m, char* addr) {
2648   msegmentptr sp = &m->seg;
2649   for (;;) {
2650     if (addr >= sp->base && addr < sp->base + sp->size)
2651       return sp;
2652     if ((sp = sp->next) == 0)
2653       return 0;
2654   }
2655 }
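
/*
  Illustrative sketch: segment_holding maps an in-heap address back to
  the segment that contains it, e.g. when validating the top chunk:

    msegmentptr sp = segment_holding(m, (char*)m->top);
    if (sp != 0) {
      assert((char*)m->top >= sp->base &&
             (char*)m->top <  sp->base + sp->size);
    }

  A return value of 0 means no known segment covers the address.
*/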
2656 
2657 /* Return true if segment contains a segment link */
2658 static int has_segment_link(mstate m, msegmentptr ss) {
2659   msegmentptr sp = &m->seg;
2660   for (;;) {
2661     if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
2662       return 1;
2663     if ((sp = sp->next) == 0)
2664       return 0;
2665   }
2666 }
2667 
2668 #ifndef MORECORE_CANNOT_TRIM
2669 #define should_trim(M,s)  ((s) > (M)->trim_check)
2670 #else  /* MORECORE_CANNOT_TRIM */
2671 #define should_trim(M,s)  (0)
2672 #endif /* MORECORE_CANNOT_TRIM */
2673 
2674 /*
2675   TOP_FOOT_SIZE is padding at the end of a segment, including space
2676   that may be needed to place segment records and fenceposts when new
2677   noncontiguous segments are added.
2678 */
2679 #define TOP_FOOT_SIZE\
2680   (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2681 
2682 
2683 /* -------------------------------  Hooks -------------------------------- */
2684 
2685 /*
2686   PREACTION should be defined to return 0 on success, and nonzero on
2687   failure. If you are not using locking, you can redefine these to do
2688   anything you like.
2689 */
2690 
2691 #if USE_LOCKS
2692 
2693 #define PREACTION(M)  ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2694 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2695 #else /* USE_LOCKS */
2696 
2697 #ifndef PREACTION
2698 #define PREACTION(M) (0)
2699 #endif  /* PREACTION */
2700 
2701 #ifndef POSTACTION
2702 #define POSTACTION(M)
2703 #endif  /* POSTACTION */
2704 
2705 #endif /* USE_LOCKS */
2706 
2707 /*
2708   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
2709   USAGE_ERROR_ACTION is triggered on detected bad frees and
2710   reallocs. The argument p is an address that might have triggered the
2711   fault. It is ignored by the two predefined actions, but might be
2712   useful in custom actions that try to help diagnose errors.
2713 */
2714 
2715 #if PROCEED_ON_ERROR
2716 
2717 /* A count of the number of corruption errors causing resets */
2718 int malloc_corruption_error_count;
2719 
2720 /* default corruption action */
2721 static void reset_on_error(mstate m);
2722 
2723 #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
2724 #define USAGE_ERROR_ACTION(m, p)
2725 
2726 #else /* PROCEED_ON_ERROR */
2727 
2728 #ifndef CORRUPTION_ERROR_ACTION
2729 #define CORRUPTION_ERROR_ACTION(m) ABORT
2730 #endif /* CORRUPTION_ERROR_ACTION */
2731 
2732 #ifndef USAGE_ERROR_ACTION
2733 #define USAGE_ERROR_ACTION(m,p) ABORT
2734 #endif /* USAGE_ERROR_ACTION */
2735 
2736 #endif /* PROCEED_ON_ERROR */
2737 
2738 /* -------------------------- Debugging setup ---------------------------- */
2739 
2740 #if ! DEBUG
2741 
2742 #define check_free_chunk(M,P)
2743 #define check_inuse_chunk(M,P)
2744 #define check_malloced_chunk(M,P,N)
2745 #define check_mmapped_chunk(M,P)
2746 #define check_malloc_state(M)
2747 #define check_top_chunk(M,P)
2748 
2749 #else /* DEBUG */
2750 #define check_free_chunk(M,P)       do_check_free_chunk(M,P)
2751 #define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
2752 #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
2753 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2754 #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
2755 #define check_malloc_state(M)       do_check_malloc_state(M)
2756 
2757 static void   do_check_any_chunk(mstate m, mchunkptr p);
2758 static void   do_check_top_chunk(mstate m, mchunkptr p);
2759 static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
2760 static void   do_check_inuse_chunk(mstate m, mchunkptr p);
2761 static void   do_check_free_chunk(mstate m, mchunkptr p);
2762 static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
2763 static void   do_check_tree(mstate m, tchunkptr t);
2764 static void   do_check_treebin(mstate m, bindex_t i);
2765 static void   do_check_smallbin(mstate m, bindex_t i);
2766 static void   do_check_malloc_state(mstate m);
2767 static int    bin_find(mstate m, mchunkptr x);
2768 static size_t traverse_and_check(mstate m);
2769 #endif /* DEBUG */
2770 
2771 /* ---------------------------- Indexing Bins ---------------------------- */
2772 
2773 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2774 #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
2775 #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
2776 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
2777 
2778 /* addressing by index. See above about smallbin repositioning */
2779 #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2780 #define treebin_at(M,i)     (&((M)->treebins[i]))
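
/*
  Illustrative sketch: with SMALLBIN_SHIFT == 3 the small bins are
  spaced 8 bytes apart, so index and size convert by shifting:

    small_index(40)     == 40 >> 3 == 5
    small_index2size(5) == 5 << 3  == 40
    is_small(40)        != 0          (5 < NSMALLBINS)
    is_small(512)       == 0          (64 is not < NSMALLBINS)

  smallbin_at(M, 5) then yields the header of the bin holding free
  40-byte chunks, using the fd/bk repositioning trick described above.
*/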
2781 
2782 /* assign tree index for size S to variable I. Use x86 asm if possible  */
2783 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2784 #define compute_tree_index(S, I)\
2785 {\
2786   unsigned int X = S >> TREEBIN_SHIFT;\
2787   if (X == 0)\
2788     I = 0;\
2789   else if (X > 0xFFFF)\
2790     I = NTREEBINS-1;\
2791   else {\
2792     unsigned int K;\
2793     __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g"  (X));\
2794     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2795   }\
2796 }
2797 
2798 #elif defined (__INTEL_COMPILER)
2799 #define compute_tree_index(S, I)\
2800 {\
2801   size_t X = S >> TREEBIN_SHIFT;\
2802   if (X == 0)\
2803     I = 0;\
2804   else if (X > 0xFFFF)\
2805     I = NTREEBINS-1;\
2806   else {\
2807     unsigned int K = _bit_scan_reverse (X); \
2808     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2809   }\
2810 }
2811 
2812 #elif defined(_MSC_VER) && _MSC_VER>=1300
2813 #define compute_tree_index(S, I)\
2814 {\
2815   size_t X = S >> TREEBIN_SHIFT;\
2816   if (X == 0)\
2817     I = 0;\
2818   else if (X > 0xFFFF)\
2819     I = NTREEBINS-1;\
2820   else {\
2821     unsigned int K;\
2822     _BitScanReverse((DWORD *) &K, X);\
2823     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2824   }\
2825 }
2826 
2827 #else /* GNUC */
2828 #define compute_tree_index(S, I)\
2829 {\
2830   size_t X = S >> TREEBIN_SHIFT;\
2831   if (X == 0)\
2832     I = 0;\
2833   else if (X > 0xFFFF)\
2834     I = NTREEBINS-1;\
2835   else {\
2836     unsigned int Y = (unsigned int)X;\
2837     unsigned int N = ((Y - 0x100) >> 16) & 8;\
2838     unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2839     N += K;\
2840     N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2841     K = 14 - N + ((Y <<= K) >> 15);\
2842     I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2843   }\
2844 }
2845 #endif /* GNUC */
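
/*
  Illustrative worked example: every compute_tree_index variant above
  produces the same mapping. For a chunk size S of 768 with the default
  TREEBIN_SHIFT of 8:

    X = 768 >> 8 = 3            highest set bit of X is bit K = 1
    I = (K << 1) + ((768 >> (K + TREEBIN_SHIFT - 1)) & 1)
      = 2 + ((768 >> 8) & 1)
      = 2 + 1
      = 3

  so 768-byte chunks are indexed into treebin 3.
*/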
2846 
2847 /* Bit representing maximum resolved size in a treebin at i */
2848 #define bit_for_tree_index(i) \
2849    (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2850 
2851 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
2852 #define leftshift_for_tree_index(i) \
2853    ((i == NTREEBINS-1)? 0 : \
2854     ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2855 
2856 /* The size of the smallest chunk held in bin with index i */
2857 #define minsize_for_tree_index(i) \
2858    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
2859    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
2860 
2861 
2862 /* ------------------------ Operations on bin maps ----------------------- */
2863 
2864 /* bit corresponding to given index */
2865 #define idx2bit(i)              ((binmap_t)(1) << (i))
2866 
2867 /* Mark/Clear bits with given index */
2868 #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
2869 #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
2870 #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
2871 
2872 #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
2873 #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
2874 #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
2875 
2876 /* isolate the least set bit of a bitmap */
2877 #define least_bit(x)         ((x) & -(x))
2878 
2879 /* mask with all bits to left of least bit of x on */
2880 #define left_bits(x)         ((x<<1) | -(x<<1))
2881 
2882 /* mask with all bits to left of or equal to least bit of x on */
2883 #define same_or_left_bits(x) ((x) | -(x))
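
/*
  Illustrative sketch: for a smallmap in which only bins 2 and 4 are
  non-empty (x == 0x14):

    least_bit(0x14)         == 0x04     lowest marked bin
    left_bits(0x04)         == ~0x07    all bins strictly above bin 2
    same_or_left_bits(0x04) == ~0x03    bin 2 and everything above it

  Masking the bin map with left_bits(idx2bit(i)) is how the allocator
  can locate the nearest non-empty bin above index i without scanning
  bin by bin.
*/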
2884 
2885 /* index corresponding to given bit. Use x86 asm if possible */
2886 
2887 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2888 #define compute_bit2idx(X, I)\
2889 {\
2890   unsigned int J;\
2891   __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
2892   I = (bindex_t)J;\
2893 }
2894 
2895 #elif defined (__INTEL_COMPILER)
2896 #define compute_bit2idx(X, I)\
2897 {\
2898   unsigned int J;\
2899   J = _bit_scan_forward (X); \
2900   I = (bindex_t)J;\
2901 }
2902 
2903 #elif defined(_MSC_VER) && _MSC_VER>=1300
2904 #define compute_bit2idx(X, I)\
2905 {\
2906   unsigned int J;\
2907   _BitScanForward((DWORD *) &J, X);\
2908   I = (bindex_t)J;\
2909 }
2910 
2911 #elif USE_BUILTIN_FFS
2912 #define compute_bit2idx(X, I) I = ffs(X)-1
2913 
2914 #else
2915 #define compute_bit2idx(X, I)\
2916 {\
2917   unsigned int Y = X - 1;\
2918   unsigned int K = Y >> (16-4) & 16;\
2919   unsigned int N = K;        Y >>= K;\
2920   N += K = Y >> (8-3) &  8;  Y >>= K;\
2921   N += K = Y >> (4-2) &  4;  Y >>= K;\
2922   N += K = Y >> (2-1) &  2;  Y >>= K;\
2923   N += K = Y >> (1-0) &  1;  Y >>= K;\
2924   I = (bindex_t)(N + Y);\
2925 }
2926 #endif /* GNUC */
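
/*
  Illustrative sketch: compute_bit2idx is applied to a one-bit value,
  typically the result of least_bit, and recovers the bit position.
  For X == 0x40, every variant above (asm, intrinsic, ffs, or the
  shift cascade) yields I == 6, i.e. idx2bit(6) == 0x40.
*/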
2927 
2928 
2929 /* ----------------------- Runtime Check Support ------------------------- */
2930 
2931 /*
2932   For security, the main invariant is that malloc/free/etc never
2933   writes to a static address other than malloc_state, unless static
2934   malloc_state itself has been corrupted, which cannot occur via
2935   malloc (because of these checks). In essence this means that we
2936   believe all pointers, sizes, maps etc held in malloc_state, but
2937   check all of those linked or offsetted from other embedded data
2938   structures.  These checks are interspersed with main code in a way
2939   that tends to minimize their run-time cost.
2940 
2941   When FOOTERS is defined, in addition to range checking, we also
2942   verify footer fields of inuse chunks, which can be used to guarantee
2943   that the mstate controlling malloc/free is intact.  This is a
2944   streamlined version of the approach described by William Robertson
2945   et al in "Run-time Detection of Heap-based Overflows" LISA'03
2946   http://www.usenix.org/events/lisa03/tech/robertson.html The footer
2947   of an inuse chunk holds the xor of its mstate and a random seed,
2948   that is checked upon calls to free() and realloc().  This is
2949   (probabilistically) unguessable from outside the program, but can be
2950   computed by any code successfully malloc'ing any chunk, so does not
2951   itself provide protection against code that has already broken
2952   security through some other means.  Unlike Robertson et al, we
2953   always dynamically check addresses of all offset chunks (previous,
2954   next, etc). This turns out to be cheaper than relying on hashes.
2955 */
2956 
2957 #if !INSECURE
2958 /* Check if address a is at least as high as any from MORECORE or MMAP */
2959 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
2960 /* Check if address of next chunk n is higher than base chunk p */
2961 #define ok_next(p, n)    ((char*)(p) < (char*)(n))
2962 /* Check if p has inuse status */
2963 #define ok_inuse(p)     is_inuse(p)
2964 /* Check if p has its pinuse bit on */
2965 #define ok_pinuse(p)     pinuse(p)
2966 
2967 #else /* !INSECURE */
2968 #define ok_address(M, a) (1)
2969 #define ok_next(b, n)    (1)
2970 #define ok_inuse(p)      (1)
2971 #define ok_pinuse(p)     (1)
2972 #endif /* !INSECURE */
2973 
2974 #if (FOOTERS && !INSECURE)
2975 /* Check if (alleged) mstate m has expected magic field */
2976 #define ok_magic(M)      ((M)->magic == mparams.magic)
2977 #else  /* (FOOTERS && !INSECURE) */
2978 #define ok_magic(M)      (1)
2979 #endif /* (FOOTERS && !INSECURE) */
2980 
2981 
2982 /* In gcc, use __builtin_expect to minimize impact of checks */
2983 #if !INSECURE
2984 #if defined(__GNUC__) && __GNUC__ >= 3
2985 #define RTCHECK(e)  __builtin_expect(e, 1)
2986 #else /* GNUC */
2987 #define RTCHECK(e)  (e)
2988 #endif /* GNUC */
2989 #else /* !INSECURE */
2990 #define RTCHECK(e)  (1)
2991 #endif /* !INSECURE */
2992 
2993 /* macros to set up inuse chunks with or without footers */
2994 
2995 #if !FOOTERS
2996 
2997 #define mark_inuse_foot(M,p,s)
2998 
2999 /* Macros for setting head/foot of non-mmapped chunks */
3000 
3001 /* Set cinuse bit and pinuse bit of next chunk */
3002 #define set_inuse(M,p,s)\
3003   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
3004   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
3005 
3006 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
3007 #define set_inuse_and_pinuse(M,p,s)\
3008   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3009   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
3010 
3011 /* Set size, cinuse and pinuse bit of this chunk */
3012 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
3013   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
3014 
3015 #else /* FOOTERS */
3016 
3017 /* Set foot of inuse chunk to be xor of mstate and seed */
3018 #define mark_inuse_foot(M,p,s)\
3019   (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
3020 
3021 #define get_mstate_for(p)\
3022   ((mstate)(((mchunkptr)((char*)(p) +\
3023     (chunksize(p))))->prev_foot ^ mparams.magic))
3024 
3025 #define set_inuse(M,p,s)\
3026   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
3027   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
3028   mark_inuse_foot(M,p,s))
3029 
3030 #define set_inuse_and_pinuse(M,p,s)\
3031   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3032   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
3033  mark_inuse_foot(M,p,s))
3034 
3035 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
3036   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3037   mark_inuse_foot(M, p, s))
3038 
3039 #endif /* !FOOTERS */
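
/*
  Illustrative sketch, assuming FOOTERS is enabled: mark_inuse_foot
  stores (mstate ^ mparams.magic) just past the chunk, and
  get_mstate_for undoes the xor, recovering the owning mstate when the
  footer is intact.  The public free/realloc paths can then validate it:

    mchunkptr p  = mem2chunk(mem);
    mstate    fm = get_mstate_for(p);
    if (!ok_magic(fm))
      USAGE_ERROR_ACTION(fm, p);

  This is the footer check described in the "Runtime Check Support"
  notes above.
*/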
3040 
3041 /* ---------------------------- setting mparams -------------------------- */
3042 
3043 /* Initialize mparams */
3044 static int init_mparams(void) {
3045    int check = 0;
3046 
3047 #ifdef NEED_GLOBAL_LOCK_INIT
3048   if (malloc_global_mutex_status <= 0)
3049     init_malloc_global_mutex();
3050 #endif
3051   check = ACQUIRE_MALLOC_GLOBAL_LOCK();
3052 
3053   if(check)
3054     printf("ACQUIRE_MALLOC_GLOBAL_LOCK fail\n");
3055 
3056   if (mparams.magic == 0) {
3057     size_t magic;
3058     size_t psize;
3059     size_t gsize;
3060 
3061 #ifndef WIN32
3062     psize = malloc_getpagesize;
3063     gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
3064 #else /* WIN32 */
3065     {
3066       SYSTEM_INFO system_info;
3067       GetSystemInfo(&system_info);
3068       psize = system_info.dwPageSize;
3069       gsize = ((DEFAULT_GRANULARITY != 0)?
3070                DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
3071     }
3072 #endif /* WIN32 */
3073 
3074     /* Sanity-check configuration:
3075        size_t must be unsigned and as wide as pointer type.
3076        ints must be at least 4 bytes.
3077        alignment must be at least 8.
3078        Alignment, min chunk size, and page size must all be powers of 2.
3079     */
3080     if ((sizeof(size_t) != sizeof(char*)) ||
3081         (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
3082         (sizeof(int) < 4)  ||
3083         (MALLOC_ALIGNMENT < (size_t)8U) ||
3084         ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
3085         ((MCHUNK_SIZE      & (MCHUNK_SIZE-SIZE_T_ONE))      != 0) ||
3086         ((gsize            & (gsize-SIZE_T_ONE))            != 0) ||
3087         ((psize            & (psize-SIZE_T_ONE))            != 0))
3088       ABORT;
3089 
3090     mparams.granularity = gsize;
3091     mparams.page_size = psize;
3092     mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
3093     mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
3094 #if MORECORE_CONTIGUOUS
3095     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
3096 #else  /* MORECORE_CONTIGUOUS */
3097     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
3098 #endif /* MORECORE_CONTIGUOUS */
3099 
3100 #if !ONLY_MSPACES
3101     /* Set up lock for main malloc area */
3102     gm->mflags = mparams.default_mflags;
3103     INITIAL_LOCK(&gm->mutex);
3104 #endif
3105 
3106     {
3107 #if USE_DEV_RANDOM
3108       int fd;
3109       unsigned char buf[sizeof(size_t)];
3110       /* Try to use /dev/urandom, else fall back on using time */
3111       if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
3112           read(fd, buf, sizeof(buf)) == sizeof(buf)) {
3113         magic = *((size_t *) buf);
3114         close(fd);
3115       }
3116       else
3117 #endif /* USE_DEV_RANDOM */
3118 #ifdef WIN32
3119         magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
3120 #else
3121         magic = (size_t)(time(0) ^ (size_t)0x55555555U);
3122 #endif
3123       magic |= (size_t)8U;    /* ensure nonzero */
3124       magic &= ~(size_t)7U;   /* improve chances of fault for bad values */
3125       mparams.magic = magic;
3126     }
3127   }
3128 
3129   RELEASE_MALLOC_GLOBAL_LOCK();
3130   return 1;
3131 }
3132 
3133 /* support for mallopt */
3134 static int change_mparam(int param_number, int value) {
3135   size_t val;
3136   ensure_initialization();
3137   val = (value == -1)? MAX_SIZE_T : (size_t)value;
3138   switch(param_number) {
3139   case M_TRIM_THRESHOLD:
3140     mparams.trim_threshold = val;
3141     return 1;
3142   case M_GRANULARITY:
3143     if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
3144       mparams.granularity = val;
3145       return 1;
3146     }
3147     else
3148       return 0;
3149   case M_MMAP_THRESHOLD:
3150     mparams.mmap_threshold = val;
3151     return 1;
3152   default:
3153     return 0;
3154   }
3155 }
3156 
3157 #if DEBUG
3158 /* ------------------------- Debugging Support --------------------------- */
3159 
3160 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
3161 static void do_check_any_chunk(mstate m, mchunkptr p) {
3162   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
3163   assert(ok_address(m, p));
3164 }
3165 
3166 /* Check properties of top chunk */
3167 static void do_check_top_chunk(mstate m, mchunkptr p) {
3168   msegmentptr sp = segment_holding(m, (char*)p);
3169   size_t  sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
3170   assert(sp != 0);
3171   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
3172   assert(ok_address(m, p));
3173   assert(sz == m->topsize);
3174   assert(sz > 0);
3175   assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
3176   assert(pinuse(p));
3177   assert(!pinuse(chunk_plus_offset(p, sz)));
3178 }
3179 
3180 /* Check properties of (inuse) mmapped chunks */
3181 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
3182   size_t  sz = chunksize(p);
3183   size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
3184   assert(is_mmapped(p));
3185   assert(use_mmap(m));
3186   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
3187   assert(ok_address(m, p));
3188   assert(!is_small(sz));
3189   assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
3190   assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
3191   assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
3192 }
3193 
3194 /* Check properties of inuse chunks */
3195 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
3196   do_check_any_chunk(m, p);
3197   assert(is_inuse(p));
3198   assert(next_pinuse(p));
3199   /* If not pinuse and not mmapped, previous chunk has OK offset */
3200   assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
3201   if (is_mmapped(p))
3202     do_check_mmapped_chunk(m, p);
3203 }
3204 
3205 /* Check properties of free chunks */
3206 static void do_check_free_chunk(mstate m, mchunkptr p) {
3207   size_t sz = chunksize(p);
3208   mchunkptr next = chunk_plus_offset(p, sz);
3209   do_check_any_chunk(m, p);
3210   assert(!is_inuse(p));
3211   assert(!next_pinuse(p));
3212   assert (!is_mmapped(p));
3213   if (p != m->dv && p != m->top) {
3214     if (sz >= MIN_CHUNK_SIZE) {
3215       assert((sz & CHUNK_ALIGN_MASK) == 0);
3216       assert(is_aligned(chunk2mem(p)));
3217       assert(next->prev_foot == sz);
3218       assert(pinuse(p));
3219       assert (next == m->top || is_inuse(next));
3220       assert(p->fd->bk == p);
3221       assert(p->bk->fd == p);
3222     }
3223     else  /* markers are always of size SIZE_T_SIZE */
3224       assert(sz == SIZE_T_SIZE);
3225   }
3226 }
3227 
3228 /* Check properties of malloced chunks at the point they are malloced */
3229 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
3230   if (mem != 0) {
3231     mchunkptr p = mem2chunk(mem);
3232     size_t sz = p->head & ~INUSE_BITS;
3233     do_check_inuse_chunk(m, p);
3234     assert((sz & CHUNK_ALIGN_MASK) == 0);
3235     assert(sz >= MIN_CHUNK_SIZE);
3236     assert(sz >= s);
3237     /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
3238     assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
3239   }
3240 }
3241 
3242 /* Check a tree and its subtrees.  */
3243 static void do_check_tree(mstate m, tchunkptr t) {
3244   tchunkptr head = 0;
3245   tchunkptr u = t;
3246   bindex_t tindex = t->index;
3247   size_t tsize = chunksize(t);
3248   bindex_t idx;
3249   compute_tree_index(tsize, idx);
3250   assert(tindex == idx);
3251   assert(tsize >= MIN_LARGE_SIZE);
3252   assert(tsize >= minsize_for_tree_index(idx));
3253   assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
3254 
3255   do { /* traverse through chain of same-sized nodes */
3256     do_check_any_chunk(m, ((mchunkptr)u));
3257     assert(u->index == tindex);
3258     assert(chunksize(u) == tsize);
3259     assert(!is_inuse(u));
3260     assert(!next_pinuse(u));
3261     assert(u->fd->bk == u);
3262     assert(u->bk->fd == u);
3263     if (u->parent == 0) {
3264       assert(u->child[0] == 0);
3265       assert(u->child[1] == 0);
3266     }
3267     else {
3268       assert(head == 0); /* only one node on chain has parent */
3269       head = u;
3270       assert(u->parent != u);
3271       assert (u->parent->child[0] == u ||
3272               u->parent->child[1] == u ||
3273               *((tbinptr*)(u->parent)) == u);
3274       if (u->child[0] != 0) {
3275         assert(u->child[0]->parent == u);
3276         assert(u->child[0] != u);
3277         do_check_tree(m, u->child[0]);
3278       }
3279       if (u->child[1] != 0) {
3280         assert(u->child[1]->parent == u);
3281         assert(u->child[1] != u);
3282         do_check_tree(m, u->child[1]);
3283       }
3284       if (u->child[0] != 0 && u->child[1] != 0) {
3285         assert(chunksize(u->child[0]) < chunksize(u->child[1]));
3286       }
3287     }
3288     u = u->fd;
3289   } while (u != t);
3290   assert(head != 0);
3291 }
3292 
3293 /*  Check all the chunks in a treebin.  */
3294 static void do_check_treebin(mstate m, bindex_t i) {
3295   tbinptr* tb = treebin_at(m, i);
3296   tchunkptr t = *tb;
3297   int empty = (m->treemap & (1U << i)) == 0;
3298   if (t == 0)
3299     assert(empty);
3300   if (!empty)
3301     do_check_tree(m, t);
3302 }
3303 
3304 /*  Check all the chunks in a smallbin.  */
3305 static void do_check_smallbin(mstate m, bindex_t i) {
3306   sbinptr b = smallbin_at(m, i);
3307   mchunkptr p = b->bk;
3308   unsigned int empty = (m->smallmap & (1U << i)) == 0;
3309   if (p == b)
3310     assert(empty);
3311   if (!empty) {
3312     for (; p != b; p = p->bk) {
3313       size_t size = chunksize(p);
3314       mchunkptr q;
3315       /* each chunk claims to be free */
3316       do_check_free_chunk(m, p);
3317       /* chunk belongs in bin */
3318       assert(small_index(size) == i);
3319       assert(p->bk == b || chunksize(p->bk) == chunksize(p));
3320       /* chunk is followed by an inuse chunk */
3321       q = next_chunk(p);
3322       if (q->head != FENCEPOST_HEAD)
3323         do_check_inuse_chunk(m, q);
3324     }
3325   }
3326 }
3327 
3328 /* Find x in a bin. Used in other check functions. */
3329 static int bin_find(mstate m, mchunkptr x) {
3330   size_t size = chunksize(x);
3331   if (is_small(size)) {
3332     bindex_t sidx = small_index(size);
3333     sbinptr b = smallbin_at(m, sidx);
3334     if (smallmap_is_marked(m, sidx)) {
3335       mchunkptr p = b;
3336       do {
3337         if (p == x)
3338           return 1;
3339       } while ((p = p->fd) != b);
3340     }
3341   }
3342   else {
3343     bindex_t tidx;
3344     compute_tree_index(size, tidx);
3345     if (treemap_is_marked(m, tidx)) {
3346       tchunkptr t = *treebin_at(m, tidx);
3347       size_t sizebits = size << leftshift_for_tree_index(tidx);
3348       while (t != 0 && chunksize(t) != size) {
3349         t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3350         sizebits <<= 1;
3351       }
3352       if (t != 0) {
3353         tchunkptr u = t;
3354         do {
3355           if (u == (tchunkptr)x)
3356             return 1;
3357         } while ((u = u->fd) != t);
3358       }
3359     }
3360   }
3361   return 0;
3362 }
3363 
3364 /* Traverse each chunk and check it; return total */
3365 static size_t traverse_and_check(mstate m) {
3366   size_t sum = 0;
3367   if (is_initialized(m)) {
3368     msegmentptr s = &m->seg;
3369     sum += m->topsize + TOP_FOOT_SIZE;
3370     while (s != 0) {
3371       mchunkptr q = align_as_chunk(s->base);
3372       mchunkptr lastq = 0;
3373       assert(pinuse(q));
3374       while (segment_holds(s, q) &&
3375              q != m->top && q->head != FENCEPOST_HEAD) {
3376         sum += chunksize(q);
3377         if (is_inuse(q)) {
3378           assert(!bin_find(m, q));
3379           do_check_inuse_chunk(m, q);
3380         }
3381         else {
3382           assert(q == m->dv || bin_find(m, q));
3383           assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
3384           do_check_free_chunk(m, q);
3385         }
3386         lastq = q;
3387         q = next_chunk(q);
3388       }
3389       s = s->next;
3390     }
3391   }
3392   return sum;
3393 }
3394 
3395 /* Check all properties of malloc_state. */
3396 static void do_check_malloc_state(mstate m) {
3397   bindex_t i;
3398   size_t total;
3399   /* check bins */
3400   for (i = 0; i < NSMALLBINS; ++i)
3401     do_check_smallbin(m, i);
3402   for (i = 0; i < NTREEBINS; ++i)
3403     do_check_treebin(m, i);
3404 
3405   if (m->dvsize != 0) { /* check dv chunk */
3406     do_check_any_chunk(m, m->dv);
3407     assert(m->dvsize == chunksize(m->dv));
3408     assert(m->dvsize >= MIN_CHUNK_SIZE);
3409     assert(bin_find(m, m->dv) == 0);
3410   }
3411 
3412   if (m->top != 0) {   /* check top chunk */
3413     do_check_top_chunk(m, m->top);
3414     /*assert(m->topsize == chunksize(m->top)); redundant */
3415     assert(m->topsize > 0);
3416     assert(bin_find(m, m->top) == 0);
3417   }
3418 
3419   total = traverse_and_check(m);
3420   assert(total <= m->footprint);
3421   assert(m->footprint <= m->max_footprint);
3422 }
3423 #endif /* DEBUG */
3424 
3425 /* ----------------------------- statistics ------------------------------ */
3426 
3427 #if !NO_MALLINFO
3428 static struct mallinfo internal_mallinfo(mstate m) {
3429   struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
3430   ensure_initialization();
3431   if (!PREACTION(m)) {
3432     check_malloc_state(m);
3433     if (is_initialized(m)) {
3434       size_t nfree = SIZE_T_ONE; /* top always free */
3435       size_t mfree = m->topsize + TOP_FOOT_SIZE;
3436       size_t sum = mfree;
3437       msegmentptr s = &m->seg;
3438       while (s != 0) {
3439         mchunkptr q = align_as_chunk(s->base);
3440         while (segment_holds(s, q) &&
3441                q != m->top && q->head != FENCEPOST_HEAD) {
3442           size_t sz = chunksize(q);
3443           sum += sz;
3444           if (!is_inuse(q)) {
3445             mfree += sz;
3446             ++nfree;
3447           }
3448           q = next_chunk(q);
3449         }
3450         s = s->next;
3451       }
3452 
3453       nm.arena    = sum;
3454       nm.ordblks  = nfree;
3455       nm.hblkhd   = m->footprint - sum;
3456       nm.usmblks  = m->max_footprint;
3457       nm.uordblks = m->footprint - mfree;
3458       nm.fordblks = mfree;
3459       nm.keepcost = m->topsize;
3460     }
3461 
3462     POSTACTION(m);
3463   }
3464   return nm;
3465 }
3466 #endif /* !NO_MALLINFO */
3467 
3468 static void internal_malloc_stats(mstate m) {
3469   ensure_initialization();
3470   if (!PREACTION(m)) {
3471     size_t maxfp = 0;
3472     size_t fp = 0;
3473     size_t used = 0;
3474     check_malloc_state(m);
3475     if (is_initialized(m)) {
3476       msegmentptr s = &m->seg;
3477       maxfp = m->max_footprint;
3478       fp = m->footprint;
3479       used = fp - (m->topsize + TOP_FOOT_SIZE);
3480 
3481       while (s != 0) {
3482         mchunkptr q = align_as_chunk(s->base);
3483         while (segment_holds(s, q) &&
3484                q != m->top && q->head != FENCEPOST_HEAD) {
3485           if (!is_inuse(q))
3486             used -= chunksize(q);
3487           q = next_chunk(q);
3488         }
3489         s = s->next;
3490       }
3491     }
3492 
3493     fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
3494     fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
3495     fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
3496 
3497     POSTACTION(m);
3498   }
3499 }
3500 
3501 /* ----------------------- Operations on smallbins ----------------------- */
3502 
3503 /*
3504   Various forms of linking and unlinking are defined as macros.  Even
3505   the ones for trees, which are very long but have very short typical
3506   paths.  This is ugly but reduces reliance on inlining support of
3507   compilers.
3508 */
3509 
3510 /* Link a free chunk into a smallbin  */
3511 #define insert_small_chunk(M, P, S) {\
3512   bindex_t I  = small_index(S);\
3513   mchunkptr B = smallbin_at(M, I);\
3514   mchunkptr F = B;\
3515   assert(S >= MIN_CHUNK_SIZE);\
3516   if (!smallmap_is_marked(M, I))\
3517     mark_smallmap(M, I);\
3518   else if (RTCHECK(ok_address(M, B->fd)))\
3519     F = B->fd;\
3520   else {\
3521     CORRUPTION_ERROR_ACTION(M);\
3522   }\
3523   B->fd = P;\
3524   F->bk = P;\
3525   P->fd = F;\
3526   P->bk = B;\
3527 }
3528 
3529 /* Unlink a chunk from a smallbin  */
3530 #define unlink_small_chunk(M, P, S) {\
3531   mchunkptr F = P->fd;\
3532   mchunkptr B = P->bk;\
3533   bindex_t I = small_index(S);\
3534   assert(P != B);\
3535   assert(P != F);\
3536   assert(chunksize(P) == small_index2size(I));\
3537   if (F == B)\
3538     clear_smallmap(M, I);\
3539   else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
3540                    (B == smallbin_at(M,I) || ok_address(M, B)))) {\
3541     F->bk = B;\
3542     B->fd = F;\
3543   }\
3544   else {\
3545     CORRUPTION_ERROR_ACTION(M);\
3546   }\
3547 }
3548 
3549 /* Unlink the first chunk from a smallbin */
3550 #define unlink_first_small_chunk(M, B, P, I) {\
3551   mchunkptr F = P->fd;\
3552   assert(P != B);\
3553   assert(P != F);\
3554   assert(chunksize(P) == small_index2size(I));\
3555   if (B == F)\
3556     clear_smallmap(M, I);\
3557   else if (RTCHECK(ok_address(M, F))) {\
3558     B->fd = F;\
3559     F->bk = B;\
3560   }\
3561   else {\
3562     CORRUPTION_ERROR_ACTION(M);\
3563   }\
3564 }
3565 
3566 
3567 
3568 /* Replace dv node, binning the old one */
3569 /* Used only when dvsize known to be small */
3570 #define replace_dv(M, P, S) {\
3571   size_t DVS = M->dvsize;\
3572   if (DVS != 0) {\
3573     mchunkptr DV = M->dv;\
3574     assert(is_small(DVS));\
3575     insert_small_chunk(M, DV, DVS);\
3576   }\
3577   M->dvsize = S;\
3578   M->dv = P;\
3579 }
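
/*
  Illustrative sketch: after insert_small_chunk(M, P, S), the bin for
  small_index(S) is a circular doubly-linked list in which the bin
  header B acts as a sentinel chunk:

    B->fd == P                P was pushed at the front
    P->bk == B
    P->fd->bk == P            invariants checked by do_check_free_chunk
    P->bk->fd == P

  replace_dv re-bins the old designated victim this way before P takes
  over as the new dv; the assert(is_small(DVS)) reflects that it is
  only used when the old dv is known to be small.
*/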
3580 
3581 /* ------------------------- Operations on trees ------------------------- */
3582 
3583 /* Insert chunk into tree */
3584 #define insert_large_chunk(M, X, S) {\
3585   tbinptr* H;\
3586   bindex_t I;\
3587   compute_tree_index(S, I);\
3588   H = treebin_at(M, I);\
3589   X->index = I;\
3590   X->child[0] = X->child[1] = 0;\
3591   if (!treemap_is_marked(M, I)) {\
3592     mark_treemap(M, I);\
3593     *H = X;\
3594     X->parent = (tchunkptr)H;\
3595     X->fd = X->bk = X;\
3596   }\
3597   else {\
3598     tchunkptr T = *H;\
3599     size_t K = S << leftshift_for_tree_index(I);\
3600     for (;;) {\
3601       if (chunksize(T) != S) {\
3602         tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3603         K <<= 1;\
3604         if (*C != 0)\
3605           T = *C;\
3606         else if (RTCHECK(ok_address(M, C))) {\
3607           *C = X;\
3608           X->parent = T;\
3609           X->fd = X->bk = X;\
3610           break;\
3611         }\
3612         else {\
3613           CORRUPTION_ERROR_ACTION(M);\
3614           break;\
3615         }\
3616       }\
3617       else {\
3618         tchunkptr F = T->fd;\
3619         if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3620           T->fd = F->bk = X;\
3621           X->fd = F;\
3622           X->bk = T;\
3623           X->parent = 0;\
3624           break;\
3625         }\
3626         else {\
3627           CORRUPTION_ERROR_ACTION(M);\
3628           break;\
3629         }\
3630       }\
3631     }\
3632   }\
3633 }
3634 
3635 /*
3636   Unlink steps:
3637 
3638   1. If x is a chained node, unlink it from its same-sized fd/bk links
3639      and choose its bk node as its replacement.
3640   2. If x was the last node of its size, but not a leaf node, it must
3641      be replaced with a leaf node (not merely one with an open left or
3642      right), to make sure that lefts and rights of descendents
3643      correspond properly to bit masks.  We use the rightmost descendent
3644      of x.  We could use any other leaf, but this is easy to locate and
3645      tends to counteract removal of leftmosts elsewhere, and so keeps
3646      paths shorter than minimally guaranteed.  This doesn't loop much
3647      because on average a node in a tree is near the bottom.
3648   3. If x is the base of a chain (i.e., has parent links) relink
3649      x's parent and children to x's replacement (or null if none).
3650 */
3651 
3652 #define unlink_large_chunk(M, X) {\
3653   tchunkptr XP = X->parent;\
3654   tchunkptr R;\
3655   if (X->bk != X) {\
3656     tchunkptr F = X->fd;\
3657     R = X->bk;\
3658     if (RTCHECK(ok_address(M, F))) {\
3659       F->bk = R;\
3660       R->fd = F;\
3661     }\
3662     else {\
3663       printf("err unlink_large_chunk %d %p bk %p fd %p prev_foot %td head %td child %p %p parent %p\n", __LINE__, X, X->bk, X->fd, X->prev_foot, X->head, X->child[0], X->child[1], X->parent);\
3664       CORRUPTION_ERROR_ACTION(M);\
3665     }\
3666   }\
3667   else {\
3668     tchunkptr* RP;\
3669     if (((R = *(RP = &(X->child[1]))) != 0) ||\
3670         ((R = *(RP = &(X->child[0]))) != 0)) {\
3671       tchunkptr* CP;\
3672       while ((*(CP = &(R->child[1])) != 0) ||\
3673              (*(CP = &(R->child[0])) != 0)) {\
3674         R = *(RP = CP);\
3675       }\
3676       if (RTCHECK(ok_address(M, RP)))\
3677         *RP = 0;\
3678       else {\
3679         CORRUPTION_ERROR_ACTION(M);\
3680       }\
3681     }\
3682   }\
3683   if (XP != 0) {\
3684     tbinptr* H = treebin_at(M, X->index);\
3685     if (X == *H) {\
3686       if ((*H = R) == 0) \
3687         clear_treemap(M, X->index);\
3688     }\
3689     else if (RTCHECK(ok_address(M, XP))) {\
3690       if (XP->child[0] == X) \
3691         XP->child[0] = R;\
3692       else \
3693         XP->child[1] = R;\
3694     }\
3695     else\
3696       CORRUPTION_ERROR_ACTION(M);\
3697     if (R != 0) {\
3698       if (RTCHECK(ok_address(M, R))) {\
3699         tchunkptr C0, C1;\
3700         R->parent = XP;\
3701         if ((C0 = X->child[0]) != 0) {\
3702           if (RTCHECK(ok_address(M, C0))) {\
3703             R->child[0] = C0;\
3704             C0->parent = R;\
3705           }\
3706           else\
3707             CORRUPTION_ERROR_ACTION(M);\
3708         }\
3709         if ((C1 = X->child[1]) != 0) {\
3710           if (RTCHECK(ok_address(M, C1))) {\
3711             R->child[1] = C1;\
3712             C1->parent = R;\
3713           }\
3714           else\
3715             CORRUPTION_ERROR_ACTION(M);\
3716         }\
3717       }\
3718       else\
3719         CORRUPTION_ERROR_ACTION(M);\
3720     }\
3721   }\
3722 }
3723 
3724 /* Relays to large vs small bin operations */
3725 
3726 #define insert_chunk(M, P, S)\
3727   if (is_small(S)) insert_small_chunk(M, P, S)\
3728   else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
3729 
3730 #define unlink_chunk(M, P, S)\
3731   if (is_small(S)) unlink_small_chunk(M, P, S)\
3732   else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3733 
3734 
3735 /* Relays to internal calls to malloc/free from realloc, memalign etc */
3736 
3737 #if ONLY_MSPACES
3738 #define internal_malloc(m, b) mstar_mspace_malloc(m, b)
3739 #define internal_free(m, mem) mstar_mspace_free(m,mem);
3740 #else /* ONLY_MSPACES */
3741 #if MSPACES
3742 #define internal_malloc(m, b)\
3743    (m == gm)? dlmalloc(b) : mstar_mspace_malloc(m, b)
3744 #define internal_free(m, mem)\
3745    if (m == gm) dlfree(mem); else mstar_mspace_free(m,mem);
3746 #else /* MSPACES */
3747 #define internal_malloc(m, b) dlmalloc(b)
3748 #define internal_free(m, mem) dlfree(mem)
3749 #endif /* MSPACES */
3750 #endif /* ONLY_MSPACES */
3751 
3752 /* -----------------------  Direct-mmapping chunks ----------------------- */
3753 
3754 /*
3755   Directly mmapped chunks are set up with an offset to the start of
3756   the mmapped region stored in the prev_foot field of the chunk. This
3757   allows reconstruction of the required argument to MUNMAP when freed,
3758   and also allows adjustment of the returned chunk to meet alignment
3759   requirements (especially in memalign).
3760 */
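
/*
  Illustrative sketch: for a directly mmapped chunk p, prev_foot holds
  the distance from the start of the mapping to the chunk header, so
  the original mapping can be reconstructed when the chunk is released
  or resized:

    char*  base    = (char*)p - p->prev_foot;
    size_t mapsize = chunksize(p) + p->prev_foot + MMAP_FOOT_PAD;

  Handing base and mapsize to the munmap wrapper releases the whole
  region.  mmap_alloc below records the alignment offset it applied in
  prev_foot, and mmap_resize reuses it the same way.
*/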
3761 
3762 /* Malloc using mmap */
3763 static void* mmap_alloc(mstate m, size_t nb) {
3764   size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3765   if (mmsize > nb) {     /* Check for wrap around 0 */
3766     char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
3767     if (mm != CMFAIL) {
3768       size_t offset = align_offset(chunk2mem(mm));
3769       size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3770       mchunkptr p = (mchunkptr)(mm + offset);
3771       p->prev_foot = offset;
3772       p->head = psize;
3773       mark_inuse_foot(m, p, psize);
3774       chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3775       chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
3776 
3777       if (m->least_addr == 0 || mm < m->least_addr)
3778         m->least_addr = mm;
3779       if ((m->footprint += mmsize) > m->max_footprint)
3780         m->max_footprint = m->footprint;
3781       assert(is_aligned(chunk2mem(p)));
3782       check_mmapped_chunk(m, p);
3783       return chunk2mem(p);
3784     }
3785   }
3786   return 0;
3787 }
3788 
3789 /* Realloc using mmap */
3790 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
3791   size_t oldsize = chunksize(oldp);
3792   if (is_small(nb)) /* Can't shrink mmap regions below small size */
3793     return 0;
3794   /* Keep old chunk if big enough but not too big */
3795   if (oldsize >= nb + SIZE_T_SIZE &&
3796       (oldsize - nb) <= (mparams.granularity << 1))
3797     return oldp;
3798   else {
3799     size_t offset = oldp->prev_foot;
3800     size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3801     size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3802     char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
3803                                   oldmmsize, newmmsize, 1);
3804     if (cp != CMFAIL) {
3805       mchunkptr newp = (mchunkptr)(cp + offset);
3806       size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3807       newp->head = psize;
3808       mark_inuse_foot(m, newp, psize);
3809       chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
3810       chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
3811 
3812       if (cp < m->least_addr)
3813         m->least_addr = cp;
3814       if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3815         m->max_footprint = m->footprint;
3816       check_mmapped_chunk(m, newp);
3817       return newp;
3818     }
3819   }
3820   return 0;
3821 }
3822 
3823 /* -------------------------- mspace management -------------------------- */
3824 
3825 /* Initialize top chunk and its size */
3826 static void init_top(mstate m, mchunkptr p, size_t psize) {
3827   /* Ensure alignment */
3828   size_t offset = align_offset(chunk2mem(p));
3829   p = (mchunkptr)((char*)p + offset);
3830   psize -= offset;
3831 
3832   m->top = p;
3833   m->topsize = psize;
3834   p->head = psize | PINUSE_BIT;
3835   /* set size of fake trailing chunk holding overhead space only once */
3836   chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
3837   m->trim_check = mparams.trim_threshold; /* reset on each update */
3838 }
3839 
3840 /* Initialize bins for a new mstate that is otherwise zeroed out */
3841 static void init_bins(mstate m) {
3842   /* Establish circular links for smallbins */
3843   bindex_t i;
3844   for (i = 0; i < NSMALLBINS; ++i) {
3845     sbinptr bin = smallbin_at(m,i);
3846     bin->fd = bin->bk = bin;
3847   }
3848 }
3849 
3850 #if PROCEED_ON_ERROR
3851 
3852 /* default corruption action */
3853 static void reset_on_error(mstate m) {
3854   int i;
3855   ++malloc_corruption_error_count;
3856   /* Reinitialize fields to forget about all memory */
3857   m->smallbins = m->treebins = 0;
3858   m->dvsize = m->topsize = 0;
3859   m->seg.base = 0;
3860   m->seg.size = 0;
3861   m->seg.next = 0;
3862   m->top = m->dv = 0;
3863   for (i = 0; i < NTREEBINS; ++i)
3864     *treebin_at(m, i) = 0;
3865   init_bins(m);
3866 }
3867 #endif /* PROCEED_ON_ERROR */
3868 
3869 /* Allocate chunk and prepend remainder with chunk in successor base. */
3870 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
3871                            size_t nb) {
3872   mchunkptr p = align_as_chunk(newbase);
3873   mchunkptr oldfirst = align_as_chunk(oldbase);
3874   size_t psize = (char*)oldfirst - (char*)p;
3875   mchunkptr q = chunk_plus_offset(p, nb);
3876   size_t qsize = psize - nb;
3877   set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3878 
3879   assert((char*)oldfirst > (char*)q);
3880   assert(pinuse(oldfirst));
3881   assert(qsize >= MIN_CHUNK_SIZE);
3882 
3883   /* consolidate remainder with first chunk of old base */
3884   if (oldfirst == m->top) {
3885     size_t tsize = m->topsize += qsize;
3886     m->top = q;
3887     q->head = tsize | PINUSE_BIT;
3888     check_top_chunk(m, q);
3889   }
3890   else if (oldfirst == m->dv) {
3891     size_t dsize = m->dvsize += qsize;
3892     m->dv = q;
3893     set_size_and_pinuse_of_free_chunk(q, dsize);
3894   }
3895   else {
3896     if (!is_inuse(oldfirst)) {
3897       size_t nsize = chunksize(oldfirst);
3898       unlink_chunk(m, oldfirst, nsize);
3899       oldfirst = chunk_plus_offset(oldfirst, nsize);
3900       qsize += nsize;
3901     }
3902     set_free_with_pinuse(q, qsize, oldfirst);
3903     insert_chunk(m, q, qsize);
3904     check_free_chunk(m, q);
3905   }
3906 
3907   check_malloced_chunk(m, chunk2mem(p), nb);
3908   return chunk2mem(p);
3909 }
3910 
3911 /* Add a segment to hold a new noncontiguous region */
3912 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
3913   /* Determine locations and sizes of segment, fenceposts, old top */
3914   char* old_top = (char*)m->top;
3915   msegmentptr oldsp = segment_holding(m, old_top);
3916   char* old_end = oldsp->base + oldsp->size;
3917   size_t ssize = pad_request(sizeof(struct malloc_segment));
3918   char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3919   size_t offset = align_offset(chunk2mem(rawsp));
3920   char* asp = rawsp + offset;
3921   char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
3922   mchunkptr sp = (mchunkptr)csp;
3923   msegmentptr ss = (msegmentptr)(chunk2mem(sp));
3924   mchunkptr tnext = chunk_plus_offset(sp, ssize);
3925   mchunkptr p = tnext;
3926   int nfences = 0;
3927 
3928   /* reset top to new space */
3929   init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
3930 
3931   /* Set up segment record */
3932   assert(is_aligned(ss));
3933   set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
3934   *ss = m->seg; /* Push current record */
3935   m->seg.base = tbase;
3936   m->seg.size = tsize;
3937   m->seg.sflags = mmapped;
3938   m->seg.next = ss;
3939 
3940   /* Insert trailing fenceposts */
3941   for (;;) {
3942     mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
3943     p->head = FENCEPOST_HEAD;
3944     ++nfences;
3945     if ((char*)(&(nextp->head)) < old_end)
3946       p = nextp;
3947     else
3948       break;
3949   }
3950   assert(nfences >= 2);
3951 
3952   /* Insert the rest of old top into a bin as an ordinary free chunk */
3953   if (csp != old_top) {
3954     mchunkptr q = (mchunkptr)old_top;
3955     size_t psize = csp - old_top;
3956     mchunkptr tn = chunk_plus_offset(q, psize);
3957     set_free_with_pinuse(q, psize, tn);
3958     insert_chunk(m, q, psize);
3959   }
3960 
3961   check_top_chunk(m, m->top);
3962 }
3963 
3964 /* -------------------------- System allocation -------------------------- */
3965 
3966 /* Get memory from system using MORECORE or MMAP */
3967 static void* sys_alloc(mstate m, size_t nb) {
3968   char* tbase = CMFAIL;
3969   size_t tsize = 0;
3970   flag_t mmap_flag = 0;
3971 
3972   ensure_initialization();
3973 
3974   #ifdef NOT_USE_SYSALLOC
3975   printf("NOT_USE_SYSALLOC\n");
3976   return 0;
3977   #endif
3978 
3979   /* Directly map large chunks, but only if already initialized */
3980   if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
3981     void* mem = mmap_alloc(m, nb);
3982     if (mem != 0)
3983       return mem;
3984   }
3985 
3986   /*
3987     Try getting memory in any of three ways (in most-preferred to
3988     least-preferred order):
3989     1. A call to MORECORE that can normally contiguously extend memory.
3990        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
3991        main space is mmapped or a previous contiguous call failed)
3992     2. A call to MMAP new space (disabled if not HAVE_MMAP).
3993        Note that under the default settings, if MORECORE is unable to
3994        fulfill a request, and HAVE_MMAP is true, then mmap is
3995        used as a noncontiguous system allocator. This is a useful backup
3996        strategy for systems with holes in address spaces -- in this case
3997        sbrk cannot contiguously expand the heap, but mmap may be able to
3998        find space.
3999     3. A call to MORECORE that cannot usually contiguously extend memory.
4000        (disabled if not HAVE_MORECORE)
4001 
4002    In all cases, we need to request enough bytes from system to ensure
4003    we can malloc nb bytes upon success, so pad with enough space for
4004    top_foot, plus alignment-pad to make sure we don't lose bytes if
4005    not on boundary, and round this up to a granularity unit.
4006   */
4007 
4008   if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
4009     char* br = CMFAIL;
4010     msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
4011     size_t asize = 0;
4012     ACQUIRE_MALLOC_GLOBAL_LOCK();
4013 
4014     if (ss == 0) {  /* First time through or recovery */
4015       char* base = (char*)CALL_MORECORE(0);
4016       if (base != CMFAIL) {
4017         asize = granularity_align(nb + SYS_ALLOC_PADDING);
4018         /* Adjust to end on a page boundary */
4019         if (!is_page_aligned(base))
4020           asize += (page_align((size_t)base) - (size_t)base);
4021         /* Can't call MORECORE if size is negative when treated as signed */
4022         if (asize < HALF_MAX_SIZE_T &&
4023             (br = (char*)(CALL_MORECORE(asize))) == base) {
4024           tbase = base;
4025           tsize = asize;
4026         }
4027       }
4028     }
4029     else {
4030       /* Subtract out existing available top space from MORECORE request. */
4031       asize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
4032       /* Use mem here only if it did continuously extend old space */
4033       if (asize < HALF_MAX_SIZE_T &&
4034           (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
4035         tbase = br;
4036         tsize = asize;
4037       }
4038     }
4039 
4040     if (tbase == CMFAIL) {    /* Cope with partial failure */
4041       if (br != CMFAIL) {    /* Try to use/extend the space we did get */
4042         if (asize < HALF_MAX_SIZE_T &&
4043             asize < nb + SYS_ALLOC_PADDING) {
4044           size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - asize);
4045           if (esize < HALF_MAX_SIZE_T) {
4046             char* end = (char*)CALL_MORECORE(esize);
4047             if (end != CMFAIL)
4048               asize += esize;
4049             else {            /* Can't use; try to release */
4050               (void) CALL_MORECORE(-asize);
4051               br = CMFAIL;
4052             }
4053           }
4054         }
4055       }
4056       if (br != CMFAIL) {    /* Use the space we did get */
4057         tbase = br;
4058         tsize = asize;
4059       }
4060       else
4061         disable_contiguous(m); /* Don't try contiguous path in the future */
4062     }
4063 
4064     RELEASE_MALLOC_GLOBAL_LOCK();
4065   }
4066 
4067   if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
4068     size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING);
4069     if (rsize > nb) { /* Fail if wraps around zero */
4070       char* mp = (char*)(CALL_MMAP(rsize));
4071       if (mp != CMFAIL) {
4072         tbase = mp;
4073         tsize = rsize;
4074         mmap_flag = USE_MMAP_BIT;
4075       }
4076     }
4077   }
4078 
4079   if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
4080     size_t asize = granularity_align(nb + SYS_ALLOC_PADDING);
4081     if (asize < HALF_MAX_SIZE_T) {
4082       char* br = CMFAIL;
4083       char* end = CMFAIL;
4084       ACQUIRE_MALLOC_GLOBAL_LOCK();
4085       br = (char*)(CALL_MORECORE(asize));
4086       end = (char*)(CALL_MORECORE(0));
4087       RELEASE_MALLOC_GLOBAL_LOCK();
4088       if (br != CMFAIL && end != CMFAIL && br < end) {
4089         size_t ssize = end - br;
4090         if (ssize > nb + TOP_FOOT_SIZE) {
4091           tbase = br;
4092           tsize = ssize;
4093         }
4094       }
4095     }
4096   }
4097 
4098   if (tbase != CMFAIL) {
4099 
4100     if ((m->footprint += tsize) > m->max_footprint)
4101       m->max_footprint = m->footprint;
4102 
4103     if (!is_initialized(m)) { /* first-time initialization */
4104       if (m->least_addr == 0 || tbase < m->least_addr)
4105         m->least_addr = tbase;
4106       m->seg.base = tbase;
4107       m->seg.size = tsize;
4108       m->seg.sflags = mmap_flag;
4109       m->magic = mparams.magic;
4110       m->release_checks = MAX_RELEASE_CHECK_RATE;
4111       init_bins(m);
4112 #if !ONLY_MSPACES
4113       if (is_global(m))
4114         init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
4115       else
4116 #endif
4117       {
4118         /* Offset top by embedded malloc_state */
4119         mchunkptr mn = next_chunk(mem2chunk(m));
4120         init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
4121       }
4122     }
4123 
4124     else {
4125       /* Try to merge with an existing segment */
4126       msegmentptr sp = &m->seg;
4127       /* Only consider most recent segment if traversal suppressed */
4128       while (sp != 0 && tbase != sp->base + sp->size)
4129         sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
4130       if (sp != 0 &&
4131           !is_extern_segment(sp) &&
4132           (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
4133           segment_holds(sp, m->top)) { /* append */
4134         sp->size += tsize;
4135         init_top(m, m->top, m->topsize + tsize);
4136       }
4137       else {
4138         if (tbase < m->least_addr)
4139           m->least_addr = tbase;
4140         sp = &m->seg;
4141         while (sp != 0 && sp->base != tbase + tsize)
4142           sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
4143         if (sp != 0 &&
4144             !is_extern_segment(sp) &&
4145             (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
4146           char* oldbase = sp->base;
4147           sp->base = tbase;
4148           sp->size += tsize;
4149           return prepend_alloc(m, tbase, oldbase, nb);
4150         }
4151         else
4152           add_segment(m, tbase, tsize, mmap_flag);
4153       }
4154     }
4155 
4156     if (nb < m->topsize) { /* Allocate from new or extended top space */
4157       size_t rsize = m->topsize -= nb;
4158       mchunkptr p = m->top;
4159       mchunkptr r = m->top = chunk_plus_offset(p, nb);
4160       r->head = rsize | PINUSE_BIT;
4161       set_size_and_pinuse_of_inuse_chunk(m, p, nb);
4162       check_top_chunk(m, m->top);
4163       check_malloced_chunk(m, chunk2mem(p), nb);
4164       return chunk2mem(p);
4165     }
4166   }
4167 
4168   MALLOC_FAILURE_ACTION;
4169   return 0;
4170 }
4171 
4172 /* -----------------------  system deallocation -------------------------- */
4173 
4174 /* Unmap and unlink any mmapped segments that don't contain used chunks */
4175 static size_t release_unused_segments(mstate m) {
4176   size_t released = 0;
4177   int nsegs = 0;
4178   msegmentptr pred = &m->seg;
4179   msegmentptr sp = pred->next;
4180   while (sp != 0) {
4181     char* base = sp->base;
4182     size_t size = sp->size;
4183     msegmentptr next = sp->next;
4184     ++nsegs;
4185     if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
4186       mchunkptr p = align_as_chunk(base);
4187       size_t psize = chunksize(p);
4188       /* Can unmap if first chunk holds entire segment and not pinned */
4189       if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
4190         tchunkptr tp = (tchunkptr)p;
4191         assert(segment_holds(sp, (char*)sp));
4192         if (p == m->dv) {
4193           m->dv = 0;
4194           m->dvsize = 0;
4195         }
4196         else {
4197           unlink_large_chunk(m, tp);
4198         }
4199         if (CALL_MUNMAP(base, size) == 0) {
4200           released += size;
4201           m->footprint -= size;
4202           /* unlink obsoleted record */
4203           sp = pred;
4204           sp->next = next;
4205         }
4206         else { /* back out if cannot unmap */
4207           insert_large_chunk(m, tp, psize);
4208         }
4209       }
4210     }
4211     if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
4212       break;
4213     pred = sp;
4214     sp = next;
4215   }
4216   /* Reset check counter */
4217   m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)?
4218                        nsegs : MAX_RELEASE_CHECK_RATE);
4219   return released;
4220 }
4221 
4222 static int sys_trim(mstate m, size_t pad) {
4223   size_t released = 0;
4224   size_t newsize;
4225   ensure_initialization();
4226   if (pad < MAX_REQUEST && is_initialized(m)) {
4227     pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
4228 
4229     if (m->topsize > pad) {
4230       /* Shrink top space in granularity-size units, keeping at least one */
4231       size_t unit = mparams.granularity;
4232       size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
4233                       SIZE_T_ONE) * unit;
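      /*
        Worked example (a sketch, assuming a 64 KiB granularity): if
        m->topsize - pad == 204800 bytes and unit == 65536, then
        extra = ((204800 + 65535) / 65536 - 1) * 65536 = 3 * 65536 = 196608,
        i.e. three whole granularity units are offered back to the system
        while the remainder (plus pad) stays in the top chunk.
      */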
4234       msegmentptr sp = segment_holding(m, (char*)m->top);
4235 
4236       if (!is_extern_segment(sp)) {
4237         if (is_mmapped_segment(sp)) {
4238           if (HAVE_MMAP &&
4239               sp->size >= extra &&
4240               !has_segment_link(m, sp)) { /* can't shrink if pinned */
4241              newsize = sp->size - extra;
4242             /* Prefer mremap, fall back to munmap */
4243             if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
4244                 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
4245               released = extra;
4246             }
4247           }
4248         }
4249         else if (HAVE_MORECORE) {
4250           if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
4251             extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
4252           ACQUIRE_MALLOC_GLOBAL_LOCK();
4253           {
4254             /* Make sure end of memory is where we last set it. */
4255             char* old_br = (char*)(CALL_MORECORE(0));
4256             if (old_br == sp->base + sp->size) {
4257               char* rel_br = (char*)(CALL_MORECORE(-extra));
4258               char* new_br = (char*)(CALL_MORECORE(0));
4259               if (rel_br != CMFAIL && new_br < old_br)
4260                 released = old_br - new_br;
4261             }
4262           }
4263           RELEASE_MALLOC_GLOBAL_LOCK();
4264         }
4265       }
4266 
4267       if (released != 0) {
4268         sp->size -= released;
4269         m->footprint -= released;
4270         init_top(m, m->top, m->topsize - released);
4271         check_top_chunk(m, m->top);
4272       }
4273     }
4274 
4275     /* Unmap any unused mmapped segments */
4276     if (HAVE_MMAP)
4277       released += release_unused_segments(m);
4278 
4279     /* On failure, disable autotrim to avoid repeated failed future calls */
4280     if (released == 0 && m->topsize > m->trim_check)
4281       m->trim_check = MAX_SIZE_T;
4282   }
4283 
4284   return (released != 0)? 1 : 0;
4285 }
4286 
4287 
4288 /* ---------------------------- malloc support --------------------------- */
4289 
4290 /* allocate a large request from the best fitting chunk in a treebin */
4291 static void* tmalloc_large(mstate m, size_t nb) {
4292   tchunkptr v = 0;
4293   size_t rsize = -nb; /* Unsigned negation */
4294   tchunkptr t;
4295   bindex_t idx;
4296   compute_tree_index(nb, idx);
4297   if ((t = *treebin_at(m, idx)) != 0) {
4298     /* Traverse tree for this bin looking for node with size == nb */
4299     size_t sizebits = nb << leftshift_for_tree_index(idx);
4300     tchunkptr rst = 0;  /* The deepest untaken right subtree */
4301     for (;;) {
4302       tchunkptr rt;
4303       size_t trem = chunksize(t) - nb;
4304       if (trem < rsize) {
4305         v = t;
4306         if ((rsize = trem) == 0)
4307           break;
4308       }
4309       rt = t->child[1];
4310       t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
4311       if (rt != 0 && rt != t)
4312         rst = rt;
4313       if (t == 0) {
4314         t = rst; /* set t to least subtree holding sizes > nb */
4315         break;
4316       }
4317       sizebits <<= 1;
4318     }
4319   }
4320   if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
4321     binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
4322     if (leftbits != 0) {
4323       bindex_t i;
4324       binmap_t leastbit = least_bit(leftbits);
4325       compute_bit2idx(leastbit, i);
4326       t = *treebin_at(m, i);
4327     }
4328   }
4329 
4330   while (t != 0) { /* find smallest of tree or subtree */
4331     size_t trem = chunksize(t) - nb;
4332     if (trem < rsize) {
4333       rsize = trem;
4334       v = t;
4335     }
4336     t = leftmost_child(t);
4337   }
4338 
4339   /*  If dv is a better fit, return 0 so malloc will use it */
4340   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
4341     if (RTCHECK(ok_address(m, v))) { /* split */
4342       mchunkptr r = chunk_plus_offset(v, nb);
4343       assert(chunksize(v) == rsize + nb);
4344       if (RTCHECK(ok_next(v, r))) {
4345         unlink_large_chunk(m, v);
4346         if (rsize < MIN_CHUNK_SIZE)
4347           set_inuse_and_pinuse(m, v, (rsize + nb));
4348         else {
4349           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
4350           set_size_and_pinuse_of_free_chunk(r, rsize);
4351           insert_chunk(m, r, rsize);
4352         }
4353         return chunk2mem(v);
4354       }
4355     }
4356     CORRUPTION_ERROR_ACTION(m);
4357   }
4358   return 0;
4359 }
4360 
4361 /* allocate a small request from the best fitting chunk in a treebin */
4362 static void* tmalloc_small(mstate m, size_t nb) {
4363   tchunkptr t, v;
4364   size_t rsize;
4365   bindex_t i;
4366   binmap_t leastbit = least_bit(m->treemap);
4367   compute_bit2idx(leastbit, i);
4368   v = t = *treebin_at(m, i);
4369   rsize = chunksize(t) - nb;
4370 
4371   while ((t = leftmost_child(t)) != 0) {
4372     size_t trem = chunksize(t) - nb;
4373     if (trem < rsize) {
4374       rsize = trem;
4375       v = t;
4376     }
4377   }
4378 
4379   if (RTCHECK(ok_address(m, v))) {
4380     mchunkptr r = chunk_plus_offset(v, nb);
4381     assert(chunksize(v) == rsize + nb);
4382     if (RTCHECK(ok_next(v, r))) {
4383       unlink_large_chunk(m, v);
4384       if (rsize < MIN_CHUNK_SIZE)
4385         set_inuse_and_pinuse(m, v, (rsize + nb));
4386       else {
4387         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
4388         set_size_and_pinuse_of_free_chunk(r, rsize);
4389         replace_dv(m, r, rsize);
4390       }
4391       return chunk2mem(v);
4392     }
4393   }
4394 
4395   CORRUPTION_ERROR_ACTION(m);
4396   return 0;
4397 }
4398 
4399 /* --------------------------- realloc support --------------------------- */
4400 
4401 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
4402   if (bytes >= MAX_REQUEST) {
4403     MALLOC_FAILURE_ACTION;
4404     return 0;
4405   }
4406   if (!PREACTION(m)) {
4407     mchunkptr oldp = mem2chunk(oldmem);
4408     size_t oldsize = chunksize(oldp);
4409     mchunkptr next = chunk_plus_offset(oldp, oldsize);
4410     mchunkptr newp = 0;
4411     void* extra = 0;
4412 
4413     /* Try to either shrink or extend into top. Else malloc-copy-free */
4414 
4415     if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
4416                 ok_next(oldp, next) && ok_pinuse(next))) {
4417       size_t nb = request2size(bytes);
4418       if (is_mmapped(oldp))
4419         newp = mmap_resize(m, oldp, nb);
4420       else if (oldsize >= nb) { /* already big enough */
4421         size_t rsize = oldsize - nb;
4422         newp = oldp;
4423         if (rsize >= MIN_CHUNK_SIZE) {
4424           mchunkptr remainder = chunk_plus_offset(newp, nb);
4425           set_inuse(m, newp, nb);
4426           set_inuse_and_pinuse(m, remainder, rsize);
4427           extra = chunk2mem(remainder);
4428         }
4429       }
4430       else if (next == m->top && oldsize + m->topsize > nb) {
4431         /* Expand into top */
4432         size_t newsize = oldsize + m->topsize;
4433         size_t newtopsize = newsize - nb;
4434         mchunkptr newtop = chunk_plus_offset(oldp, nb);
4435         set_inuse(m, oldp, nb);
4436         newtop->head = newtopsize |PINUSE_BIT;
4437         m->top = newtop;
4438         m->topsize = newtopsize;
4439         newp = oldp;
4440       }
4441     }
4442     else {
4443       USAGE_ERROR_ACTION(m, oldmem);
4444       POSTACTION(m);
4445       return 0;
4446     }
4447 #if DEBUG
4448     if (newp != 0) {
4449       check_inuse_chunk(m, newp); /* Check requires lock */
4450     }
4451 #endif
4452 
4453     POSTACTION(m);
4454 
4455     if (newp != 0) {
4456       if (extra != 0) {
4457         internal_free(m, extra);
4458       }
4459       return chunk2mem(newp);
4460     }
4461     else {
4462       void* newmem = internal_malloc(m, bytes);
4463       if (newmem != 0) {
4464         size_t oc = oldsize - overhead_for(oldp);
4465         memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
4466         internal_free(m, oldmem);
4467       }
4468       return newmem;
4469     }
4470   }
4471   return 0;
4472 }
4473 
4474 /* --------------------------- memalign support -------------------------- */
4475 
4476 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
4477   if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
4478     return internal_malloc(m, bytes);
4479   if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
4480     alignment = MIN_CHUNK_SIZE;
4481   if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
4482     size_t a = MALLOC_ALIGNMENT << 1;
4483     while (a < alignment) a <<= 1;
4484     alignment = a;
4485   }
4486 
4487   if (bytes >= MAX_REQUEST - alignment) {
4488     if (m != 0)  { /* Test isn't needed but avoids compiler warning */
4489       MALLOC_FAILURE_ACTION;
4490     }
4491   }
4492   else {
4493     size_t nb = request2size(bytes);
4494     size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
4495     char* mem = (char*)internal_malloc(m, req);
4496     if (mem != 0) {
4497       void* leader = 0;
4498       void* trailer = 0;
4499       mchunkptr p = mem2chunk(mem);
4500 
4501       if (PREACTION(m)) return 0;
4502       if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
4503         /*
4504           Find an aligned spot inside chunk.  Since we need to give
4505           back leading space in a chunk of at least MIN_CHUNK_SIZE, if
4506           the first calculation places us at a spot with less than
4507           MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
4508           We've allocated enough total room so that this is always
4509           possible.
4510         */
4511         char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
4512                                                        alignment -
4513                                                        SIZE_T_ONE)) &
4514                                              -alignment));
4515         char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
4516           br : br+alignment;
4517         mchunkptr newp = (mchunkptr)pos;
4518         size_t leadsize = pos - (char*)(p);
4519         size_t newsize = chunksize(p) - leadsize;
4520 
4521         if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
4522           newp->prev_foot = p->prev_foot + leadsize;
4523           newp->head = newsize;
4524         }
4525         else { /* Otherwise, give back leader, use the rest */
4526           set_inuse(m, newp, newsize);
4527           set_inuse(m, p, leadsize);
4528           leader = chunk2mem(p);
4529         }
4530         p = newp;
4531       }
4532 
4533       /* Give back spare room at the end */
4534       if (!is_mmapped(p)) {
4535         size_t size = chunksize(p);
4536         if (size > nb + MIN_CHUNK_SIZE) {
4537           size_t remainder_size = size - nb;
4538           mchunkptr remainder = chunk_plus_offset(p, nb);
4539           set_inuse(m, p, nb);
4540           set_inuse(m, remainder, remainder_size);
4541           trailer = chunk2mem(remainder);
4542         }
4543       }
4544 
4545       assert (chunksize(p) >= nb);
4546       assert((((size_t)(chunk2mem(p))) % alignment) == 0);
4547       check_inuse_chunk(m, p);
4548       POSTACTION(m);
4549       if (leader != 0) {
4550         internal_free(m, leader);
4551       }
4552       if (trailer != 0) {
4553         internal_free(m, trailer);
4554       }
4555       return chunk2mem(p);
4556     }
4557   }
4558   return 0;
4559 }
4560 
4561 /* ------------------------ comalloc/coalloc support --------------------- */
4562 
4563 static void** ialloc(mstate m,
4564                      size_t n_elements,
4565                      size_t* sizes,
4566                      int opts,
4567                      void* chunks[]) {
4568   /*
4569     This provides common support for independent_X routines, handling
4570     all of the combinations that can result.
4571 
4572     The opts arg has:
4573     bit 0 set if all elements are same size (using sizes[0])
4574     bit 1 set if elements should be zeroed
4575   */
4576 
4577   size_t    element_size;   /* chunksize of each element, if all same */
4578   size_t    contents_size;  /* total size of elements */
4579   size_t    array_size;     /* request size of pointer array */
4580   void*     mem;            /* malloced aggregate space */
4581   mchunkptr p;              /* corresponding chunk */
4582   size_t    remainder_size; /* remaining bytes while splitting */
4583   void**    marray;         /* either "chunks" or malloced ptr array */
4584   mchunkptr array_chunk;    /* chunk for malloced ptr array */
4585   flag_t    was_enabled;    /* to disable mmap */
4586   size_t    size;
4587   size_t    i;
4588 
4589   ensure_initialization();
4590   /* compute array length, if needed */
4591   if (chunks != 0) {
4592     if (n_elements == 0)
4593       return chunks; /* nothing to do */
4594     marray = chunks;
4595     array_size = 0;
4596   }
4597   else {
4598     /* if empty req, must still return chunk representing empty array */
4599     if (n_elements == 0)
4600       return (void**)internal_malloc(m, 0);
4601     marray = 0;
4602     array_size = request2size(n_elements * (sizeof(void*)));
4603   }
4604 
4605   /* compute total element size */
4606   if (opts & 0x1) { /* all-same-size */
4607     element_size = request2size(*sizes);
4608     contents_size = n_elements * element_size;
4609   }
4610   else { /* add up all the sizes */
4611     element_size = 0;
4612     contents_size = 0;
4613     for (i = 0; i != n_elements; ++i)
4614       contents_size += request2size(sizes[i]);
4615   }
4616 
4617   size = contents_size + array_size;
4618 
4619   /*
4620      Allocate the aggregate chunk.  First disable direct-mmapping so
4621      malloc won't use it, since we would not be able to later
4622      free/realloc space internal to a segregated mmap region.
4623   */
4624   was_enabled = use_mmap(m);
4625   disable_mmap(m);
4626   mem = internal_malloc(m, size - CHUNK_OVERHEAD);
4627   if (was_enabled)
4628     enable_mmap(m);
4629   if (mem == 0)
4630     return 0;
4631 
4632   if (PREACTION(m)) return 0;
4633   p = mem2chunk(mem);
4634   remainder_size = chunksize(p);
4635 
4636   assert(!is_mmapped(p));
4637 
4638   if (opts & 0x2) {       /* optionally clear the elements */
4639     memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4640   }
4641 
4642   /* If not provided, allocate the pointer array as final part of chunk */
4643   if (marray == 0) {
4644     size_t  array_chunk_size;
4645     array_chunk = chunk_plus_offset(p, contents_size);
4646     array_chunk_size = remainder_size - contents_size;
4647     marray = (void**) (chunk2mem(array_chunk));
4648     set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
4649     remainder_size = contents_size;
4650   }
4651 
4652   /* split out elements */
4653   for (i = 0; ; ++i) {
4654     marray[i] = chunk2mem(p);
4655     if (i != n_elements-1) {
4656       if (element_size != 0)
4657         size = element_size;
4658       else
4659         size = request2size(sizes[i]);
4660       remainder_size -= size;
4661       set_size_and_pinuse_of_inuse_chunk(m, p, size);
4662       p = chunk_plus_offset(p, size);
4663     }
4664     else { /* the final element absorbs any overallocation slop */
4665       set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
4666       break;
4667     }
4668   }
4669 
4670 #if DEBUG
4671   if (marray != chunks) {
4672     /* final element must have exactly exhausted chunk */
4673     if (element_size != 0) {
4674       assert(remainder_size == element_size);
4675     }
4676     else {
4677       assert(remainder_size == request2size(sizes[i]));
4678     }
4679     check_inuse_chunk(m, mem2chunk(marray));
4680   }
4681   for (i = 0; i != n_elements; ++i)
4682     check_inuse_chunk(m, mem2chunk(marray[i]));
4683 
4684 #endif /* DEBUG */
4685 
4686   POSTACTION(m);
4687   return marray;
4688 }
4689 
4690 
4691 /* -------------------------- public routines ---------------------------- */
4692 
4693 #if !ONLY_MSPACES
4694 
4695 void* dlmalloc(size_t bytes) {
4696   /*
4697      Basic algorithm:
4698      If a small request (< 256 bytes minus per-chunk overhead):
4699        1. If one exists, use a remainderless chunk in associated smallbin.
4700           (Remainderless means that there are too few excess bytes to
4701           represent as a chunk.)
4702        2. If it is big enough, use the dv chunk, which is normally the
4703           chunk adjacent to the one used for the most recent small request.
4704        3. If one exists, split the smallest available chunk in a bin,
4705           saving remainder in dv.
4706        4. If it is big enough, use the top chunk.
4707        5. If available, get memory from system and use it
4708      Otherwise, for a large request:
4709        1. Find the smallest available binned chunk that fits, and use it
4710           if it is better fitting than dv chunk, splitting if necessary.
4711        2. If better fitting than any binned chunk, use the dv chunk.
4712        3. If it is big enough, use the top chunk.
4713        4. If request size >= mmap threshold, try to directly mmap this chunk.
4714        5. If available, get memory from system and use it
4715 
4716      The ugly goto's here ensure that postaction occurs along all paths.
4717   */
4718 
4719 #if USE_LOCKS
4720   ensure_initialization(); /* initialize in sys_alloc if not using locks */
4721 #endif
4722 
4723   if (!PREACTION(gm)) {
4724     void* mem;
4725     size_t nb;
4726     if (bytes <= MAX_SMALL_REQUEST) {
4727       bindex_t idx;
4728       binmap_t smallbits;
4729       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4730       idx = small_index(nb);
4731       smallbits = gm->smallmap >> idx;
4732 
4733       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4734         mchunkptr b, p;
4735         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
4736         b = smallbin_at(gm, idx);
4737         p = b->fd;
4738         assert(chunksize(p) == small_index2size(idx));
4739         unlink_first_small_chunk(gm, b, p, idx);
4740         set_inuse_and_pinuse(gm, p, small_index2size(idx));
4741         mem = chunk2mem(p);
4742         check_malloced_chunk(gm, mem, nb);
4743         goto postaction;
4744       }
4745 
4746       else if (nb > gm->dvsize) {
4747         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4748           mchunkptr b, p, r;
4749           size_t rsize;
4750           bindex_t i;
4751           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4752           binmap_t leastbit = least_bit(leftbits);
4753           compute_bit2idx(leastbit, i);
4754           b = smallbin_at(gm, i);
4755           p = b->fd;
4756           assert(chunksize(p) == small_index2size(i));
4757           unlink_first_small_chunk(gm, b, p, i);
4758           rsize = small_index2size(i) - nb;
4759          /* Fit here cannot be remainderless if 4-byte sizes */
4760           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4761             set_inuse_and_pinuse(gm, p, small_index2size(i));
4762           else {
4763             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4764             r = chunk_plus_offset(p, nb);
4765             set_size_and_pinuse_of_free_chunk(r, rsize);
4766             replace_dv(gm, r, rsize);
4767           }
4768           mem = chunk2mem(p);
4769           check_malloced_chunk(gm, mem, nb);
4770           goto postaction;
4771         }
4772 
4773         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
4774           check_malloced_chunk(gm, mem, nb);
4775           goto postaction;
4776         }
4777       }
4778     }
4779     else if (bytes >= MAX_REQUEST)
4780       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4781     else {
4782       nb = pad_request(bytes);
4783       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4784         check_malloced_chunk(gm, mem, nb);
4785         goto postaction;
4786       }
4787     }
4788 
4789     if (nb <= gm->dvsize) {
4790       size_t rsize = gm->dvsize - nb;
4791       mchunkptr p = gm->dv;
4792       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4793         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
4794         gm->dvsize = rsize;
4795         set_size_and_pinuse_of_free_chunk(r, rsize);
4796         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4797       }
4798       else { /* exhaust dv */
4799         size_t dvs = gm->dvsize;
4800         gm->dvsize = 0;
4801         gm->dv = 0;
4802         set_inuse_and_pinuse(gm, p, dvs);
4803       }
4804       mem = chunk2mem(p);
4805       check_malloced_chunk(gm, mem, nb);
4806       goto postaction;
4807     }
4808 
4809     else if (nb < gm->topsize) { /* Split top */
4810       size_t rsize = gm->topsize -= nb;
4811       mchunkptr p = gm->top;
4812       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4813       r->head = rsize | PINUSE_BIT;
4814       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4815       mem = chunk2mem(p);
4816       check_top_chunk(gm, gm->top);
4817       check_malloced_chunk(gm, mem, nb);
4818       goto postaction;
4819     }
4820 
4821     mem = sys_alloc(gm, nb);
4822 
4823   postaction:
4824     POSTACTION(gm);
4825     return mem;
4826   }
4827 
4828   return 0;
4829 }
4830 
4831 void dlfree(void* mem) {
4832   /*
4833      Consolidate freed chunks with preceding or succeeding bordering
4834      free chunks, if they exist, and then place in a bin.  Intermixed
4835      with special cases for top, dv, mmapped chunks, and usage errors.
4836   */
4837 
4838   if (mem != 0) {
4839     mchunkptr p  = mem2chunk(mem);
4840 #if FOOTERS
4841     mstate fm = get_mstate_for(p);
4842     if (!ok_magic(fm)) {
4843       USAGE_ERROR_ACTION(fm, p);
4844       return;
4845     }
4846 #else /* FOOTERS */
4847 #define fm gm
4848 #endif /* FOOTERS */
4849     if (!PREACTION(fm)) {
4850       check_inuse_chunk(fm, p);
4851       if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
4852         size_t psize = chunksize(p);
4853         mchunkptr next = chunk_plus_offset(p, psize);
4854         if (!pinuse(p)) {
4855           size_t prevsize = p->prev_foot;
4856           if (is_mmapped(p)) {
4857             psize += prevsize + MMAP_FOOT_PAD;
4858             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4859               fm->footprint -= psize;
4860             goto postaction;
4861           }
4862           else {
4863             mchunkptr prev = chunk_minus_offset(p, prevsize);
4864             psize += prevsize;
4865             p = prev;
4866             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4867               if (p != fm->dv) {
4868                 unlink_chunk(fm, p, prevsize);
4869               }
4870               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4871                 fm->dvsize = psize;
4872                 set_free_with_pinuse(p, psize, next);
4873                 goto postaction;
4874               }
4875             }
4876             else
4877               goto erroraction;
4878           }
4879         }
4880 
4881         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4882           if (!cinuse(next)) {  /* consolidate forward */
4883             if (next == fm->top) {
4884               size_t tsize = fm->topsize += psize;
4885               fm->top = p;
4886               p->head = tsize | PINUSE_BIT;
4887               if (p == fm->dv) {
4888                 fm->dv = 0;
4889                 fm->dvsize = 0;
4890               }
4891               if (should_trim(fm, tsize))
4892                 sys_trim(fm, 0);
4893               goto postaction;
4894             }
4895             else if (next == fm->dv) {
4896               size_t dsize = fm->dvsize += psize;
4897               fm->dv = p;
4898               set_size_and_pinuse_of_free_chunk(p, dsize);
4899               goto postaction;
4900             }
4901             else {
4902               size_t nsize = chunksize(next);
4903               psize += nsize;
4904               unlink_chunk(fm, next, nsize);
4905               set_size_and_pinuse_of_free_chunk(p, psize);
4906               if (p == fm->dv) {
4907                 fm->dvsize = psize;
4908                 goto postaction;
4909               }
4910             }
4911           }
4912           else
4913             set_free_with_pinuse(p, psize, next);
4914 
4915           if (is_small(psize)) {
4916             insert_small_chunk(fm, p, psize);
4917             check_free_chunk(fm, p);
4918           }
4919           else {
4920             tchunkptr tp = (tchunkptr)p;
4921             insert_large_chunk(fm, tp, psize);
4922             check_free_chunk(fm, p);
4923             if (--fm->release_checks == 0)
4924               release_unused_segments(fm);
4925           }
4926           goto postaction;
4927         }
4928       }
4929     erroraction:
4930       USAGE_ERROR_ACTION(fm, p);
4931     postaction:
4932       POSTACTION(fm);
4933     }
4934   }
4935 #if !FOOTERS
4936 #undef fm
4937 #endif /* FOOTERS */
4938 }
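/*
  Minimal usage sketch, not part of the allocator itself (the helper name
  example_roundtrip is illustrative only): exercising dlmalloc/dlfree
  together with dlmalloc_usable_size, which is defined further below.
  Error handling is elided.

      void example_roundtrip(void) {
        void* p = dlmalloc(100);
        if (p != 0) {
          // usable size is at least the requested 100 bytes
          size_t usable = dlmalloc_usable_size(p);
          memset(p, 0, usable);
          dlfree(p);
        }
      }
*/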
4939 
4940 void* dlcalloc(size_t n_elements, size_t elem_size) {
4941   void* mem;
4942   size_t req = 0;
4943   if (n_elements != 0) {
4944     req = n_elements * elem_size;
4945     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4946         (req / n_elements != elem_size))
4947       req = MAX_SIZE_T; /* force downstream failure on overflow */
4948   }
4949   mem = dlmalloc(req);
4950   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4951     memset(mem, 0, req);
4952   return mem;
4953 }
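/*
  Worked example of the overflow guard above (a sketch): with a 32-bit
  size_t, dlcalloc(0x10000, 0x10001) computes req = 0x10000 * 0x10001,
  which wraps to 0x10000.  Since req / n_elements == 1 != elem_size, req is
  forced to MAX_SIZE_T and the allocation fails instead of silently
  returning a buffer far smaller than the caller expects.  The cheap
  "(n_elements | elem_size) & ~0xffff" filter skips the division entirely
  whenever both operands are below 2^16, in which case their product cannot
  overflow a 32-bit size_t.
*/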
4954 
4955 void* dlrealloc(void* oldmem, size_t bytes) {
4956   if (oldmem == 0)
4957     return dlmalloc(bytes);
4958 #ifdef REALLOC_ZERO_BYTES_FREES
4959   if (bytes == 0) {
4960     dlfree(oldmem);
4961     return 0;
4962   }
4963 #endif /* REALLOC_ZERO_BYTES_FREES */
4964   else {
4965 #if ! FOOTERS
4966     mstate m = gm;
4967 #else /* FOOTERS */
4968     mstate m = get_mstate_for(mem2chunk(oldmem));
4969     if (!ok_magic(m)) {
4970       USAGE_ERROR_ACTION(m, oldmem);
4971       return 0;
4972     }
4973 #endif /* FOOTERS */
4974     return internal_realloc(m, oldmem, bytes);
4975   }
4976 }
4977 
4978 void* dlmemalign(size_t alignment, size_t bytes) {
4979   return internal_memalign(gm, alignment, bytes);
4980 }
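/*
  Usage sketch (illustrative only): requesting storage aligned to a 64-byte
  cache line.  Alignments at or below MALLOC_ALIGNMENT fall through to
  plain dlmalloc, and non-power-of-two alignments are rounded up
  internally.

      void* buf = dlmemalign(64, 1000);   // 1000 bytes, 64-byte aligned
      if (buf != 0)
        dlfree(buf);
*/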
4981 
4982 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
4983                                  void* chunks[]) {
4984   size_t sz = elem_size; /* serves as 1-element array */
4985   return ialloc(gm, n_elements, &sz, 3, chunks);
4986 }
4987 
4988 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
4989                                    void* chunks[]) {
4990   return ialloc(gm, n_elements, sizes, 0, chunks);
4991 }
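/*
  Usage sketch for the independent_X entry points (illustrative; the struct
  and function names are hypothetical).  dlindependent_calloc hands back
  n zeroed chunks of one size, dlindependent_comalloc hands back n chunks
  of per-element sizes, and in both cases the returned pointer array is
  itself malloced when no "chunks" array is supplied, so it should
  eventually be freed as well.

      struct node { int key; struct node* next; };

      void example_pool(void) {
        void** slots = dlindependent_calloc(16, sizeof(struct node), 0);
        if (slots != 0) {
          size_t i;
          for (i = 0; i < 16; ++i)
            ((struct node*)slots[i])->key = (int)i;  // chunks start zeroed
          for (i = 0; i < 16; ++i)
            dlfree(slots[i]);       // each element is independently freeable
          dlfree(slots);            // the pointer array itself was malloced
        }
      }
*/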
4992 
4993 void* dlvalloc(size_t bytes) {
4994   size_t pagesz;
4995   ensure_initialization();
4996   pagesz = mparams.page_size;
4997   return dlmemalign(pagesz, bytes);
4998 }
4999 
5000 void* dlpvalloc(size_t bytes) {
5001   size_t pagesz;
5002   ensure_initialization();
5003   pagesz = mparams.page_size;
5004   return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
5005 }
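/*
  Illustrative numbers (assuming mparams.page_size reports 4096 bytes):
  dlvalloc(5000) returns a page-aligned block with at least 5000 usable
  bytes, while dlpvalloc(5000) first rounds the request up to the next page
  multiple, (5000 + 4095) & ~4095 == 8192, so the allocation spans only
  complete pages.
*/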
5006 
5007 int dlmalloc_trim(size_t pad) {
5008   int result = 0;
5009   ensure_initialization();
5010   if (!PREACTION(gm)) {
5011     result = sys_trim(gm, pad);
5012     POSTACTION(gm);
5013   }
5014   return result;
5015 }
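/*
  Usage sketch (illustrative): after releasing a large working set, ask the
  allocator to hand unused top-of-heap space and unused mmapped segments
  back to the system, keeping up to "pad" bytes of slack for future
  requests.

      dlfree(big_buffer);        // big_buffer is a hypothetical pointer
      (void)dlmalloc_trim(0);    // returns 1 if any memory was released
*/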
5016 
5017 size_t dlmalloc_footprint(void) {
5018   return gm->footprint;
5019 }
5020 
5021 size_t dlmalloc_max_footprint(void) {
5022   return gm->max_footprint;
5023 }
5024 
5025 #if !NO_MALLINFO
5026 struct mallinfo dlmallinfo(void) {
5027   return internal_mallinfo(gm);
5028 }
5029 #endif /* NO_MALLINFO */
5030 
5031 void dlmalloc_stats() {
5032   internal_malloc_stats(gm);
5033 }
5034 
5035 int dlmallopt(int param_number, int value) {
5036   return change_mparam(param_number, value);
5037 }
5038 
5039 #endif /* !ONLY_MSPACES */
5040 
5041 size_t dlmalloc_usable_size(void* mem) {
5042   if (mem != 0) {
5043     mchunkptr p = mem2chunk(mem);
5044     if (is_inuse(p))
5045       return chunksize(p) - overhead_for(p);
5046   }
5047   return 0;
5048 }
5049 
5050 /* ----------------------------- user mspaces ---------------------------- */
5051 
5052 #if MSPACES
5053 
5054 static mstate mstar_init_user_mstate(char* tbase, size_t tsize) {
5055   size_t msize = pad_request(sizeof(struct malloc_state));
5056   mchunkptr mn;
5057   mchunkptr msp = align_as_chunk(tbase);
5058   mstate m = (mstate)(chunk2mem(msp));
5059   memset(m, 0, msize);
5060   INITIAL_LOCK(&m->mutex);
5061   msp->head = (msize|INUSE_BITS);
5062   m->seg.base = m->least_addr = tbase;
5063   m->seg.size = m->footprint = m->max_footprint = tsize;
5064   m->magic = mparams.magic;
5065   m->release_checks = MAX_RELEASE_CHECK_RATE;
5066   m->mflags = mparams.default_mflags;
5067   m->extp = 0;
5068   m->exts = 0;
5069   disable_contiguous(m);
5070   init_bins(m);
5071   mn = next_chunk(mem2chunk(m));
5072   init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
5073   check_top_chunk(m, m->top);
5074   return m;
5075 }
5076 
5077 mspace mstar_create_mspace(size_t capacity, int locked) {
5078   mstate m = 0;
5079   size_t msize;
5080   ensure_initialization();
5081   msize = pad_request(sizeof(struct malloc_state));
5082   if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
5083     size_t rs = ((capacity == 0)? mparams.granularity :
5084                  (capacity + TOP_FOOT_SIZE + msize));
5085     size_t tsize = granularity_align(rs);
5086     char* tbase = (char*)(CALL_MMAP(tsize));
5087     if (tbase != CMFAIL) {
5088       m = mstar_init_user_mstate(tbase, tsize);
5089       m->seg.sflags = USE_MMAP_BIT;
5090       set_lock(m, locked);
5091     }
5092   }
5093   return (mspace)m;
5094 }
5095 
5096 mspace mstar_create_mspace_with_base(void* base, size_t capacity, int locked) {
5097   mstate m = 0;
5098   size_t msize;
5099   ensure_initialization();
5100   msize = pad_request(sizeof(struct malloc_state));
5101   if (capacity > msize + TOP_FOOT_SIZE &&
5102       capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
5103     m = mstar_init_user_mstate((char*)base, capacity);
5104     m->seg.sflags = EXTERN_BIT;
5105     set_lock(m, locked);
5106   }
5107   return (mspace)m;
5108 }
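/*
  Sketch of carving an mspace out of caller-provided storage (names and
  sizes are illustrative).  The capacity must leave room for the embedded
  malloc_state and top-chunk bookkeeping, and the backing storage is never
  returned to the system by the mspace routines.  mstar_mspace_malloc and
  mstar_mspace_free are defined further below.

      static char arena[64 * 1024];

      void example_static_arena(void) {
        mspace ms = mstar_create_mspace_with_base(arena, sizeof(arena), 0);
        if (ms != 0) {
          void* p = mstar_mspace_malloc(ms, 256);
          if (p != 0)
            mstar_mspace_free(ms, p);
        }
      }
*/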
5109 
5110 int mstar_mspace_track_large_chunks(mspace msp, int enable) {
5111   int ret = 0;
5112   mstate ms = (mstate)msp;
5113   if (!PREACTION(ms)) {
5114     if (!use_mmap(ms))
5115       ret = 1;
5116     if (!enable)
5117       enable_mmap(ms);
5118     else
5119       disable_mmap(ms);
5120     POSTACTION(ms);
5121   }
5122   return ret;
5123 }
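/*
  Hedged usage note (illustrative; ms names an existing mspace): enabling
  tracking keeps large requests inside the mspace's own segments instead of
  giving each one a private mmapped region, so mstar_destroy_mspace can
  reclaim them all at once; the return value reflects the previous tracking
  state.

      (void)mstar_mspace_track_large_chunks(ms, 1);
*/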
5124 
5125 size_t mstar_destroy_mspace(mspace msp) {
5126   size_t freed = 0;
5127   char* base;
5128   mstate ms = (mstate)msp;
5129   if (ok_magic(ms)) {
5130     msegmentptr sp = &ms->seg;
5131     while (sp != 0) {
5132       base = sp->base;
5133       size_t size = sp->size;
5134       flag_t flag = sp->sflags;
5135       sp = sp->next;
5136       if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
5137           CALL_MUNMAP(base, size) == 0)
5138         freed += size;
5139     }
5140   }
5141   else {
5142     USAGE_ERROR_ACTION(ms,ms);
5143   }
5144   return freed;
5145 }
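/*
  Lifecycle sketch (illustrative; the helper name is hypothetical): an
  mmap-backed mspace created with the default capacity and its own lock,
  used for a few allocations and then torn down in one step.
  mstar_destroy_mspace returns the number of bytes handed back to the
  system.

      void example_mspace_lifecycle(void) {
        mspace ms = mstar_create_mspace(0, 1);   // 0 => default capacity
        if (ms != 0) {
          void* a = mstar_mspace_malloc(ms, 128);
          void* b = mstar_mspace_calloc(ms, 10, sizeof(int));
          (void)a; (void)b;                      // released implicitly below
          (void)mstar_destroy_mspace(ms);        // unmaps the whole space
        }
      }
*/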
5146 
5147 /*
5148   mspace versions of routines are near-clones of the global
5149   versions. This is not so nice but better than the alternatives.
5150 */
5151 
5152 
5153 void* mstar_mspace_malloc(mspace msp, size_t bytes) {
5154   mstate ms = (mstate)msp;
5155   if (!ok_magic(ms)) {
5156     USAGE_ERROR_ACTION(ms,ms);
5157     return 0;
5158   }
5159   if (!PREACTION(ms)) {
5160     void* mem;
5161     size_t nb;
5162     if (bytes <= MAX_SMALL_REQUEST) {
5163       bindex_t idx;
5164       binmap_t smallbits;
5165       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
5166       idx = small_index(nb);
5167       smallbits = ms->smallmap >> idx;
5168 
5169       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
5170         mchunkptr b, p;
5171         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
5172         b = smallbin_at(ms, idx);
5173         p = b->fd;
5174         assert(chunksize(p) == small_index2size(idx));
5175         unlink_first_small_chunk(ms, b, p, idx);
5176         set_inuse_and_pinuse(ms, p, small_index2size(idx));
5177         mem = chunk2mem(p);
5178         check_malloced_chunk(ms, mem, nb);
5179         goto postaction;
5180       }
5181 
5182       else if (nb > ms->dvsize) {
5183         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
5184           mchunkptr b, p, r;
5185           size_t rsize;
5186           bindex_t i;
5187           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
5188           binmap_t leastbit = least_bit(leftbits);
5189           compute_bit2idx(leastbit, i);
5190           b = smallbin_at(ms, i);
5191           p = b->fd;
5192           assert(chunksize(p) == small_index2size(i));
5193           unlink_first_small_chunk(ms, b, p, i);
5194           rsize = small_index2size(i) - nb;
5195          /* Fit here cannot be remainderless if 4-byte sizes */
5196           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
5197             set_inuse_and_pinuse(ms, p, small_index2size(i));
5198           else {
5199             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
5200             r = chunk_plus_offset(p, nb);
5201             set_size_and_pinuse_of_free_chunk(r, rsize);
5202             replace_dv(ms, r, rsize);
5203           }
5204           mem = chunk2mem(p);
5205           check_malloced_chunk(ms, mem, nb);
5206           goto postaction;
5207         }
5208 
5209         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
5210           check_malloced_chunk(ms, mem, nb);
5211           goto postaction;
5212         }
5213       }
5214     }
5215     else if (bytes >= MAX_REQUEST)
5216       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
5217     else {
5218       nb = pad_request(bytes);
5219       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
5220         check_malloced_chunk(ms, mem, nb);
5221         goto postaction;
5222       }
5223     }
5224 
5225     if (nb <= ms->dvsize) {
5226       size_t rsize = ms->dvsize - nb;
5227       mchunkptr p = ms->dv;
5228       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
5229         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
5230         ms->dvsize = rsize;
5231         set_size_and_pinuse_of_free_chunk(r, rsize);
5232         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
5233       }
5234       else { /* exhaust dv */
5235         size_t dvs = ms->dvsize;
5236         ms->dvsize = 0;
5237         ms->dv = 0;
5238         set_inuse_and_pinuse(ms, p, dvs);
5239       }
5240       mem = chunk2mem(p);
5241       check_malloced_chunk(ms, mem, nb);
5242       goto postaction;
5243     }
5244 
5245     else if (nb < ms->topsize) { /* Split top */
5246       size_t rsize = ms->topsize -= nb;
5247       mchunkptr p = ms->top;
5248       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
5249       r->head = rsize | PINUSE_BIT;
5250       set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
5251       mem = chunk2mem(p);
5252       check_top_chunk(ms, ms->top);
5253       check_malloced_chunk(ms, mem, nb);
5254       goto postaction;
5255     }
5256 
5257     mem = sys_alloc(ms, nb);
5258 
5259   postaction:
5260     POSTACTION(ms);
5261     return mem;
5262   }
5263 
5264   return 0;
5265 }
5266 
5267 void mstar_mspace_free(mspace msp, void* mem) {
5268   if (mem != 0) {
5269     mchunkptr p  = mem2chunk(mem);
5270 #if FOOTERS
5271     mstate fm = get_mstate_for(p);
5272     msp = msp; /* placate people compiling -Wunused */
5273 #else /* FOOTERS */
5274     mstate fm = (mstate)msp;
5275 #endif /* FOOTERS */
5276     if (!ok_magic(fm)) {
5277       USAGE_ERROR_ACTION(fm, p);
5278       return;
5279     }
5280     if (!PREACTION(fm)) {
5281       check_inuse_chunk(fm, p);
5282       if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
5283         size_t psize = chunksize(p);
5284         mchunkptr next = chunk_plus_offset(p, psize);
5285         if (!pinuse(p)) {
5286           size_t prevsize = p->prev_foot;
5287           if (is_mmapped(p)) {
5288             psize += prevsize + MMAP_FOOT_PAD;
5289             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
5290               fm->footprint -= psize;
5291             goto postaction;
5292           }
5293           else {
5294             mchunkptr prev = chunk_minus_offset(p, prevsize);
5295             psize += prevsize;
5296             p = prev;
5297             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
5298               if (p != fm->dv) {
5299                 unlink_chunk(fm, p, prevsize);
5300               }
5301               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
5302                 fm->dvsize = psize;
5303                 set_free_with_pinuse(p, psize, next);
5304                 goto postaction;
5305               }
5306             }
5307             else
5308               goto erroraction;
5309           }
5310         }
5311 
5312         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
5313           if (!cinuse(next)) {  /* consolidate forward */
5314             if (next == fm->top) {
5315               size_t tsize = fm->topsize += psize;
5316               fm->top = p;
5317               p->head = tsize | PINUSE_BIT;
5318               if (p == fm->dv) {
5319                 fm->dv = 0;
5320                 fm->dvsize = 0;
5321               }
5322               if (should_trim(fm, tsize))
5323                 sys_trim(fm, 0);
5324               goto postaction;
5325             }
5326             else if (next == fm->dv) {
5327               size_t dsize = fm->dvsize += psize;
5328               fm->dv = p;
5329               set_size_and_pinuse_of_free_chunk(p, dsize);
5330               goto postaction;
5331             }
5332             else {
5333               size_t nsize = chunksize(next);
5334               psize += nsize;
5335               unlink_chunk(fm, next, nsize);
5336               set_size_and_pinuse_of_free_chunk(p, psize);
5337               if (p == fm->dv) {
5338                 fm->dvsize = psize;
5339                 goto postaction;
5340               }
5341             }
5342           }
5343           else
5344             set_free_with_pinuse(p, psize, next);
5345 
5346           if (is_small(psize)) {
5347             insert_small_chunk(fm, p, psize);
5348             check_free_chunk(fm, p);
5349           }
5350           else {
5351             tchunkptr tp = (tchunkptr)p;
5352             insert_large_chunk(fm, tp, psize);
5353             check_free_chunk(fm, p);
5354             if (--fm->release_checks == 0)
5355               release_unused_segments(fm);
5356           }
5357           goto postaction;
5358         }
5359       }
5360     erroraction:
5361       USAGE_ERROR_ACTION(fm, p);
5362     postaction:
5363       POSTACTION(fm);
5364     }
5365   }
5366 }
5367 
5368 void* mstar_mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
5369   void* mem;
5370   size_t req = 0;
5371   mstate ms = (mstate)msp;
5372   if (!ok_magic(ms)) {
5373     USAGE_ERROR_ACTION(ms,ms);
5374     return 0;
5375   }
5376   if (n_elements != 0) {
5377     req = n_elements * elem_size;
5378     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
5379         (req / n_elements != elem_size))
5380       req = MAX_SIZE_T; /* force downstream failure on overflow */
5381   }
5382   mem = internal_malloc(ms, req);
5383   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
5384     memset(mem, 0, req);
5385   return mem;
5386 }
5387 
5388 void* mstar_mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
5389   if (oldmem == 0)
5390     return mstar_mspace_malloc(msp, bytes);
5391 #ifdef REALLOC_ZERO_BYTES_FREES
5392   if (bytes == 0) {
5393     mstar_mspace_free(msp, oldmem);
5394     return 0;
5395   }
5396 #endif /* REALLOC_ZERO_BYTES_FREES */
5397   else {
5398 #if FOOTERS
5399     mchunkptr p  = mem2chunk(oldmem);
5400     mstate ms = get_mstate_for(p);
5401 #else /* FOOTERS */
5402     mstate ms = (mstate)msp;
5403 #endif /* FOOTERS */
5404     if (!ok_magic(ms)) {
5405       USAGE_ERROR_ACTION(ms,ms);
5406       return 0;
5407     }
5408     return internal_realloc(ms, oldmem, bytes);
5409   }
5410 }
5411 
5412 void* mstar_mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
5413   mstate ms = (mstate)msp;
5414   if (!ok_magic(ms)) {
5415     USAGE_ERROR_ACTION(ms,ms);
5416     return 0;
5417   }
5418   return internal_memalign(ms, alignment, bytes);
5419 }
5420 
5421 void** mstar_mspace_independent_calloc(mspace msp, size_t n_elements,
5422                                  size_t elem_size, void* chunks[]) {
5423   mstate ms = (mstate)msp;
5424   size_t sizes[1];
5425 
5426   sizes[0] = elem_size;
5427 
5428   if (!ok_magic(ms)) {
5429     USAGE_ERROR_ACTION(ms,ms);
5430     return 0;
5431   }
5432   return ialloc(ms, n_elements, sizes, 3, chunks);
5433 }
5434 
5435 void** mstar_mspace_independent_comalloc(mspace msp, size_t n_elements,
5436                                    size_t sizes[], void* chunks[]) {
5437   mstate ms = (mstate)msp;
5438   if (!ok_magic(ms)) {
5439     USAGE_ERROR_ACTION(ms,ms);
5440     return 0;
5441   }
5442   return ialloc(ms, n_elements, sizes, 0, chunks);
5443 }
5444 
5445 int mstar_mspace_trim(mspace msp, size_t pad) {
5446   int result = 0;
5447   mstate ms = (mstate)msp;
5448   if (ok_magic(ms)) {
5449     if (!PREACTION(ms)) {
5450       result = sys_trim(ms, pad);
5451       POSTACTION(ms);
5452     }
5453   }
5454   else {
5455     USAGE_ERROR_ACTION(ms,ms);
5456   }
5457   return result;
5458 }
5459 
5460 void mstar_mspace_malloc_stats(mspace msp) {
5461   mstate ms = (mstate)msp;
5462   if (ok_magic(ms)) {
5463     internal_malloc_stats(ms);
5464   }
5465   else {
5466     USAGE_ERROR_ACTION(ms,ms);
5467   }
5468 }
5469 
5470 size_t mstar_mspace_footprint(mspace msp) {
5471   size_t result = 0;
5472   mstate ms = (mstate)msp;
5473   if (ok_magic(ms)) {
5474     result = ms->footprint;
5475   }
5476   else {
5477     USAGE_ERROR_ACTION(ms,ms);
5478   }
5479   return result;
5480 }
5481 
5482 
5483 size_t mstar_mspace_max_footprint(mspace msp) {
5484   size_t result = 0;
5485   mstate ms = (mstate)msp;
5486   if (ok_magic(ms)) {
5487     result = ms->max_footprint;
5488   }
5489   else {
5490     USAGE_ERROR_ACTION(ms,ms);
5491   }
5492   return result;
5493 }
5494 
5495 
5496 #if !NO_MALLINFO
5497 struct mallinfo mstar_mspace_mallinfo(mspace msp) {
5498   mstate ms = (mstate)msp;
5499   if (!ok_magic(ms)) {
5500     USAGE_ERROR_ACTION(ms,ms);
5501   }
5502   return internal_mallinfo(ms);
5503 }
5504 #endif /* NO_MALLINFO */
5505 
5506 size_t mstar_mspace_usable_size(void* mem) {
5507   if (mem != 0) {
5508     mchunkptr p = mem2chunk(mem);
5509     if (is_inuse(p))
5510       return chunksize(p) - overhead_for(p);
5511   }
5512   return 0;
5513 }
5514 
5515 int mstar_mspace_mallopt(int param_number, int value) {
5516   return change_mparam(param_number, value);
5517 }
5518 
5519 #endif /* MSPACES */
5520 
5521 
5522 /* -------------------- Alternative MORECORE functions ------------------- */
5523 
5524 /*
5525   Guidelines for creating a custom version of MORECORE:
5526 
5527   * For best performance, MORECORE should allocate in multiples of pagesize.
5528   * MORECORE may allocate more memory than requested. (Or even less,
5529       but this will usually result in a malloc failure.)
5530   * MORECORE must not allocate memory when given argument zero, but
5531       instead return one past the end address of memory from previous
5532       nonzero call.
5533   * For best performance, consecutive calls to MORECORE with positive
5534       arguments should return increasing addresses, indicating that
5535       space has been contiguously extended.
5536   * Even though consecutive calls to MORECORE need not return contiguous
5537       addresses, it must be OK for malloc'ed chunks to span multiple
5538       regions in those cases where they do happen to be contiguous.
5539   * MORECORE need not handle negative arguments -- it may instead
5540       just return MFAIL when given negative arguments.
5541       Negative arguments are always multiples of pagesize. MORECORE
5542       must not misinterpret negative args as large positive unsigned
5543       args. You can suppress all such calls from even occurring by defining
5544       MORECORE_CANNOT_TRIM.
5545 
5546   As an example alternative MORECORE, here is a custom allocator
5547   kindly contributed for pre-OSX macOS.  It uses virtually but not
5548   necessarily physically contiguous non-paged memory (locked in,
5549   present and won't get swapped out).  You can use it by uncommenting
5550   this section, adding some #includes, and setting up the appropriate
5551   defines above:
5552 
5553       #define MORECORE osMoreCore
5554 
5555   There is also a shutdown routine that should somehow be called for
5556   cleanup upon program exit.
5557 
5558   #define MAX_POOL_ENTRIES 100
5559   #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
5560   static int next_os_pool;
5561   void *our_os_pools[MAX_POOL_ENTRIES];
5562 
5563   void *osMoreCore(int size)
5564   {
5565     void *ptr = 0;
5566     static void *sbrk_top = 0;
5567 
5568     if (size > 0)
5569     {
5570       if (size < MINIMUM_MORECORE_SIZE)
5571          size = MINIMUM_MORECORE_SIZE;
5572       if (CurrentExecutionLevel() == kTaskLevel)
5573          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5574       if (ptr == 0)
5575       {
5576         return (void *) MFAIL;
5577       }
5578       // save ptrs so they can be freed during cleanup
5579       our_os_pools[next_os_pool] = ptr;
5580       next_os_pool++;
5581       ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5582       sbrk_top = (char *) ptr + size;
5583       return ptr;
5584     }
5585     else if (size < 0)
5586     {
5587       // we don't currently support shrink behavior
5588       return (void *) MFAIL;
5589     }
5590     else
5591     {
5592       return sbrk_top;
5593     }
5594   }
5595 
5596   // cleanup any allocated memory pools
5597   // called as last thing before shutting down driver
5598 
5599   void osCleanupMem(void)
5600   {
5601     void **ptr;
5602 
5603     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5604       if (*ptr)
5605       {
5606          PoolDeallocate(*ptr);
5607          *ptr = 0;
5608       }
5609   }
5610 
5611 */
5612 
5613 
5614 /* -----------------------------------------------------------------------
5615 History:
5616     V2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)
5617       * Use zeros instead of prev foot for is_mmapped
5618       * Add mspace_track_large_chunks; thanks to Jean Brouwers
5619       * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
5620       * Fix insufficient sys_alloc padding when using 16byte alignment
5621       * Fix bad error check in mstar_mspace_footprint
5622       * Adaptations for ptmalloc; thanks to Wolfram Gloger.
5623       * Reentrant spin locks; thanks to Earl Chew and others
5624       * Win32 improvements; thanks to Niall Douglas and Earl Chew
5625       * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
5626       * Extension hook in malloc_state
5627       * Various small adjustments to reduce warnings on some compilers
5628       * Various configuration extensions/changes for more platforms. Thanks
5629          to all who contributed these.
5630 
5631     V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
5632       * Add max_footprint functions
5633       * Ensure all appropriate literals are size_t
5634       * Fix conditional compilation problem for some #define settings
5635       * Avoid concatenating segments with the one provided
5636         in create_mspace_with_base
5637       * Rename some variables to avoid compiler shadowing warnings
5638       * Use explicit lock initialization.
5639       * Better handling of sbrk interference.
5640       * Simplify and fix segment insertion, trimming and mspace_destroy
5641       * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
5642       * Thanks especially to Dennis Flanagan for help on these.
5643 
5644     V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
5645       * Fix memalign brace error.
5646 
5647     V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
5648       * Fix improper #endif nesting in C++
5649       * Add explicit casts needed for C++
5650 
5651     V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
5652       * Use trees for large bins
5653       * Support mspaces
5654       * Use segments to unify sbrk-based and mmap-based system allocation,
5655         removing need for emulation on most platforms without sbrk.
5656       * Default safety checks
5657       * Optional footer checks. Thanks to William Robertson for the idea.
5658       * Internal code refactoring
5659       * Incorporate suggestions and platform-specific changes.
5660         Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
5661         Aaron Bachmann,  Emery Berger, and others.
5662       * Speed up non-fastbin processing enough to remove fastbins.
5663       * Remove useless cfree() to avoid conflicts with other apps.
5664       * Remove internal memcpy, memset. Compilers handle builtins better.
5665       * Remove some options that no one ever used and rename others.
5666 
5667     V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
5668       * Fix malloc_state bitmap array misdeclaration
5669 
5670     V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
5671       * Allow tuning of FIRST_SORTED_BIN_SIZE
5672       * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
5673       * Better detection and support for non-contiguousness of MORECORE.
5674         Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
5675       * Bypass most of malloc if no frees. Thanks To Emery Berger.
5676       * Fix freeing of old top non-contiguous chunk in sysmalloc.
5677       * Raised default trim and map thresholds to 256K.
5678       * Fix mmap-related #defines. Thanks to Lubos Lunak.
5679       * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5680       * Branch-free bin calculation
5681       * Default trim and mmap thresholds now 256K.
5682 
5683     V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
5684       * Introduce independent_comalloc and independent_calloc.
5685         Thanks to Michael Pachos for motivation and help.
5686       * Make optional .h file available
5687       * Allow > 2GB requests on 32bit systems.
5688       * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5689         Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5690         and Anonymous.
5691       * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5692         helping test this.)
5693       * memalign: check alignment arg
5694       * realloc: don't try to shift chunks backwards, since this
5695         leads to  more fragmentation in some programs and doesn't
5696         seem to help in any others.
5697       * Collect all cases in malloc requiring system memory into sysmalloc
5698       * Use mmap as backup to sbrk
5699       * Place all internal state in malloc_state
5700       * Introduce fastbins (although similar to 2.5.1)
5701       * Many minor tunings and cosmetic improvements
5702       * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5703       * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5704         Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5705       * Include errno.h to support default failure action.
5706 
5707     V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
5708       * return null for negative arguments
5709       * Added several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5710          * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5711           (e.g. WIN32 platforms)
5712          * Cleanup header file inclusion for WIN32 platforms
5713          * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5714          * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5715            memory allocation routines
5716          * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5717          * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5718            usage of 'assert' in non-WIN32 code
5719          * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5720            avoid infinite loop
5721       * Always call 'fREe()' rather than 'free()'
5722 
5723     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
5724       * Fixed ordering problem with boundary-stamping
5725 
5726     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
5727       * Added pvalloc, as recommended by H.J. Liu
5728       * Added 64bit pointer support mainly from Wolfram Gloger
5729       * Added anonymously donated WIN32 sbrk emulation
5730       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5731       * malloc_extend_top: fix mask error that caused wastage after
5732         foreign sbrks
5733       * Add linux mremap support code from HJ Liu
5734 
5735     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
5736       * Integrated most documentation with the code.
5737       * Add support for mmap, with help from
5738         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5739       * Use last_remainder in more cases.
5740       * Pack bins using idea from  colin@nyx10.cs.du.edu
5741       * Use ordered bins instead of best-fit threshold
5742       * Eliminate block-local decls to simplify tracing and debugging.
5743       * Support another case of realloc via move into top
5744       * Fix error occurring when initial sbrk_base not word-aligned.
5745       * Rely on page size for units instead of SBRK_UNIT to
5746         avoid surprises about sbrk alignment conventions.
5747       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5748         (raymond@es.ele.tue.nl) for the suggestion.
5749       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5750       * More precautions for cases where other routines call sbrk,
5751         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5752       * Added macros etc., allowing use in linux libc from
5753         H.J. Lu (hjl@gnu.ai.mit.edu)
5754       * Inverted this history list
5755 
5756     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
5757       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5758       * Removed all preallocation code since under current scheme
5759         the work required to undo bad preallocations exceeds
5760         the work saved in good cases for most test programs.
5761       * No longer use return list or unconsolidated bins since
5762         no scheme using them consistently outperforms those that don't
5763         given above changes.
5764       * Use best fit for very large chunks to prevent some worst-cases.
5765       * Added some support for debugging
5766 
5767     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
5768       * Removed footers when chunks are in use. Thanks to
5769         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5770 
5771     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
5772       * Added malloc_trim, with help from Wolfram Gloger
5773         (wmglo@Dent.MED.Uni-Muenchen.DE).
5774 
5775     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
5776 
5777     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
5778       * realloc: try to expand in both directions
5779       * malloc: swap order of clean-bin strategy;
5780       * realloc: only conditionally expand backwards
5781       * Try not to scavenge used bins
5782       * Use bin counts as a guide to preallocation
5783       * Occasionally bin return list chunks in first scan
5784       * Add a few optimizations from colin@nyx10.cs.du.edu
5785 
5786     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
5787       * faster bin computation & slightly different binning
5788       * merged all consolidations to one part of malloc proper
5789          (eliminating old malloc_find_space & malloc_clean_bin)
5790       * Scan 2 returns chunks (not just 1)
5791       * Propagate failure in realloc if malloc returns 0
5792       * Add stuff to allow compilation on non-ANSI compilers
5793           from kpv@research.att.com
5794 
5795     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
5796       * removed potential for odd address access in prev_chunk
5797       * removed dependency on getpagesize.h
5798       * misc cosmetics and a bit more internal documentation
5799       * anticosmetics: mangled names in macros to evade debugger strangeness
5800       * tested on sparc, hp-700, dec-mips, rs6000
5801           with gcc & native cc (hp, dec only) allowing
5802           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5803 
5804     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
5805       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5806          structure of old version,  but most details differ.)
5807 
5808 */
5809 #endif
5810