xref: /rk3399_rockchip-uboot/include/malloc.h (revision 5b1d713721c3ea02549940133f09236783dda1f9)
1*5b1d7137Swdenk /*
2*5b1d7137Swdenk   A version of malloc/free/realloc written by Doug Lea and released to the
3*5b1d7137Swdenk   public domain.  Send questions/comments/complaints/performance data
4*5b1d7137Swdenk   to dl@cs.oswego.edu
5*5b1d7137Swdenk 
6*5b1d7137Swdenk * VERSION 2.6.6  Sun Mar  5 19:10:03 2000  Doug Lea  (dl at gee)
7*5b1d7137Swdenk 
8*5b1d7137Swdenk    Note: There may be an updated version of this malloc obtainable at
9*5b1d7137Swdenk            ftp://g.oswego.edu/pub/misc/malloc.c
10*5b1d7137Swdenk          Check before installing!
11*5b1d7137Swdenk 
12*5b1d7137Swdenk * Why use this malloc?
13*5b1d7137Swdenk 
14*5b1d7137Swdenk   This is not the fastest, most space-conserving, most portable, or
15*5b1d7137Swdenk   most tunable malloc ever written. However it is among the fastest
16*5b1d7137Swdenk   while also being among the most space-conserving, portable and tunable.
17*5b1d7137Swdenk   Consistent balance across these factors results in a good general-purpose
18*5b1d7137Swdenk   allocator. For a high-level description, see
19*5b1d7137Swdenk      http://g.oswego.edu/dl/html/malloc.html
20*5b1d7137Swdenk 
21*5b1d7137Swdenk * Synopsis of public routines
22*5b1d7137Swdenk 
23*5b1d7137Swdenk   (Much fuller descriptions are contained in the program documentation below.)
24*5b1d7137Swdenk 
25*5b1d7137Swdenk   malloc(size_t n);
26*5b1d7137Swdenk      Return a pointer to a newly allocated chunk of at least n bytes, or null
27*5b1d7137Swdenk      if no space is available.
28*5b1d7137Swdenk   free(Void_t* p);
29*5b1d7137Swdenk      Release the chunk of memory pointed to by p, or no effect if p is null.
30*5b1d7137Swdenk   realloc(Void_t* p, size_t n);
31*5b1d7137Swdenk      Return a pointer to a chunk of size n that contains the same data
32*5b1d7137Swdenk      as does chunk p up to the minimum of (n, p's size) bytes, or null
33*5b1d7137Swdenk      if no space is available. The returned pointer may or may not be
34*5b1d7137Swdenk      the same as p. If p is null, equivalent to malloc.  Unless the
35*5b1d7137Swdenk      #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
36*5b1d7137Swdenk      size argument of zero (re)allocates a minimum-sized chunk.
37*5b1d7137Swdenk   memalign(size_t alignment, size_t n);
38*5b1d7137Swdenk      Return a pointer to a newly allocated chunk of n bytes, aligned
39*5b1d7137Swdenk      in accord with the alignment argument, which must be a power of
40*5b1d7137Swdenk      two.
41*5b1d7137Swdenk   valloc(size_t n);
42*5b1d7137Swdenk      Equivalent to memalign(pagesize, n), where pagesize is the page
43*5b1d7137Swdenk      size of the system (or as near to this as can be figured out from
44*5b1d7137Swdenk      all the includes/defines below.)
45*5b1d7137Swdenk   pvalloc(size_t n);
46*5b1d7137Swdenk      Equivalent to valloc(minimum-page-that-holds(n)), that is,
47*5b1d7137Swdenk      round up n to nearest pagesize.
48*5b1d7137Swdenk   calloc(size_t unit, size_t quantity);
49*5b1d7137Swdenk      Returns a pointer to quantity * unit bytes, with all locations
50*5b1d7137Swdenk      set to zero.
51*5b1d7137Swdenk   cfree(Void_t* p);
52*5b1d7137Swdenk      Equivalent to free(p).
53*5b1d7137Swdenk   malloc_trim(size_t pad);
54*5b1d7137Swdenk      Release all but pad bytes of freed top-most memory back
55*5b1d7137Swdenk      to the system. Return 1 if successful, else 0.
56*5b1d7137Swdenk   malloc_usable_size(Void_t* p);
57*5b1d7137Swdenk      Report the number of usable allocated bytes associated with allocated
58*5b1d7137Swdenk      chunk p. This may or may not report more bytes than were requested,
59*5b1d7137Swdenk      due to alignment and minimum size constraints.
60*5b1d7137Swdenk   malloc_stats();
61*5b1d7137Swdenk      Prints brief summary statistics on stderr.
62*5b1d7137Swdenk   mallinfo()
63*5b1d7137Swdenk      Returns (by copy) a struct containing various summary statistics.
64*5b1d7137Swdenk   mallopt(int parameter_number, int parameter_value)
65*5b1d7137Swdenk      Changes one of the tunable parameters described below. Returns
66*5b1d7137Swdenk      1 if successful in changing the parameter, else 0.
67*5b1d7137Swdenk 
68*5b1d7137Swdenk * Vital statistics:
69*5b1d7137Swdenk 
70*5b1d7137Swdenk   Alignment:                            8-byte
71*5b1d7137Swdenk        8 byte alignment is currently hardwired into the design.  This
72*5b1d7137Swdenk        seems to suffice for all current machines and C compilers.
73*5b1d7137Swdenk 
74*5b1d7137Swdenk   Assumed pointer representation:       4 or 8 bytes
75*5b1d7137Swdenk        Code for 8-byte pointers is untested by me but has worked
76*5b1d7137Swdenk        reliably by Wolfram Gloger, who contributed most of the
77*5b1d7137Swdenk        changes supporting this.
78*5b1d7137Swdenk 
79*5b1d7137Swdenk   Assumed size_t  representation:       4 or 8 bytes
80*5b1d7137Swdenk        Note that size_t is allowed to be 4 bytes even if pointers are 8.
81*5b1d7137Swdenk 
82*5b1d7137Swdenk   Minimum overhead per allocated chunk: 4 or 8 bytes
83*5b1d7137Swdenk        Each malloced chunk has a hidden overhead of 4 bytes holding size
84*5b1d7137Swdenk        and status information.
85*5b1d7137Swdenk 
86*5b1d7137Swdenk   Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
87*5b1d7137Swdenk                           8-byte ptrs:  24/32 bytes (including 4/8 overhead)
88*5b1d7137Swdenk 
89*5b1d7137Swdenk        When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
90*5b1d7137Swdenk        ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
91*5b1d7137Swdenk        needed; 4 (8) for a trailing size field
92*5b1d7137Swdenk        and 8 (16) bytes for free list pointers. Thus, the minimum
93*5b1d7137Swdenk        allocatable size is 16/24/32 bytes.
94*5b1d7137Swdenk 
95*5b1d7137Swdenk        Even a request for zero bytes (i.e., malloc(0)) returns a
96*5b1d7137Swdenk        pointer to something of the minimum allocatable size.
97*5b1d7137Swdenk 
98*5b1d7137Swdenk   Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
99*5b1d7137Swdenk                           8-byte size_t: 2^63 - 16 bytes
100*5b1d7137Swdenk 
101*5b1d7137Swdenk        It is assumed that (possibly signed) size_t bit values suffice to
102*5b1d7137Swdenk        represent chunk sizes. `Possibly signed' is due to the fact
103*5b1d7137Swdenk        that `size_t' may be defined on a system as either a signed or
104*5b1d7137Swdenk        an unsigned type. To be conservative, values that would appear
105*5b1d7137Swdenk        as negative numbers are avoided.
106*5b1d7137Swdenk        Requests for sizes with a negative sign bit when the request
107*5b1d7137Swdenk        size is treated as a long will return null.
108*5b1d7137Swdenk 
109*5b1d7137Swdenk   Maximum overhead wastage per allocated chunk: normally 15 bytes
110*5b1d7137Swdenk 
111*5b1d7137Swdenk        Alignment demands, plus the minimum allocatable size restriction
112*5b1d7137Swdenk        make the normal worst-case wastage 15 bytes (i.e., up to 15
113*5b1d7137Swdenk        more bytes will be allocated than were requested in malloc), with
114*5b1d7137Swdenk        two exceptions:
115*5b1d7137Swdenk          1. Because requests for zero bytes allocate non-zero space,
116*5b1d7137Swdenk             the worst case wastage for a request of zero bytes is 24 bytes.
117*5b1d7137Swdenk          2. For requests >= mmap_threshold that are serviced via
118*5b1d7137Swdenk             mmap(), the worst case wastage is 8 bytes plus the remainder
119*5b1d7137Swdenk             from a system page (the minimal mmap unit); typically 4096 bytes.
120*5b1d7137Swdenk 
121*5b1d7137Swdenk * Limitations
122*5b1d7137Swdenk 
123*5b1d7137Swdenk     Here are some features that are NOT currently supported
124*5b1d7137Swdenk 
125*5b1d7137Swdenk     * No user-definable hooks for callbacks and the like.
126*5b1d7137Swdenk     * No automated mechanism for fully checking that all accesses
127*5b1d7137Swdenk       to malloced memory stay within their bounds.
128*5b1d7137Swdenk     * No support for compaction.
129*5b1d7137Swdenk 
130*5b1d7137Swdenk * Synopsis of compile-time options:
131*5b1d7137Swdenk 
132*5b1d7137Swdenk     People have reported using previous versions of this malloc on all
133*5b1d7137Swdenk     versions of Unix, sometimes by tweaking some of the defines
134*5b1d7137Swdenk     below. It has been tested most extensively on Solaris and
135*5b1d7137Swdenk     Linux. It is also reported to work on WIN32 platforms.
136*5b1d7137Swdenk     People have also reported adapting this malloc for use in
137*5b1d7137Swdenk     stand-alone embedded systems.
138*5b1d7137Swdenk 
139*5b1d7137Swdenk     The implementation is in straight, hand-tuned ANSI C.  Among other
140*5b1d7137Swdenk     consequences, it uses a lot of macros.  Because of this, to be at
141*5b1d7137Swdenk     all usable, this code should be compiled using an optimizing compiler
142*5b1d7137Swdenk     (for example gcc -O2) that can simplify expressions and control
143*5b1d7137Swdenk     paths.
144*5b1d7137Swdenk 
145*5b1d7137Swdenk   __STD_C                  (default: derived from C compiler defines)
146*5b1d7137Swdenk      Nonzero if using ANSI-standard C compiler, a C++ compiler, or
147*5b1d7137Swdenk      a C compiler sufficiently close to ANSI to get away with it.
148*5b1d7137Swdenk   DEBUG                    (default: NOT defined)
149*5b1d7137Swdenk      Define to enable debugging. Adds fairly extensive assertion-based
150*5b1d7137Swdenk      checking to help track down memory errors, but noticeably slows down
151*5b1d7137Swdenk      execution.
152*5b1d7137Swdenk   REALLOC_ZERO_BYTES_FREES (default: NOT defined)
153*5b1d7137Swdenk      Define this if you think that realloc(p, 0) should be equivalent
154*5b1d7137Swdenk      to free(p). Otherwise, since malloc returns a unique pointer for
155*5b1d7137Swdenk      malloc(0), so does realloc(p, 0).
156*5b1d7137Swdenk   HAVE_MEMCPY               (default: defined)
157*5b1d7137Swdenk      Define if you are not otherwise using ANSI STD C, but still
158*5b1d7137Swdenk      have memcpy and memset in your C library and want to use them.
159*5b1d7137Swdenk      Otherwise, simple internal versions are supplied.
160*5b1d7137Swdenk   USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
161*5b1d7137Swdenk      Define as 1 if you want the C library versions of memset and
162*5b1d7137Swdenk      memcpy called in realloc and calloc (otherwise macro versions are used).
163*5b1d7137Swdenk      At least on some platforms, the simple macro versions usually
164*5b1d7137Swdenk      outperform libc versions.
165*5b1d7137Swdenk   HAVE_MMAP                 (default: defined as 1)
166*5b1d7137Swdenk      Define to non-zero to optionally make malloc() use mmap() to
167*5b1d7137Swdenk      allocate very large blocks.
168*5b1d7137Swdenk   HAVE_MREMAP                 (default: defined as 0 unless Linux libc set)
169*5b1d7137Swdenk      Define to non-zero to optionally make realloc() use mremap() to
170*5b1d7137Swdenk      reallocate very large blocks.
171*5b1d7137Swdenk   malloc_getpagesize        (default: derived from system #includes)
172*5b1d7137Swdenk      Either a constant or routine call returning the system page size.
173*5b1d7137Swdenk   HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
174*5b1d7137Swdenk      Optionally define if you are on a system with a /usr/include/malloc.h
175*5b1d7137Swdenk      that declares struct mallinfo. It is not at all necessary to
176*5b1d7137Swdenk      define this even if you do, but will ensure consistency.
177*5b1d7137Swdenk   INTERNAL_SIZE_T           (default: size_t)
178*5b1d7137Swdenk      Define to a 32-bit type (probably `unsigned int') if you are on a
179*5b1d7137Swdenk      64-bit machine, yet do not want or need to allow malloc requests of
180*5b1d7137Swdenk      greater than 2^31 to be handled. This saves space, especially for
181*5b1d7137Swdenk      very small chunks.
182*5b1d7137Swdenk   INTERNAL_LINUX_C_LIB      (default: NOT defined)
183*5b1d7137Swdenk      Defined only when compiled as part of Linux libc.
184*5b1d7137Swdenk      Also note that there is some odd internal name-mangling via defines
185*5b1d7137Swdenk      (for example, internally, `malloc' is named `mALLOc') needed
186*5b1d7137Swdenk      when compiling in this case. These look funny but don't otherwise
187*5b1d7137Swdenk      affect anything.
188*5b1d7137Swdenk   WIN32                     (default: undefined)
189*5b1d7137Swdenk      Define this on MS win (95, nt) platforms to compile in sbrk emulation.
190*5b1d7137Swdenk   LACKS_UNISTD_H            (default: undefined if not WIN32)
191*5b1d7137Swdenk      Define this if your system does not have a <unistd.h>.
192*5b1d7137Swdenk   LACKS_SYS_PARAM_H         (default: undefined if not WIN32)
193*5b1d7137Swdenk      Define this if your system does not have a <sys/param.h>.
194*5b1d7137Swdenk   MORECORE                  (default: sbrk)
195*5b1d7137Swdenk      The name of the routine to call to obtain more memory from the system.
196*5b1d7137Swdenk   MORECORE_FAILURE          (default: -1)
197*5b1d7137Swdenk      The value returned upon failure of MORECORE.
198*5b1d7137Swdenk   MORECORE_CLEARS           (default 1)
199*5b1d7137Swdenk      True (1) if the routine mapped to MORECORE zeroes out memory (which
200*5b1d7137Swdenk      holds for sbrk).
201*5b1d7137Swdenk   DEFAULT_TRIM_THRESHOLD
202*5b1d7137Swdenk   DEFAULT_TOP_PAD
203*5b1d7137Swdenk   DEFAULT_MMAP_THRESHOLD
204*5b1d7137Swdenk   DEFAULT_MMAP_MAX
205*5b1d7137Swdenk      Default values of tunable parameters (described in detail below)
206*5b1d7137Swdenk      controlling interaction with host system routines (sbrk, mmap, etc).
207*5b1d7137Swdenk      These values may also be changed dynamically via mallopt(). The
208*5b1d7137Swdenk      preset defaults are those that give best performance for typical
209*5b1d7137Swdenk      programs/systems.
210*5b1d7137Swdenk   USE_DL_PREFIX             (default: undefined)
211*5b1d7137Swdenk      Prefix all public routines with the string 'dl'.  Useful to
212*5b1d7137Swdenk      quickly avoid procedure declaration conflicts and linker symbol
213*5b1d7137Swdenk      conflicts with existing memory allocation routines.
214*5b1d7137Swdenk 
215*5b1d7137Swdenk 
216*5b1d7137Swdenk */
217*5b1d7137Swdenk 
218*5b1d7137Swdenk 
219*5b1d7137Swdenk 
220*5b1d7137Swdenk 
221*5b1d7137Swdenk /* Preliminaries */
222*5b1d7137Swdenk 
223*5b1d7137Swdenk #ifndef __STD_C
224*5b1d7137Swdenk #ifdef __STDC__
225*5b1d7137Swdenk #define __STD_C     1
226*5b1d7137Swdenk #else
227*5b1d7137Swdenk #if __cplusplus
228*5b1d7137Swdenk #define __STD_C     1
229*5b1d7137Swdenk #else
230*5b1d7137Swdenk #define __STD_C     0
231*5b1d7137Swdenk #endif /*__cplusplus*/
232*5b1d7137Swdenk #endif /*__STDC__*/
233*5b1d7137Swdenk #endif /*__STD_C*/
234*5b1d7137Swdenk 
235*5b1d7137Swdenk #ifndef Void_t
236*5b1d7137Swdenk #if (__STD_C || defined(WIN32))
237*5b1d7137Swdenk #define Void_t      void
238*5b1d7137Swdenk #else
239*5b1d7137Swdenk #define Void_t      char
240*5b1d7137Swdenk #endif
241*5b1d7137Swdenk #endif /*Void_t*/
242*5b1d7137Swdenk 
243*5b1d7137Swdenk #if __STD_C
244*5b1d7137Swdenk #include <linux/stddef.h>	/* for size_t */
245*5b1d7137Swdenk #else
246*5b1d7137Swdenk #include <sys/types.h>
247*5b1d7137Swdenk #endif	/* __STD_C */
248*5b1d7137Swdenk 
249*5b1d7137Swdenk #ifdef __cplusplus
250*5b1d7137Swdenk extern "C" {
251*5b1d7137Swdenk #endif
252*5b1d7137Swdenk 
253*5b1d7137Swdenk #if 0	/* not for U-Boot */
254*5b1d7137Swdenk #include <stdio.h>	/* needed for malloc_stats */
255*5b1d7137Swdenk #endif
256*5b1d7137Swdenk 
257*5b1d7137Swdenk 
258*5b1d7137Swdenk /*
259*5b1d7137Swdenk   Compile-time options
260*5b1d7137Swdenk */
261*5b1d7137Swdenk 
262*5b1d7137Swdenk 
263*5b1d7137Swdenk /*
264*5b1d7137Swdenk     Debugging:
265*5b1d7137Swdenk 
266*5b1d7137Swdenk     Because freed chunks may be overwritten with link fields, this
267*5b1d7137Swdenk     malloc will often die when freed memory is overwritten by user
268*5b1d7137Swdenk     programs.  This can be very effective (albeit in an annoying way)
269*5b1d7137Swdenk     in helping track down dangling pointers.
270*5b1d7137Swdenk 
271*5b1d7137Swdenk     If you compile with -DDEBUG, a number of assertion checks are
272*5b1d7137Swdenk     enabled that will catch more memory errors. You probably won't be
273*5b1d7137Swdenk     able to make much sense of the actual assertion errors, but they
274*5b1d7137Swdenk     should help you locate incorrectly overwritten memory.  The
275*5b1d7137Swdenk     checking is fairly extensive, and will slow down execution
276*5b1d7137Swdenk     noticeably. Calling malloc_stats or mallinfo with DEBUG set will
277*5b1d7137Swdenk     attempt to check every non-mmapped allocated and free chunk in the
278*5b1d7137Swdenk     course of computing the summaries. (By nature, mmapped regions
279*5b1d7137Swdenk     cannot be checked very much automatically.)
280*5b1d7137Swdenk 
281*5b1d7137Swdenk     Setting DEBUG may also be helpful if you are trying to modify
282*5b1d7137Swdenk     this code. The assertions in the check routines spell out in more
283*5b1d7137Swdenk     detail the assumptions and invariants underlying the algorithms.
284*5b1d7137Swdenk 
285*5b1d7137Swdenk */
286*5b1d7137Swdenk 
287*5b1d7137Swdenk #ifdef DEBUG
288*5b1d7137Swdenk /* #include <assert.h> */
/* NOTE(review): assert is stubbed to a no-op in BOTH the DEBUG and the
 * non-DEBUG branch below, so the extensive DEBUG consistency checks in
 * this malloc compile but can never actually fire -- presumably because
 * there is no <assert.h> in this environment (the include is commented
 * out).  Confirm this is intentional before relying on -DDEBUG. */
289*5b1d7137Swdenk #define assert(x) ((void)0)
290*5b1d7137Swdenk #else
291*5b1d7137Swdenk #define assert(x) ((void)0)
292*5b1d7137Swdenk #endif
293*5b1d7137Swdenk 
294*5b1d7137Swdenk 
295*5b1d7137Swdenk /*
296*5b1d7137Swdenk   INTERNAL_SIZE_T is the word-size used for internal bookkeeping
297*5b1d7137Swdenk   of chunk sizes. On a 64-bit machine, you can reduce malloc
298*5b1d7137Swdenk   overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
299*5b1d7137Swdenk   at the expense of not being able to handle requests greater than
300*5b1d7137Swdenk   2^31. This limitation is hardly ever a concern; you are encouraged
301*5b1d7137Swdenk   to set this. However, the default version is the same as size_t.
302*5b1d7137Swdenk */
303*5b1d7137Swdenk 
304*5b1d7137Swdenk #ifndef INTERNAL_SIZE_T
305*5b1d7137Swdenk #define INTERNAL_SIZE_T size_t
306*5b1d7137Swdenk #endif
307*5b1d7137Swdenk 
308*5b1d7137Swdenk /*
309*5b1d7137Swdenk   REALLOC_ZERO_BYTES_FREES should be set if a call to
310*5b1d7137Swdenk   realloc with zero bytes should be the same as a call to free.
311*5b1d7137Swdenk   Some people think it should. Otherwise, since this malloc
312*5b1d7137Swdenk   returns a unique pointer for malloc(0), so does realloc(p, 0).
313*5b1d7137Swdenk */
314*5b1d7137Swdenk 
315*5b1d7137Swdenk 
316*5b1d7137Swdenk /*   #define REALLOC_ZERO_BYTES_FREES */
317*5b1d7137Swdenk 
318*5b1d7137Swdenk 
319*5b1d7137Swdenk /*
320*5b1d7137Swdenk   WIN32 causes an emulation of sbrk to be compiled in
321*5b1d7137Swdenk   mmap-based options are not currently supported in WIN32.
322*5b1d7137Swdenk */
323*5b1d7137Swdenk 
324*5b1d7137Swdenk /* #define WIN32 */
325*5b1d7137Swdenk #ifdef WIN32
326*5b1d7137Swdenk #define MORECORE wsbrk
327*5b1d7137Swdenk #define HAVE_MMAP 0
328*5b1d7137Swdenk 
329*5b1d7137Swdenk #define LACKS_UNISTD_H
330*5b1d7137Swdenk #define LACKS_SYS_PARAM_H
331*5b1d7137Swdenk 
332*5b1d7137Swdenk /*
333*5b1d7137Swdenk   Include 'windows.h' to get the necessary declarations for the
334*5b1d7137Swdenk   Microsoft Visual C++ data structures and routines used in the 'sbrk'
335*5b1d7137Swdenk   emulation.
336*5b1d7137Swdenk 
337*5b1d7137Swdenk   Define WIN32_LEAN_AND_MEAN so that only the essential Microsoft
338*5b1d7137Swdenk   Visual C++ header files are included.
339*5b1d7137Swdenk */
340*5b1d7137Swdenk #define WIN32_LEAN_AND_MEAN
341*5b1d7137Swdenk #include <windows.h>
342*5b1d7137Swdenk #endif
343*5b1d7137Swdenk 
344*5b1d7137Swdenk 
345*5b1d7137Swdenk /*
346*5b1d7137Swdenk   HAVE_MEMCPY should be defined if you are not otherwise using
347*5b1d7137Swdenk   ANSI STD C, but still have memcpy and memset in your C library
348*5b1d7137Swdenk   and want to use them in calloc and realloc. Otherwise simple
349*5b1d7137Swdenk   macro versions are defined here.
350*5b1d7137Swdenk 
351*5b1d7137Swdenk   USE_MEMCPY should be defined as 1 if you actually want to
352*5b1d7137Swdenk   have memset and memcpy called. People report that the macro
353*5b1d7137Swdenk   versions are often enough faster than libc versions on many
354*5b1d7137Swdenk   systems that it is better to use them.
355*5b1d7137Swdenk 
356*5b1d7137Swdenk */
357*5b1d7137Swdenk 
358*5b1d7137Swdenk #define HAVE_MEMCPY
359*5b1d7137Swdenk 
360*5b1d7137Swdenk #ifndef USE_MEMCPY
361*5b1d7137Swdenk #ifdef HAVE_MEMCPY
362*5b1d7137Swdenk #define USE_MEMCPY 1
363*5b1d7137Swdenk #else
364*5b1d7137Swdenk #define USE_MEMCPY 0
365*5b1d7137Swdenk #endif
366*5b1d7137Swdenk #endif
367*5b1d7137Swdenk 
368*5b1d7137Swdenk #if (__STD_C || defined(HAVE_MEMCPY))
369*5b1d7137Swdenk 
370*5b1d7137Swdenk #if __STD_C
371*5b1d7137Swdenk void* memset(void*, int, size_t);
372*5b1d7137Swdenk void* memcpy(void*, const void*, size_t);
373*5b1d7137Swdenk #else
374*5b1d7137Swdenk #ifdef WIN32
375*5b1d7137Swdenk // On Win32 platforms, 'memset()' and 'memcpy()' are already declared in
376*5b1d7137Swdenk // 'windows.h'
377*5b1d7137Swdenk #else
378*5b1d7137Swdenk Void_t* memset();
379*5b1d7137Swdenk Void_t* memcpy();
380*5b1d7137Swdenk #endif
381*5b1d7137Swdenk #endif
382*5b1d7137Swdenk #endif
383*5b1d7137Swdenk 
384*5b1d7137Swdenk #if USE_MEMCPY
385*5b1d7137Swdenk 
386*5b1d7137Swdenk /* The following macros are only invoked with (2n+1)-multiples of
387*5b1d7137Swdenk    INTERNAL_SIZE_T units, with a positive integer n. This is exploited
388*5b1d7137Swdenk    for fast inline execution when n is small. */
389*5b1d7137Swdenk 
/* Zero `nbytes` bytes starting at `charp`.
 * Per the note above, nbytes is always a (2n+1)-multiple of
 * sizeof(INTERNAL_SIZE_T) with n >= 1: the three unconditional trailing
 * stores handle the minimum 3 words, and each nested `if` adds one more
 * word pair for 5-, 7- and 9-word requests.  Requests larger than 9
 * words fall back to memset(). */
390*5b1d7137Swdenk #define MALLOC_ZERO(charp, nbytes)                                            \
391*5b1d7137Swdenk do {                                                                          \
392*5b1d7137Swdenk   INTERNAL_SIZE_T mzsz = (nbytes);                                            \
393*5b1d7137Swdenk   if(mzsz <= 9*sizeof(mzsz)) {                                                \
394*5b1d7137Swdenk     INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
395*5b1d7137Swdenk     if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
396*5b1d7137Swdenk                                      *mz++ = 0;                               \
397*5b1d7137Swdenk       if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
398*5b1d7137Swdenk                                      *mz++ = 0;                               \
399*5b1d7137Swdenk         if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
400*5b1d7137Swdenk                                      *mz++ = 0; }}}                           \
401*5b1d7137Swdenk                                      *mz++ = 0;                               \
402*5b1d7137Swdenk                                      *mz++ = 0;                               \
403*5b1d7137Swdenk                                      *mz   = 0;                               \
404*5b1d7137Swdenk   } else memset((charp), 0, mzsz);                                            \
405*5b1d7137Swdenk } while(0)
406*5b1d7137Swdenk 
/* Copy `nbytes` bytes from `src` to `dest`, word-by-word.
 * Same size contract as MALLOC_ZERO: nbytes is a (2n+1)-multiple of
 * sizeof(INTERNAL_SIZE_T), n >= 1.  The three unconditional trailing
 * copies cover the minimum 3 words; each nested `if` adds one more word
 * pair for 5-, 7- and 9-word sizes.  Larger sizes fall back to memcpy(). */
407*5b1d7137Swdenk #define MALLOC_COPY(dest,src,nbytes)                                          \
408*5b1d7137Swdenk do {                                                                          \
409*5b1d7137Swdenk   INTERNAL_SIZE_T mcsz = (nbytes);                                            \
410*5b1d7137Swdenk   if(mcsz <= 9*sizeof(mcsz)) {                                                \
411*5b1d7137Swdenk     INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
412*5b1d7137Swdenk     INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
413*5b1d7137Swdenk     if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
414*5b1d7137Swdenk                                      *mcdst++ = *mcsrc++;                     \
415*5b1d7137Swdenk       if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
416*5b1d7137Swdenk                                      *mcdst++ = *mcsrc++;                     \
417*5b1d7137Swdenk         if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
418*5b1d7137Swdenk                                      *mcdst++ = *mcsrc++; }}}                 \
419*5b1d7137Swdenk                                      *mcdst++ = *mcsrc++;                     \
420*5b1d7137Swdenk                                      *mcdst++ = *mcsrc++;                     \
421*5b1d7137Swdenk                                      *mcdst   = *mcsrc  ;                     \
422*5b1d7137Swdenk   } else memcpy(dest, src, mcsz);                                             \
423*5b1d7137Swdenk } while(0)
424*5b1d7137Swdenk 
425*5b1d7137Swdenk #else /* !USE_MEMCPY */
426*5b1d7137Swdenk 
427*5b1d7137Swdenk /* Use Duff's device for good zeroing/copying performance. */
428*5b1d7137Swdenk 
/* Duff's-device zeroing of `nbytes` bytes at `charp` (word granularity).
 * mctmp starts as the total word count; when it is >= 8 it is reduced to
 * the remainder mod 8 and mcn becomes the count of additional full
 * 8-word groups.  The switch jumps into the middle of the unrolled
 * for(;;) body at the remainder's label (case 0 == a full group of 8)
 * and the labels fall through; case 1 decides whether another full
 * group remains or the loop breaks. */
429*5b1d7137Swdenk #define MALLOC_ZERO(charp, nbytes)                                            \
430*5b1d7137Swdenk do {                                                                          \
431*5b1d7137Swdenk   INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
432*5b1d7137Swdenk   long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
433*5b1d7137Swdenk   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
434*5b1d7137Swdenk   switch (mctmp) {                                                            \
435*5b1d7137Swdenk     case 0: for(;;) { *mzp++ = 0;                                             \
436*5b1d7137Swdenk     case 7:           *mzp++ = 0;                                             \
437*5b1d7137Swdenk     case 6:           *mzp++ = 0;                                             \
438*5b1d7137Swdenk     case 5:           *mzp++ = 0;                                             \
439*5b1d7137Swdenk     case 4:           *mzp++ = 0;                                             \
440*5b1d7137Swdenk     case 3:           *mzp++ = 0;                                             \
441*5b1d7137Swdenk     case 2:           *mzp++ = 0;                                             \
442*5b1d7137Swdenk     case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
443*5b1d7137Swdenk   }                                                                           \
444*5b1d7137Swdenk } while(0)
445*5b1d7137Swdenk 
/* Duff's-device copy of `nbytes` bytes from `src` to `dest` at word
 * granularity; same remainder/group bookkeeping as MALLOC_ZERO above
 * (mctmp = remainder words, mcn = remaining full 8-word groups).
 * NOTE(review): the copy proceeds in ascending address order, so -- like
 * memcpy -- it presumably requires non-overlapping regions; confirm all
 * call sites satisfy this. */
446*5b1d7137Swdenk #define MALLOC_COPY(dest,src,nbytes)                                          \
447*5b1d7137Swdenk do {                                                                          \
448*5b1d7137Swdenk   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
449*5b1d7137Swdenk   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
450*5b1d7137Swdenk   long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
451*5b1d7137Swdenk   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
452*5b1d7137Swdenk   switch (mctmp) {                                                            \
453*5b1d7137Swdenk     case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
454*5b1d7137Swdenk     case 7:           *mcdst++ = *mcsrc++;                                    \
455*5b1d7137Swdenk     case 6:           *mcdst++ = *mcsrc++;                                    \
456*5b1d7137Swdenk     case 5:           *mcdst++ = *mcsrc++;                                    \
457*5b1d7137Swdenk     case 4:           *mcdst++ = *mcsrc++;                                    \
458*5b1d7137Swdenk     case 3:           *mcdst++ = *mcsrc++;                                    \
459*5b1d7137Swdenk     case 2:           *mcdst++ = *mcsrc++;                                    \
460*5b1d7137Swdenk     case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
461*5b1d7137Swdenk   }                                                                           \
462*5b1d7137Swdenk } while(0)
463*5b1d7137Swdenk 
464*5b1d7137Swdenk #endif
465*5b1d7137Swdenk 
466*5b1d7137Swdenk 
467*5b1d7137Swdenk /*
468*5b1d7137Swdenk   Define HAVE_MMAP to optionally make malloc() use mmap() to
469*5b1d7137Swdenk   allocate very large blocks.  These will be returned to the
470*5b1d7137Swdenk   operating system immediately after a free().
471*5b1d7137Swdenk */
472*5b1d7137Swdenk 
473*5b1d7137Swdenk /***
474*5b1d7137Swdenk #ifndef HAVE_MMAP
475*5b1d7137Swdenk #define HAVE_MMAP 1
476*5b1d7137Swdenk #endif
477*5b1d7137Swdenk ***/
478*5b1d7137Swdenk #undef	HAVE_MMAP	/* Not available for U-Boot */
479*5b1d7137Swdenk 
480*5b1d7137Swdenk /*
481*5b1d7137Swdenk   Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
482*5b1d7137Swdenk   large blocks.  This is currently only possible on Linux with
483*5b1d7137Swdenk   kernel versions newer than 1.3.77.
484*5b1d7137Swdenk */
485*5b1d7137Swdenk 
486*5b1d7137Swdenk /***
487*5b1d7137Swdenk #ifndef HAVE_MREMAP
488*5b1d7137Swdenk #ifdef INTERNAL_LINUX_C_LIB
489*5b1d7137Swdenk #define HAVE_MREMAP 1
490*5b1d7137Swdenk #else
491*5b1d7137Swdenk #define HAVE_MREMAP 0
492*5b1d7137Swdenk #endif
493*5b1d7137Swdenk #endif
494*5b1d7137Swdenk ***/
495*5b1d7137Swdenk #undef	HAVE_MREMAP	/* Not available for U-Boot */
496*5b1d7137Swdenk 
497*5b1d7137Swdenk #if HAVE_MMAP
498*5b1d7137Swdenk 
499*5b1d7137Swdenk #include <unistd.h>
500*5b1d7137Swdenk #include <fcntl.h>
501*5b1d7137Swdenk #include <sys/mman.h>
502*5b1d7137Swdenk 
503*5b1d7137Swdenk #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
504*5b1d7137Swdenk #define MAP_ANONYMOUS MAP_ANON
505*5b1d7137Swdenk #endif
506*5b1d7137Swdenk 
507*5b1d7137Swdenk #endif /* HAVE_MMAP */
508*5b1d7137Swdenk 
509*5b1d7137Swdenk /*
510*5b1d7137Swdenk   Access to system page size. To the extent possible, this malloc
511*5b1d7137Swdenk   manages memory from the system in page-size units.
512*5b1d7137Swdenk 
513*5b1d7137Swdenk   The following mechanics for getpagesize were adapted from
514*5b1d7137Swdenk   bsd/gnu getpagesize.h
515*5b1d7137Swdenk */
516*5b1d7137Swdenk 
517*5b1d7137Swdenk #define	LACKS_UNISTD_H	/* Shortcut for U-Boot */
518*5b1d7137Swdenk #define	malloc_getpagesize	4096
519*5b1d7137Swdenk 
520*5b1d7137Swdenk #ifndef LACKS_UNISTD_H
521*5b1d7137Swdenk #  include <unistd.h>
522*5b1d7137Swdenk #endif
523*5b1d7137Swdenk 
/*
 * Portable page-size detection ladder, adapted from bsd/gnu
 * getpagesize.h.  Dead code in U-Boot because malloc_getpagesize is
 * hard-wired above, but kept so this file stays close to upstream
 * dlmalloc.  Probe order: sysconf(_SC_PAGE_SIZE) -> getpagesize()
 * -> WIN32 guess -> <sys/param.h> constants (EXEC_PAGESIZE,
 * NBPG [* CLSIZE], NBPC, PAGESIZE) -> a plain guess of 4096.
 */
#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      ifdef WIN32
#        define malloc_getpagesize (4096) /* TBD: Use 'GetSystemInfo' instead */
#      else
#        ifndef LACKS_SYS_PARAM_H
#          include <sys/param.h>
#        endif
#        ifdef EXEC_PAGESIZE
#          define malloc_getpagesize EXEC_PAGESIZE
#        else
#          ifdef NBPG
#            ifndef CLSIZE
#              define malloc_getpagesize NBPG
#            else
#              define malloc_getpagesize (NBPG * CLSIZE)
#            endif
#          else
#            ifdef NBPC
#              define malloc_getpagesize NBPC
#            else
#              ifdef PAGESIZE
#                define malloc_getpagesize PAGESIZE
#              else
#                define malloc_getpagesize (4096) /* just guess */
#              endif
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif
568*5b1d7137Swdenk 
569*5b1d7137Swdenk 
570*5b1d7137Swdenk 
571*5b1d7137Swdenk /*
572*5b1d7137Swdenk 
573*5b1d7137Swdenk   This version of malloc supports the standard SVID/XPG mallinfo
574*5b1d7137Swdenk   routine that returns a struct containing the same kind of
575*5b1d7137Swdenk   information you can get from malloc_stats. It should work on
576*5b1d7137Swdenk   any SVID/XPG compliant system that has a /usr/include/malloc.h
577*5b1d7137Swdenk   defining struct mallinfo. (If you'd like to install such a thing
578*5b1d7137Swdenk   yourself, cut out the preliminary declarations as described above
579*5b1d7137Swdenk   and below and save them in a malloc.h file. But there's no
580*5b1d7137Swdenk   compelling reason to bother to do this.)
581*5b1d7137Swdenk 
582*5b1d7137Swdenk   The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
584*5b1d7137Swdenk   bunch of fields, most of which are not even meaningful in this
  version of malloc. Some of these fields are instead filled by
586*5b1d7137Swdenk   mallinfo() with other numbers that might possibly be of interest.
587*5b1d7137Swdenk 
588*5b1d7137Swdenk   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
589*5b1d7137Swdenk   /usr/include/malloc.h file that includes a declaration of struct
590*5b1d7137Swdenk   mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
591*5b1d7137Swdenk   version is declared below.  These must be precisely the same for
592*5b1d7137Swdenk   mallinfo() to work.
593*5b1d7137Swdenk 
594*5b1d7137Swdenk */
595*5b1d7137Swdenk 
/* #define HAVE_USR_INCLUDE_MALLOC_H */
/*
 * NOTE(review): the test below is '#if', not '#ifdef', so enabling the
 * system header requires defining the macro to a nonzero value, e.g.
 * '#define HAVE_USR_INCLUDE_MALLOC_H 1'.
 */

#if HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else

/* SVID2/XPG mallinfo structure */

/*
 * Statistics snapshot returned by-copy from mallinfo().  Must match the
 * system's struct mallinfo exactly when the system header is used
 * instead (see the commentary above).  Several fields are meaningless
 * in this malloc and are always zero.
 */
struct mallinfo {
  int arena;    /* total space allocated from system */
  int ordblks;  /* number of non-inuse chunks */
  int smblks;   /* unused -- always zero */
  int hblks;    /* number of mmapped regions */
  int hblkhd;   /* total space in mmapped regions */
  int usmblks;  /* unused -- always zero */
  int fsmblks;  /* unused -- always zero */
  int uordblks; /* total allocated space */
  int fordblks; /* total non-inuse space */
  int keepcost; /* top-most, releasable (via malloc_trim) space */
};

/* SVID2/XPG mallopt options */

/* Accepted for SVID/XPG source compatibility only; they do nothing here. */
#define M_MXFAST  1    /* UNUSED in this malloc */
#define M_NLBLKS  2    /* UNUSED in this malloc */
#define M_GRAIN   3    /* UNUSED in this malloc */
#define M_KEEP    4    /* UNUSED in this malloc */

#endif
625*5b1d7137Swdenk 
/* mallopt options that actually do something */

/*
 * Negative values, so they can never collide with the positive SVID/XPG
 * option numbers above.
 */
#define M_TRIM_THRESHOLD    -1
#define M_TOP_PAD           -2
#define M_MMAP_THRESHOLD    -3
#define M_MMAP_MAX          -4
632*5b1d7137Swdenk 
633*5b1d7137Swdenk 
634*5b1d7137Swdenk 
/* Default for M_TRIM_THRESHOLD; rationale in the long comment below. */
#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif
638*5b1d7137Swdenk 
639*5b1d7137Swdenk /*
640*5b1d7137Swdenk     M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
641*5b1d7137Swdenk       to keep before releasing via malloc_trim in free().
642*5b1d7137Swdenk 
643*5b1d7137Swdenk       Automatic trimming is mainly useful in long-lived programs.
644*5b1d7137Swdenk       Because trimming via sbrk can be slow on some systems, and can
645*5b1d7137Swdenk       sometimes be wasteful (in cases where programs immediately
646*5b1d7137Swdenk       afterward allocate more large chunks) the value should be high
647*5b1d7137Swdenk       enough so that your overall system performance would improve by
648*5b1d7137Swdenk       releasing.
649*5b1d7137Swdenk 
650*5b1d7137Swdenk       The trim threshold and the mmap control parameters (see below)
651*5b1d7137Swdenk       can be traded off with one another. Trimming and mmapping are
652*5b1d7137Swdenk       two different ways of releasing unused memory back to the
653*5b1d7137Swdenk       system. Between these two, it is often possible to keep
654*5b1d7137Swdenk       system-level demands of a long-lived program down to a bare
655*5b1d7137Swdenk       minimum. For example, in one test suite of sessions measuring
656*5b1d7137Swdenk       the XF86 X server on Linux, using a trim threshold of 128K and a
657*5b1d7137Swdenk       mmap threshold of 192K led to near-minimal long term resource
658*5b1d7137Swdenk       consumption.
659*5b1d7137Swdenk 
660*5b1d7137Swdenk       If you are using this malloc in a long-lived program, it should
661*5b1d7137Swdenk       pay to experiment with these values.  As a rough guide, you
662*5b1d7137Swdenk       might set to a value close to the average size of a process
663*5b1d7137Swdenk       (program) running on your system.  Releasing this much memory
664*5b1d7137Swdenk       would allow such a process to run in memory.  Generally, it's
      worth it to tune for trimming rather than memory mapping when a
666*5b1d7137Swdenk       program undergoes phases where several large chunks are
667*5b1d7137Swdenk       allocated and released in ways that can reuse each other's
668*5b1d7137Swdenk       storage, perhaps mixed with phases where there are no such
669*5b1d7137Swdenk       chunks at all.  And in well-behaved long-lived programs,
670*5b1d7137Swdenk       controlling release of large blocks via trimming versus mapping
671*5b1d7137Swdenk       is usually faster.
672*5b1d7137Swdenk 
673*5b1d7137Swdenk       However, in most programs, these parameters serve mainly as
674*5b1d7137Swdenk       protection against the system-level effects of carrying around
675*5b1d7137Swdenk       massive amounts of unneeded memory. Since frequent calls to
676*5b1d7137Swdenk       sbrk, mmap, and munmap otherwise degrade performance, the default
677*5b1d7137Swdenk       parameters are set to relatively high values that serve only as
678*5b1d7137Swdenk       safeguards.
679*5b1d7137Swdenk 
680*5b1d7137Swdenk       The default trim value is high enough to cause trimming only in
681*5b1d7137Swdenk       fairly extreme (by current memory consumption standards) cases.
682*5b1d7137Swdenk       It must be greater than page size to have any useful effect.  To
683*5b1d7137Swdenk       disable trimming completely, you can set to (unsigned long)(-1);
684*5b1d7137Swdenk 
685*5b1d7137Swdenk 
686*5b1d7137Swdenk */
687*5b1d7137Swdenk 
688*5b1d7137Swdenk 
/* Default for M_TOP_PAD; rationale in the long comment below. */
#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif
692*5b1d7137Swdenk 
693*5b1d7137Swdenk /*
694*5b1d7137Swdenk     M_TOP_PAD is the amount of extra `padding' space to allocate or
695*5b1d7137Swdenk       retain whenever sbrk is called. It is used in two ways internally:
696*5b1d7137Swdenk 
697*5b1d7137Swdenk       * When sbrk is called to extend the top of the arena to satisfy
698*5b1d7137Swdenk         a new malloc request, this much padding is added to the sbrk
699*5b1d7137Swdenk         request.
700*5b1d7137Swdenk 
701*5b1d7137Swdenk       * When malloc_trim is called automatically from free(),
702*5b1d7137Swdenk         it is used as the `pad' argument.
703*5b1d7137Swdenk 
704*5b1d7137Swdenk       In both cases, the actual amount of padding is rounded
705*5b1d7137Swdenk       so that the end of the arena is always a system page boundary.
706*5b1d7137Swdenk 
707*5b1d7137Swdenk       The main reason for using padding is to avoid calling sbrk so
708*5b1d7137Swdenk       often. Having even a small pad greatly reduces the likelihood
709*5b1d7137Swdenk       that nearly every malloc request during program start-up (or
710*5b1d7137Swdenk       after trimming) will invoke sbrk, which needlessly wastes
711*5b1d7137Swdenk       time.
712*5b1d7137Swdenk 
713*5b1d7137Swdenk       Automatic rounding-up to page-size units is normally sufficient
714*5b1d7137Swdenk       to avoid measurable overhead, so the default is 0.  However, in
715*5b1d7137Swdenk       systems where sbrk is relatively slow, it can pay to increase
716*5b1d7137Swdenk       this value, at the expense of carrying around more memory than
717*5b1d7137Swdenk       the program needs.
718*5b1d7137Swdenk 
719*5b1d7137Swdenk */
720*5b1d7137Swdenk 
721*5b1d7137Swdenk 
/* Default for M_MMAP_THRESHOLD; rationale in the long comment below. */
#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
#endif
725*5b1d7137Swdenk 
726*5b1d7137Swdenk /*
727*5b1d7137Swdenk 
728*5b1d7137Swdenk     M_MMAP_THRESHOLD is the request size threshold for using mmap()
729*5b1d7137Swdenk       to service a request. Requests of at least this size that cannot
730*5b1d7137Swdenk       be allocated using already-existing space will be serviced via mmap.
731*5b1d7137Swdenk       (If enough normal freed space already exists it is used instead.)
732*5b1d7137Swdenk 
733*5b1d7137Swdenk       Using mmap segregates relatively large chunks of memory so that
734*5b1d7137Swdenk       they can be individually obtained and released from the host
735*5b1d7137Swdenk       system. A request serviced through mmap is never reused by any
736*5b1d7137Swdenk       other request (at least not directly; the system may just so
737*5b1d7137Swdenk       happen to remap successive requests to the same locations).
738*5b1d7137Swdenk 
739*5b1d7137Swdenk       Segregating space in this way has the benefit that mmapped space
740*5b1d7137Swdenk       can ALWAYS be individually released back to the system, which
741*5b1d7137Swdenk       helps keep the system level memory demands of a long-lived
742*5b1d7137Swdenk       program low. Mapped memory can never become `locked' between
743*5b1d7137Swdenk       other chunks, as can happen with normally allocated chunks, which
      means that even trimming via malloc_trim would not release them.
745*5b1d7137Swdenk 
746*5b1d7137Swdenk       However, it has the disadvantages that:
747*5b1d7137Swdenk 
748*5b1d7137Swdenk          1. The space cannot be reclaimed, consolidated, and then
749*5b1d7137Swdenk             used to service later requests, as happens with normal chunks.
750*5b1d7137Swdenk          2. It can lead to more wastage because of mmap page alignment
751*5b1d7137Swdenk             requirements
752*5b1d7137Swdenk          3. It causes malloc performance to be more dependent on host
753*5b1d7137Swdenk             system memory management support routines which may vary in
754*5b1d7137Swdenk             implementation quality and may impose arbitrary
755*5b1d7137Swdenk             limitations. Generally, servicing a request via normal
756*5b1d7137Swdenk             malloc steps is faster than going through a system's mmap.
757*5b1d7137Swdenk 
758*5b1d7137Swdenk       All together, these considerations should lead you to use mmap
759*5b1d7137Swdenk       only for relatively large requests.
760*5b1d7137Swdenk 
761*5b1d7137Swdenk 
762*5b1d7137Swdenk */
763*5b1d7137Swdenk 
764*5b1d7137Swdenk 
765*5b1d7137Swdenk 
/*
 * Default for M_MMAP_MAX; forced to 0 (mmap never used) when the
 * platform provides no mmap.  Rationale in the long comment below.
 */
#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX       (64)
#else
#define DEFAULT_MMAP_MAX       (0)
#endif
#endif
773*5b1d7137Swdenk 
774*5b1d7137Swdenk /*
775*5b1d7137Swdenk     M_MMAP_MAX is the maximum number of requests to simultaneously
776*5b1d7137Swdenk       service using mmap. This parameter exists because:
777*5b1d7137Swdenk 
778*5b1d7137Swdenk          1. Some systems have a limited number of internal tables for
779*5b1d7137Swdenk             use by mmap.
780*5b1d7137Swdenk          2. In most systems, overreliance on mmap can degrade overall
781*5b1d7137Swdenk             performance.
782*5b1d7137Swdenk          3. If a program allocates many large regions, it is probably
783*5b1d7137Swdenk             better off using normal sbrk-based allocation routines that
784*5b1d7137Swdenk             can reclaim and reallocate normal heap memory. Using a
785*5b1d7137Swdenk             small value allows transition into this mode after the
786*5b1d7137Swdenk             first few allocations.
787*5b1d7137Swdenk 
788*5b1d7137Swdenk       Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
789*5b1d7137Swdenk       the default value is 0, and attempts to set it to non-zero values
790*5b1d7137Swdenk       in mallopt will fail.
791*5b1d7137Swdenk */
792*5b1d7137Swdenk 
793*5b1d7137Swdenk 
794*5b1d7137Swdenk 
795*5b1d7137Swdenk 
796*5b1d7137Swdenk /*
797*5b1d7137Swdenk     USE_DL_PREFIX will prefix all public routines with the string 'dl'.
798*5b1d7137Swdenk       Useful to quickly avoid procedure declaration conflicts and linker
799*5b1d7137Swdenk       symbol conflicts with existing memory allocation routines.
800*5b1d7137Swdenk 
801*5b1d7137Swdenk */
802*5b1d7137Swdenk 
803*5b1d7137Swdenk /* #define USE_DL_PREFIX */
804*5b1d7137Swdenk 
805*5b1d7137Swdenk 
806*5b1d7137Swdenk 
807*5b1d7137Swdenk 
808*5b1d7137Swdenk /*
809*5b1d7137Swdenk 
810*5b1d7137Swdenk   Special defines for linux libc
811*5b1d7137Swdenk 
812*5b1d7137Swdenk   Except when compiled using these special defines for Linux libc
813*5b1d7137Swdenk   using weak aliases, this malloc is NOT designed to work in
814*5b1d7137Swdenk   multithreaded applications.  No semaphores or other concurrency
815*5b1d7137Swdenk   control are provided to ensure that multiple malloc or free calls
  don't run at the same time, which could be disastrous. A single
817*5b1d7137Swdenk   semaphore could be used across malloc, realloc, and free (which is
818*5b1d7137Swdenk   essentially the effect of the linux weak alias approach). It would
819*5b1d7137Swdenk   be hard to obtain finer granularity.
820*5b1d7137Swdenk 
821*5b1d7137Swdenk */
822*5b1d7137Swdenk 
823*5b1d7137Swdenk 
#ifdef INTERNAL_LINUX_C_LIB

/*
 * Building inside the Linux C library: core memory is obtained through
 * the replaceable __morecore hook instead of calling sbrk() directly.
 */
#if __STD_C

Void_t * __default_morecore_init (ptrdiff_t);
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;

#else

/* K&R compilers: same hook, declared without prototypes. */
Void_t * __default_morecore_init ();
Void_t *(*__morecore)() = __default_morecore_init;

#endif

#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0
/* Nonzero MORECORE_CLEARS: memory obtained from MORECORE arrives zeroed. */
#define MORECORE_CLEARS 1

#else /* INTERNAL_LINUX_C_LIB */

/* Standalone build: obtain core memory directly via sbrk(). */
#if __STD_C
extern Void_t*     sbrk(ptrdiff_t);
#else
extern Void_t*     sbrk();
#endif

/*
 * MORECORE is the routine used to obtain more core memory;
 * MORECORE_FAILURE is the value it returns on failure.  All three are
 * overridable from the build.
 */
#ifndef MORECORE
#define MORECORE sbrk
#endif

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE -1
#endif

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

#endif /* INTERNAL_LINUX_C_LIB */
863*5b1d7137Swdenk 
/*
 * Map the internal mixed-case names (mALLOc, fREe, ...) onto the
 * externally visible symbols.  Three mutually exclusive schemes:
 *   1. Linux libc ELF build: implement as __libc_* and export the
 *      standard names as weak aliases so applications can override.
 *   2. USE_DL_PREFIX: export as dlmalloc, dlfree, ... to avoid clashes
 *      with an existing allocator.
 *   3. Default: export the plain standard names.
 */
#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)

#define cALLOc		__libc_calloc
#define fREe		__libc_free
#define mALLOc		__libc_malloc
#define mEMALIGn	__libc_memalign
#define rEALLOc		__libc_realloc
#define vALLOc		__libc_valloc
#define pvALLOc		__libc_pvalloc
#define mALLINFo	__libc_mallinfo
#define mALLOPt		__libc_mallopt

#pragma weak calloc = __libc_calloc
#pragma weak free = __libc_free
#pragma weak cfree = __libc_free
#pragma weak malloc = __libc_malloc
#pragma weak memalign = __libc_memalign
#pragma weak realloc = __libc_realloc
#pragma weak valloc = __libc_valloc
#pragma weak pvalloc = __libc_pvalloc
#pragma weak mallinfo = __libc_mallinfo
#pragma weak mallopt = __libc_mallopt

#else

#ifdef USE_DL_PREFIX
#define cALLOc		dlcalloc
#define fREe		dlfree
#define mALLOc		dlmalloc
#define mEMALIGn	dlmemalign
#define rEALLOc		dlrealloc
#define vALLOc		dlvalloc
#define pvALLOc		dlpvalloc
#define mALLINFo	dlmallinfo
#define mALLOPt		dlmallopt
#else /* USE_DL_PREFIX */
#define cALLOc		calloc
#define fREe		free
#define mALLOc		malloc
#define mEMALIGn	memalign
#define rEALLOc		realloc
#define vALLOc		valloc
#define pvALLOc		pvalloc
#define mALLINFo	mallinfo
#define mALLOPt		mallopt
#endif /* USE_DL_PREFIX */

#endif
912*5b1d7137Swdenk 
/* Public routines */

#if __STD_C

Void_t* mALLOc(size_t);			/* allocate >= n bytes, NULL on failure */
void    fREe(Void_t*);			/* release a chunk; no-op on NULL */
Void_t* rEALLOc(Void_t*, size_t);	/* resize, preserving contents */
Void_t* mEMALIGn(size_t, size_t);	/* allocate with requested alignment */
Void_t* vALLOc(size_t);			/* page-aligned allocation */
Void_t* pvALLOc(size_t);		/* page-aligned, presumably page-rounded size -- confirm in impl */
Void_t* cALLOc(size_t, size_t);		/* allocate n elements, zero-filled */
void    cfree(Void_t*);			/* alias of free (see weak aliases above) */
int     malloc_trim(size_t);		/* release unused top-most memory */
size_t  malloc_usable_size(Void_t*);	/* usable bytes in allocated chunk */
void    malloc_stats(void);		/* report allocation statistics */
int     mALLOPt(int, int);		/* tune the M_* parameters above */
struct mallinfo mALLINFo(void);		/* SVID/XPG statistics snapshot */
#else
/* K&R compilers: same routines, declared without prototypes. */
Void_t* mALLOc();
void    fREe();
Void_t* rEALLOc();
Void_t* mEMALIGn();
Void_t* vALLOc();
Void_t* pvALLOc();
Void_t* cALLOc();
void    cfree();
int     malloc_trim();
size_t  malloc_usable_size();
void    malloc_stats();
int     mALLOPt();
struct mallinfo mALLINFo();
#endif
945*5b1d7137Swdenk 
946*5b1d7137Swdenk 
#ifdef __cplusplus
/*
 * NOTE(review): the ';' after '}' is an empty declaration -- a linkage
 * block needs no trailing semicolon.  Harmless on common compilers,
 * but pedantic C++ modes may warn; candidate for removal.
 */
};  /* end of extern "C" */
#endif
950