#include <common.h>

#if defined(CONFIG_UNIT_TEST)
#define DEBUG
#endif

#include <malloc.h>
#include <asm/io.h>

#ifdef DEBUG
#if __STD_C
static void malloc_update_mallinfo (void);
void malloc_stats (void);
#else
static void malloc_update_mallinfo ();
void malloc_stats();
#endif
#endif	/* DEBUG */

DECLARE_GLOBAL_DATA_PTR;

/*
  Emulation of sbrk for WIN32
  All code within the ifdef WIN32 is untested by me.

  Thanks to Martin Fong and others for supplying this.
*/


#ifdef WIN32

#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)

struct GmListElement;
typedef struct GmListElement GmListElement;

struct GmListElement
{
	GmListElement* next;
	void* base;
};

static GmListElement* head = 0;
static unsigned int gNextAddress = 0;
static unsigned int gAddressBase = 0;
static unsigned int gAllocatedSize = 0;

static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	assert (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

void gcleanup ()
{
	BOOL rval;
	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
							gNextAddress - gAddressBase,
							MEM_DECOMMIT);
	assert (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		assert (rval);
		LocalFree (head);
		head = next;
	}
}

static
void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	if (size >= TOP_MEMORY) return NULL;

	while ((unsigned long)start_address + size < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
			return start_address;
		else
		{
			/* Requested region is not available so see if the */
			/* next region is available.  Set 'start_address' */
			/* to the next region and call 'VirtualQuery()' */
			/* again. */

			start_address = (char*)info.BaseAddress + info.RegionSize;

			/* Make sure we start looking for the next region */
			/* on the *next* 64K boundary.  Otherwise, even if */
			/* the new region is free according to */
			/* 'VirtualQuery()', the subsequent call to */
			/* 'VirtualAlloc()' (which follows the call to */
			/* this routine in 'wsbrk()') will round *down* */
			/* the requested address to a 64K boundary which */
			/* we already know is an address in the */
			/* unavailable region.  Thus, the subsequent call */
			/* to 'VirtualAlloc()' will fail and bring us back */
			/* here, causing us to go into an infinite loop. */

			start_address =
				(void *) AlignPage64K((unsigned long) start_address);
		}
	}
	return NULL;

}


void* wsbrk (long size)
{
	void* tmp;
	if (size > 0)
	{
		if (gAddressBase == 0)
		{
			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
			gNextAddress = gAddressBase =
				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
											MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
gAllocatedSize))
		{
			long new_size = max (NEXT_SIZE, AlignPage (size));
			void* new_address = (void*)(gAddressBase+gAllocatedSize);
			do
			{
				new_address = findRegion (new_address, new_size);

				if (!new_address)
					return (void*)-1;

				gAddressBase = gNextAddress =
					(unsigned int)VirtualAlloc (new_address, new_size,
												MEM_RESERVE, PAGE_NOACCESS);
				/* repeat in case of race condition */
				/* The region that we found has been snagged */
				/* by another thread */
			}
			while (gAddressBase == 0);

			assert (new_address == (void*)gAddressBase);

			gAllocatedSize = new_size;

			if (!makeGmListElement ((void*)gAddressBase))
				return (void*)-1;
		}
		if ((size + gNextAddress) > AlignPage (gNextAddress))
		{
			void* res;
			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
								(size + gNextAddress -
								 AlignPage (gNextAddress)),
								MEM_COMMIT, PAGE_READWRITE);
			if (!res)
				return (void*)-1;
		}
		tmp = (void*)gNextAddress;
		gNextAddress = (unsigned int)tmp + size;
		return tmp;
	}
	else if (size < 0)
	{
		unsigned int alignedGoal = AlignPage (gNextAddress + size);
		/* Trim by releasing the virtual memory */
		if (alignedGoal >= gAddressBase)
		{
			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
						 MEM_DECOMMIT);
			gNextAddress = gNextAddress + size;
			return (void*)gNextAddress;
		}
		else
		{
			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
						 MEM_DECOMMIT);
			gNextAddress = gAddressBase;
			return (void*)-1;
		}
	}
	else
	{
		return (void*)gNextAddress;
	}
}

#endif



/*
  Type declarations
*/


struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
} __attribute__((__may_alias__)) ;

typedef struct malloc_chunk* mchunkptr;

/*

   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of previous chunk, if allocated            | |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             User data starts here...                          .
	    .                                                               .
	    .             (malloc_usable_space() bytes)                     .
	    .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of chunk                                     |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of previous chunk                            |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Forward pointer to next chunk in list             |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Back pointer to previous chunk in list            |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Unused space (may be 0 bytes long)                .
	    .                                                               .
	    .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc).

    The two exceptions to all this are

     1. The special chunk `top', which doesn't bother using the
	trailing size field since there is no
	next contiguous chunk that would have to index off it. (After
	initialization, `top' is forced to always exist.  If it would
	become less than MINSIZE bytes long, it is replenished via
	malloc_extend_top.)

     2. Chunks allocated via mmap, which have the second-lowest-order
	bit (IS_MMAPPED) set in their size fields.  Because they are
	never merged or traversed from any other chunk, they have no
	foot size or inuse information.

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
       order, which tends to give each chunk an equal opportunity to be
       consolidated with adjacent freed chunks, resulting in larger free
       chunks and less fragmentation.

    * `top': The top-most available chunk (i.e., the one bordering the
       end of available memory) is treated specially. It is never
       included in any bin, is used only if no other chunk is
       available, and is released back to the system if it is very
       large (see M_TRIM_THRESHOLD).

    * `last_remainder': A bin holding only the remainder of the
       most recently split (non-top) chunk. This bin is checked
       before other non-fitting chunks, so as to provide better
       locality for runs of sequentially allocated chunks.

    *  Implicitly, through the host system's memory mapping tables.
       If supported, requests greater than a threshold are usually
       serviced via calls to mmap, and then later released via munmap.

*/
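
/*
 * Illustrative example (not part of the original commentary): with the
 * layout above, a free chunk of 32 bytes starting at address A carries its
 * size both in A->size and, as the "foot", in the prev_size word of the
 * chunk at A + 32.  free() can therefore merge neighbours in O(1): if
 * prev_inuse(A) is clear, the previous chunk starts at A - A->prev_size;
 * if the chunk at A + 32 is itself free, its own size says where the chunk
 * after that begins.  No scan over the heap is ever needed.
 */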

/*  sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                (sizeof(struct malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
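
/*
 * Worked example, assuming a 32-bit INTERNAL_SIZE_T (SIZE_SZ == 4,
 * MALLOC_ALIGN_MASK == 7, MINSIZE == 16); other configurations scale
 * accordingly:
 *
 *   request2size(10) == 16   -- 10 + 4 + 7 = 21 is below MINSIZE + 7, so clamp
 *   request2size(20) == 24   -- (20 + 4 + 7) & ~7
 *   request2size(25) == 32   -- (25 + 4 + 7) & ~7
 *
 * i.e. every request gains SIZE_SZ bytes of header overhead and is rounded
 * up to the next multiple of MALLOC_ALIGNMENT, never below MINSIZE.
 */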

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)




/*
  Physical chunk operations
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)


/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))


/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
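
/*
 * Example of the physical traversal macros (addresses are hypothetical):
 * if p sits at 0x1000 and p->size == 0x19 (24 bytes with PREV_INUSE set),
 * then next_chunk(p) == (mchunkptr)0x1018 and chunk_at_offset(p, 8) ==
 * (mchunkptr)0x1008.  prev_chunk(p) is only meaningful when the previous
 * chunk is free, i.e. when PREV_INUSE is clear and p->prev_size is valid.
 */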




/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))




/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
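
/*
 * Sketch of how these macros cooperate when a chunk becomes free (shown
 * only as an illustration of the pattern, not copied verbatim from the
 * allocator's free path):
 *
 *   set_head(p, sz | PREV_INUSE);        -- record our size; previous chunk
 *                                           is still in use
 *   set_foot(p, sz);                     -- copy sz into the next chunk's
 *                                           prev_size, i.e. our "foot"
 *   clear_inuse_bit_at_offset(p, sz);    -- tell the next chunk that we are
 *                                           now free
 *
 * The "foot" is thus physically the prev_size field of the next chunk.
 */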





/*
   Bins

    The bins, `av_' are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.

    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The special chunks `top' and `last_remainder' get their own bins,
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

*/

#define NAV             128   /* number of bins */

typedef struct malloc_chunk* mbinptr;

/* access macros */

#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
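
/*
 * How the access macros address av_ (assuming sizeof(mbinptr) == SIZE_SZ,
 * as on the usual 32- and 64-bit configurations): bin i owns the pair
 * av_[2*i + 2] and av_[2*i + 3].  bin_at(i) backs the pointer up by
 * 2*SIZE_SZ so that the resulting fake malloc_chunk has its fd field on
 * av_[2*i + 2] and its bk field on av_[2*i + 3]; its prev_size/size
 * "fields" overlap the previous bin's pair and are never touched.
 */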

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

#define top            (av_[2])          /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */


/*
   Because top initially points to its own bin with initial
   zero size, thus forcing extension on the first malloc request,
   we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to check for it in malloc_extend_top.
*/

#define initial_top    ((mchunkptr)(bin_at(0)))

/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

static mbinptr av_[NAV * 2 + 2] = {
 NULL, NULL,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};

#ifdef CONFIG_NEEDS_MANUAL_RELOC
static void malloc_bin_reloc(void)
{
	mbinptr *p = &av_[2];
	size_t i;

	for (i = 2; i < ARRAY_SIZE(av_); ++i, ++p)
		*p = (mbinptr)((ulong)*p + gd->reloc_off);
}
#else
static inline void malloc_bin_reloc(void) {}
#endif

ulong mem_malloc_start = 0;
ulong mem_malloc_end = 0;
ulong mem_malloc_brk = 0;

void *sbrk(ptrdiff_t increment)
{
	ulong old = mem_malloc_brk;
	ulong new = old + increment;

	/*
	 * if we are giving memory back make sure we clear it out since
	 * we set MORECORE_CLEARS to 1
	 */
	if (increment < 0)
		memset((void *)new, 0, -increment);

	if ((new < mem_malloc_start) || (new > mem_malloc_end))
		return (void *)MORECORE_FAILURE;

	mem_malloc_brk = new;

	return (void *)old;
}

void mem_malloc_init(ulong start, ulong size)
{
	mem_malloc_start = start;
	mem_malloc_end = start + size;
	mem_malloc_brk = start;

	debug("using memory %#lx-%#lx for malloc()\n", mem_malloc_start,
	      mem_malloc_end);
#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
	memset((void *)mem_malloc_start, 0x0, size);
#endif
	malloc_bin_reloc();
}
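
/*
 * Illustrative usage sketch (addresses and sizes are made up, not taken
 * from any board configuration): the allocator only needs one contiguous
 * region handed to it once, after which malloc()/free() draw from it via
 * the sbrk() emulation above:
 *
 *   mem_malloc_init(0x84000000, 0x00100000);   // 1 MiB heap region
 *   void *p = malloc(64);                      // served from that region
 *
 * In U-Boot this call is normally made from the generic init sequence
 * (e.g. during board_init_r()), not by individual drivers.
 */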

/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                          \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
					  126)
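
/*
 * A few sample mappings (sz is an already-padded chunk size in bytes):
 *
 *   bin_index(24)    ==  3    -- small bin, 8-byte spacing
 *   bin_index(512)   == 64    -- first of the 64-byte-spaced bins
 *   bin_index(4096)  == 99    -- 512-byte spacing
 *   bin_index(40960) == 120   -- 4096-byte spacing
 *
 * so neighbouring sizes share a bin once sizes get large, which is the
 * "slop" mentioned above.
 */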
/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN         63
#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8

#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)



/*
    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching.  `binblocks' is a
    one-word bitvector recording whether groups of BINBLOCKWIDTH bins
    have any (possibly) non-empty bins, so they can be skipped over
    all at once during traversals. The bits are NOT always
    cleared as soon as all bins in a block are empty, but instead only
    when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks_r     ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
#define binblocks_w     (av_[1])

/* bin<->block macros */

#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
#define clear_binblock(ii)  (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))
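
/*
 * Example: with BINBLOCKWIDTH == 4, bins 0..3 map to bit 0 of binblocks,
 * bins 4..7 to bit 1, and so on; placing a chunk into bin 64 therefore
 * sets bit 16 (idx2binblock(64) == 1 << 16).  The malloc scan can skip a
 * whole group of four bins whenever its bit is clear.
 */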




/*  Other static bookkeeping data */

/* variables holding tunable values */

static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
static unsigned long top_pad          = DEFAULT_TOP_PAD;
static unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
static unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;

/* The first value returned from sbrk */
static char* sbrk_base = (char*)(-1);

/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem = 0;

/* The maximum via either sbrk or mmap */
static unsigned long max_total_mem = 0;

/* internal working copy of mallinfo */
static struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/* The total memory obtained from system via sbrk */
#define sbrked_mem  (current_mallinfo.arena)

/* Tracking mmaps */

#ifdef DEBUG
static unsigned int n_mmaps = 0;
#endif	/* DEBUG */
static unsigned long mmapped_mem = 0;
#if HAVE_MMAP
static unsigned int max_n_mmaps = 0;
static unsigned long max_mmapped_mem = 0;
#endif



/*
  Debugging support
*/

#ifdef DEBUG


/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top)
    assert((char*)p + sz <= (char*)top);
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);

}


#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

}

#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));

}


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif



/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}


/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}                                                                             \

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk =  P;                               \
  P->fd = P->bk = last_remainder;                                             \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)





/* Routines dealing with mmap(). */

#if HAVE_MMAP

#if __STD_C
static mchunkptr mmap_chunk(size_t size)
#else
static mchunkptr mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

#ifndef MAP_ANONYMOUS
  static int fd = -1;
#endif

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* !MAP_ANONYMOUS */
  if (fd < 0)
  {
    fd = open("/dev/zero", O_RDWR);
    if(fd < 0) return 0;
  }
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if(p == (mchunkptr)-1) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned. */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#if __STD_C
static void munmap_chunk(mchunkptr p)
#else
static void munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

#if HAVE_MREMAP

#if __STD_C
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
#else
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);

  if (cp == (char *)-1) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mmapped_mem -= size + offset;
  mmapped_mem += new_size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun /*
1058*4882a593Smuzhiyun   Extend the top-most chunk by obtaining memory from system.
1059*4882a593Smuzhiyun   Main interface to sbrk (but see also malloc_trim).
1060*4882a593Smuzhiyun */
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun #if __STD_C
malloc_extend_top(INTERNAL_SIZE_T nb)1063*4882a593Smuzhiyun static void malloc_extend_top(INTERNAL_SIZE_T nb)
1064*4882a593Smuzhiyun #else
1065*4882a593Smuzhiyun static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
1066*4882a593Smuzhiyun #endif
1067*4882a593Smuzhiyun {
1068*4882a593Smuzhiyun   char*     brk;                  /* return value from sbrk */
1069*4882a593Smuzhiyun   INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
1070*4882a593Smuzhiyun   INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
1071*4882a593Smuzhiyun   char*     new_brk;              /* return of 2nd sbrk call */
1072*4882a593Smuzhiyun   INTERNAL_SIZE_T top_size;       /* new size of top chunk */
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun   mchunkptr old_top     = top;  /* Record state of old top */
1075*4882a593Smuzhiyun   INTERNAL_SIZE_T old_top_size = chunksize(old_top);
1076*4882a593Smuzhiyun   char*     old_end      = (char*)(chunk_at_offset(old_top, old_top_size));
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun   /* Pad request with top_pad plus minimal overhead */
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun   INTERNAL_SIZE_T    sbrk_size     = nb + top_pad + MINSIZE;
1081*4882a593Smuzhiyun   unsigned long pagesz    = malloc_getpagesize;
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun   /* If not the first time through, round to preserve page boundary */
1084*4882a593Smuzhiyun   /* Otherwise, we need to correct to a page size below anyway. */
1085*4882a593Smuzhiyun   /* (We also correct below if an intervening foreign sbrk call occurred.) */
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun   if (sbrk_base != (char*)(-1))
1088*4882a593Smuzhiyun     sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun   brk = (char*)(MORECORE (sbrk_size));
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun   /* Fail if sbrk failed or if a foreign sbrk call killed our space */
1093*4882a593Smuzhiyun   if (brk == (char*)(MORECORE_FAILURE) ||
1094*4882a593Smuzhiyun       (brk < old_end && old_top != initial_top))
1095*4882a593Smuzhiyun     return;
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun   sbrked_mem += sbrk_size;
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun   if (brk == old_end) /* can just add bytes to current top */
1100*4882a593Smuzhiyun   {
1101*4882a593Smuzhiyun     top_size = sbrk_size + old_top_size;
1102*4882a593Smuzhiyun     set_head(top, top_size | PREV_INUSE);
1103*4882a593Smuzhiyun   }
1104*4882a593Smuzhiyun   else
1105*4882a593Smuzhiyun   {
1106*4882a593Smuzhiyun     if (sbrk_base == (char*)(-1))  /* First time through. Record base */
1107*4882a593Smuzhiyun       sbrk_base = brk;
1108*4882a593Smuzhiyun     else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
1109*4882a593Smuzhiyun       sbrked_mem += brk - (char*)old_end;
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun     /* Guarantee alignment of first new chunk made from this space */
1112*4882a593Smuzhiyun     front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
1113*4882a593Smuzhiyun     if (front_misalign > 0)
1114*4882a593Smuzhiyun     {
1115*4882a593Smuzhiyun       correction = (MALLOC_ALIGNMENT) - front_misalign;
1116*4882a593Smuzhiyun       brk += correction;
1117*4882a593Smuzhiyun     }
1118*4882a593Smuzhiyun     else
1119*4882a593Smuzhiyun       correction = 0;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun     /* Guarantee the next brk will be at a page boundary */
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun     correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
1124*4882a593Smuzhiyun 		   ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun     /* Allocate correction */
1127*4882a593Smuzhiyun     new_brk = (char*)(MORECORE (correction));
1128*4882a593Smuzhiyun     if (new_brk == (char*)(MORECORE_FAILURE)) return;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun     sbrked_mem += correction;
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun     top = (mchunkptr)brk;
1133*4882a593Smuzhiyun     top_size = new_brk - brk + correction;
1134*4882a593Smuzhiyun     set_head(top, top_size | PREV_INUSE);
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun     if (old_top != initial_top)
1137*4882a593Smuzhiyun     {
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun       /* There must have been an intervening foreign sbrk call. */
1140*4882a593Smuzhiyun       /* A double fencepost is necessary to prevent consolidation */
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun       /* If not enough space to do this, then user did something very wrong */
1143*4882a593Smuzhiyun       if (old_top_size < MINSIZE)
1144*4882a593Smuzhiyun       {
1145*4882a593Smuzhiyun 	set_head(top, PREV_INUSE); /* will force null return from malloc */
1146*4882a593Smuzhiyun 	return;
1147*4882a593Smuzhiyun       }
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun       /* Also keep size a multiple of MALLOC_ALIGNMENT */
1150*4882a593Smuzhiyun       old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
1151*4882a593Smuzhiyun       set_head_size(old_top, old_top_size);
1152*4882a593Smuzhiyun       chunk_at_offset(old_top, old_top_size          )->size =
1153*4882a593Smuzhiyun 	SIZE_SZ|PREV_INUSE;
1154*4882a593Smuzhiyun       chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
1155*4882a593Smuzhiyun 	SIZE_SZ|PREV_INUSE;
1156*4882a593Smuzhiyun       /* If possible, release the rest. */
1157*4882a593Smuzhiyun       if (old_top_size >= MINSIZE)
1158*4882a593Smuzhiyun 	fREe(chunk2mem(old_top));
1159*4882a593Smuzhiyun     }
1160*4882a593Smuzhiyun   }
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun   if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
1163*4882a593Smuzhiyun     max_sbrked_mem = sbrked_mem;
1164*4882a593Smuzhiyun   if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1165*4882a593Smuzhiyun     max_total_mem = mmapped_mem + sbrked_mem;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun   /* We always land on a page boundary */
1168*4882a593Smuzhiyun   assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
1169*4882a593Smuzhiyun }
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun /* Main public routines */
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun /*
1178*4882a593Smuzhiyun   Malloc Algorithm:
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun     The requested size is first converted into a usable form, `nb'.
1181*4882a593Smuzhiyun     This currently means to add 4 bytes overhead plus possibly more to
1182*4882a593Smuzhiyun     obtain 8-byte alignment and/or to obtain a size of at least
1183*4882a593Smuzhiyun     MINSIZE (currently 16 bytes), the smallest allocatable size.
1184*4882a593Smuzhiyun     (All fits are considered `exact' if they are within MINSIZE bytes.)
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun     From there, the first of the following steps that succeeds is taken:
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun       1. The bin corresponding to the request size is scanned, and if
1189*4882a593Smuzhiyun 	 a chunk of exactly the right size is found, it is taken.
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun       2. The most recently remaindered chunk is used if it is big
1192*4882a593Smuzhiyun 	 enough.  This is a form of (roving) first fit, used only in
1193*4882a593Smuzhiyun 	 the absence of exact fits. Runs of consecutive requests use
1194*4882a593Smuzhiyun 	 the remainder of the chunk used for the previous such request
1195*4882a593Smuzhiyun 	 whenever possible. This limited use of a first-fit style
1196*4882a593Smuzhiyun 	 allocation strategy tends to give contiguous chunks
1197*4882a593Smuzhiyun 	 coextensive lifetimes, which improves locality and can reduce
1198*4882a593Smuzhiyun 	 fragmentation in the long run.
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun       3. Other bins are scanned in increasing size order, using a
1201*4882a593Smuzhiyun 	 chunk big enough to fulfill the request, and splitting off
1202*4882a593Smuzhiyun 	 any remainder.  This search is strictly by best-fit; i.e.,
1203*4882a593Smuzhiyun 	 the smallest (with ties going to approximately the least
1204*4882a593Smuzhiyun 	 recently used) chunk that fits is selected.
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun       4. If large enough, the chunk bordering the end of memory
1207*4882a593Smuzhiyun 	 (`top') is split off. (This use of `top' is in accord with
1208*4882a593Smuzhiyun 	 the best-fit search rule.  In effect, `top' is treated as
1209*4882a593Smuzhiyun 	 larger (and thus less well fitting) than any other available
1210*4882a593Smuzhiyun 	 chunk since it can be extended to be as large as necessary
1211*4882a593Smuzhiyun 	 (up to system limitations).
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun       5. If the request size meets the mmap threshold and the
1214*4882a593Smuzhiyun 	 system supports mmap, and there are few enough currently
1215*4882a593Smuzhiyun 	 allocated mmapped regions, and a call to mmap succeeds,
1216*4882a593Smuzhiyun 	 the request is allocated via direct memory mapping.
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun       6. Otherwise, the top of memory is extended by
1219*4882a593Smuzhiyun 	 obtaining more space from the system (normally using sbrk,
1220*4882a593Smuzhiyun 	 but definable to anything else via the MORECORE macro).
1221*4882a593Smuzhiyun 	 Memory is gathered from the system (in system page-sized
1222*4882a593Smuzhiyun 	 units) in a way that allows chunks obtained across different
1223*4882a593Smuzhiyun 	 sbrk calls to be consolidated, but does not require
1224*4882a593Smuzhiyun 	 contiguous memory. Thus, it should be safe to intersperse
1225*4882a593Smuzhiyun 	 mallocs with other sbrk calls.
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun       All allocations are made from the `lowest' part of any found
1229*4882a593Smuzhiyun       chunk. (The implementation invariant is that prev_inuse is
1230*4882a593Smuzhiyun       always true of any allocated chunk; i.e., that each allocated
1231*4882a593Smuzhiyun       chunk borders either a previously allocated and still in-use chunk,
1232*4882a593Smuzhiyun       or the base of its memory arena.)
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun */
1235*4882a593Smuzhiyun 
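/*
  Worked example (illustrative only, assuming the defaults described
  above: SIZE_SZ == 4, MALLOC_ALIGNMENT == 8, MINSIZE == 16):

      request2size(1)  -> 1  + 4 = 5,  below MINSIZE              -> nb = 16
      request2size(13) -> 13 + 4 = 17, rounded up to a multiple 8 -> nb = 24
      request2size(24) -> 24 + 4 = 28, rounded up to a multiple 8 -> nb = 32

  so every chunk handed out is at least MINSIZE bytes and a multiple of
  the alignment, which is what the bin indexing below relies on.
*/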
1236*4882a593Smuzhiyun #if __STD_C
1237*4882a593Smuzhiyun Void_t* mALLOc(size_t bytes)
1238*4882a593Smuzhiyun #else
1239*4882a593Smuzhiyun Void_t* mALLOc(bytes) size_t bytes;
1240*4882a593Smuzhiyun #endif
1241*4882a593Smuzhiyun {
1242*4882a593Smuzhiyun   mchunkptr victim;                  /* inspected/selected chunk */
1243*4882a593Smuzhiyun   INTERNAL_SIZE_T victim_size;       /* its size */
1244*4882a593Smuzhiyun   int       idx;                     /* index for bin traversal */
1245*4882a593Smuzhiyun   mbinptr   bin;                     /* associated bin */
1246*4882a593Smuzhiyun   mchunkptr remainder;               /* remainder from a split */
1247*4882a593Smuzhiyun   long      remainder_size;          /* its size */
1248*4882a593Smuzhiyun   int       remainder_index;         /* its bin index */
1249*4882a593Smuzhiyun   unsigned long block;               /* block traverser bit */
1250*4882a593Smuzhiyun   int       startidx;                /* first bin of a traversed block */
1251*4882a593Smuzhiyun   mchunkptr fwd;                     /* misc temp for linking */
1252*4882a593Smuzhiyun   mchunkptr bck;                     /* misc temp for linking */
1253*4882a593Smuzhiyun   mbinptr q;                         /* misc temp */
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun   INTERNAL_SIZE_T nb;
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun #if CONFIG_VAL(SYS_MALLOC_F_LEN)
1258*4882a593Smuzhiyun 	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
1259*4882a593Smuzhiyun 		return malloc_simple(bytes);
1260*4882a593Smuzhiyun #endif
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun   /* check if mem_malloc_init() was run */
1263*4882a593Smuzhiyun   if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) {
1264*4882a593Smuzhiyun     /* not initialized yet */
1265*4882a593Smuzhiyun     return NULL;
1266*4882a593Smuzhiyun   }
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun   if ((long)bytes < 0) return NULL;
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun   nb = request2size(bytes);  /* padded request size; */
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun   /* Check for exact match in a bin */
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun   if (is_small_request(nb))  /* Faster version for small requests */
1275*4882a593Smuzhiyun   {
1276*4882a593Smuzhiyun     idx = smallbin_index(nb);
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun     /* No traversal or size check necessary for small bins.  */
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun     q = bin_at(idx);
1281*4882a593Smuzhiyun     victim = last(q);
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun     /* Also scan the next one, since it would have a remainder < MINSIZE */
1284*4882a593Smuzhiyun     if (victim == q)
1285*4882a593Smuzhiyun     {
1286*4882a593Smuzhiyun       q = next_bin(q);
1287*4882a593Smuzhiyun       victim = last(q);
1288*4882a593Smuzhiyun     }
1289*4882a593Smuzhiyun     if (victim != q)
1290*4882a593Smuzhiyun     {
1291*4882a593Smuzhiyun       victim_size = chunksize(victim);
1292*4882a593Smuzhiyun       unlink(victim, bck, fwd);
1293*4882a593Smuzhiyun       set_inuse_bit_at_offset(victim, victim_size);
1294*4882a593Smuzhiyun       check_malloced_chunk(victim, nb);
1295*4882a593Smuzhiyun       return chunk2mem(victim);
1296*4882a593Smuzhiyun     }
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun     idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun   }
1301*4882a593Smuzhiyun   else
1302*4882a593Smuzhiyun   {
1303*4882a593Smuzhiyun     idx = bin_index(nb);
1304*4882a593Smuzhiyun     bin = bin_at(idx);
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun     for (victim = last(bin); victim != bin; victim = victim->bk)
1307*4882a593Smuzhiyun     {
1308*4882a593Smuzhiyun       victim_size = chunksize(victim);
1309*4882a593Smuzhiyun       remainder_size = victim_size - nb;
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun       if (remainder_size >= (long)MINSIZE) /* too big */
1312*4882a593Smuzhiyun       {
1313*4882a593Smuzhiyun 	--idx; /* adjust to rescan below after checking last remainder */
1314*4882a593Smuzhiyun 	break;
1315*4882a593Smuzhiyun       }
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun       else if (remainder_size >= 0) /* exact fit */
1318*4882a593Smuzhiyun       {
1319*4882a593Smuzhiyun 	unlink(victim, bck, fwd);
1320*4882a593Smuzhiyun 	set_inuse_bit_at_offset(victim, victim_size);
1321*4882a593Smuzhiyun 	check_malloced_chunk(victim, nb);
1322*4882a593Smuzhiyun 	return chunk2mem(victim);
1323*4882a593Smuzhiyun       }
1324*4882a593Smuzhiyun     }
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun     ++idx;
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun   }
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun   /* Try to use the last split-off remainder */
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun   if ( (victim = last_remainder->fd) != last_remainder)
1333*4882a593Smuzhiyun   {
1334*4882a593Smuzhiyun     victim_size = chunksize(victim);
1335*4882a593Smuzhiyun     remainder_size = victim_size - nb;
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun     if (remainder_size >= (long)MINSIZE) /* re-split */
1338*4882a593Smuzhiyun     {
1339*4882a593Smuzhiyun       remainder = chunk_at_offset(victim, nb);
1340*4882a593Smuzhiyun       set_head(victim, nb | PREV_INUSE);
1341*4882a593Smuzhiyun       link_last_remainder(remainder);
1342*4882a593Smuzhiyun       set_head(remainder, remainder_size | PREV_INUSE);
1343*4882a593Smuzhiyun       set_foot(remainder, remainder_size);
1344*4882a593Smuzhiyun       check_malloced_chunk(victim, nb);
1345*4882a593Smuzhiyun       return chunk2mem(victim);
1346*4882a593Smuzhiyun     }
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun     clear_last_remainder;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun     if (remainder_size >= 0)  /* exhaust */
1351*4882a593Smuzhiyun     {
1352*4882a593Smuzhiyun       set_inuse_bit_at_offset(victim, victim_size);
1353*4882a593Smuzhiyun       check_malloced_chunk(victim, nb);
1354*4882a593Smuzhiyun       return chunk2mem(victim);
1355*4882a593Smuzhiyun     }
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun     /* Else place in bin */
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun     frontlink(victim, victim_size, remainder_index, bck, fwd);
1360*4882a593Smuzhiyun   }
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun   /*
1363*4882a593Smuzhiyun      If there are any possibly nonempty big-enough blocks,
1364*4882a593Smuzhiyun      search for best fitting chunk by scanning bins in blockwidth units.
1365*4882a593Smuzhiyun   */
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun   if ( (block = idx2binblock(idx)) <= binblocks_r)
1368*4882a593Smuzhiyun   {
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun     /* Get to the first marked block */
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun     if ( (block & binblocks_r) == 0)
1373*4882a593Smuzhiyun     {
1374*4882a593Smuzhiyun       /* force to an even block boundary */
1375*4882a593Smuzhiyun       idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
1376*4882a593Smuzhiyun       block <<= 1;
1377*4882a593Smuzhiyun       while ((block & binblocks_r) == 0)
1378*4882a593Smuzhiyun       {
1379*4882a593Smuzhiyun 	idx += BINBLOCKWIDTH;
1380*4882a593Smuzhiyun 	block <<= 1;
1381*4882a593Smuzhiyun       }
1382*4882a593Smuzhiyun     }
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun     /* For each possibly nonempty block ... */
1385*4882a593Smuzhiyun     for (;;)
1386*4882a593Smuzhiyun     {
1387*4882a593Smuzhiyun       startidx = idx;          /* (track incomplete blocks) */
1388*4882a593Smuzhiyun       q = bin = bin_at(idx);
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun       /* For each bin in this block ... */
1391*4882a593Smuzhiyun       do
1392*4882a593Smuzhiyun       {
1393*4882a593Smuzhiyun 	/* Find and use first big enough chunk ... */
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	for (victim = last(bin); victim != bin; victim = victim->bk)
1396*4882a593Smuzhiyun 	{
1397*4882a593Smuzhiyun 	  victim_size = chunksize(victim);
1398*4882a593Smuzhiyun 	  remainder_size = victim_size - nb;
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	  if (remainder_size >= (long)MINSIZE) /* split */
1401*4882a593Smuzhiyun 	  {
1402*4882a593Smuzhiyun 	    remainder = chunk_at_offset(victim, nb);
1403*4882a593Smuzhiyun 	    set_head(victim, nb | PREV_INUSE);
1404*4882a593Smuzhiyun 	    unlink(victim, bck, fwd);
1405*4882a593Smuzhiyun 	    link_last_remainder(remainder);
1406*4882a593Smuzhiyun 	    set_head(remainder, remainder_size | PREV_INUSE);
1407*4882a593Smuzhiyun 	    set_foot(remainder, remainder_size);
1408*4882a593Smuzhiyun 	    check_malloced_chunk(victim, nb);
1409*4882a593Smuzhiyun 	    return chunk2mem(victim);
1410*4882a593Smuzhiyun 	  }
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	  else if (remainder_size >= 0)  /* take */
1413*4882a593Smuzhiyun 	  {
1414*4882a593Smuzhiyun 	    set_inuse_bit_at_offset(victim, victim_size);
1415*4882a593Smuzhiyun 	    unlink(victim, bck, fwd);
1416*4882a593Smuzhiyun 	    check_malloced_chunk(victim, nb);
1417*4882a593Smuzhiyun 	    return chunk2mem(victim);
1418*4882a593Smuzhiyun 	  }
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	}
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun        bin = next_bin(bin);
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun       } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun       /* Clear out the block bit. */
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun       do   /* Possibly backtrack to try to clear a partial block */
1429*4882a593Smuzhiyun       {
1430*4882a593Smuzhiyun 	if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
1431*4882a593Smuzhiyun 	{
1432*4882a593Smuzhiyun 	  av_[1] = (mbinptr)(binblocks_r & ~block);
1433*4882a593Smuzhiyun 	  break;
1434*4882a593Smuzhiyun 	}
1435*4882a593Smuzhiyun 	--startidx;
1436*4882a593Smuzhiyun        q = prev_bin(q);
1437*4882a593Smuzhiyun       } while (first(q) == q);
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun       /* Get to the next possibly nonempty block */
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun       if ( (block <<= 1) <= binblocks_r && (block != 0) )
1442*4882a593Smuzhiyun       {
1443*4882a593Smuzhiyun 	while ((block & binblocks_r) == 0)
1444*4882a593Smuzhiyun 	{
1445*4882a593Smuzhiyun 	  idx += BINBLOCKWIDTH;
1446*4882a593Smuzhiyun 	  block <<= 1;
1447*4882a593Smuzhiyun 	}
1448*4882a593Smuzhiyun       }
1449*4882a593Smuzhiyun       else
1450*4882a593Smuzhiyun 	break;
1451*4882a593Smuzhiyun     }
1452*4882a593Smuzhiyun   }
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun   /* Try to use top chunk */
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun   /* Require that there be a remainder, ensuring top always exists  */
1458*4882a593Smuzhiyun   if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
1459*4882a593Smuzhiyun   {
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun #if HAVE_MMAP
1462*4882a593Smuzhiyun     /* If big and would otherwise need to extend, try to use mmap instead */
1463*4882a593Smuzhiyun     if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
1464*4882a593Smuzhiyun 	(victim = mmap_chunk(nb)))
1465*4882a593Smuzhiyun       return chunk2mem(victim);
1466*4882a593Smuzhiyun #endif
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun     /* Try to extend */
1469*4882a593Smuzhiyun     malloc_extend_top(nb);
1470*4882a593Smuzhiyun     if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
1471*4882a593Smuzhiyun       return NULL; /* propagate failure */
1472*4882a593Smuzhiyun   }
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun   victim = top;
1475*4882a593Smuzhiyun   set_head(victim, nb | PREV_INUSE);
1476*4882a593Smuzhiyun   top = chunk_at_offset(victim, nb);
1477*4882a593Smuzhiyun   set_head(top, remainder_size | PREV_INUSE);
1478*4882a593Smuzhiyun   check_malloced_chunk(victim, nb);
1479*4882a593Smuzhiyun   return chunk2mem(victim);
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun }
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun /*
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun   free() algorithm :
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun     cases:
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun        1. free(0) has no effect.
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun        2. If the chunk was allocated via mmap, it is released via munmap().
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun        3. If a returned chunk borders the current high end of memory,
1497*4882a593Smuzhiyun 	  it is consolidated into the top, and if the total unused
1498*4882a593Smuzhiyun 	  topmost memory exceeds the trim threshold, malloc_trim is
1499*4882a593Smuzhiyun 	  called.
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun        4. Other chunks are consolidated as they arrive, and
1502*4882a593Smuzhiyun 	  placed in corresponding bins. (This includes the case of
1503*4882a593Smuzhiyun 	  consolidating with the current `last_remainder').
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun */
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 
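/*
  A minimal usage sketch of case 3 above (kept under "#if 0" so it is
  never compiled): the most recent allocation is usually carved from top,
  so freeing it simply grows top again instead of populating a bin.
*/
#if 0
	{
		void *a = malloc(64);
		void *b = malloc(64);	/* typically carved from top */

		free(b);		/* borders top -> consolidated into top */
		free(a);		/* a now borders top and merges as well */
	}
#endif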
1508*4882a593Smuzhiyun #if __STD_C
1509*4882a593Smuzhiyun void fREe(Void_t* mem)
1510*4882a593Smuzhiyun #else
1511*4882a593Smuzhiyun void fREe(mem) Void_t* mem;
1512*4882a593Smuzhiyun #endif
1513*4882a593Smuzhiyun {
1514*4882a593Smuzhiyun   mchunkptr p;         /* chunk corresponding to mem */
1515*4882a593Smuzhiyun   INTERNAL_SIZE_T hd;  /* its head field */
1516*4882a593Smuzhiyun   INTERNAL_SIZE_T sz;  /* its size */
1517*4882a593Smuzhiyun   int       idx;       /* its bin index */
1518*4882a593Smuzhiyun   mchunkptr next;      /* next contiguous chunk */
1519*4882a593Smuzhiyun   INTERNAL_SIZE_T nextsz; /* its size */
1520*4882a593Smuzhiyun   INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
1521*4882a593Smuzhiyun   mchunkptr bck;       /* misc temp for linking */
1522*4882a593Smuzhiyun   mchunkptr fwd;       /* misc temp for linking */
1523*4882a593Smuzhiyun   int       islr;      /* track whether merging with last_remainder */
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun #if CONFIG_VAL(SYS_MALLOC_F_LEN)
1526*4882a593Smuzhiyun 	/* free() is a no-op - all the memory will be freed on relocation */
1527*4882a593Smuzhiyun 	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
1528*4882a593Smuzhiyun 		return;
1529*4882a593Smuzhiyun #endif
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun   if (mem == NULL)                              /* free(0) has no effect */
1532*4882a593Smuzhiyun     return;
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun   p = mem2chunk(mem);
1535*4882a593Smuzhiyun   hd = p->size;
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun #if HAVE_MMAP
1538*4882a593Smuzhiyun   if (hd & IS_MMAPPED)                       /* release mmapped memory. */
1539*4882a593Smuzhiyun   {
1540*4882a593Smuzhiyun     munmap_chunk(p);
1541*4882a593Smuzhiyun     return;
1542*4882a593Smuzhiyun   }
1543*4882a593Smuzhiyun #endif
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun   check_inuse_chunk(p);
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun   sz = hd & ~PREV_INUSE;
1548*4882a593Smuzhiyun   next = chunk_at_offset(p, sz);
1549*4882a593Smuzhiyun   nextsz = chunksize(next);
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun   if (next == top)                            /* merge with top */
1552*4882a593Smuzhiyun   {
1553*4882a593Smuzhiyun     sz += nextsz;
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun     if (!(hd & PREV_INUSE))                    /* consolidate backward */
1556*4882a593Smuzhiyun     {
1557*4882a593Smuzhiyun       prevsz = p->prev_size;
1558*4882a593Smuzhiyun       p = chunk_at_offset(p, -((long) prevsz));
1559*4882a593Smuzhiyun       sz += prevsz;
1560*4882a593Smuzhiyun       unlink(p, bck, fwd);
1561*4882a593Smuzhiyun     }
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun     set_head(p, sz | PREV_INUSE);
1564*4882a593Smuzhiyun     top = p;
1565*4882a593Smuzhiyun     if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
1566*4882a593Smuzhiyun       malloc_trim(top_pad);
1567*4882a593Smuzhiyun     return;
1568*4882a593Smuzhiyun   }
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun   set_head(next, nextsz);                    /* clear inuse bit */
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun   islr = 0;
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun   if (!(hd & PREV_INUSE))                    /* consolidate backward */
1575*4882a593Smuzhiyun   {
1576*4882a593Smuzhiyun     prevsz = p->prev_size;
1577*4882a593Smuzhiyun     p = chunk_at_offset(p, -((long) prevsz));
1578*4882a593Smuzhiyun     sz += prevsz;
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun     if (p->fd == last_remainder)             /* keep as last_remainder */
1581*4882a593Smuzhiyun       islr = 1;
1582*4882a593Smuzhiyun     else
1583*4882a593Smuzhiyun       unlink(p, bck, fwd);
1584*4882a593Smuzhiyun   }
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun   if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
1587*4882a593Smuzhiyun   {
1588*4882a593Smuzhiyun     sz += nextsz;
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun     if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
1591*4882a593Smuzhiyun     {
1592*4882a593Smuzhiyun       islr = 1;
1593*4882a593Smuzhiyun       link_last_remainder(p);
1594*4882a593Smuzhiyun     }
1595*4882a593Smuzhiyun     else
1596*4882a593Smuzhiyun       unlink(next, bck, fwd);
1597*4882a593Smuzhiyun   }
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun   set_head(p, sz | PREV_INUSE);
1601*4882a593Smuzhiyun   set_foot(p, sz);
1602*4882a593Smuzhiyun   if (!islr)
1603*4882a593Smuzhiyun     frontlink(p, sz, idx, bck, fwd);
1604*4882a593Smuzhiyun }
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun /*
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun   Realloc algorithm:
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun     Chunks that were obtained via mmap cannot be extended or shrunk
1615*4882a593Smuzhiyun     unless HAVE_MREMAP is defined, in which case mremap is used.
1616*4882a593Smuzhiyun     Otherwise, if their reallocation is for additional space, they are
1617*4882a593Smuzhiyun     copied.  If for less, they are just left alone.
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun     Otherwise, if the reallocation is for additional space, and the
1620*4882a593Smuzhiyun     chunk can be extended, it is, else a malloc-copy-free sequence is
1621*4882a593Smuzhiyun     taken.  There are several different ways that a chunk could be
1622*4882a593Smuzhiyun     extended. All are tried:
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun        * Extending forward into following adjacent free chunk.
1625*4882a593Smuzhiyun        * Shifting backwards, joining preceding adjacent space
1626*4882a593Smuzhiyun        * Both shifting backwards and extending forward.
1627*4882a593Smuzhiyun        * Extending into newly sbrked space
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun     Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
1630*4882a593Smuzhiyun     size argument of zero (re)allocates a minimum-sized chunk.
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun     If the reallocation is for less space, and the new request is for
1633*4882a593Smuzhiyun     a `small' (<512 bytes) size, then the newly unused space is lopped
1634*4882a593Smuzhiyun     off and freed.
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun     The old unix realloc convention of allowing the last-free'd chunk
1637*4882a593Smuzhiyun     to be used as an argument to realloc is no longer supported.
1638*4882a593Smuzhiyun     I don't know of any programs still relying on this feature,
1639*4882a593Smuzhiyun     and allowing it would also allow too many other incorrect
1640*4882a593Smuzhiyun     usages of realloc to be sensible.
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun */
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 
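/*
  A minimal usage sketch (illustrative only, kept under "#if 0" so it is
  never compiled; error handling omitted): growing the most recent
  allocation can often be satisfied by extending forward into top without
  copying, while older allocations tend to fall back to the
  malloc-copy-free path described above.
*/
#if 0
	{
		char *p = malloc(100);

		p = realloc(p, 200);	/* often extended in place via top */
		p = realloc(p, 50);	/* small shrink: excess split off and freed */
		free(p);
	}
#endif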
1646*4882a593Smuzhiyun #if __STD_C
1647*4882a593Smuzhiyun Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
1648*4882a593Smuzhiyun #else
1649*4882a593Smuzhiyun Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
1650*4882a593Smuzhiyun #endif
1651*4882a593Smuzhiyun {
1652*4882a593Smuzhiyun   INTERNAL_SIZE_T    nb;      /* padded request size */
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun   mchunkptr oldp;             /* chunk corresponding to oldmem */
1655*4882a593Smuzhiyun   INTERNAL_SIZE_T    oldsize; /* its size */
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun   mchunkptr newp;             /* chunk to return */
1658*4882a593Smuzhiyun   INTERNAL_SIZE_T    newsize; /* its size */
1659*4882a593Smuzhiyun   Void_t*   newmem;           /* corresponding user mem */
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun   mchunkptr next;             /* next contiguous chunk after oldp */
1662*4882a593Smuzhiyun   INTERNAL_SIZE_T  nextsize;  /* its size */
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun   mchunkptr prev;             /* previous contiguous chunk before oldp */
1665*4882a593Smuzhiyun   INTERNAL_SIZE_T  prevsize;  /* its size */
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun   mchunkptr remainder;        /* holds split off extra space from newp */
1668*4882a593Smuzhiyun   INTERNAL_SIZE_T  remainder_size;   /* its size */
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun   mchunkptr bck;              /* misc temp for linking */
1671*4882a593Smuzhiyun   mchunkptr fwd;              /* misc temp for linking */
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun #ifdef REALLOC_ZERO_BYTES_FREES
1674*4882a593Smuzhiyun   if (!bytes) {
1675*4882a593Smuzhiyun 	fREe(oldmem);
1676*4882a593Smuzhiyun 	return NULL;
1677*4882a593Smuzhiyun   }
1678*4882a593Smuzhiyun #endif
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun   if ((long)bytes < 0) return NULL;
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun   /* realloc of null is supposed to be the same as malloc */
1683*4882a593Smuzhiyun   if (oldmem == NULL) return mALLOc(bytes);
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun #if CONFIG_VAL(SYS_MALLOC_F_LEN)
1686*4882a593Smuzhiyun 	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
1687*4882a593Smuzhiyun 		/* This is harder to support and should not be needed */
1688*4882a593Smuzhiyun 		panic("pre-reloc realloc() is not supported");
1689*4882a593Smuzhiyun 	}
1690*4882a593Smuzhiyun #endif
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun   newp    = oldp    = mem2chunk(oldmem);
1693*4882a593Smuzhiyun   newsize = oldsize = chunksize(oldp);
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun   nb = request2size(bytes);
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun #if HAVE_MMAP
1699*4882a593Smuzhiyun   if (chunk_is_mmapped(oldp))
1700*4882a593Smuzhiyun   {
1701*4882a593Smuzhiyun #if HAVE_MREMAP
1702*4882a593Smuzhiyun     newp = mremap_chunk(oldp, nb);
1703*4882a593Smuzhiyun     if(newp) return chunk2mem(newp);
1704*4882a593Smuzhiyun #endif
1705*4882a593Smuzhiyun     /* Note the extra SIZE_SZ overhead. */
1706*4882a593Smuzhiyun     if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
1707*4882a593Smuzhiyun     /* Must alloc, copy, free. */
1708*4882a593Smuzhiyun     newmem = mALLOc(bytes);
1709*4882a593Smuzhiyun     if (!newmem)
1710*4882a593Smuzhiyun 	return NULL; /* propagate failure */
1711*4882a593Smuzhiyun     MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
1712*4882a593Smuzhiyun     munmap_chunk(oldp);
1713*4882a593Smuzhiyun     return newmem;
1714*4882a593Smuzhiyun   }
1715*4882a593Smuzhiyun #endif
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun   check_inuse_chunk(oldp);
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun   if ((long)(oldsize) < (long)(nb))
1720*4882a593Smuzhiyun   {
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun     /* Try expanding forward */
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun     next = chunk_at_offset(oldp, oldsize);
1725*4882a593Smuzhiyun     if (next == top || !inuse(next))
1726*4882a593Smuzhiyun     {
1727*4882a593Smuzhiyun       nextsize = chunksize(next);
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun       /* Forward into top only if a remainder */
1730*4882a593Smuzhiyun       if (next == top)
1731*4882a593Smuzhiyun       {
1732*4882a593Smuzhiyun 	if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
1733*4882a593Smuzhiyun 	{
1734*4882a593Smuzhiyun 	  newsize += nextsize;
1735*4882a593Smuzhiyun 	  top = chunk_at_offset(oldp, nb);
1736*4882a593Smuzhiyun 	  set_head(top, (newsize - nb) | PREV_INUSE);
1737*4882a593Smuzhiyun 	  set_head_size(oldp, nb);
1738*4882a593Smuzhiyun 	  return chunk2mem(oldp);
1739*4882a593Smuzhiyun 	}
1740*4882a593Smuzhiyun       }
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun       /* Forward into next chunk */
1743*4882a593Smuzhiyun       else if (((long)(nextsize + newsize) >= (long)(nb)))
1744*4882a593Smuzhiyun       {
1745*4882a593Smuzhiyun 	unlink(next, bck, fwd);
1746*4882a593Smuzhiyun 	newsize  += nextsize;
1747*4882a593Smuzhiyun 	goto split;
1748*4882a593Smuzhiyun       }
1749*4882a593Smuzhiyun     }
1750*4882a593Smuzhiyun     else
1751*4882a593Smuzhiyun     {
1752*4882a593Smuzhiyun       next = NULL;
1753*4882a593Smuzhiyun       nextsize = 0;
1754*4882a593Smuzhiyun     }
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun     /* Try shifting backwards. */
1757*4882a593Smuzhiyun 
1758*4882a593Smuzhiyun     if (!prev_inuse(oldp))
1759*4882a593Smuzhiyun     {
1760*4882a593Smuzhiyun       prev = prev_chunk(oldp);
1761*4882a593Smuzhiyun       prevsize = chunksize(prev);
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun       /* try forward + backward first to save a later consolidation */
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun       if (next != NULL)
1766*4882a593Smuzhiyun       {
1767*4882a593Smuzhiyun 	/* into top */
1768*4882a593Smuzhiyun 	if (next == top)
1769*4882a593Smuzhiyun 	{
1770*4882a593Smuzhiyun 	  if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
1771*4882a593Smuzhiyun 	  {
1772*4882a593Smuzhiyun 	    unlink(prev, bck, fwd);
1773*4882a593Smuzhiyun 	    newp = prev;
1774*4882a593Smuzhiyun 	    newsize += prevsize + nextsize;
1775*4882a593Smuzhiyun 	    newmem = chunk2mem(newp);
1776*4882a593Smuzhiyun 	    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1777*4882a593Smuzhiyun 	    top = chunk_at_offset(newp, nb);
1778*4882a593Smuzhiyun 	    set_head(top, (newsize - nb) | PREV_INUSE);
1779*4882a593Smuzhiyun 	    set_head_size(newp, nb);
1780*4882a593Smuzhiyun 	    return newmem;
1781*4882a593Smuzhiyun 	  }
1782*4882a593Smuzhiyun 	}
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun 	/* into next chunk */
1785*4882a593Smuzhiyun 	else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
1786*4882a593Smuzhiyun 	{
1787*4882a593Smuzhiyun 	  unlink(next, bck, fwd);
1788*4882a593Smuzhiyun 	  unlink(prev, bck, fwd);
1789*4882a593Smuzhiyun 	  newp = prev;
1790*4882a593Smuzhiyun 	  newsize += nextsize + prevsize;
1791*4882a593Smuzhiyun 	  newmem = chunk2mem(newp);
1792*4882a593Smuzhiyun 	  MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1793*4882a593Smuzhiyun 	  goto split;
1794*4882a593Smuzhiyun 	}
1795*4882a593Smuzhiyun       }
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun       /* backward only */
1798*4882a593Smuzhiyun       if (prev != NULL && (long)(prevsize + newsize) >= (long)nb)
1799*4882a593Smuzhiyun       {
1800*4882a593Smuzhiyun 	unlink(prev, bck, fwd);
1801*4882a593Smuzhiyun 	newp = prev;
1802*4882a593Smuzhiyun 	newsize += prevsize;
1803*4882a593Smuzhiyun 	newmem = chunk2mem(newp);
1804*4882a593Smuzhiyun 	MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1805*4882a593Smuzhiyun 	goto split;
1806*4882a593Smuzhiyun       }
1807*4882a593Smuzhiyun     }
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun     /* Must allocate */
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun     newmem = mALLOc (bytes);
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun     if (newmem == NULL)  /* propagate failure */
1814*4882a593Smuzhiyun       return NULL;
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun     /* Avoid copy if newp is next chunk after oldp. */
1817*4882a593Smuzhiyun     /* (This can only happen when new chunk is sbrk'ed.) */
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun     if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
1820*4882a593Smuzhiyun     {
1821*4882a593Smuzhiyun       newsize += chunksize(newp);
1822*4882a593Smuzhiyun       newp = oldp;
1823*4882a593Smuzhiyun       goto split;
1824*4882a593Smuzhiyun     }
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun     /* Otherwise copy, free, and exit */
1827*4882a593Smuzhiyun     MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1828*4882a593Smuzhiyun     fREe(oldmem);
1829*4882a593Smuzhiyun     return newmem;
1830*4882a593Smuzhiyun   }
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun  split:  /* split off extra room in old or expanded chunk */
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun   if (newsize - nb >= MINSIZE) /* split off remainder */
1836*4882a593Smuzhiyun   {
1837*4882a593Smuzhiyun     remainder = chunk_at_offset(newp, nb);
1838*4882a593Smuzhiyun     remainder_size = newsize - nb;
1839*4882a593Smuzhiyun     set_head_size(newp, nb);
1840*4882a593Smuzhiyun     set_head(remainder, remainder_size | PREV_INUSE);
1841*4882a593Smuzhiyun     set_inuse_bit_at_offset(remainder, remainder_size);
1842*4882a593Smuzhiyun     fREe(chunk2mem(remainder)); /* let free() deal with it */
1843*4882a593Smuzhiyun   }
1844*4882a593Smuzhiyun   else
1845*4882a593Smuzhiyun   {
1846*4882a593Smuzhiyun     set_head_size(newp, newsize);
1847*4882a593Smuzhiyun     set_inuse_bit_at_offset(newp, newsize);
1848*4882a593Smuzhiyun   }
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun   check_inuse_chunk(newp);
1851*4882a593Smuzhiyun   return chunk2mem(newp);
1852*4882a593Smuzhiyun }
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun /*
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun   memalign algorithm:
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun     memalign requests more than enough space from malloc, finds a spot
1862*4882a593Smuzhiyun     within that chunk that meets the alignment request, and then
1863*4882a593Smuzhiyun     possibly frees the leading and trailing space.
1864*4882a593Smuzhiyun 
1865*4882a593Smuzhiyun     The alignment argument must be a power of two. This property is not
1866*4882a593Smuzhiyun     checked by memalign, so misuse may result in random runtime errors.
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun     8-byte alignment is guaranteed by normal malloc calls, so don't
1869*4882a593Smuzhiyun     bother calling memalign with an argument of 8 or less.
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun     Overreliance on memalign is a sure way to fragment space.
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun */
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun 
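/*
  A minimal usage sketch (illustrative only, kept under "#if 0" so it is
  never compiled; error handling omitted).  The alignment must be a power
  of two; it is not validated here.
*/
#if 0
	{
		void *buf = memalign(64, 200);	/* 64-byte aligned, 200 usable bytes */

		assert(((unsigned long)buf & 63) == 0);
		free(buf);
	}
#endif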
1876*4882a593Smuzhiyun #if __STD_C
1877*4882a593Smuzhiyun Void_t* mEMALIGn(size_t alignment, size_t bytes)
1878*4882a593Smuzhiyun #else
1879*4882a593Smuzhiyun Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
1880*4882a593Smuzhiyun #endif
1881*4882a593Smuzhiyun {
1882*4882a593Smuzhiyun   INTERNAL_SIZE_T    nb;      /* padded  request size */
1883*4882a593Smuzhiyun   char*     m;                /* memory returned by malloc call */
1884*4882a593Smuzhiyun   mchunkptr p;                /* corresponding chunk */
1885*4882a593Smuzhiyun   char*     brk;              /* alignment point within p */
1886*4882a593Smuzhiyun   mchunkptr newp;             /* chunk to return */
1887*4882a593Smuzhiyun   INTERNAL_SIZE_T  newsize;   /* its size */
1888*4882a593Smuzhiyun   INTERNAL_SIZE_T  leadsize;  /* leading space before the alignment point */
1889*4882a593Smuzhiyun   mchunkptr remainder;        /* spare room at end to split off */
1890*4882a593Smuzhiyun   long      remainder_size;   /* its size */
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun   if ((long)bytes < 0) return NULL;
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun   /* If need less alignment than we give anyway, just relay to malloc */
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun   if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun   /* Otherwise, ensure that it is at least a minimum chunk size */
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun   if (alignment <  MINSIZE) alignment = MINSIZE;
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun   /* Call malloc with worst case padding to hit alignment. */
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun   nb = request2size(bytes);
1905*4882a593Smuzhiyun   m  = (char*)(mALLOc(nb + alignment + MINSIZE));
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun   /*
1908*4882a593Smuzhiyun   * The attempt to over-allocate (with a size large enough to guarantee the
1909*4882a593Smuzhiyun   * ability to find an aligned region within allocated memory) failed.
1910*4882a593Smuzhiyun   *
1911*4882a593Smuzhiyun   * Try again, this time only allocating exactly the size the user wants. If
1912*4882a593Smuzhiyun   * the allocation now succeeds and just happens to be aligned, we can still
1913*4882a593Smuzhiyun   * fulfill the user's request.
1914*4882a593Smuzhiyun   */
1915*4882a593Smuzhiyun   if (m == NULL) {
1916*4882a593Smuzhiyun     size_t extra, extra2;
1917*4882a593Smuzhiyun     /*
1918*4882a593Smuzhiyun      * Use bytes not nb, since mALLOc internally calls request2size too, and
1919*4882a593Smuzhiyun      * each call increases the size to allocate, to account for the header.
1920*4882a593Smuzhiyun      */
1921*4882a593Smuzhiyun     m  = (char*)(mALLOc(bytes));
1922*4882a593Smuzhiyun     /* Aligned -> return it */
1923*4882a593Smuzhiyun     if ((((unsigned long)(m)) % alignment) == 0)
1924*4882a593Smuzhiyun       return m;
1925*4882a593Smuzhiyun     /*
1926*4882a593Smuzhiyun      * Otherwise, try again, requesting enough extra space to be able to
1927*4882a593Smuzhiyun      * acquire alignment.
1928*4882a593Smuzhiyun      */
1929*4882a593Smuzhiyun     fREe(m);
1930*4882a593Smuzhiyun     /* Add in extra bytes to match misalignment of unexpanded allocation */
1931*4882a593Smuzhiyun     extra = alignment - (((unsigned long)(m)) % alignment);
1932*4882a593Smuzhiyun     m  = (char*)(mALLOc(bytes + extra));
1933*4882a593Smuzhiyun     /*
1934*4882a593Smuzhiyun      * m might not be the same as before. Validate that the previous value of
1935*4882a593Smuzhiyun      * extra still works for the current value of m.
1936*4882a593Smuzhiyun      * (If m is NULL, we skip this check and fall through to the failure check below.)
1937*4882a593Smuzhiyun      */
1938*4882a593Smuzhiyun     if (m) {
1939*4882a593Smuzhiyun       extra2 = alignment - (((unsigned long)(m)) % alignment);
1940*4882a593Smuzhiyun       if (extra2 > extra) {
1941*4882a593Smuzhiyun         fREe(m);
1942*4882a593Smuzhiyun         m = NULL;
1943*4882a593Smuzhiyun       }
1944*4882a593Smuzhiyun     }
1945*4882a593Smuzhiyun     /* Fall through to original NULL check and chunk splitting logic */
1946*4882a593Smuzhiyun   }
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun   if (m == NULL) return NULL; /* propagate failure */
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun   p = mem2chunk(m);
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun   if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
1953*4882a593Smuzhiyun   {
1954*4882a593Smuzhiyun #if HAVE_MMAP
1955*4882a593Smuzhiyun     if(chunk_is_mmapped(p))
1956*4882a593Smuzhiyun       return chunk2mem(p); /* nothing more to do */
1957*4882a593Smuzhiyun #endif
1958*4882a593Smuzhiyun   }
1959*4882a593Smuzhiyun   else /* misaligned */
1960*4882a593Smuzhiyun   {
1961*4882a593Smuzhiyun     /*
1962*4882a593Smuzhiyun       Find an aligned spot inside chunk.
1963*4882a593Smuzhiyun       Since we need to give back leading space in a chunk of at
1964*4882a593Smuzhiyun       least MINSIZE, if the first calculation places us at
1965*4882a593Smuzhiyun       a spot with less than MINSIZE leader, we can move to the
1966*4882a593Smuzhiyun       next aligned spot -- we've allocated enough total room so that
1967*4882a593Smuzhiyun       this is always possible.
1968*4882a593Smuzhiyun     */
1969*4882a593Smuzhiyun 
1970*4882a593Smuzhiyun     brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
1971*4882a593Smuzhiyun     if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun     newp = (mchunkptr)brk;
1974*4882a593Smuzhiyun     leadsize = brk - (char*)(p);
1975*4882a593Smuzhiyun     newsize = chunksize(p) - leadsize;
1976*4882a593Smuzhiyun 
1977*4882a593Smuzhiyun #if HAVE_MMAP
1978*4882a593Smuzhiyun     if(chunk_is_mmapped(p))
1979*4882a593Smuzhiyun     {
1980*4882a593Smuzhiyun       newp->prev_size = p->prev_size + leadsize;
1981*4882a593Smuzhiyun       set_head(newp, newsize|IS_MMAPPED);
1982*4882a593Smuzhiyun       return chunk2mem(newp);
1983*4882a593Smuzhiyun     }
1984*4882a593Smuzhiyun #endif
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun     /* give back leader, use the rest */
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun     set_head(newp, newsize | PREV_INUSE);
1989*4882a593Smuzhiyun     set_inuse_bit_at_offset(newp, newsize);
1990*4882a593Smuzhiyun     set_head_size(p, leadsize);
1991*4882a593Smuzhiyun     fREe(chunk2mem(p));
1992*4882a593Smuzhiyun     p = newp;
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun     assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
1995*4882a593Smuzhiyun   }
1996*4882a593Smuzhiyun 
1997*4882a593Smuzhiyun   /* Also give back spare room at the end */
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun   remainder_size = chunksize(p) - nb;
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun   if (remainder_size >= (long)MINSIZE)
2002*4882a593Smuzhiyun   {
2003*4882a593Smuzhiyun     remainder = chunk_at_offset(p, nb);
2004*4882a593Smuzhiyun     set_head(remainder, remainder_size | PREV_INUSE);
2005*4882a593Smuzhiyun     set_head_size(p, nb);
2006*4882a593Smuzhiyun     fREe(chunk2mem(remainder));
2007*4882a593Smuzhiyun   }
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun   check_inuse_chunk(p);
2010*4882a593Smuzhiyun   return chunk2mem(p);
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun }
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun /*
2018*4882a593Smuzhiyun     valloc just invokes memalign with alignment argument equal
2019*4882a593Smuzhiyun     to the page size of the system (or as near to this as can
2020*4882a593Smuzhiyun     be figured out from all the includes/defines above.)
2021*4882a593Smuzhiyun */
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun #if __STD_C
2024*4882a593Smuzhiyun Void_t* vALLOc(size_t bytes)
2025*4882a593Smuzhiyun #else
2026*4882a593Smuzhiyun Void_t* vALLOc(bytes) size_t bytes;
2027*4882a593Smuzhiyun #endif
2028*4882a593Smuzhiyun {
2029*4882a593Smuzhiyun   return mEMALIGn (malloc_getpagesize, bytes);
2030*4882a593Smuzhiyun }
2031*4882a593Smuzhiyun 
2032*4882a593Smuzhiyun /*
2033*4882a593Smuzhiyun   pvalloc just invokes valloc with the request rounded up to the
2034*4882a593Smuzhiyun   nearest multiple of the page size
2035*4882a593Smuzhiyun */
2036*4882a593Smuzhiyun 
2037*4882a593Smuzhiyun 
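/*
  Worked example (illustrative only, assuming malloc_getpagesize == 4096):
  pvalloc(5000) rounds the request up to a whole number of pages,

      (5000 + 4095) & ~4095  ==  8192

  and then behaves like memalign(4096, 8192), so the result is page
  aligned and spans complete pages.
*/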
2038*4882a593Smuzhiyun #if __STD_C
2039*4882a593Smuzhiyun Void_t* pvALLOc(size_t bytes)
2040*4882a593Smuzhiyun #else
2041*4882a593Smuzhiyun Void_t* pvALLOc(bytes) size_t bytes;
2042*4882a593Smuzhiyun #endif
2043*4882a593Smuzhiyun {
2044*4882a593Smuzhiyun   size_t pagesize = malloc_getpagesize;
2045*4882a593Smuzhiyun   return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
2046*4882a593Smuzhiyun }
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun /*
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun   calloc calls malloc, then zeroes out the allocated chunk.
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun */
2053*4882a593Smuzhiyun 
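/*
  A rough equivalent (illustrative only, kept under "#if 0" so it is
  never compiled): calloc(10, sizeof(int)) behaves roughly like the
  sequence below, except that the clearing is skipped for memory already
  known to be zeroed (freshly mmapped or, optionally, freshly sbrked).
*/
#if 0
	{
		void *p = malloc(10 * sizeof(int));

		if (p)
			memset(p, 0, 10 * sizeof(int));
	}
#endif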
2054*4882a593Smuzhiyun #if __STD_C
2055*4882a593Smuzhiyun Void_t* cALLOc(size_t n, size_t elem_size)
2056*4882a593Smuzhiyun #else
2057*4882a593Smuzhiyun Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
2058*4882a593Smuzhiyun #endif
2059*4882a593Smuzhiyun {
2060*4882a593Smuzhiyun   mchunkptr p;
2061*4882a593Smuzhiyun   INTERNAL_SIZE_T csz;
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun   INTERNAL_SIZE_T sz = n * elem_size;
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 
2066*4882a593Smuzhiyun   /* check if expand_top was called, in which case we don't need to clear */
2067*4882a593Smuzhiyun #ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
2068*4882a593Smuzhiyun #if MORECORE_CLEARS
2069*4882a593Smuzhiyun   mchunkptr oldtop = top;
2070*4882a593Smuzhiyun   INTERNAL_SIZE_T oldtopsize = chunksize(top);
2071*4882a593Smuzhiyun #endif
2072*4882a593Smuzhiyun #endif
2073*4882a593Smuzhiyun   Void_t* mem = mALLOc (sz);
2074*4882a593Smuzhiyun 
2075*4882a593Smuzhiyun   if ((long)n < 0) return NULL;
2076*4882a593Smuzhiyun 
2077*4882a593Smuzhiyun   if (mem == NULL)
2078*4882a593Smuzhiyun     return NULL;
2079*4882a593Smuzhiyun   else
2080*4882a593Smuzhiyun   {
2081*4882a593Smuzhiyun #if CONFIG_VAL(SYS_MALLOC_F_LEN)
2082*4882a593Smuzhiyun 	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
2083*4882a593Smuzhiyun 		MALLOC_ZERO(mem, sz);
2084*4882a593Smuzhiyun 		return mem;
2085*4882a593Smuzhiyun 	}
2086*4882a593Smuzhiyun #endif
2087*4882a593Smuzhiyun     p = mem2chunk(mem);
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun     /* Two optional cases in which clearing is not necessary */
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun #if HAVE_MMAP
2093*4882a593Smuzhiyun     if (chunk_is_mmapped(p)) return mem;
2094*4882a593Smuzhiyun #endif
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun     csz = chunksize(p);
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun #ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
2099*4882a593Smuzhiyun #if MORECORE_CLEARS
2100*4882a593Smuzhiyun     if (p == oldtop && csz > oldtopsize)
2101*4882a593Smuzhiyun     {
2102*4882a593Smuzhiyun       /* clear only the bytes from non-freshly-sbrked memory */
2103*4882a593Smuzhiyun       csz = oldtopsize;
2104*4882a593Smuzhiyun     }
2105*4882a593Smuzhiyun #endif
2106*4882a593Smuzhiyun #endif
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun     MALLOC_ZERO(mem, csz - SIZE_SZ);
2109*4882a593Smuzhiyun     return mem;
2110*4882a593Smuzhiyun   }
2111*4882a593Smuzhiyun }
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun /*
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun   cfree just calls free. It is needed/defined on some systems
2116*4882a593Smuzhiyun   that pair it with calloc, presumably for odd historical reasons.
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun */
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun #if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
2121*4882a593Smuzhiyun #if __STD_C
2122*4882a593Smuzhiyun void cfree(Void_t *mem)
2123*4882a593Smuzhiyun #else
2124*4882a593Smuzhiyun void cfree(mem) Void_t *mem;
2125*4882a593Smuzhiyun #endif
2126*4882a593Smuzhiyun {
2127*4882a593Smuzhiyun   fREe(mem);
2128*4882a593Smuzhiyun }
2129*4882a593Smuzhiyun #endif
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun /*
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun     Malloc_trim gives memory back to the system (via negative
2136*4882a593Smuzhiyun     arguments to sbrk) if there is unused memory at the `high' end of
2137*4882a593Smuzhiyun     the malloc pool. You can call this after freeing large blocks of
2138*4882a593Smuzhiyun     memory to potentially reduce the system-level memory requirements
2139*4882a593Smuzhiyun     of a program. However, it cannot guarantee to reduce memory. Under
2140*4882a593Smuzhiyun     some allocation patterns, some large free blocks of memory will be
2141*4882a593Smuzhiyun     locked between two used chunks, so they cannot be given back to
2142*4882a593Smuzhiyun     the system.
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun     The `pad' argument to malloc_trim represents the amount of free
2145*4882a593Smuzhiyun     trailing space to leave untrimmed. If this argument is zero,
2146*4882a593Smuzhiyun     only the minimum amount of memory to maintain internal data
2147*4882a593Smuzhiyun     structures will be left (one page or less). Non-zero arguments
2148*4882a593Smuzhiyun     can be supplied to maintain enough trailing space to service
2149*4882a593Smuzhiyun     future expected allocations without having to re-obtain memory
2150*4882a593Smuzhiyun     from the system.
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun     Malloc_trim returns 1 if it actually released any memory, else 0.
2153*4882a593Smuzhiyun 
2154*4882a593Smuzhiyun */
2155*4882a593Smuzhiyun 
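/*
  Worked example (illustrative only, assuming malloc_getpagesize == 4096,
  MINSIZE == 16 and pad == 0): with top_size == 20000 the code below
  computes

      extra = ((20000 - 0 - 16 + 4095) / 4096 - 1) * 4096
            = (5 - 1) * 4096
            = 16384

  so four whole pages are handed back and roughly 3600 bytes stay in top,
  always keeping at least pad + MINSIZE and at most about one extra page.
*/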
2156*4882a593Smuzhiyun #if __STD_C
2157*4882a593Smuzhiyun int malloc_trim(size_t pad)
2158*4882a593Smuzhiyun #else
2159*4882a593Smuzhiyun int malloc_trim(pad) size_t pad;
2160*4882a593Smuzhiyun #endif
2161*4882a593Smuzhiyun {
2162*4882a593Smuzhiyun   long  top_size;        /* Amount of top-most memory */
2163*4882a593Smuzhiyun   long  extra;           /* Amount to release */
2164*4882a593Smuzhiyun   char* current_brk;     /* address returned by pre-check sbrk call */
2165*4882a593Smuzhiyun   char* new_brk;         /* address returned by negative sbrk call */
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun   unsigned long pagesz = malloc_getpagesize;
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun   top_size = chunksize(top);
2170*4882a593Smuzhiyun   extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun   if (extra < (long)pagesz)  /* Not enough memory to release */
2173*4882a593Smuzhiyun     return 0;
2174*4882a593Smuzhiyun 
2175*4882a593Smuzhiyun   else
2176*4882a593Smuzhiyun   {
2177*4882a593Smuzhiyun     /* Test to make sure no one else called sbrk */
2178*4882a593Smuzhiyun     current_brk = (char*)(MORECORE (0));
2179*4882a593Smuzhiyun     if (current_brk != (char*)(top) + top_size)
2180*4882a593Smuzhiyun       return 0;     /* Apparently we don't own memory; must fail */
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun     else
2183*4882a593Smuzhiyun     {
2184*4882a593Smuzhiyun       new_brk = (char*)(MORECORE (-extra));
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun       if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
2187*4882a593Smuzhiyun       {
2188*4882a593Smuzhiyun 	/* Try to figure out what we have */
2189*4882a593Smuzhiyun 	current_brk = (char*)(MORECORE (0));
2190*4882a593Smuzhiyun 	top_size = current_brk - (char*)top;
2191*4882a593Smuzhiyun 	if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
2192*4882a593Smuzhiyun 	{
2193*4882a593Smuzhiyun 	  sbrked_mem = current_brk - sbrk_base;
2194*4882a593Smuzhiyun 	  set_head(top, top_size | PREV_INUSE);
2195*4882a593Smuzhiyun 	}
2196*4882a593Smuzhiyun 	check_chunk(top);
2197*4882a593Smuzhiyun 	return 0;
2198*4882a593Smuzhiyun       }
2199*4882a593Smuzhiyun 
2200*4882a593Smuzhiyun       else
2201*4882a593Smuzhiyun       {
2202*4882a593Smuzhiyun 	/* Success. Adjust top accordingly. */
2203*4882a593Smuzhiyun 	set_head(top, (top_size - extra) | PREV_INUSE);
2204*4882a593Smuzhiyun 	sbrked_mem -= extra;
2205*4882a593Smuzhiyun 	check_chunk(top);
2206*4882a593Smuzhiyun 	return 1;
2207*4882a593Smuzhiyun       }
2208*4882a593Smuzhiyun     }
2209*4882a593Smuzhiyun   }
2210*4882a593Smuzhiyun }
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun /*
2215*4882a593Smuzhiyun   malloc_usable_size:
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun     This routine tells you how many bytes you can actually use in an
2218*4882a593Smuzhiyun     allocated chunk, which may be more than you requested (although
2219*4882a593Smuzhiyun     often not). You can use this many bytes without worrying about
2220*4882a593Smuzhiyun     overwriting other allocated objects. Not a particularly great
2221*4882a593Smuzhiyun     programming practice, but still sometimes useful.
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun */
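/*
  Usage sketch (illustrative only): the reported size may exceed the
  requested size, and all of it may safely be written.  Assumes the
  public names declared in malloc.h.

      char *buf = malloc(100);
      if (buf) {
              size_t real = malloc_usable_size(buf);  // often >= 100
              memset(buf, 0, real);                   // safe up to 'real' bytes
              free(buf);
      }
*/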
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun #if __STD_C
malloc_usable_size(Void_t * mem)2226*4882a593Smuzhiyun size_t malloc_usable_size(Void_t* mem)
2227*4882a593Smuzhiyun #else
2228*4882a593Smuzhiyun size_t malloc_usable_size(mem) Void_t* mem;
2229*4882a593Smuzhiyun #endif
2230*4882a593Smuzhiyun {
2231*4882a593Smuzhiyun   mchunkptr p;
2232*4882a593Smuzhiyun   if (mem == NULL)
2233*4882a593Smuzhiyun     return 0;
2234*4882a593Smuzhiyun   else
2235*4882a593Smuzhiyun   {
2236*4882a593Smuzhiyun     p = mem2chunk(mem);
2237*4882a593Smuzhiyun     if(!chunk_is_mmapped(p))
2238*4882a593Smuzhiyun     {
2239*4882a593Smuzhiyun       if (!inuse(p)) return 0;
2240*4882a593Smuzhiyun       check_inuse_chunk(p);
2241*4882a593Smuzhiyun       return chunksize(p) - SIZE_SZ;
2242*4882a593Smuzhiyun     }
2243*4882a593Smuzhiyun     return chunksize(p) - 2*SIZE_SZ;
2244*4882a593Smuzhiyun   }
2245*4882a593Smuzhiyun }
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun /* Utility to update current_mallinfo for malloc_stats and mallinfo() */
2251*4882a593Smuzhiyun 
2252*4882a593Smuzhiyun #ifdef DEBUG
malloc_update_mallinfo()2253*4882a593Smuzhiyun static void malloc_update_mallinfo()
2254*4882a593Smuzhiyun {
2255*4882a593Smuzhiyun   int i;
2256*4882a593Smuzhiyun   mbinptr b;
2257*4882a593Smuzhiyun   mchunkptr p;
2258*4882a593Smuzhiyun #ifdef DEBUG
2259*4882a593Smuzhiyun   mchunkptr q;
2260*4882a593Smuzhiyun #endif
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun   INTERNAL_SIZE_T avail = chunksize(top);
2263*4882a593Smuzhiyun   int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun   for (i = 1; i < NAV; ++i)
2266*4882a593Smuzhiyun   {
2267*4882a593Smuzhiyun     b = bin_at(i);
2268*4882a593Smuzhiyun     for (p = last(b); p != b; p = p->bk)
2269*4882a593Smuzhiyun     {
2270*4882a593Smuzhiyun #ifdef DEBUG
2271*4882a593Smuzhiyun       check_free_chunk(p);
2272*4882a593Smuzhiyun       for (q = next_chunk(p);
2273*4882a593Smuzhiyun 	   q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
2274*4882a593Smuzhiyun 	   q = next_chunk(q))
2275*4882a593Smuzhiyun 	check_inuse_chunk(q);
2276*4882a593Smuzhiyun #endif
2277*4882a593Smuzhiyun       avail += chunksize(p);
2278*4882a593Smuzhiyun       navail++;
2279*4882a593Smuzhiyun     }
2280*4882a593Smuzhiyun   }
2281*4882a593Smuzhiyun 
2282*4882a593Smuzhiyun   current_mallinfo.ordblks = navail;
2283*4882a593Smuzhiyun   current_mallinfo.uordblks = sbrked_mem - avail;
2284*4882a593Smuzhiyun   current_mallinfo.fordblks = avail;
2285*4882a593Smuzhiyun   current_mallinfo.hblks = n_mmaps;
2286*4882a593Smuzhiyun   current_mallinfo.hblkhd = mmapped_mem;
2287*4882a593Smuzhiyun   current_mallinfo.keepcost = chunksize(top);
2288*4882a593Smuzhiyun 
2289*4882a593Smuzhiyun }
2290*4882a593Smuzhiyun #endif	/* DEBUG */
2291*4882a593Smuzhiyun 
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun 
2294*4882a593Smuzhiyun /*
2295*4882a593Smuzhiyun 
2296*4882a593Smuzhiyun   malloc_stats:
2297*4882a593Smuzhiyun 
2298*4882a593Smuzhiyun     Prints the amount of space obtained from the system (both
2299*4882a593Smuzhiyun     via sbrk and mmap), the maximum amount (which may be more than
2300*4882a593Smuzhiyun     current if malloc_trim and/or munmap got called), the maximum
2301*4882a593Smuzhiyun     number of simultaneous mmap regions used, and the current number
2302*4882a593Smuzhiyun     of bytes allocated via malloc (or realloc, etc) but not yet
2303*4882a593Smuzhiyun     freed. (Note that this is the number of bytes allocated, not the
2304*4882a593Smuzhiyun     number requested. It will be larger than the number requested
2305*4882a593Smuzhiyun     because of alignment and bookkeeping overhead.)
2306*4882a593Smuzhiyun 
2307*4882a593Smuzhiyun */
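/*
  Usage sketch (illustrative only): malloc_stats is compiled in only
  when DEBUG is defined and prints via printf, so it can be dropped
  into test or diagnostic code to snapshot heap usage:

      #ifdef DEBUG
              malloc_stats();   // prints max/current system bytes and in-use bytes
      #endif
*/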
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun #ifdef DEBUG
malloc_stats()2310*4882a593Smuzhiyun void malloc_stats()
2311*4882a593Smuzhiyun {
2312*4882a593Smuzhiyun   malloc_update_mallinfo();
2313*4882a593Smuzhiyun   printf("max system bytes = %10u\n",
2314*4882a593Smuzhiyun 	  (unsigned int)(max_total_mem));
2315*4882a593Smuzhiyun   printf("system bytes     = %10u\n",
2316*4882a593Smuzhiyun 	  (unsigned int)(sbrked_mem + mmapped_mem));
2317*4882a593Smuzhiyun   printf("in use bytes     = %10u\n",
2318*4882a593Smuzhiyun 	  (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
2319*4882a593Smuzhiyun #if HAVE_MMAP
2320*4882a593Smuzhiyun   printf("max mmap regions = %10u\n",
2321*4882a593Smuzhiyun 	  (unsigned int)max_n_mmaps);
2322*4882a593Smuzhiyun #endif
2323*4882a593Smuzhiyun }
2324*4882a593Smuzhiyun #endif	/* DEBUG */
2325*4882a593Smuzhiyun 
2326*4882a593Smuzhiyun /*
2327*4882a593Smuzhiyun   mallinfo returns a copy of the updated current_mallinfo structure.
2328*4882a593Smuzhiyun */
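/*
  Usage sketch (illustrative only, DEBUG builds): the returned struct
  can be inspected directly; the field meanings follow the mallinfo
  conventions used by malloc_update_mallinfo above.  mALLINFo is the
  internal spelling; the standard mallinfo() name is normally an alias
  for it in malloc.h.

      #ifdef DEBUG
              struct mallinfo mi = mALLINFo();
              printf("free chunks %d, free bytes %d, top keepcost %d\n",
                     mi.ordblks, mi.fordblks, mi.keepcost);
      #endif
*/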
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun #ifdef DEBUG
mALLINFo()2331*4882a593Smuzhiyun struct mallinfo mALLINFo()
2332*4882a593Smuzhiyun {
2333*4882a593Smuzhiyun   malloc_update_mallinfo();
2334*4882a593Smuzhiyun   return current_mallinfo;
2335*4882a593Smuzhiyun }
2336*4882a593Smuzhiyun #endif	/* DEBUG */
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 
2340*4882a593Smuzhiyun 
2341*4882a593Smuzhiyun /*
2342*4882a593Smuzhiyun   mallopt:
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun     mallopt is the general SVID/XPG interface to tunable parameters.
2345*4882a593Smuzhiyun     The format is to provide a (parameter-number, parameter-value) pair.
2346*4882a593Smuzhiyun     mallopt then sets the corresponding parameter to the argument
2347*4882a593Smuzhiyun     value if it can (i.e., so long as the value is meaningful),
2348*4882a593Smuzhiyun     and returns 1 if successful else 0.
2349*4882a593Smuzhiyun 
2350*4882a593Smuzhiyun     See descriptions of tunable parameters above.
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun */
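/*
  Usage sketch (illustrative only): each call sets one tunable and
  returns 1 on success, 0 if the request cannot be honoured (for
  example M_MMAP_MAX with a non-zero value when mmap support is not
  compiled in).  mALLOPt is the internal spelling; the standard
  mallopt() name is normally an alias for it.

      mALLOPt(M_TRIM_THRESHOLD, 256 * 1024);   // trim when >256KB is unused
      mALLOPt(M_TOP_PAD, 16 * 1024);           // keep 16KB of extra headroom
      if (!mALLOPt(M_MMAP_MAX, 64))
              printf("mmap-backed allocations not available\n");
*/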
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun #if __STD_C
mALLOPt(int param_number,int value)2355*4882a593Smuzhiyun int mALLOPt(int param_number, int value)
2356*4882a593Smuzhiyun #else
2357*4882a593Smuzhiyun int mALLOPt(param_number, value) int param_number; int value;
2358*4882a593Smuzhiyun #endif
2359*4882a593Smuzhiyun {
2360*4882a593Smuzhiyun   switch(param_number)
2361*4882a593Smuzhiyun   {
2362*4882a593Smuzhiyun     case M_TRIM_THRESHOLD:
2363*4882a593Smuzhiyun       trim_threshold = value; return 1;
2364*4882a593Smuzhiyun     case M_TOP_PAD:
2365*4882a593Smuzhiyun       top_pad = value; return 1;
2366*4882a593Smuzhiyun     case M_MMAP_THRESHOLD:
2367*4882a593Smuzhiyun       mmap_threshold = value; return 1;
2368*4882a593Smuzhiyun     case M_MMAP_MAX:
2369*4882a593Smuzhiyun #if HAVE_MMAP
2370*4882a593Smuzhiyun       n_mmaps_max = value; return 1;
2371*4882a593Smuzhiyun #else
2372*4882a593Smuzhiyun       if (value != 0) return 0; else  n_mmaps_max = value; return 1;
2373*4882a593Smuzhiyun #endif
2374*4882a593Smuzhiyun 
2375*4882a593Smuzhiyun     default:
2376*4882a593Smuzhiyun       return 0;
2377*4882a593Smuzhiyun   }
2378*4882a593Smuzhiyun }
2379*4882a593Smuzhiyun 
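/*
  initf_malloc: prepare the simple pre-relocation malloc() arena.
  gd->malloc_base is expected to have been set already (crt0 reserves
  the region); this routine only records the size limit taken from
  SYS_MALLOC_F_LEN and resets the allocation offset to zero.
*/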
initf_malloc(void)2380*4882a593Smuzhiyun int initf_malloc(void)
2381*4882a593Smuzhiyun {
2382*4882a593Smuzhiyun #if CONFIG_VAL(SYS_MALLOC_F_LEN)
2383*4882a593Smuzhiyun 	assert(gd->malloc_base);	/* Set up by crt0.S */
2384*4882a593Smuzhiyun 	gd->malloc_limit = CONFIG_VAL(SYS_MALLOC_F_LEN);
2385*4882a593Smuzhiyun 	gd->malloc_ptr = 0;
2386*4882a593Smuzhiyun #endif
2387*4882a593Smuzhiyun 
2388*4882a593Smuzhiyun 	return 0;
2389*4882a593Smuzhiyun }
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun /*
2392*4882a593Smuzhiyun 
2393*4882a593Smuzhiyun History:
2394*4882a593Smuzhiyun 
2395*4882a593Smuzhiyun     V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
2396*4882a593Smuzhiyun       * return null for negative arguments
2397*4882a593Smuzhiyun       * Added several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
2398*4882a593Smuzhiyun 	 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
2399*4882a593Smuzhiyun 	  (e.g. WIN32 platforms)
2400*4882a593Smuzhiyun 	 * Clean up header file inclusion for WIN32 platforms
2401*4882a593Smuzhiyun 	 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
2402*4882a593Smuzhiyun 	 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
2403*4882a593Smuzhiyun 	   memory allocation routines
2404*4882a593Smuzhiyun 	 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
2405*4882a593Smuzhiyun 	 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
2406*4882a593Smuzhiyun 	   usage of 'assert' in non-WIN32 code
2407*4882a593Smuzhiyun 	 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
2408*4882a593Smuzhiyun 	   avoid infinite loop
2409*4882a593Smuzhiyun       * Always call 'fREe()' rather than 'free()'
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
2412*4882a593Smuzhiyun       * Fixed ordering problem with boundary-stamping
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
2415*4882a593Smuzhiyun       * Added pvalloc, as recommended by H.J. Liu
2416*4882a593Smuzhiyun       * Added 64bit pointer support mainly from Wolfram Gloger
2417*4882a593Smuzhiyun       * Added anonymously donated WIN32 sbrk emulation
2418*4882a593Smuzhiyun       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
2419*4882a593Smuzhiyun       * malloc_extend_top: fix mask error that caused wastage after
2420*4882a593Smuzhiyun 	foreign sbrks
2421*4882a593Smuzhiyun       * Add linux mremap support code from HJ Liu
2422*4882a593Smuzhiyun 
2423*4882a593Smuzhiyun     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
2424*4882a593Smuzhiyun       * Integrated most documentation with the code.
2425*4882a593Smuzhiyun       * Add support for mmap, with help from
2426*4882a593Smuzhiyun 	Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
2427*4882a593Smuzhiyun       * Use last_remainder in more cases.
2428*4882a593Smuzhiyun       * Pack bins using idea from  colin@nyx10.cs.du.edu
2429*4882a593Smuzhiyun       * Use ordered bins instead of best-fit threshold
2430*4882a593Smuzhiyun       * Eliminate block-local decls to simplify tracing and debugging.
2431*4882a593Smuzhiyun       * Support another case of realloc via move into top
2432*4882a593Smuzhiyun       * Fix error occurring when initial sbrk_base not word-aligned.
2433*4882a593Smuzhiyun       * Rely on page size for units instead of SBRK_UNIT to
2434*4882a593Smuzhiyun 	avoid surprises about sbrk alignment conventions.
2435*4882a593Smuzhiyun       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
2436*4882a593Smuzhiyun 	(raymond@es.ele.tue.nl) for the suggestion.
2437*4882a593Smuzhiyun       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
2438*4882a593Smuzhiyun       * More precautions for cases where other routines call sbrk,
2439*4882a593Smuzhiyun 	courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
2440*4882a593Smuzhiyun       * Added macros etc., allowing use in linux libc from
2441*4882a593Smuzhiyun 	H.J. Lu (hjl@gnu.ai.mit.edu)
2442*4882a593Smuzhiyun       * Inverted this history list
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
2445*4882a593Smuzhiyun       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
2446*4882a593Smuzhiyun       * Removed all preallocation code since under current scheme
2447*4882a593Smuzhiyun 	the work required to undo bad preallocations exceeds
2448*4882a593Smuzhiyun 	the work saved in good cases for most test programs.
2449*4882a593Smuzhiyun       * No longer use return list or unconsolidated bins since
2450*4882a593Smuzhiyun 	no scheme using them consistently outperforms those that don't
2451*4882a593Smuzhiyun 	given above changes.
2452*4882a593Smuzhiyun       * Use best fit for very large chunks to prevent some worst-cases.
2453*4882a593Smuzhiyun       * Added some support for debugging
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
2456*4882a593Smuzhiyun       * Removed footers when chunks are in use. Thanks to
2457*4882a593Smuzhiyun 	Paul Wilson (wilson@cs.texas.edu) for the suggestion.
2458*4882a593Smuzhiyun 
2459*4882a593Smuzhiyun     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
2460*4882a593Smuzhiyun       * Added malloc_trim, with help from Wolfram Gloger
2461*4882a593Smuzhiyun 	(wmglo@Dent.MED.Uni-Muenchen.DE).
2462*4882a593Smuzhiyun 
2463*4882a593Smuzhiyun     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
2464*4882a593Smuzhiyun 
2465*4882a593Smuzhiyun     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
2466*4882a593Smuzhiyun       * realloc: try to expand in both directions
2467*4882a593Smuzhiyun       * malloc: swap order of clean-bin strategy;
2468*4882a593Smuzhiyun       * realloc: only conditionally expand backwards
2469*4882a593Smuzhiyun       * Try not to scavenge used bins
2470*4882a593Smuzhiyun       * Use bin counts as a guide to preallocation
2471*4882a593Smuzhiyun       * Occasionally bin return list chunks in first scan
2472*4882a593Smuzhiyun       * Add a few optimizations from colin@nyx10.cs.du.edu
2473*4882a593Smuzhiyun 
2474*4882a593Smuzhiyun     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
2475*4882a593Smuzhiyun       * faster bin computation & slightly different binning
2476*4882a593Smuzhiyun       * merged all consolidations to one part of malloc proper
2477*4882a593Smuzhiyun 	 (eliminating old malloc_find_space & malloc_clean_bin)
2478*4882a593Smuzhiyun       * Scan 2 returns chunks (not just 1)
2479*4882a593Smuzhiyun       * Propagate failure in realloc if malloc returns 0
2480*4882a593Smuzhiyun       * Add stuff to allow compilation on non-ANSI compilers
2481*4882a593Smuzhiyun 	  from kpv@research.att.com
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
2484*4882a593Smuzhiyun       * removed potential for odd address access in prev_chunk
2485*4882a593Smuzhiyun       * removed dependency on getpagesize.h
2486*4882a593Smuzhiyun       * misc cosmetics and a bit more internal documentation
2487*4882a593Smuzhiyun       * anticosmetics: mangled names in macros to evade debugger strangeness
2488*4882a593Smuzhiyun       * tested on sparc, hp-700, dec-mips, rs6000
2489*4882a593Smuzhiyun 	  with gcc & native cc (hp, dec only) allowing
2490*4882a593Smuzhiyun 	  Detlefs & Zorn comparison study (in SIGPLAN Notices.)
2491*4882a593Smuzhiyun 
2492*4882a593Smuzhiyun     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
2493*4882a593Smuzhiyun       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
2494*4882a593Smuzhiyun 	 structure of old version,  but most details differ.)
2495*4882a593Smuzhiyun 
2496*4882a593Smuzhiyun */
2497