xref: /optee_os/lib/libutils/isoc/bget.c (revision 5d5d7d0b1c038a6836be9f0b38585f5aa6a4dd01)
1 /*
2 
3 			       B G E T
4 
5 			   Buffer allocator
6 
7     Designed and implemented in April of 1972 by John Walker, based on the
8     Case Algol OPRO$ algorithm implemented in 1966.
9 
10     Reimplemented in 1975 by John Walker for the Interdata 70.
11     Reimplemented in 1977 by John Walker for the Marinchip 9900.
12     Reimplemented in 1982 by Duff Kurland for the Intel 8080.
13 
14     Portable C version implemented in September of 1990 by an older, wiser
15     instance of the original implementor.
16 
17     Souped up and/or weighed down  slightly  shortly  thereafter  by  Greg
18     Lutz.
19 
20     AMIX  edition, including the new compaction call-back option, prepared
21     by John Walker in July of 1992.
22 
23     Bug in built-in test program fixed, ANSI compiler warnings eradicated,
24     buffer pool validator  implemented,  and  guaranteed  repeatable  test
25     added by John Walker in October of 1995.
26 
27     This program is in the public domain.
28 
29      1. This is the book of the generations of Adam.   In the day that God
30 	created man, in the likeness of God made he him;
31      2. Male and female created he them;  and  blessed	them,  and  called
32 	their name Adam, in the day when they were created.
33      3. And  Adam  lived  an hundred and thirty years,	and begat a son in
34 	his own likeness, and after his image; and called his name Seth:
35      4. And the days of  Adam  after  he  had  begotten  Seth  were  eight
36 	hundred years: and he begat sons and daughters:
37      5. And  all  the  days  that Adam lived were nine	hundred and thirty
38 	years: and he died.
39      6. And Seth lived an hundred and five years, and begat Enos:
40      7. And Seth lived after he begat Enos eight hundred and seven  years,
41 	and begat sons and daughters:
42      8.  And  all the days of Seth were nine hundred and twelve years: and
43 	 he died.
44      9. And Enos lived ninety years, and begat Cainan:
45     10. And Enos lived after he begat  Cainan eight  hundred  and  fifteen
46 	years, and begat sons and daughters:
47     11. And  all  the days of Enos were nine hundred  and five years:  and
48 	he died.
49     12. And Cainan lived seventy years and begat Mahalaleel:
50     13. And Cainan lived  after he  begat  Mahalaleel  eight  hundred  and
51 	forty years, and begat sons and daughters:
52     14. And  all the days of Cainan were nine  hundred and ten years:  and
53 	he died.
54     15. And Mahalaleel lived sixty and five years, and begat Jared:
55     16. And Mahalaleel lived  after  he  begat	Jared  eight  hundred  and
56 	thirty years, and begat sons and daughters:
57     17. And  all  the  days  of Mahalaleel  were eight hundred	ninety and
58 	five years: and he died.
59     18. And Jared lived an hundred sixty and  two  years,   and  he  begat
60 	Enoch:
61     19. And  Jared  lived  after he begat Enoch  eight hundred years,  and
62 	begat sons and daughters:
63     20. And all the days of Jared  were nine hundred sixty and two  years:
64 	and he died.
65     21. And Enoch lived sixty and five years, and begat Methuselah:
66     22. And  Enoch  walked   with  God	after  he  begat Methuselah  three
67 	hundred years, and begat sons and daughters:
68     23. And all the days of  Enoch  were  three  hundred  sixty  and  five
69 	years:
70     24. And Enoch walked with God: and he was not; for God took him.
71     25. And  Methuselah  lived	an  hundred  eighty and  seven years,  and
72 	begat Lamech.
73     26. And Methuselah lived after he  begat Lamech seven  hundred  eighty
74 	and two years, and begat sons and daughters:
75     27. And  all the days of Methuselah  were nine hundred  sixty and nine
76 	years: and he died.
77     28. And Lamech lived an hundred eighty  and two  years,  and  begat  a
78 	son:
79     29. And  he called his name Noah, saying,  This same shall	comfort us
80 	concerning  our  work and toil of our hands, because of the ground
81 	which the LORD hath cursed.
82     30. And  Lamech  lived  after  he begat Noah  five hundred	ninety and
83 	five years, and begat sons and daughters:
84     31. And all the days of Lamech were  seven hundred seventy	and  seven
85 	years: and he died.
86     32. And  Noah  was five hundred years old:	and Noah begat Shem,  Ham,
87 	and Japheth.
88 
89     And buffers begat buffers, and links begat	links,	and  buffer  pools
90     begat  links  to chains of buffer pools containing buffers, and lo the
91     buffers and links and pools of buffers and pools of links to chains of
92     pools  of  buffers were fruitful and they multiplied and the Operating
93     System looked down upon them and said that it was Good.
94 
95 
96     INTRODUCTION
97     ============
98 
99     BGET  is a comprehensive memory allocation package which is easily
100     configured to the needs of an application.	BGET is  efficient  in
101     both  the  time  needed to allocate and release buffers and in the
102     memory  overhead  required	for  buffer   pool   management.    It
103     automatically    consolidates   contiguous	 space	 to   minimise
104     fragmentation.  BGET is configured	by  compile-time  definitions.
105     Major options include:
106 
107 	*   A  built-in  test  program	to  exercise  BGET   and
108 	    demonstrate how the various functions are used.
109 
110         *   Allocation  by  either the "first fit" or "best fit"
111 	    method.
112 
113 	*   Wiping buffers at release time to catch  code  which
114 	    references previously released storage.
115 
116 	*   Built-in  routines to dump individual buffers or the
117 	    entire buffer pool.
118 
119 	*   Retrieval of allocation and pool size statistics.
120 
121 	*   Quantisation of buffer sizes to a power  of  two  to
122 	    satisfy hardware alignment constraints.
123 
124 	*   Automatic  pool compaction, growth, and shrinkage by
125 	    means of call-backs to user defined functions.
126 
127     Applications  of  BGET  can  range	from  storage  management   in
128     ROM-based  embedded programs to providing the framework upon which
129     a  multitasking  system  incorporating   garbage   collection   is
130     constructed.   BGET  incorporates  extensive  internal consistency
131     checking using the <assert.h> mechanism; all these checks  can  be
132     turned off by compiling with NDEBUG defined, yielding a version of
133     BGET with minimal size and maximum speed.
134 
135     The  basic	algorithm  underlying  BGET  has withstood the test of
136     time;  more  than  25  years   have   passed   since   the	 first
137     implementation  of	this  code.  And yet, it is substantially more
138     efficient than the native allocation  schemes  of  many  operating
139     systems: the Macintosh and Microsoft Windows to name two, on which
140     programs have obtained substantial speed-ups by layering  BGET  as
141     an application level memory manager atop the underlying system's.
142 
143     BGET has been implemented on the largest mainframes and the lowest
144     of	microprocessors.   It  has served as the core for multitasking
145     operating systems, multi-thread applications, embedded software in
146     data  network switching processors, and a host of C programs.  And
147     while it has accreted flexibility and additional options over  the
148     years,  it	remains  fast, memory efficient, portable, and easy to
149     integrate into your program.
150 
151 
152     BGET IMPLEMENTATION ASSUMPTIONS
153     ===============================
154 
155     BGET is written in as portable a dialect of C  as  possible.   The
156     only   fundamental	 assumption   about  the  underlying  hardware
157     architecture is that memory is allocated as a linear  array  which
158     can  be  addressed  as a vector of C "char" objects.  On segmented
159     address space architectures, this generally means that BGET should
160     be used to allocate storage within a single segment (although some
161     compilers	simulate   linear   address   spaces   on    segmented
162     architectures).   On  segmented  architectures,  then, BGET buffer
163     pools  may not be larger than a segment, but since BGET allows any
164     number of separate buffer pools, there is no limit	on  the  total
165     storage  which  can  be  managed,  only  on the largest individual
166     object which can be allocated.  Machines  with  a  linear  address
167     architecture,  such  as  the VAX, 680x0, Sparc, MIPS, or the Intel
168     80386 and above in native mode, may use BGET without restriction.
169 
170 
171     GETTING STARTED WITH BGET
172     =========================
173 
174     Although BGET can be configured in a multitude of fashions,  there
175     are  three	basic  ways  of  working  with	BGET.	The  functions
176     mentioned below are documented in the following  section.	Please
177     excuse  the  forward  references which are made in the interest of
178     providing a roadmap to guide you  to  the  BGET  functions  you're
179     likely to need.
180 
181     Embedded Applications
182     ---------------------
183 
184     Embedded applications  typically  have  a  fixed  area  of	memory
185     dedicated  to  buffer  allocation (often in a separate RAM address
186     space distinct from the ROM that contains  the  executable	code).
187     To	use  BGET in such an environment, simply call bpool() with the
188     start address and length of the buffer  pool  area	in  RAM,  then
189     allocate  buffers  with  bget()  and  release  them  with  brel().
190     Embedded applications with very limited RAM but abundant CPU speed
191     may  benefit  by configuring BGET for BestFit allocation (which is
192     usually not worth it in other environments).
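
    As a minimal sketch (the storage area, its size, and the names below
    are purely illustrative, and the extra alignment, header-size, and
    pool-set arguments follow the function definitions found later in
    this file rather than the classic prototypes listed in the next
    section):

	    static char pool_storage[4096];
	    static struct bpoolset pool = {
		.freelist = {
		    .bh = { 0, 0 },
		    .ql = { &pool.freelist, &pool.freelist },
		}
	    };

	    void example(void)
	    {
		void *p;

		bpool(pool_storage, sizeof(pool_storage), &pool);
		p = bget(0, 0, 128, &pool);
		if (p != NULL)
		    brel(p, &pool, false);
	    }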
193 
194     Malloc() Emulation
195     ------------------
196 
197     If the C library malloc() function is too  slow,  not  present  in
198     your  development environment (for example, in a native Windows or
199     Macintosh program), or otherwise unsuitable, you  can  replace  it
200     with  BGET.  Initially define a buffer pool of an appropriate size
201     with bpool()--usually obtained by making a call to	the  operating
202     system's  low-level  memory allocator.  Then allocate buffers with
203     bget(), bgetz(), and bgetr() (the last two permit  the  allocation
204     of	buffers initialised to zero and [inefficient] re-allocation of
205     existing buffers for  compatibility  with  C  library  functions).
206     Release buffers by calling brel().	If a buffer allocation request
207     fails, obtain more storage from the underlying  operating  system,
208     add it to the buffer pool by another call to bpool(), and continue
209     execution.
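
    As an illustrative sketch only (the wrapper names here are invented,
    the zero alignment and header-size arguments and the pool-set
    argument follow this file's definitions, and refilling the pool with
    another bpool() call when an allocation fails is omitted):

	    static struct bpoolset mpool = {
		.freelist = {
		    .bh = { 0, 0 },
		    .ql = { &mpool.freelist, &mpool.freelist },
		}
	    };

	    void *my_malloc(size_t size)
	    {
		return bget(0, 0, (bufsize)size, &mpool);
	    }

	    void *my_realloc(void *p, size_t size)
	    {
		return bgetr(p, 0, 0, (bufsize)size, &mpool);
	    }

	    void my_free(void *p)
	    {
		if (p != NULL)
		    brel(p, &mpool, false);
	    }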
210 
211     Automatic Storage Management
212     ----------------------------
213 
214     You can use BGET as your application's native memory  manager  and
215     implement  automatic  storage  pool  expansion,  contraction,  and
216     optionally application-specific  memory  compaction  by  compiling
217     BGET  with	the  BECtl  variable defined, then calling bectl() and
218     supplying  functions  for  storage	compaction,  acquisition,  and
219     release,  as  well as a standard pool expansion increment.	All of
220     these functions are optional (although it doesn't make much  sense
221     to	provide  a  release  function without an acquisition function,
222     does it?).	Once the call-back functions have  been  defined  with
223     bectl(),  you simply use bget() and brel() to allocate and release
224     storage as before.	You can supply an  initial  buffer  pool  with
225     bpool()  or  rely  on  automatic  allocation to acquire the entire
226     pool.  When a call on  bget()  cannot  be  satisfied,  BGET  first
227     checks  if	a compaction function has been supplied.  If so, it is
228     called (with the space required to satisfy the allocation  request
229     and a sequence number to allow the compaction routine to be called
230     successively without looping).  If the compaction function is able
231     to  free any storage (it needn't know whether the storage it freed
232     was adequate) it should return a  nonzero  value,  whereupon  BGET
233     will retry the allocation request and, if it fails again, call the
234     compaction function again with the next-higher sequence number.
235 
236     If	the  compaction  function  returns zero, indicating failure to
237     free space, or no compaction function is defined, BGET next  tests
238     whether  a	non-NULL  allocation function was supplied to bectl().
239     If so, that function is called with  an  argument  indicating  how
240     many  bytes  of  additional  space are required.  This will be the
241     standard pool expansion increment supplied in the call to  bectl()
242     unless  the  original  bget()  call requested a buffer larger than
243     this; buffers larger than the standard pool block can  be  managed
244     "off  the books" by BGET in this mode.  If the allocation function
245     succeeds in obtaining the storage, it returns a pointer to the new
246     block  and	BGET  expands  the  buffer  pool;  if  it  fails,  the
247     allocation request fails and returns NULL to  the  caller.	 If  a
248     non-NULL  release  function  is  supplied,	expansion blocks which
249     become totally empty are released  to  the	global	free  pool  by
250     passing their addresses to the release function.
251 
252     Equipped  with  appropriate  allocation,  release,	and compaction
253     functions, BGET can be used as part of very  sophisticated	memory
254     management	 strategies,  including  garbage  collection.	(Note,
255     however, that BGET is *not* a garbage  collector  by  itself,  and
256     that  developing  such a system requires much additional logic and
257     careful design of the application's memory allocation strategy.)
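
    A hedged sketch of such a configuration, assuming BGET was compiled
    with BECtl defined and that low_level_alloc() and low_level_free()
    stand in for whatever allocator the underlying system provides (the
    expansion increment of 16384 bytes is arbitrary):

	    static void *pool_acquire(bufsize size)
	    {
		return low_level_alloc(size);
	    }

	    static void pool_release(void *buf)
	    {
		low_level_free(buf);
	    }

	    void setup_pool(struct bpoolset *poolset)
	    {
		bectl(NULL, pool_acquire, pool_release, 16384, poolset);
	    }

    Passing NULL for the compaction function simply skips the compaction
    step described above; bget() and brel() are then used as usual.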
258 
259 
260     BGET FUNCTION DESCRIPTIONS
261     ==========================
262 
263     Functions implemented in this file (some are enabled by certain of
264     the optional settings below):
265 
266 	    void bpool(void *buffer, bufsize len);
267 
268     Create a buffer pool of <len> bytes, using the storage starting at
269     <buffer>.	You  can  call	bpool()  subsequently  to   contribute
270     additional storage to the overall buffer pool.
271 
272 	    void *bget(bufsize size);
273 
274     Allocate  a  buffer of <size> bytes.  The address of the buffer is
275     returned, or NULL if insufficient memory was available to allocate
276     the buffer.
277 
278 	    void *bgetz(bufsize size);
279 
280     Allocate a buffer of <size> bytes and clear it to all zeroes.  The
281     address of the buffer is returned, or NULL if insufficient	memory
282     was available to allocate the buffer.
283 
284 	    void *bgetr(void *buffer, bufsize newsize);
285 
286     Reallocate a buffer previously allocated by bget(),  changing  its
287     size  to  <newsize>  and  preserving  all  existing data.  NULL is
288     returned if insufficient memory is	available  to  reallocate  the
289     buffer, in which case the original buffer remains intact.
290 
291 	    void brel(void *buf);
292 
293     Return  the  buffer  <buf>, previously allocated by bget(), to the
294     free space pool.
295 
296 	    void bectl(int (*compact)(bufsize sizereq, int sequence),
297 		       void *(*acquire)(bufsize size),
298 		       void (*release)(void *buf),
299 		       bufsize pool_incr);
300 
301     Expansion control: specify functions through which the package may
302     compact  storage  (or  take  other	appropriate  action)  when  an
303     allocation	request  fails,  and  optionally automatically acquire
304     storage for expansion blocks  when	necessary,  and  release  such
305     blocks when they become empty.  If <compact> is non-NULL, whenever
306     a buffer allocation request fails, the <compact> function will  be
307     called with arguments specifying the number of bytes (total buffer
308     size,  including  header  overhead)  required   to	 satisfy   the
309     allocation request, and a sequence number indicating the number of
310     consecutive  calls	on  <compact>  attempting  to	satisfy   this
311     allocation	request.   The sequence number is 1 for the first call
312     on <compact> for a given allocation  request,  and	increments  on
313     subsequent	calls,	permitting  the  <compact>  function  to  take
314     increasingly dire measures in an attempt to free up  storage.   If
315     the  <compact>  function  returns  a nonzero value, the allocation
316     attempt is re-tried.  If <compact> returns 0 (as  it  must	if  it
317     isn't  able  to  release  any  space  or add storage to the buffer
318     pool), the allocation request fails, which can  trigger  automatic
319     pool expansion if the <acquire> argument is non-NULL.  At the time
320     the  <compact>  function  is  called,  the	state  of  the	buffer
321     allocator  is  identical  to  that	at  the  moment the allocation
322     request was made; consequently, the <compact>  function  may  call
323     brel(), bpool(), bstats(), and/or directly manipulate  the	buffer
324     pool  in  any  manner which would be valid were the application in
325     control.  This does not, however, relieve the  <compact>  function
326     of the need to ensure that whatever actions it takes do not change
327     things   underneath  the  application  that  made  the  allocation
328     request.  For example, a <compact> function that released a buffer
329     in	the  process  of  being reallocated with bgetr() would lead to
330     disaster.  Implementing a safe and effective  <compact>  mechanism
331     requires  careful  design of an application's memory architecture,
332     and cannot generally be easily retrofitted into existing code.
333 
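
    For instance, a <compact> function obeying this contract might look
    like the following sketch, where release_cached_buffers() stands in
    for application-specific logic returning nonzero if it freed any
    storage, and the limit of three attempts is arbitrary:

	    static int my_compact(bufsize sizereq, int sequence)
	    {
		if (sequence > 3)
		    return 0;
		return release_cached_buffers(sizereq);
	    }
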
334     If <acquire> is non-NULL, that function will be called whenever an
335     allocation	request  fails.  If the <acquire> function succeeds in
336     allocating the requested space and returns a pointer  to  the  new
337     area,  allocation will proceed using the expanded buffer pool.  If
338     <acquire> cannot obtain the requested space, it should return NULL
339     and   the	entire	allocation  process  will  fail.   <pool_incr>
340     specifies the normal expansion block size.	Providing an <acquire>
341     function will cause subsequent bget()  requests  for  buffers  too
342     large  to  be  managed in the linked-block scheme (in other words,
343     larger than <pool_incr> minus the buffer overhead) to be satisfied
344     directly by calls to the <acquire> function.  Automatic release of
345     empty pool blocks will occur only if all pool blocks in the system
346     are the size given by <pool_incr>.
347 
348 	    void bstats(bufsize *curalloc, bufsize *totfree,
349 			bufsize *maxfree, long *nget, long *nrel);
350 
351     The amount	of  space  currently  allocated  is  stored  into  the
352     variable  pointed  to by <curalloc>.  The total free space (sum of
353     all free blocks in the pool) is stored into the  variable  pointed
354     to	by  <totfree>, and the size of the largest single block in the
355     free space	pool  is  stored  into	the  variable  pointed	to  by
356     <maxfree>.	 The  variables  pointed  to  by <nget> and <nrel> are
357     filled, respectively, with	the  number  of  successful  (non-NULL
358     return) bget() calls and the number of brel() calls.
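
    For example (a sketch, where pool is a struct bpoolset as in the
    earlier sketches and BGET was compiled with BufStats defined):

	    bufsize curalloc, totfree, maxfree;
	    long nget, nrel;

	    bstats(&curalloc, &totfree, &maxfree, &nget, &nrel, &pool);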
359 
360 	    void bstatse(bufsize *pool_incr, long *npool,
361 			 long *npget, long *nprel,
362 			 long *ndget, long *ndrel);
363 
364     Extended  statistics: The expansion block size will be stored into
365     the variable pointed to by <pool_incr>, or the negative thereof if
366     automatic  expansion  block  releases are disabled.  The number of
367     currently active pool blocks will  be  stored  into  the  variable
368     pointed  to  by  <npool>.  The variables pointed to by <npget> and
369     <nprel> will be filled with, respectively, the number of expansion
370     block   acquisitions   and	releases  which  have  occurred.   The
371     variables pointed to by <ndget> and <ndrel> will  be  filled  with
372     the  number  of  bget()  and  brel()  calls, respectively, managed
373     through blocks directly allocated by the acquisition  and  release
374     functions.
375 
376 	    void bufdump(void *buf);
377 
378     The buffer pointed to by <buf> is dumped on standard output.
379 
380 	    void bpoold(void *pool, int dumpalloc, int dumpfree);
381 
382     All buffers in the buffer pool <pool>, previously initialised by a
383     call on bpool(), are listed in ascending memory address order.  If
384     <dumpalloc> is nonzero, the  contents  of  allocated  buffers  are
385     dumped;  if <dumpfree> is nonzero, the contents of free blocks are
386     dumped.
387 
388 	    int bpoolv(void *pool);
389 
390     The  named	buffer	pool,  previously  initialised	by  a  call on
391     bpool(), is validated for bad pointers, overwritten data, etc.  If
392     compiled with NDEBUG not defined, any error generates an assertion
393     failure.  Otherwise 1 is returned if the pool is valid,  0	if  an
394     error is found.
395 
396 
397     BGET CONFIGURATION
398     ==================
399 */
400 
401 /*
402  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
403  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
404  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
405  * IN NO EVENT SHALL ST BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
406  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
407  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
408  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
409  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
410  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
411  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
412  */
413 
414 /* #define BGET_ENABLE_ALL_OPTIONS */
415 #ifdef BGET_ENABLE_ALL_OPTIONS
416 #define TestProg    20000	      /* Generate built-in test program
417 					 if defined.  The value specifies
418 					 how many buffer allocation attempts
419 					 the test program should make. */
420 
421 #define SizeQuant   4		      /* Buffer allocation size quantum:
422 					 all buffers allocated are a
423 					 multiple of this size.  This
424 					 MUST be a power of two. */
425 
426 #define BufDump     1		      /* Define this symbol to enable the
427 					 bpoold() function which dumps the
428 					 buffers in a buffer pool. */
429 
430 #define BufValid    1		      /* Define this symbol to enable the
431 					 bpoolv() function for validating
432 					 a buffer pool. */
433 
434 #define DumpData    1		      /* Define this symbol to enable the
435 					 bufdump() function which allows
436 					 dumping the contents of an allocated
437 					 or free buffer. */
438 
439 #define BufStats    1		      /* Define this symbol to enable the
440 					 bstats() function which calculates
441 					 the total free space in the buffer
442 					 pool, the largest available
443 					 buffer, and the total space
444 					 currently allocated. */
445 
446 #define FreeWipe    1		      /* Wipe free buffers to a guaranteed
447 					 pattern of garbage to trip up
448 					 miscreants who attempt to use
449 					 pointers into released buffers. */
450 
451 #define BestFit     1		      /* Use a best fit algorithm when
452 					 searching for space for an
453 					 allocation request.  This uses
454 					 memory more efficiently, but
455 					 allocation will be much slower. */
456 
457 #define BECtl	    1		      /* Define this symbol to enable the
458 					 bectl() function for automatic
459 					 pool space control.  */
460 #endif
461 
462 #include <stdio.h>
463 #include <stdbool.h>
464 
465 #ifdef lint
466 #define NDEBUG			      /* Exits in asserts confuse lint */
467 /* LINTLIBRARY */                     /* Don't complain about def, no ref */
468 extern char *sprintf();               /* Sun includes don't define sprintf */
469 #endif
470 
471 #include <assert.h>
472 #include <memory.h>
473 
474 #ifdef BufDump			      /* BufDump implies DumpData */
475 #ifndef DumpData
476 #define DumpData    1
477 #endif
478 #endif
479 
480 #ifdef DumpData
481 #include <ctype.h>
482 #endif
483 
484 #ifdef __KERNEL__
485 #ifdef CFG_CORE_BGET_BESTFIT
486 #define BestFit 1
487 #endif
488 #endif
489 
490 /*  Declare the interface, including the requested buffer size type,
491     bufsize.  */
492 
493 #include "bget.h"
494 
495 #define MemSize     int 	      /* Type for size arguments to memxxx()
496 					 functions such as memcmp(). */
497 
498 /* Queue links */
499 
500 struct qlinks {
501     struct bfhead *flink;	      /* Forward link */
502     struct bfhead *blink;	      /* Backward link */
503 };
504 
505 /* Header in allocated and free buffers */
506 
507 struct bhead {
508     bufsize prevfree;		      /* Relative link back to previous
509 					 free buffer in memory or 0 if
510 					 previous buffer is allocated.	*/
511     bufsize bsize;		      /* Buffer size: positive if free,
512 					 negative if allocated. */
513 };
514 #define BH(p)	((struct bhead *) (p))
515 
516 /*  Header in directly allocated buffers (by acqfcn) */
517 
518 struct bdhead {
519     bufsize tsize;		      /* Total size, including overhead */
520     bufsize offs;		      /* Offset from allocated buffer */
521     struct bhead bh;		      /* Common header */
522 };
523 #define BDH(p)	((struct bdhead *) (p))
524 
525 /* Header in free buffers */
526 
527 struct bfhead {
528     struct bhead bh;		      /* Common allocated/free header */
529     struct qlinks ql;		      /* Links on free list */
530 };
531 #define BFH(p)	((struct bfhead *) (p))
532 
533 /* Poolset definition */
534 struct bpoolset {
535     struct bfhead freelist;
536 #ifdef BufStats
537     bufsize totalloc;		      /* Total space currently allocated */
538     long numget;		      /* Number of bget() calls */
539     long numrel;		      /* Number of brel() calls */
540     uint64_t free2_sum;	              /* Sum of size^2 of each free chunk */
541 #ifdef BECtl
542     long numpblk;		      /* Number of pool blocks */
543     long numpget;		      /* Number of block gets and rels */
544     long numprel;
545     long numdget;		      /* Number of direct gets and rels */
546     long numdrel;
547 #endif /* BECtl */
548 #endif /* BufStats */
549 
550 #ifdef BECtl
551     /* Automatic expansion block management functions */
552 
553     int (*compfcn) _((bufsize sizereq, int sequence));
554     void *(*acqfcn) _((bufsize size));
555     void (*relfcn) _((void *buf));
556 
557     bufsize exp_incr;		      /* Expansion block size */
558     bufsize pool_len;		      /* 0: no bpool calls have been made
559 					 -1: not all pool blocks are
560 					     the same size
561 					 >0: (common) block size for all
562 					     bpool calls made so far
563 				      */
564 #endif
565 };
566 
567 /*  Minimum allocation quantum: */
568 
569 #define QLSize	(sizeof(struct qlinks))
570 #define SizeQ	((SizeQuant > QLSize) ? SizeQuant : QLSize)
571 
572 #define V   (void)		      /* To denote unwanted returned values */
573 
574 /* End sentinel: value placed in bsize field of dummy block delimiting
575    end of pool block.  The most negative number which will  fit  in  a
576    bufsize, defined in a way that the compiler will accept. */
577 
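/* For instance, with a 32-bit signed bufsize the expression below
   evaluates to -(((1L << 30) - 1) * 2) - 2 = -2147483648, the most
   negative value representable in the type (two's complement is
   asserted in bpool() below). */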
578 #define ESent	((bufsize) (-(((1L << (sizeof(bufsize) * 8 - 2)) - 1) * 2) - 2))
579 
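/*
 * buf_get_pos() - find where an allocation fits within a free block
 * @bf:		The free block being examined
 * @align:	Requested alignment, a power of two
 * @hdr_size:	Size of an extra header placed just after the struct bhead
 * @size:	Total allocation size, including header overhead
 *
 * Returns the offset from @bf at which the allocated buffer's header can
 * be placed so that the address following the struct bhead and the extra
 * header is aligned to @align.  The offset is either 0 (the allocation
 * starts at the beginning of the block) or large enough that the leading
 * part of the block, at least SizeQ + sizeof(struct bhead) bytes, can
 * remain as a smaller free block.  Returns -1 if the request cannot be
 * satisfied from this block.
 */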
580 static bufsize buf_get_pos(struct bfhead *bf, bufsize align, bufsize hdr_size,
581                            bufsize size)
582 {
583     unsigned long buf = 0;
584     bufsize pos = 0;
585 
586     if (bf->bh.bsize < size)
587        return -1;
588 
589     /*
590      * plus sizeof(struct bhead) and hdr_size since buf will follow just
591      * after a struct bhead and an eventual extra header.
592      */
593     buf = (unsigned long)bf + bf->bh.bsize - size + sizeof(struct bhead) +
594           hdr_size;
595     buf &= ~(align - 1);
596     pos = buf - (unsigned long)bf - sizeof(struct bhead) - hdr_size;
597 
598     if (pos == 0) /* exact match */
599         return pos;
600     if (pos >= SizeQ + sizeof(struct bhead)) /* room for an empty buffer */
601         return pos;
602 
603     return -1;
604 }
605 
606 static uint64_t __maybe_unused get_free2_sum(struct bpoolset *poolset)
607 {
608 	struct bfhead *b = poolset->freelist.ql.flink;
609 	uint64_t free2_sum = 0;
610 	uint64_t bs = 0;
611 
612 	while (b != &poolset->freelist) {
613 		bs = b->bh.bsize;
614 		free2_sum += bs * bs;
615 		b = b->ql.flink;		  /* Link to next buffer */
616 	}
617 
618 	return free2_sum;
619 }
620 
621 /*
622  * update_free2_sum() - cumulative update of the free^2 sum
623  * @poolset:	The addressed poolset
624  * @rem_sz:	Size of a removed free block, 0 if unused
625  * @rem_sz2:	Size of one more removed free block, 0 if unused
626  * @add_sz:	Size of an added free block, 0 if unused
627  *
628  * As free blocks are removed, added, or merged the sum of the size ^ 2 of
629  * all free blocks needs to be updated. The most complicated case is where
630  * two free blocks are merged into one free block, both the old sizes must be
631  * supplied and the new size.
632  */
633 static void update_free2_sum(struct bpoolset *poolset __maybe_unused,
634 			     uint64_t rem_sz __maybe_unused,
635 			     uint64_t rem_sz2 __maybe_unused,
636 			     uint64_t add_sz __maybe_unused)
637 {
638 #ifdef BufStats
639 	uint64_t r2 = rem_sz * rem_sz + rem_sz2 * rem_sz2;
640 	uint64_t a2 = add_sz * add_sz;
641 	uint64_t f2s __maybe_unused = get_free2_sum(poolset);
642 
643 	assert(f2s == poolset->free2_sum - r2 + a2);
644 	assert(poolset->free2_sum >= r2);
645 	poolset->free2_sum -= r2;
646 	poolset->free2_sum += a2;
647 #endif
648 }
649 
650 /*  BGET  --  Allocate a buffer.  */
651 
652 void *bget(requested_align, hdr_size, requested_size, poolset)
653   bufsize requested_align;
654   bufsize hdr_size;
655   bufsize requested_size;
656   struct bpoolset *poolset;
657 {
658     bufsize align = requested_align;
659     bufsize size = requested_size;
660     bufsize pos;
661     struct bfhead *b;
662 #ifdef BestFit
663     struct bfhead *best;
664 #endif
665     void *buf;
666 #ifdef BECtl
667     int compactseq = 0;
668 #endif
669 
670     assert(size > 0);
671     COMPILE_TIME_ASSERT(BGET_HDR_QUANTUM == SizeQ);
672 
673     if (align < 0 || (align > 0 && !IS_POWER_OF_TWO((unsigned long)align)))
674         return NULL;
675     if (hdr_size % BGET_HDR_QUANTUM != 0)
676         return NULL;
677 
678     if (size < SizeQ) { 	      /* Need at least room for the */
679 	size = SizeQ;		      /*    queue links.  */
680     }
681     if (align < SizeQ)
682         align = SizeQ;
683 #ifdef SizeQuant
684 #if SizeQuant > 1
685     if (ADD_OVERFLOW(size, SizeQuant - 1, &size))
686         return NULL;
687 
688     size = ROUNDDOWN(size, SizeQuant);
689 #endif
690 #endif
691 
692     /* Add overhead in allocated buffer to size required. */
693     if (ADD_OVERFLOW(size, sizeof(struct bhead), &size))
694         return NULL;
695     if (ADD_OVERFLOW(size, hdr_size, &size))
696         return NULL;
697 
698 #ifdef BECtl
699     /* If a compact function was provided in the call to bectl(), wrap
700        a loop around the allocation process  to  allow	compaction  to
701        intervene in case we don't find a suitable buffer in the chain. */
702 
703     while (1) {
704 #endif
705 	b = poolset->freelist.ql.flink;
706 #ifdef BestFit
707 	best = &poolset->freelist;
708 #endif
709 
710 
711 	/* Scan the free list searching for the first buffer big enough
712 	   to hold the requested size buffer. */
713 
714 #ifdef BestFit
715 	while (b != &poolset->freelist) {
716             assert(b->bh.prevfree == 0);
717             pos = buf_get_pos(b, align, hdr_size, size);
718             if (pos >= 0) {
719 		if ((best == &poolset->freelist) ||
720 		    (b->bh.bsize < best->bh.bsize)) {
721 		    best = b;
722 		}
723 	    }
724 	    b = b->ql.flink;		  /* Link to next buffer */
725 	}
726 	b = best;
727 #endif /* BestFit */
728 
729 	while (b != &poolset->freelist) {
730             pos = buf_get_pos(b, align, hdr_size, size);
731             if (pos >= 0) {
732                 struct bhead *b_alloc = BH((char *)b + pos);
733                 struct bhead *b_next = BH((char *)b + b->bh.bsize);
734 		bufsize rem_sz = b->bh.bsize;
735 		bufsize add_sz = pos;
736 
737                 assert(b_next->prevfree == b->bh.bsize);
738 
739                 /*
740                  * Zero the back pointer in the next buffer in memory
741                  * to indicate that this buffer is allocated.
742                  */
743                 b_next->prevfree = 0;
744 
745                 assert(b->ql.blink->ql.flink == b);
746                 assert(b->ql.flink->ql.blink == b);
747 
748                 if (pos == 0) {
749                     /*
750                      * Need to allocate from the beginning of this free block.
751                      * Unlink the block and mark it as allocated.
752                      */
753 		    b->ql.blink->ql.flink = b->ql.flink;
754 		    b->ql.flink->ql.blink = b->ql.blink;
755 
756 		    /* Negate size to mark buffer allocated. */
757 		    b->bh.bsize = -b->bh.bsize;
758                 } else {
759                     /*
760                      * Carve out the memory allocation from the end of this
761                      * free block. Negative size to mark buffer allocated.
762                      */
763                     b_alloc->bsize = -(b->bh.bsize - pos);
764                     b_alloc->prevfree = pos;
765                     b->bh.bsize = pos;
766                 }
767 		update_free2_sum(poolset, rem_sz, 0, add_sz);
768 
769                 assert(b_alloc->bsize < 0);
770                 /*
771                  * At this point b_alloc is pointing to the allocated
772                  * buffer and b_next to the buffer following. b might be a
773                  * free block or a used block now.
774                  */
775                 if (-b_alloc->bsize - size > SizeQ + sizeof(struct bhead)) {
776                     /*
777                      * b_alloc has too much unused memory at the
778                      * end, so we need to split the block and register that
779                      * last part as free.
780                      */
781                     b = BFH((char *)b_alloc + size);
782                     b->bh.bsize = -b_alloc->bsize - size;
783                     b->bh.prevfree = 0;
784                     b_alloc->bsize += b->bh.bsize;
785 
786                     assert(poolset->freelist.ql.blink->ql.flink ==
787 			   &poolset->freelist);
788                     assert(poolset->freelist.ql.flink->ql.blink ==
789 			   &poolset->freelist);
790                     b->ql.flink = &poolset->freelist;
791                     b->ql.blink = poolset->freelist.ql.blink;
792                     poolset->freelist.ql.blink = b;
793                     b->ql.blink->ql.flink = b;
794 
795                     assert(BH((char *)b + b->bh.bsize) == b_next);
796                     b_next->prevfree = b->bh.bsize;
797 		    update_free2_sum(poolset, 0, 0, b->bh.bsize);
798                 }
799 
800 #ifdef BufStats
801 		poolset->totalloc -= b_alloc->bsize;
802 		poolset->numget++;		  /* Increment number of bget() calls */
803 #endif
804                 buf = (char *)b_alloc + sizeof(struct bhead);
805                 return buf;
806 	    }
807 	    b = b->ql.flink;		  /* Link to next buffer */
808 	}
809 #ifdef BECtl
810 
811         /* We failed to find a buffer.  If there's a compact  function
812 	   defined,  notify  it  of the size requested.  If it returns
813 	   TRUE, try the allocation again. */
814 
815 	if ((poolset->compfcn == NULL) ||
816 	    (!(poolset->compfcn)(size, ++compactseq))) {
817 	    break;
818 	}
819     }
820 
821     /* No buffer available with requested size free. */
822 
823     /* Don't give up yet -- look in the reserve supply. */
824 
825     if (poolset->acqfcn != NULL) {
826 	if (size > poolset->exp_incr - sizeof(struct bfhead) - align) {
827 
828 	    /* Request	is  too  large	to  fit in a single expansion
829 	       block.  Try to satisfy it by a direct buffer acquisition. */
830             char *p;
831 
832 	    size += sizeof(struct bdhead) - sizeof(struct bhead);
833             if (align > QLSize)
834                 size += align;
835 	    p = poolset->acqfcn(size);
836             if (p != NULL) {
837 	        struct bdhead *bdh;
838 
839                 if (align <= QLSize) {
840                     bdh = BDH(p);
841 		    buf = bdh + 1;
842                 } else {
843                     unsigned long tp = (unsigned long)p;
844 
845                     tp += sizeof(*bdh) + hdr_size + align;
846                     tp &= ~(align - 1);
847                     tp -= hdr_size;
848 		    buf = (void *)tp;
849                     bdh = BDH((char *)buf - sizeof(*bdh));
850                 }
851 
852 		/*  Mark the buffer special by setting the size field
853 		    of its header to zero.  */
854 		bdh->bh.bsize = 0;
855 		bdh->bh.prevfree = 0;
856 		bdh->tsize = size;
857 		bdh->offs = (unsigned long)bdh - (unsigned long)p;
858 #ifdef BufStats
859 		poolset->totalloc += size;
860 		poolset->numget++;	  /* Increment number of bget() calls */
861 		poolset->numdget++;	  /* Direct bget() call count */
862 #endif
863 		return buf;
864 	    }
865 
866 	} else {
867 
868 	    /*	Try to obtain a new expansion block */
869 
870 	    void *newpool;
871 
872 	    if ((newpool = poolset->acqfcn((bufsize) poolset->exp_incr)) != NULL) {
873 		bpool(newpool, poolset->exp_incr, poolset);
874                 buf = bget(align, hdr_size, requested_size, poolset); /* This can't, I say, can't
875 						       get into a loop. */
876 		return buf;
877 	    }
878 	}
879     }
880 
881     /*	Still no buffer available */
882 
883 #endif /* BECtl */
884 
885     return NULL;
886 }
887 
888 /*  BGETZ  --  Allocate a buffer and clear its contents to zero.  We clear
889 	       the  entire  contents  of  the buffer to zero, not just the
890 	       region requested by the caller. */
891 
892 void *bgetz(align, hdr_size, size, poolset)
893   bufsize align;
894   bufsize hdr_size;
895   bufsize size;
896   struct bpoolset *poolset;
897 {
898     char *buf = (char *) bget(align, hdr_size, size, poolset);
899 
900     if (buf != NULL) {
901 	struct bhead *b;
902 	bufsize rsize;
903 
904 	b = BH(buf - sizeof(struct bhead));
905 	rsize = -(b->bsize);
906 	if (rsize == 0) {
907 	    struct bdhead *bd;
908 
909 	    bd = BDH(buf - sizeof(struct bdhead));
910 	    rsize = bd->tsize - sizeof(struct bdhead) - bd->offs;
911 	} else {
912 	    rsize -= sizeof(struct bhead);
913 	}
914 	assert(rsize >= size);
915 	V memset_unchecked(buf, 0, (MemSize) rsize);
916     }
917     return ((void *) buf);
918 }
919 
920 /*  BGETR  --  Reallocate a buffer.  This is a minimal implementation,
921 	       simply in terms of brel()  and  bget().	 It  could  be
922 	       enhanced to allow the buffer to grow into adjacent free
923 	       blocks and to avoid moving data unnecessarily.  */
924 
925 void *bgetr(buf, align, hdr_size, size, poolset)
926   void *buf;
927   bufsize align;
928   bufsize hdr_size;
929   bufsize size;
930   struct bpoolset *poolset;
931 {
932     void *nbuf;
933     bufsize osize;		      /* Old size of buffer */
934     struct bhead *b;
935 
936     if ((nbuf = bget(align, hdr_size, size, poolset)) == NULL) { /* Acquire new buffer */
937 	return NULL;
938     }
939     if (buf == NULL) {
940 	return nbuf;
941     }
942     b = BH(((char *) buf) - sizeof(struct bhead));
943     osize = -b->bsize;
944 #ifdef BECtl
945     if (osize == 0) {
946 	/*  Buffer acquired directly through acqfcn. */
947 	struct bdhead *bd;
948 
949 	bd = BDH(((char *) buf) - sizeof(struct bdhead));
950 	osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
951     } else
952 #endif
953 	osize -= sizeof(struct bhead);
954     assert(osize > 0);
955     V memcpy_unchecked((char *) nbuf, (char *) buf, /* Copy the data */
956 	     (MemSize) ((size < osize) ? size : osize));
957 #ifndef __KERNEL__
958     /* User space reallocations are always zeroed */
959     if (size > osize)
960          V memset_unchecked((char *) nbuf + osize, 0, size - osize);
961 #endif
962     brel(buf, poolset, false /* !wipe */);
963     return nbuf;
964 }
965 
966 /*  BREL  --  Release a buffer.  */
967 
968 void brel(buf, poolset, wipe)
969   void *buf;
970   struct bpoolset *poolset;
971   int wipe;
972 {
973     struct bfhead *b, *bn;
974     char *wipe_start;
975     bufsize wipe_size;
976     bufsize add_sz;
977     bufsize rem_sz;
978     bufsize rem_sz2;
979 
980     b = BFH(((char *) buf) - sizeof(struct bhead));
981 #ifdef BufStats
982     poolset->numrel++;		      /* Increment number of brel() calls */
983 #endif
984     assert(buf != NULL);
985 
986 #ifdef FreeWipe
987     wipe = true;
988 #endif
989 #ifdef BECtl
990     if (b->bh.bsize == 0) {	      /* Directly-acquired buffer? */
991 	struct bdhead *bdh;
992 
993 	bdh = BDH(((char *) buf) - sizeof(struct bdhead));
994 	assert(b->bh.prevfree == 0);
995 #ifdef BufStats
996 	poolset->totalloc -= bdh->tsize;
997 	assert(poolset->totalloc >= 0);
998 	poolset->numdrel++;	       /* Number of direct releases */
999 #endif /* BufStats */
1000 	if (wipe) {
1001 		V memset_unchecked((char *) buf, 0x55,
1002 				   (MemSize) (bdh->tsize -
1003 					      sizeof(struct bdhead)));
1004 	}
1005 	assert(poolset->relfcn != NULL);
1006 	poolset->relfcn((char *)buf - sizeof(struct bdhead) - bdh->offs);      /* Release it directly. */
1007 	return;
1008     }
1009 #endif /* BECtl */
1010 
1011     /* Buffer size must be negative, indicating that the buffer is
1012        allocated. */
1013 
1014     if (b->bh.bsize >= 0) {
1015 	bn = NULL;
1016     }
1017     assert(b->bh.bsize < 0);
1018 
1019     /*	Back pointer in next buffer must be zero, indicating the
1020 	same thing: */
1021 
1022     assert(BH((char *) b - b->bh.bsize)->prevfree == 0);
1023 
1024 #ifdef BufStats
1025     poolset->totalloc += b->bh.bsize;
1026     assert(poolset->totalloc >= 0);
1027 #endif
1028 
1029     /* If the back link is nonzero, the previous buffer is free.  */
1030 
1031     if (b->bh.prevfree != 0) {
1032 
1033 	/* The previous buffer is free.  Consolidate this buffer  with	it
1034 	   by  adding  the  length  of	this  buffer  to the previous free
1035 	   buffer.  Note that we subtract the size  in	the  buffer  being
1036            released,  since  it's  negative to indicate that the buffer is
1037 	   allocated. */
1038 
1039 	register bufsize size = b->bh.bsize;
1040 
1041 	/* Only wipe the current buffer, including bfhead. */
1042 	wipe_start = (char *)b;
1043 	wipe_size = -size;
1044 
1045         /* Make the previous buffer the one we're working on. */
1046 	assert(BH((char *) b - b->bh.prevfree)->bsize == b->bh.prevfree);
1047 	b = BFH(((char *) b) - b->bh.prevfree);
1048 	rem_sz = b->bh.bsize;
1049 	b->bh.bsize -= size;
1050 	add_sz = b->bh.bsize;
1051 	update_free2_sum(poolset, rem_sz, 0, add_sz);
1052     } else {
1053 
1054         /* The previous buffer isn't free.  Insert this buffer
1055 	   on the free list as an isolated free block. */
1056 
1057 	assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist);
1058 	assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist);
1059 	b->ql.flink = &poolset->freelist;
1060 	b->ql.blink = poolset->freelist.ql.blink;
1061 	poolset->freelist.ql.blink = b;
1062 	b->ql.blink->ql.flink = b;
1063 	b->bh.bsize = -b->bh.bsize;
1064 	update_free2_sum(poolset, 0, 0, b->bh.bsize);
1065 
1066 	wipe_start = (char *)b + sizeof(struct bfhead);
1067 	wipe_size = b->bh.bsize - sizeof(struct bfhead);
1068     }
1069 
1070     /* Now we look at the next buffer in memory, located by advancing from
1071        the  start  of  this  buffer  by its size, to see if that buffer is
1072        free.  If it is, we combine  this  buffer  with	the  next  one	in
1073        memory, dechaining the second buffer from the free list. */
1074 
1075     bn =  BFH(((char *) b) + b->bh.bsize);
1076     if (bn->bh.bsize > 0) {
1077 
1078 	/* The buffer is free.	Remove it from the free list and add
1079 	   its size to that of our buffer. */
1080 
1081 	assert(BH((char *) bn + bn->bh.bsize)->prevfree == bn->bh.bsize);
1082 	assert(bn->ql.blink->ql.flink == bn);
1083 	assert(bn->ql.flink->ql.blink == bn);
1084 	bn->ql.blink->ql.flink = bn->ql.flink;
1085 	bn->ql.flink->ql.blink = bn->ql.blink;
1086 	rem_sz = b->bh.bsize;
1087 	rem_sz2 = bn->bh.bsize;
1088 	b->bh.bsize += bn->bh.bsize;
1089 	add_sz = b->bh.bsize;
1090 	update_free2_sum(poolset, rem_sz, rem_sz2, add_sz);
1091 
1092 	/* Finally,  advance  to   the	buffer	that   follows	the  newly
1093 	   consolidated free block.  We must set its  backpointer  to  the
1094 	   head  of  the  consolidated free block.  We know the next block
1095 	   must be an allocated block because the process of recombination
1096 	   guarantees  that  two  free	blocks will never be contiguous in
1097 	   memory.  */
1098 
1099 	bn = BFH(((char *) b) + b->bh.bsize);
1100 	/* Only bfhead of next buffer needs to be wiped */
1101 	wipe_size += sizeof(struct bfhead);
1102     }
1103     if (wipe) {
1104 	V memset_unchecked(wipe_start, 0x55, wipe_size);
1105     }
1106     assert(bn->bh.bsize < 0);
1107 
1108     /* The next buffer is allocated.  Set the backpointer in it  to  point
1109        to this buffer; the previous free buffer in memory. */
1110 
1111     bn->bh.prevfree = b->bh.bsize;
1112 
1113 #ifdef BECtl
1114 
1115     /*	If  a  block-release function is defined, and this free buffer
1116 	constitutes the entire block, release it.  Note that  pool_len
1117 	is  defined  in  such a way that the test will fail unless all
1118 	pool blocks are the same size.	*/
1119 
1120     if (poolset->relfcn != NULL &&
1121 	((bufsize) b->bh.bsize) == (poolset->pool_len - sizeof(struct bhead))) {
1122 
1123 	assert(b->bh.prevfree == 0);
1124 	assert(BH((char *) b + b->bh.bsize)->bsize == ESent);
1125 	assert(BH((char *) b + b->bh.bsize)->prevfree == b->bh.bsize);
1126 	/*  Unlink the buffer from the free list  */
1127 	b->ql.blink->ql.flink = b->ql.flink;
1128 	b->ql.flink->ql.blink = b->ql.blink;
1129 
1130 	poolset->relfcn(b);
1131 #ifdef BufStats
1132 	poolset->numprel++;	       /* Nr of expansion block releases */
1133 	poolset->numpblk--;	       /* Total number of blocks */
1134 	assert(poolset->numpblk == poolset->numpget - poolset->numprel);
1135 #endif /* BufStats */
1136     }
1137 #endif /* BECtl */
1138 }
1139 
1140 #ifdef BECtl
1141 
1142 /*  BECTL  --  Establish automatic pool expansion control  */
1143 
1144 void bectl(compact, acquire, release, pool_incr, poolset)
1145   int (*compact) _((bufsize sizereq, int sequence));
1146   void *(*acquire) _((bufsize size));
1147   void (*release) _((void *buf));
1148   bufsize pool_incr;
1149   struct bpoolset *poolset;
1150 {
1151     poolset->compfcn = compact;
1152     poolset->acqfcn = acquire;
1153     poolset->relfcn = release;
1154     poolset->exp_incr = pool_incr;
1155 }
1156 #endif
1157 
1158 /*  BPOOL  --  Add a region of memory to the buffer pool.  */
1159 
1160 void bpool(buf, len, poolset)
1161   void *buf;
1162   bufsize len;
1163   struct bpoolset *poolset;
1164 {
1165     struct bfhead *b = BFH(buf);
1166     struct bhead *bn;
1167 
1168 #ifdef SizeQuant
1169     len &= ~(SizeQuant - 1);
1170 #endif
1171 #ifdef BECtl
1172     if (poolset->pool_len == 0) {
1173 	poolset->pool_len = len;
1174     } else if (len != poolset->pool_len) {
1175 	poolset->pool_len = -1;
1176     }
1177 #ifdef BufStats
1178     poolset->numpget++;		       /* Number of block acquisitions */
1179     poolset->numpblk++;		       /* Number of blocks total */
1180     assert(poolset->numpblk == poolset->numpget - poolset->numprel);
1181 #endif /* BufStats */
1182 #endif /* BECtl */
1183 
1184     /* Since the block is initially occupied by a single free  buffer,
1185        it  had	better	not  be  (much) larger than the largest buffer
1186        whose size we can store in bhead.bsize. */
1187 
1188     assert(len - sizeof(struct bhead) <= -((bufsize) ESent + 1));
1189 
1190     /* Clear  the  backpointer at  the start of the block to indicate that
1191        there  is  no  free  block  prior  to  this   one.    That   blocks
1192        recombination when the first block in memory is released. */
1193 
1194     b->bh.prevfree = 0;
1195 
1196     /* Chain the new block to the free list. */
1197 
1198     assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist);
1199     assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist);
1200     b->ql.flink = &poolset->freelist;
1201     b->ql.blink = poolset->freelist.ql.blink;
1202     poolset->freelist.ql.blink = b;
1203     b->ql.blink->ql.flink = b;
1204 
1205     /* Create a dummy allocated buffer at the end of the pool.	This dummy
1206        buffer is seen when a buffer at the end of the pool is released and
1207        blocks  recombination  of  the last buffer with the dummy buffer at
1208        the end.  The length in the dummy buffer  is  set  to  the  largest
1209        negative  number  to  denote  the  end  of  the pool for diagnostic
1210        routines (this specific value is  not  counted  on  by  the  actual
1211        allocation and release functions). */
1212 
1213     len -= sizeof(struct bhead);
1214     b->bh.bsize = (bufsize) len;
1215     update_free2_sum(poolset, 0, 0, b->bh.bsize);
1216 #ifdef FreeWipe
1217     V memset_unchecked(((char *) b) + sizeof(struct bfhead), 0x55,
1218 		       (MemSize) (len - sizeof(struct bfhead)));
1219 #endif
1220     bn = BH(((char *) b) + len);
1221     bn->prevfree = (bufsize) len;
1222     /* Definition of ESent assumes two's complement! */
1223     assert((~0) == -1);
1224     bn->bsize = ESent;
1225 }
1226 
1227 #ifdef BufStats
1228 
1229 /*  BSTATS  --	Return buffer allocation free space statistics.  */
1230 
1231 void bstats(curalloc, totfree, maxfree, nget, nrel, poolset)
1232   bufsize *curalloc, *totfree, *maxfree;
1233   long *nget, *nrel;
1234   struct bpoolset *poolset;
1235 {
1236     struct bfhead *b = poolset->freelist.ql.flink;
1237 
1238     *nget = poolset->numget;
1239     *nrel = poolset->numrel;
1240     *curalloc = poolset->totalloc;
1241     *totfree = 0;
1242     *maxfree = -1;
1243     while (b != &poolset->freelist) {
1244 	assert(b->bh.bsize > 0);
1245 	*totfree += b->bh.bsize;
1246 	if (b->bh.bsize > *maxfree) {
1247 	    *maxfree = b->bh.bsize;
1248 	}
1249 	b = b->ql.flink;	      /* Link to next buffer */
1250     }
1251 }
1252 
1253 #ifdef BECtl
1254 
1255 /*  BSTATSE  --  Return extended statistics  */
1256 
1257 void bstatse(pool_incr, npool, npget, nprel, ndget, ndrel, poolset)
1258   bufsize *pool_incr;
1259   long *npool, *npget, *nprel, *ndget, *ndrel;
1260   struct bpoolset *poolset;
1261 {
1262     *pool_incr = (poolset->pool_len < 0) ?
1263 	    -poolset->exp_incr : poolset->exp_incr;
1264     *npool = poolset->numpblk;
1265     *npget = poolset->numpget;
1266     *nprel = poolset->numprel;
1267     *ndget = poolset->numdget;
1268     *ndrel = poolset->numdrel;
1269 }
1270 #endif /* BECtl */
1271 #endif /* BufStats */
1272 
1273 #ifdef DumpData
1274 
1275 /*  BUFDUMP  --  Dump the data in a buffer.  This is called with the  user
1276 		 data pointer, and backs up to the buffer header.  It will
1277 		 dump either a free block or an allocated one.	*/
1278 
1279 void bufdump(buf)
1280   void *buf;
1281 {
1282     struct bfhead *b;
1283     unsigned char *bdump;
1284     bufsize bdlen;
1285 
1286     b = BFH(((char *) buf) - sizeof(struct bhead));
1287     assert(b->bh.bsize != 0);
1288     if (b->bh.bsize < 0) {
1289 	bdump = (unsigned char *) buf;
1290 	bdlen = (-b->bh.bsize) - sizeof(struct bhead);
1291     } else {
1292 	bdump = (unsigned char *) (((char *) b) + sizeof(struct bfhead));
1293 	bdlen = b->bh.bsize - sizeof(struct bfhead);
1294     }
1295 
1296     while (bdlen > 0) {
1297 	int i, dupes = 0;
1298 	bufsize l = bdlen;
1299 	char bhex[50], bascii[20];
1300 
1301 	if (l > 16) {
1302 	    l = 16;
1303 	}
1304 
1305 	for (i = 0; i < l; i++) {
1306 			V snprintf(bhex + i * 3, sizeof(bhex) - i * 3, "%02X ",
1307 				   bdump[i]);
1308             bascii[i] = isprint(bdump[i]) ? bdump[i] : ' ';
1309 	}
1310 	bascii[i] = 0;
1311         V printf("%-48s   %s\n", bhex, bascii);
1312 	bdump += l;
1313 	bdlen -= l;
1314 	while ((bdlen > 16) && (memcmp((char *) (bdump - 16),
1315 				       (char *) bdump, 16) == 0)) {
1316 	    dupes++;
1317 	    bdump += 16;
1318 	    bdlen -= 16;
1319 	}
1320 	if (dupes > 1) {
1321 	    V printf(
1322                 "     (%d lines [%d bytes] identical to above line skipped)\n",
1323 		dupes, dupes * 16);
1324 	} else if (dupes == 1) {
1325 	    bdump -= 16;
1326 	    bdlen += 16;
1327 	}
1328     }
1329 }
1330 #endif
1331 
1332 #ifdef BufDump
1333 
1334 /*  BPOOLD  --	Dump a buffer pool.  The buffer headers are always listed.
1335 		If DUMPALLOC is nonzero, the contents of allocated buffers
1336 		are  dumped.   If  DUMPFREE  is  nonzero,  free blocks are
1337 		dumped as well.  If FreeWipe  checking	is  enabled,  free
1338 		blocks	which  have  been clobbered will always be dumped. */
1339 
1340 void bpoold(buf, dumpalloc, dumpfree)
1341   void *buf;
1342   int dumpalloc, dumpfree;
1343 {
1344     struct bfhead *b = BFH(buf);
1345 
1346     while (b->bh.bsize != ESent) {
1347 	bufsize bs = b->bh.bsize;
1348 
1349 	if (bs < 0) {
1350 	    bs = -bs;
1351             V printf("Allocated buffer: size %6ld bytes.\n", (long) bs);
1352 	    if (dumpalloc) {
1353 		bufdump((void *) (((char *) b) + sizeof(struct bhead)));
1354 	    }
1355 	} else {
1356             char *lerr = "";
1357 
1358 	    assert(bs > 0);
1359 	    if ((b->ql.blink->ql.flink != b) ||
1360 		(b->ql.flink->ql.blink != b)) {
1361                 lerr = "  (Bad free list links)";
1362 	    }
1363             V printf("Free block:       size %6ld bytes.%s\n",
1364 		(long) bs, lerr);
1365 #ifdef FreeWipe
1366 	    lerr = ((char *) b) + sizeof(struct bfhead);
1367 	    if ((bs > sizeof(struct bfhead)) && ((*lerr != 0x55) ||
1368 		(memcmp(lerr, lerr + 1,
1369 		  (MemSize) (bs - (sizeof(struct bfhead) + 1))) != 0))) {
1370 		V printf(
1371                     "(Contents of above free block have been overstored.)\n");
1372 		bufdump((void *) (((char *) b) + sizeof(struct bhead)));
1373 	    } else
1374 #endif
1375 	    if (dumpfree) {
1376 		bufdump((void *) (((char *) b) + sizeof(struct bhead)));
1377 	    }
1378 	}
1379 	b = BFH(((char *) b) + bs);
1380     }
1381 }
1382 #endif /* BufDump */
1383 
1384 #ifdef BufValid
1385 
1386 /*  BPOOLV  --  Validate a buffer pool.  If NDEBUG isn't defined,
1387 		any error generates an assertion failure.  */
1388 
1389 int bpoolv(buf)
1390   void *buf;
1391 {
1392     struct bfhead *b = BFH(buf);
1393 
1394     while (b->bh.bsize != ESent) {
1395 	bufsize bs = b->bh.bsize;
1396 
1397 	if (bs < 0) {
1398 	    bs = -bs;
1399 	} else {
1400 			const char *lerr = "";
1401 
1402 	    assert(bs > 0);
1403 	    if (bs <= 0) {
1404 		return 0;
1405 	    }
1406 	    if ((b->ql.blink->ql.flink != b) ||
1407 		(b->ql.flink->ql.blink != b)) {
1408                 V printf("Free block: size %6ld bytes.  (Bad free list links)\n",
1409 		     (long) bs);
1410 		assert(0);
1411 		return 0;
1412 	    }
1413 #ifdef FreeWipe
1414 	    lerr = ((char *) b) + sizeof(struct bfhead);
1415 	    if ((bs > sizeof(struct bfhead)) && ((*lerr != 0x55) ||
1416 		(memcmp(lerr, lerr + 1,
1417 		  (MemSize) (bs - (sizeof(struct bfhead) + 1))) != 0))) {
1418 		V printf(
1419                     "(Contents of above free block have been overstored.)\n");
1420 		bufdump((void *) (((char *) b) + sizeof(struct bhead)));
1421 		assert(0);
1422 		return 0;
1423 	    }
1424 #endif
1425 	}
1426 	b = BFH(((char *) b) + bs);
1427     }
1428     return 1;
1429 }
1430 #endif /* BufValid */
1431 
1432         /***********************\
1433 	*			*
1434 	* Built-in test program *
1435 	*			*
1436         \***********************/
1437 
1438 #if !defined(__KERNEL__) && !defined(__LDELF__) && defined(CFG_TA_BGET_TEST)
1439 
1440 #define TestProg 20000
1441 
1442 #ifdef BECtl
1443 #define PoolSize    300000	      /* Test buffer pool size */
1444 #else
1445 #define PoolSize    50000	      /* Test buffer pool size */
1446 #endif
1447 #define ExpIncr     32768	      /* Test expansion block size */
1448 #define CompactTries 10 	      /* Maximum tries at compacting */
1449 
1450 #define dumpAlloc   0		      /* Dump allocated buffers ? */
1451 #define dumpFree    0		      /* Dump free buffers ? */
1452 
1453 static char *bchain = NULL;	      /* Our private buffer chain */
1454 static char *bp = NULL; 	      /* Our initial buffer pool */
1455 
1456 #ifdef UsingFloat
1457 #include <math.h>
1458 #endif
1459 
1460 static unsigned long int next = 1;
1461 
1462 static void *(*mymalloc)(size_t size);
1463 static void (*myfree)(void *ptr);
1464 
1465 static struct bpoolset mypoolset = {
1466 	.freelist = {
1467 		.bh = { 0, 0},
1468 		.ql = { &mypoolset.freelist, &mypoolset.freelist},
1469 	}
1470 };
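/*  The initializer above leaves the pool set with an empty circular free
    list: both queue links of the free-list head point back at the head
    itself, which is the state expected before bpool() contributes any
    storage.  */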
1471 
1472 /* Return next random integer */
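/*  (The multiplier 1103515245, increment 12345 and the (next / 65536) % 32768
    output step follow the portable rand() example shown in the ISO C
    standard, so the sequence is identical across platforms for a given
    seed.)  */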
1473 
1474 static int myrand(void)
1475 {
1476 	next = next * 1103515245L + 12345;
1477 	return (unsigned int) (next / 65536L) % 32768L;
1478 }
1479 
1480 /* Set seed for random generator */
1481 
1482 static void mysrand(unsigned int seed)
1483 {
1484 	next = seed;
1485 }
1486 
1487 /*  STATS  --  Edit statistics returned by bstats() or bstatse().  */
1488 
1489 static void stats(const char *when __maybe_unused,
1490 		  struct bpoolset *poolset __maybe_unused)
1491 {
1492 #ifdef BufStats
1493     bufsize cural, totfree, maxfree;
1494     long nget, nfree;
1495 #endif
1496 #ifdef BECtl
1497     bufsize pincr;
1498     long totblocks, npget, nprel, ndget, ndrel;
1499 #endif
1500 
1501 #ifdef BufStats
1502     bstats(&cural, &totfree, &maxfree, &nget, &nfree, poolset);
1503     V printf(
1504         "%s: %ld gets, %ld releases.  %ld in use, %ld free, largest = %ld\n",
1505 	when, nget, nfree, (long) cural, (long) totfree, (long) maxfree);
1506 #endif
1507 #ifdef BECtl
1508     bstatse(&pincr, &totblocks, &npget, &nprel, &ndget, &ndrel, poolset);
1509     V printf(
1510          "  Blocks: size = %ld, %ld (%ld bytes) in use, %ld gets, %ld frees\n",
1511 	 (long)pincr, totblocks, pincr * totblocks, npget, nprel);
1512     V printf("  %ld direct gets, %ld direct frees\n", ndget, ndrel);
1513 #endif /* BECtl */
1514 }
1515 
1516 #ifdef BECtl
1517 static int protect = 0; 	      /* Disable compaction during bgetr() */
1518 
1519 /*  BCOMPACT  --  Compaction call-back function.  */
1520 
1521 static int bcompact(bsize, seq)
1522   bufsize bsize;
1523   int seq;
1524 {
1525 #ifdef CompactTries
1526     char *bc = bchain;
1527     int i = myrand() & 0x3;
1528 
1529 #ifdef COMPACTRACE
1530     V printf("Compaction requested.  %ld bytes needed, sequence %d.\n",
1531 	(long) bsize, seq);
1532 #endif
1533 
1534     if (protect || (seq > CompactTries)) {
1535 #ifdef COMPACTRACE
1536         V printf("Compaction gave up.\n");
1537 #endif
1538 	return 0;
1539     }
1540 
1541     /* Based on a random cast, release a random buffer in the list
1542        of allocated buffers. */
1543 
1544     while (i > 0 && bc != NULL) {
1545 	bc = *((char **) bc);
1546 	i--;
1547     }
1548     if (bc != NULL) {
1549 	char *fb;
1550 
1551 	fb = *((char **) bc);
1552 	if (fb != NULL) {
1553 	    *((char **) bc) = *((char **) fb);
1554 	    brel((void *) fb, &mypoolset, true/*wipe*/);
1555 	    return 1;
1556 	}
1557     }
1558 
1559 #ifdef COMPACTRACE
1560     V printf("Compaction bailed out.\n");
1561 #endif
1562 #endif /* CompactTries */
1563     return 0;
1564 }
1565 
1566 /*  BEXPAND  --  Expand pool call-back function.  */
1567 
1568 static void *bexpand(size)
1569   bufsize size;
1570 {
1571     void *np = NULL;
1572     bufsize cural, totfree, maxfree;
1573     long nget, nfree;
1574 
1575     /* Don't expand beyond the total allocated size given by PoolSize. */
1576 
1577     bstats(&cural, &totfree, &maxfree, &nget, &nfree, &mypoolset);
1578 
1579     if (cural < PoolSize) {
1580 	np = (void *) mymalloc((unsigned) size);
1581     }
1582 #ifdef EXPTRACE
1583     V printf("Expand pool by %ld -- %s.\n", (long) size,
1584         np == NULL ? "failed" : "succeeded");
1585 #endif
1586     return np;
1587 }
1588 
1589 /*  BSHRINK  --  Shrink buffer pool call-back function.  */
1590 
1591 static void bshrink(buf)
1592   void *buf;
1593 {
1594     if (((char *) buf) == bp) {
1595 #ifdef EXPTRACE
1596         V printf("Initial pool released.\n");
1597 #endif
1598 	bp = NULL;
1599     }
1600 #ifdef EXPTRACE
1601     V printf("Shrink pool.\n");
1602 #endif
1603     myfree((char *) buf);
1604 }
1605 
1606 #endif /* BECtl */
1607 
1608 /*  Restrict buffer requests to those large enough to contain our pointer and
1609     small enough for the CPU architecture.  */
1610 
1611 static bufsize blimit(bufsize bs)
1612 {
1613     if (bs < sizeof(char *)) {
1614 	bs = sizeof(char *);
1615     }
1616 
1617     /* This is written out in this ugly fashion because the
1618        cool expression in sizeof(int) that auto-configured
1619        to any length int befuddled some compilers. */
1620 
1621     if (sizeof(int) == 2) {
1622 	if (bs > 32767) {
1623 	    bs = 32767;
1624 	}
1625     } else {
1626 	if (bs > 200000) {
1627 	    bs = 200000;
1628 	}
1629     }
1630     return bs;
1631 }
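#if 0
/*  Illustrative sketch (not compiled): the effect of blimit() on a typical
    64-bit target where sizeof(char *) == 8 and sizeof(int) == 4.  */
static void blimit_example(void)
{
    assert(blimit(1) == (bufsize) sizeof(char *)); /* raised to pointer size */
    assert(blimit(1000) == 1000);                  /* within range, unchanged */
    assert(blimit(500000) == 200000);              /* clamped for 32-bit int */
}
#endif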
1632 
1633 int bget_main_test(void *(*malloc_func)(size_t), void (*free_func)(void *))
1634 {
1635     int i;
1636 #ifdef UsingFloat
1637     double x;
1638 #endif
1639 
1640     mymalloc = malloc_func;
1641     myfree = free_func;
1642 
1643     /* Seed the random number generator.  A fixed seed is used so that
1644        every run produces the same, repeatable sequence of allocation
1645        sizes and release decisions. */
1646 
1647     mysrand(1234);
1648 
1649     /*	Compute x such that pow(x, p) ranges between 1 and 4*ExpIncr as
1650 	p ranges from 0 to ExpIncr-1, with a concentration in the lower
1651 	numbers.  */
1652 
1653 #ifdef UsingFloat
1654     x = 4.0 * ExpIncr;
1655     x = log(x);
1656     x = exp(log(4.0 * ExpIncr) / (ExpIncr - 1.0));
1657 #endif
1658 
1659 #ifdef BECtl
1660     bectl(bcompact, bexpand, bshrink, (bufsize) ExpIncr, &mypoolset);
1661     bp = mymalloc(ExpIncr);
1662     assert(bp != NULL);
1663     bpool((void *) bp, (bufsize) ExpIncr, &mypoolset);
1664 #else
1665     bp = mymalloc(PoolSize);
1666     assert(bp != NULL);
1667     bpool((void *) bp, (bufsize) PoolSize, &mypoolset);
1668 #endif
1669 
1670     stats("Create pool", &mypoolset);
1671 #ifdef BufValid
1672     V bpoolv((void *) bp);
1673 #endif
1674 #ifdef BufDump
1675     bpoold((void *) bp, dumpAlloc, dumpFree);
1676 #endif
1677 
1678     for (i = 0; i < TestProg; i++) {
1679 	char *cb;
1680 #ifdef UsingFloat
1681 	bufsize bs = pow(x, (double) (myrand() & (ExpIncr - 1)));
1682 #else
1683 	bufsize bs = (myrand() & (ExpIncr * 4 - 1)) / (1 << (myrand() & 0x7));
1684 #endif
1685 	bufsize align = 0;
1686 	bufsize hdr_size = 0;
1687 
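        /* Pick a random alignment (none, 32, 64 or 128 bytes) and a random
           header reservation that is a multiple of BGET_HDR_QUANTUM so that
           the alignment and header-offset paths of bget()/bgetz()/bgetr()
           are all exercised. */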
1688         switch (myrand() & 0x3) {
1689         case 1:
1690             align = 32;
1691             break;
1692         case 2:
1693             align = 64;
1694             break;
1695         case 3:
1696             align = 128;
1697             break;
1698         default:
1699             break;
1700         }
1701 
1702         hdr_size = (myrand() & 0x3) * BGET_HDR_QUANTUM;
1703 
1704 	assert(bs <= (((bufsize) 4) * ExpIncr));
1705 	bs = blimit(bs);
1706 	if (myrand() & 0x400) {
1707 	    cb = (char *) bgetz(align, hdr_size, bs, &mypoolset);
1708 	} else {
1709 	    cb = (char *) bget(align, hdr_size, bs, &mypoolset);
1710 	}
1711 	if (cb == NULL) {
1712 #ifdef EasyOut
1713 	    break;
1714 #else
1715 	    char *bc = bchain;
1716 
1717 	    if (bc != NULL) {
1718 		char *fb;
1719 
1720 		fb = *((char **) bc);
1721 		if (fb != NULL) {
1722 		    *((char **) bc) = *((char **) fb);
1723 		    brel((void *) fb, &mypoolset, true/*wipe*/);
1724 		}
1725 	    }
1726 	    continue;
1727 #endif
1728 	}
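        /* The allocator aligns the address just past the reserved header
           area, so the user pointer plus hdr_size must land on the
           requested alignment boundary. */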
1729         assert(!align || !(((unsigned long)cb + hdr_size) & (align - 1)));
1730 	*((char **) cb) = (char *) bchain;
1731 	bchain = cb;
1732 
1733 	/* Based on a random cast, release a random buffer in the list
1734 	   of allocated buffers. */
1735 
1736 	if ((myrand() & 0x10) == 0) {
1737 	    char *bc = bchain;
1738 	    int j = myrand() & 0x3;
1739 
1740 	    while (j > 0 && bc != NULL) {
1741 		bc = *((char **) bc);
1742 		j--;
1743 	    }
1744 	    if (bc != NULL) {
1745 		char *fb;
1746 
1747 		fb = *((char **) bc);
1748 		if (fb != NULL) {
1749 		    *((char **) bc) = *((char **) fb);
1750 		    brel((void *) fb, &mypoolset, true/*wipe*/);
1751 		}
1752 	    }
1753 	}
1754 
1755 	/* Based on a random cast, reallocate a random buffer in the list
1756 	   to a random size */
1757 
1758 	if ((myrand() & 0x20) == 0) {
1759 	    char *bc = bchain;
1760 	    int j = myrand() & 0x3;
1761 
1762 	    while (j > 0 && bc != NULL) {
1763 		bc = *((char **) bc);
1764 		j--;
1765 	    }
1766 	    if (bc != NULL) {
1767 		char *fb;
1768 
1769 		fb = *((char **) bc);
1770 		if (fb != NULL) {
1771 		    char *newb;
1772 
1773 #ifdef UsingFloat
1774 		    bs = pow(x, (double) (myrand() & (ExpIncr - 1)));
1775 #else
1776 		    bs = (myrand() & (ExpIncr * 4 - 1)) / (1 << (myrand() & 0x7));
1777 #endif
1778 		    bs = blimit(bs);
1779 #ifdef BECtl
1780 		    protect = 1;      /* Protect against compaction */
1781 #endif
1782 		    newb = (char *) bgetr((void *) fb, align, hdr_size, bs, &mypoolset);
1783 #ifdef BECtl
1784 		    protect = 0;
1785 #endif
1786 		    if (newb != NULL) {
1787                         assert(!align || !(((unsigned long)newb + hdr_size) &
1788                                            (align - 1)));
1789 			*((char **) bc) = newb;
1790 		    }
1791 		}
1792 	    }
1793 	}
1794     }
1795     stats("\nAfter allocation", &mypoolset);
1796     if (bp != NULL) {
1797 #ifdef BufValid
1798 	V bpoolv((void *) bp);
1799 #endif
1800 #ifdef BufDump
1801 	bpoold((void *) bp, dumpAlloc, dumpFree);
1802 #endif
1803     }
1804 
1805     while (bchain != NULL) {
1806 	char *buf = bchain;
1807 
1808 	bchain = *((char **) buf);
1809 	brel((void *) buf, &mypoolset, true/*wipe*/);
1810     }
1811     stats("\nAfter release", &mypoolset);
1812 #ifndef BECtl
1813     if (bp != NULL) {
1814 #ifdef BufValid
1815 	V bpoolv((void *) bp);
1816 #endif
1817 #ifdef BufDump
1818 	bpoold((void *) bp, dumpAlloc, dumpFree);
1819 #endif
1820     }
1821 #endif
1822 
1823     return 0;
1824 }
1825 #endif
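#if 0
/*  Illustrative sketch (not compiled): how a TA-side test harness might run
    the built-in test when CFG_TA_BGET_TEST is enabled.  The harness function
    name is an assumption; the heap's standard malloc()/free() match the
    function-pointer types bget_main_test() expects.  */
#include <stdlib.h>

int run_bget_selftest(void)
{
    return bget_main_test(malloc, free);
}
#endif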
1826