/*

    B G E T

    Buffer allocator

    Designed and implemented in April of 1972 by John Walker, based on the
    Case Algol OPRO$ algorithm implemented in 1966.

    Reimplemented in 1975 by John Walker for the Interdata 70.
    Reimplemented in 1977 by John Walker for the Marinchip 9900.
    Reimplemented in 1982 by Duff Kurland for the Intel 8080.

    Portable C version implemented in September of 1990 by an older, wiser
    instance of the original implementor.

    Souped up and/or weighed down slightly shortly thereafter by Greg
    Lutz.

    AMIX edition, including the new compaction call-back option, prepared
    by John Walker in July of 1992.

    Bug in built-in test program fixed, ANSI compiler warnings eradicated,
    buffer pool validator implemented, and guaranteed repeatable test
    added by John Walker in October of 1995.

    This program is in the public domain.

     1. This is the book of the generations of Adam.  In the day that God
        created man, in the likeness of God made he him;
     2. Male and female created he them; and blessed them, and called
        their name Adam, in the day when they were created.
     3. And Adam lived an hundred and thirty years, and begat a son in
        his own likeness, and after his image; and called his name Seth:
     4. And the days of Adam after he had begotten Seth were eight
        hundred years: and he begat sons and daughters:
     5. And all the days that Adam lived were nine hundred and thirty
        years: and he died.
     6. And Seth lived an hundred and five years, and begat Enos:
     7. And Seth lived after he begat Enos eight hundred and seven years,
        and begat sons and daughters:
     8. And all the days of Seth were nine hundred and twelve years: and
        he died.
     9. And Enos lived ninety years, and begat Cainan:
    10. And Enos lived after he begat Cainan eight hundred and fifteen
        years, and begat sons and daughters:
    11. And all the days of Enos were nine hundred and five years: and
        he died.
    12. And Cainan lived seventy years and begat Mahalaleel:
    13. And Cainan lived after he begat Mahalaleel eight hundred and
        forty years, and begat sons and daughters:
    14. And all the days of Cainan were nine hundred and ten years: and
        he died.
    15. And Mahalaleel lived sixty and five years, and begat Jared:
    16. And Mahalaleel lived after he begat Jared eight hundred and
        thirty years, and begat sons and daughters:
    17. And all the days of Mahalaleel were eight hundred ninety and
        five years: and he died.
    18. And Jared lived an hundred sixty and two years, and he begat
        Enoch:
    19. And Jared lived after he begat Enoch eight hundred years, and
        begat sons and daughters:
    20. And all the days of Jared were nine hundred sixty and two years:
        and he died.
    21. And Enoch lived sixty and five years, and begat Methuselah:
    22. And Enoch walked with God after he begat Methuselah three
        hundred years, and begat sons and daughters:
    23. And all the days of Enoch were three hundred sixty and five
        years:
    24. And Enoch walked with God: and he was not; for God took him.
    25. And Methuselah lived an hundred eighty and seven years, and
        begat Lamech.
    26. And Methuselah lived after he begat Lamech seven hundred eighty
        and two years, and begat sons and daughters:
    27. And all the days of Methuselah were nine hundred sixty and nine
        years: and he died.
    28. And Lamech lived an hundred eighty and two years, and begat a
        son:
    29. And he called his name Noah, saying, This same shall comfort us
        concerning our work and toil of our hands, because of the ground
        which the LORD hath cursed.
    30. And Lamech lived after he begat Noah five hundred ninety and
        five years, and begat sons and daughters:
    31. And all the days of Lamech were seven hundred seventy and seven
        years: and he died.
    32. And Noah was five hundred years old: and Noah begat Shem, Ham,
        and Japheth.

    And buffers begat buffers, and links begat links, and buffer pools
    begat links to chains of buffer pools containing buffers, and lo the
    buffers and links and pools of buffers and pools of links to chains
    of pools of buffers were fruitful and they multiplied and the
    Operating System looked down upon them and said that it was Good.


    INTRODUCTION
    ============

    BGET is a comprehensive memory allocation package which is easily
    configured to the needs of an application.  BGET is efficient in
    both the time needed to allocate and release buffers and in the
    memory overhead required for buffer pool management.  It
    automatically consolidates contiguous space to minimise
    fragmentation.  BGET is configured by compile-time definitions;
    major options include:

        * A built-in test program to exercise BGET and
          demonstrate how the various functions are used.

        * Allocation by either the "first fit" or "best fit"
          method.

        * Wiping buffers at release time to catch code which
          references previously released storage.

        * Built-in routines to dump individual buffers or the
          entire buffer pool.

        * Retrieval of allocation and pool size statistics.

        * Quantisation of buffer sizes to a power of two to
          satisfy hardware alignment constraints.

        * Automatic pool compaction, growth, and shrinkage by
          means of call-backs to user defined functions.

    Applications of BGET can range from storage management in
    ROM-based embedded programs to providing the framework upon which
    a multitasking system incorporating garbage collection is
    constructed.  BGET incorporates extensive internal consistency
    checking using the <assert.h> mechanism; all these checks can be
    turned off by compiling with NDEBUG defined, yielding a version of
    BGET with minimal size and maximum speed.

    The basic algorithm underlying BGET has withstood the test of
    time; more than 25 years have passed since the first
    implementation of this code.  And yet, it is substantially more
    efficient than the native allocation schemes of many operating
    systems: the Macintosh and Microsoft Windows to name two, on which
    programs have obtained substantial speed-ups by layering BGET as
    an application level memory manager atop the underlying system's.

    BGET has been implemented on the largest mainframes and the lowest
    of microprocessors.  It has served as the core for multitasking
    operating systems, multi-thread applications, embedded software in
    data network switching processors, and a host of C programs.  And
    while it has accreted flexibility and additional options over the
    years, it remains fast, memory efficient, portable, and easy to
    integrate into your program.


    BGET IMPLEMENTATION ASSUMPTIONS
    ===============================

    BGET is written in as portable a dialect of C as possible.
    The only fundamental assumption about the underlying hardware
    architecture is that memory is allocated as a linear array which
    can be addressed as a vector of C "char" objects.  On segmented
    address space architectures, this generally means that BGET should
    be used to allocate storage within a single segment (although some
    compilers simulate linear address spaces on segmented
    architectures).  On segmented architectures, then, BGET buffer
    pools may not be larger than a segment, but since BGET allows any
    number of separate buffer pools, there is no limit on the total
    storage which can be managed, only on the largest individual
    object which can be allocated.  Machines with a linear address
    architecture, such as the VAX, 680x0, Sparc, MIPS, or the Intel
    80386 and above in native mode, may use BGET without restriction.


    GETTING STARTED WITH BGET
    =========================

    Although BGET can be configured in a multitude of fashions, there
    are three basic ways of working with BGET.  The functions
    mentioned below are documented in the following section.  Please
    excuse the forward references which are made in the interest of
    providing a roadmap to guide you to the BGET functions you're
    likely to need.

    Embedded Applications
    ---------------------

    Embedded applications typically have a fixed area of memory
    dedicated to buffer allocation (often in a separate RAM address
    space distinct from the ROM that contains the executable code).
    To use BGET in such an environment, simply call bpool() with the
    start address and length of the buffer pool area in RAM, then
    allocate buffers with bget() and release them with brel().
    Embedded applications with very limited RAM but abundant CPU speed
    may benefit by configuring BGET for BestFit allocation (which is
    usually not worth it in other environments).
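    As an illustration only (it is not part of the original BGET
    documentation), a minimal sketch of this style of use follows.  It
    is patterned on the built-in test program at the end of this file
    and therefore uses the extended argument lists of the definitions
    found below, which add alignment, header-size, and pool-set
    parameters to the classic prototypes shown in the next section.
    The array size and the names pool_ram, my_pool, and pool_example
    are invented for the example, and the declarations in this file
    are assumed to be in scope.  The pool storage is declared as an
    array of unsigned long so that it is suitably aligned.

        static unsigned long pool_ram[1024];
        static struct bpoolset my_pool = {
            .freelist = {
                .bh = { 0, 0 },
                .ql = { &my_pool.freelist, &my_pool.freelist },
            }
        };

        void pool_example(void)
        {
            void *p;

            bpool(pool_ram, sizeof(pool_ram), &my_pool);
            p = bget(0, 0, 128, &my_pool);
            if (p != NULL)
                brel(p, &my_pool, true);
        }

    A zero alignment argument falls back to the minimum allocation
    quantum, a zero header size reserves no extra header space, and
    the final argument to brel() asks for the released buffer to be
    wiped.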
    Malloc() Emulation
    ------------------

    If the C library malloc() function is too slow, not present in
    your development environment (for example, in a native Windows or
    Macintosh program), or otherwise unsuitable, you can replace it
    with BGET.  Initially define a buffer pool of an appropriate size
    with bpool()--usually obtained by making a call to the operating
    system's low-level memory allocator.  Then allocate buffers with
    bget(), bgetz(), and bgetr() (the last two permit the allocation
    of buffers initialised to zero and [inefficient] re-allocation of
    existing buffers for compatibility with C library functions).
    Release buffers by calling brel().  If a buffer allocation request
    fails, obtain more storage from the underlying operating system,
    add it to the buffer pool by another call to bpool(), and continue
    execution.

    Automatic Storage Management
    ----------------------------

    You can use BGET as your application's native memory manager and
    implement automatic storage pool expansion, contraction, and
    optionally application-specific memory compaction by compiling
    BGET with the BECtl symbol defined, then calling bectl() and
    supplying functions for storage compaction, acquisition, and
    release, as well as a standard pool expansion increment.  All of
    these functions are optional (although it doesn't make much sense
    to provide a release function without an acquisition function,
    does it?).  Once the call-back functions have been defined with
    bectl(), you simply use bget() and brel() to allocate and release
    storage as before.  You can supply an initial buffer pool with
    bpool() or rely on automatic allocation to acquire the entire
    pool.  When a call on bget() cannot be satisfied, BGET first
    checks if a compaction function has been supplied.  If so, it is
    called (with the space required to satisfy the allocation request
    and a sequence number to allow the compaction routine to be called
    successively without looping).  If the compaction function is able
    to free any storage (it needn't know whether the storage it freed
    was adequate) it should return a nonzero value, whereupon BGET
    will retry the allocation request and, if it fails again, call the
    compaction function again with the next-higher sequence number.

    If the compaction function returns zero, indicating failure to
    free space, or no compaction function is defined, BGET next tests
    whether a non-NULL allocation function was supplied to bectl().
    If so, that function is called with an argument indicating how
    many bytes of additional space are required.  This will be the
    standard pool expansion increment supplied in the call to bectl()
    unless the original bget() call requested a buffer larger than
    this; buffers larger than the standard pool block can be managed
    "off the books" by BGET in this mode.  If the allocation function
    succeeds in obtaining the storage, it returns a pointer to the new
    block and BGET expands the buffer pool; if it fails, the
    allocation request fails and returns NULL to the caller.  If a
    non-NULL release function is supplied, expansion blocks which
    become totally empty are released to the global free pool by
    passing their addresses to the release function.

    Equipped with appropriate allocation, release, and compaction
    functions, BGET can be used as part of very sophisticated memory
    management strategies, including garbage collection.  (Note,
    however, that BGET is *not* a garbage collector by itself, and
    that developing such a system requires much additional logic and
    careful design of the application's memory allocation strategy.)


    BGET FUNCTION DESCRIPTIONS
    ==========================

    Functions implemented in this file (some are enabled by certain of
    the optional settings below):

        void bpool(void *buffer, bufsize len);

    Create a buffer pool of <len> bytes, using the storage starting at
    <buffer>.  You can call bpool() subsequently to contribute
    additional storage to the overall buffer pool.

        void *bget(bufsize size);

    Allocate a buffer of <size> bytes.  The address of the buffer is
    returned, or NULL if insufficient memory was available to allocate
    the buffer.

        void *bgetz(bufsize size);

    Allocate a buffer of <size> bytes and clear it to all zeroes.  The
    address of the buffer is returned, or NULL if insufficient memory
    was available to allocate the buffer.

        void *bgetr(void *buffer, bufsize newsize);

    Reallocate a buffer previously allocated by bget(), changing its
    size to <newsize> and preserving all existing data.  NULL is
    returned if insufficient memory is available to reallocate the
    buffer, in which case the original buffer remains intact.

        void brel(void *buf);

    Return the buffer <buf>, previously allocated by bget(), to the
    free space pool.
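    As an illustration only, and reusing the invented my_pool pool set
    from the earlier sketch, the keep-the-original-on-failure
    behaviour described for bgetr() can be exercised like this (the
    sizes and the name realloc_example are arbitrary, and the extended
    argument lists of the definitions in this file are used again):

        void realloc_example(void)
        {
            void *p = bgetz(0, 0, 64, &my_pool);
            void *q;

            if (p == NULL)
                return;
            q = bgetr(p, 0, 0, 256, &my_pool);
            if (q == NULL)
                brel(p, &my_pool, true);
            else
                brel(q, &my_pool, true);
        }

    If bgetr() returns NULL the original buffer p is still valid and
    must eventually be released; on success the old buffer has already
    been released by bgetr() and only q remains to be freed.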
        void bectl(int (*compact)(bufsize sizereq, int sequence),
                   void *(*acquire)(bufsize size),
                   void (*release)(void *buf),
                   bufsize pool_incr);

    Expansion control: specify functions through which the package may
    compact storage (or take other appropriate action) when an
    allocation request fails, and optionally automatically acquire
    storage for expansion blocks when necessary, and release such
    blocks when they become empty.  If <compact> is non-NULL, whenever
    a buffer allocation request fails, the <compact> function will be
    called with arguments specifying the number of bytes (total buffer
    size, including header overhead) required to satisfy the
    allocation request, and a sequence number indicating the number of
    consecutive calls on <compact> attempting to satisfy this
    allocation request.  The sequence number is 1 for the first call
    on <compact> for a given allocation request, and increments on
    subsequent calls, permitting the <compact> function to take
    increasingly dire measures in an attempt to free up storage.  If
    the <compact> function returns a nonzero value, the allocation
    attempt is re-tried.  If <compact> returns 0 (as it must if it
    isn't able to release any space or add storage to the buffer
    pool), the allocation request fails, which can trigger automatic
    pool expansion if the <acquire> argument is non-NULL.  At the time
    the <compact> function is called, the state of the buffer
    allocator is identical to that at the moment the allocation
    request was made; consequently, the <compact> function may call
    brel(), bpool(), bstats(), and/or directly manipulate the buffer
    pool in any manner which would be valid were the application in
    control.  This does not, however, relieve the <compact> function
    of the need to ensure that whatever actions it takes do not change
    things underneath the application that made the allocation
    request.  For example, a <compact> function that released a buffer
    in the process of being reallocated with bgetr() would lead to
    disaster.  Implementing a safe and effective <compact> mechanism
    requires careful design of an application's memory architecture,
    and cannot generally be easily retrofitted into existing code.

    If <acquire> is non-NULL, that function will be called whenever an
    allocation request fails.  If the <acquire> function succeeds in
    allocating the requested space and returns a pointer to the new
    area, allocation will proceed using the expanded buffer pool.  If
    <acquire> cannot obtain the requested space, it should return NULL
    and the entire allocation process will fail.  <pool_incr>
    specifies the normal expansion block size.  Providing an <acquire>
    function will cause subsequent bget() requests for buffers too
    large to be managed in the linked-block scheme (in other words,
    larger than <pool_incr> minus the buffer overhead) to be satisfied
    directly by calls to the <acquire> function.  Automatic release of
    empty pool blocks will occur only if all pool blocks in the system
    are the size given by <pool_incr>.
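    As an illustration only, patterned on the bcompact(), bexpand(),
    and bshrink() call-backs in the built-in test program at the end
    of this file, a sketch of the call-back wiring follows.  It
    assumes BECtl is defined, that a hosted malloc()/free() pair is
    available to back the expansion blocks, and that the invented
    my_pool pool set from the earlier sketches is used; the
    32768-byte increment and the function names are made up for the
    example.

        static int my_compact(bufsize sizereq, int sequence)
        {
            return 0;
        }

        static void *my_acquire(bufsize size)
        {
            return malloc(size);
        }

        static void my_release(void *buf)
        {
            free(buf);
        }

        void expansion_setup(void)
        {
            bectl(my_compact, my_acquire, my_release, (bufsize) 32768,
                  &my_pool);
        }

    A <compact> function that always returns 0, as here, simply defers
    to <acquire>; a real one would attempt to release storage before
    giving up.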
        void bstats(bufsize *curalloc, bufsize *totfree,
                    bufsize *maxfree, long *nget, long *nrel);

    The amount of space currently allocated is stored into the
    variable pointed to by <curalloc>.  The total free space (sum of
    all free blocks in the pool) is stored into the variable pointed
    to by <totfree>, and the size of the largest single block in the
    free space pool is stored into the variable pointed to by
    <maxfree>.  The variables pointed to by <nget> and <nrel> are
    filled, respectively, with the number of successful (non-NULL
    return) bget() calls and the number of brel() calls.

        void bstatse(bufsize *pool_incr, long *npool,
                     long *npget, long *nprel,
                     long *ndget, long *ndrel);

    Extended statistics: The expansion block size will be stored into
    the variable pointed to by <pool_incr>, or the negative thereof if
    automatic expansion block releases are disabled.  The number of
    currently active pool blocks will be stored into the variable
    pointed to by <npool>.  The variables pointed to by <npget> and
    <nprel> will be filled with, respectively, the number of expansion
    block acquisitions and releases which have occurred.  The
    variables pointed to by <ndget> and <ndrel> will be filled with
    the number of bget() and brel() calls, respectively, managed
    through blocks directly allocated by the acquisition and release
    functions.

        void bufdump(void *buf);

    The buffer pointed to by <buf> is dumped on standard output.

        void bpoold(void *pool, int dumpalloc, int dumpfree);

    All buffers in the buffer pool <pool>, previously initialised by a
    call on bpool(), are listed in ascending memory address order.  If
    <dumpalloc> is nonzero, the contents of allocated buffers are
    dumped; if <dumpfree> is nonzero, the contents of free blocks are
    dumped.

        int bpoolv(void *pool);

    The named buffer pool, previously initialised by a call on
    bpool(), is validated for bad pointers, overwritten data, etc.  If
    compiled with NDEBUG not defined, any error generates an assertion
    failure.  Otherwise 1 is returned if the pool is valid, 0 if an
    error is found.


    BGET CONFIGURATION
    ==================
*/

/*
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ST BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg    20000	/* Generate built-in test program
				   if defined.  The value specifies
				   how many buffer allocation attempts
				   the test program should make. */

#define SizeQuant   4		/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size.  This
				   MUST be a power of two. */

#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool.
*/ 433 434 #define DumpData 1 /* Define this symbol to enable the 435 bufdump() function which allows 436 dumping the contents of an allocated 437 or free buffer. */ 438 439 #define BufStats 1 /* Define this symbol to enable the 440 bstats() function which calculates 441 the total free space in the buffer 442 pool, the largest available 443 buffer, and the total space 444 currently allocated. */ 445 446 #define FreeWipe 1 /* Wipe free buffers to a guaranteed 447 pattern of garbage to trip up 448 miscreants who attempt to use 449 pointers into released buffers. */ 450 451 #define BestFit 1 /* Use a best fit algorithm when 452 searching for space for an 453 allocation request. This uses 454 memory more efficiently, but 455 allocation will be much slower. */ 456 457 #define BECtl 1 /* Define this symbol to enable the 458 bectl() function for automatic 459 pool space control. */ 460 #endif 461 462 #include <stdio.h> 463 #include <stdbool.h> 464 465 #ifdef lint 466 #define NDEBUG /* Exits in asserts confuse lint */ 467 /* LINTLIBRARY */ /* Don't complain about def, no ref */ 468 extern char *sprintf(); /* Sun includes don't define sprintf */ 469 #endif 470 471 #include <assert.h> 472 #include <memory.h> 473 474 #ifdef BufDump /* BufDump implies DumpData */ 475 #ifndef DumpData 476 #define DumpData 1 477 #endif 478 #endif 479 480 #ifdef DumpData 481 #include <ctype.h> 482 #endif 483 484 #ifdef __KERNEL__ 485 #ifdef CFG_CORE_BGET_BESTFIT 486 #define BestFit 1 487 #endif 488 #endif 489 490 /* Declare the interface, including the requested buffer size type, 491 bufsize. */ 492 493 #include "bget.h" 494 495 #define MemSize int /* Type for size arguments to memxxx() 496 functions such as memcmp(). */ 497 498 /* Queue links */ 499 500 struct qlinks { 501 struct bfhead *flink; /* Forward link */ 502 struct bfhead *blink; /* Backward link */ 503 }; 504 505 /* Header in allocated and free buffers */ 506 507 struct bhead { 508 bufsize prevfree; /* Relative link back to previous 509 free buffer in memory or 0 if 510 previous buffer is allocated. */ 511 bufsize bsize; /* Buffer size: positive if free, 512 negative if allocated. 
*/ 513 }; 514 #define BH(p) ((struct bhead *) (p)) 515 516 /* Header in directly allocated buffers (by acqfcn) */ 517 518 struct bdhead { 519 bufsize tsize; /* Total size, including overhead */ 520 bufsize offs; /* Offset from allocated buffer */ 521 struct bhead bh; /* Common header */ 522 }; 523 #define BDH(p) ((struct bdhead *) (p)) 524 525 /* Header in free buffers */ 526 527 struct bfhead { 528 struct bhead bh; /* Common allocated/free header */ 529 struct qlinks ql; /* Links on free list */ 530 }; 531 #define BFH(p) ((struct bfhead *) (p)) 532 533 /* Poolset definition */ 534 struct bpoolset { 535 struct bfhead freelist; 536 #ifdef BufStats 537 bufsize totalloc; /* Total space currently allocated */ 538 long numget; /* Number of bget() calls */ 539 long numrel; /* Number of brel() calls */ 540 #ifdef BECtl 541 long numpblk; /* Number of pool blocks */ 542 long numpget; /* Number of block gets and rels */ 543 long numprel; 544 long numdget; /* Number of direct gets and rels */ 545 long numdrel; 546 #endif /* BECtl */ 547 #endif /* BufStats */ 548 549 #ifdef BECtl 550 /* Automatic expansion block management functions */ 551 552 int (*compfcn) _((bufsize sizereq, int sequence)); 553 void *(*acqfcn) _((bufsize size)); 554 void (*relfcn) _((void *buf)); 555 556 bufsize exp_incr; /* Expansion block size */ 557 bufsize pool_len; /* 0: no bpool calls have been made 558 -1: not all pool blocks are 559 the same size 560 >0: (common) block size for all 561 bpool calls made so far 562 */ 563 #endif 564 }; 565 566 /* Minimum allocation quantum: */ 567 568 #define QLSize (sizeof(struct qlinks)) 569 #define SizeQ ((SizeQuant > QLSize) ? SizeQuant : QLSize) 570 571 #define V (void) /* To denote unwanted returned values */ 572 573 /* End sentinel: value placed in bsize field of dummy block delimiting 574 end of pool block. The most negative number which will fit in a 575 bufsize, defined in a way that the compiler will accept. */ 576 577 #define ESent ((bufsize) (-(((1L << (sizeof(bufsize) * 8 - 2)) - 1) * 2) - 2)) 578 579 static bufsize buf_get_pos(struct bfhead *bf, bufsize align, bufsize hdr_size, 580 bufsize size) 581 { 582 unsigned long buf = 0; 583 bufsize pos = 0; 584 585 if (bf->bh.bsize < size) 586 return -1; 587 588 /* 589 * plus sizeof(struct bhead) and hdr_size since buf will follow just 590 * after a struct bhead and an eventual extra header. 591 */ 592 buf = (unsigned long)bf + bf->bh.bsize - size + sizeof(struct bhead) + 593 hdr_size; 594 buf &= ~(align - 1); 595 pos = buf - (unsigned long)bf - sizeof(struct bhead) - hdr_size; 596 597 if (pos == 0) /* exact match */ 598 return pos; 599 if (pos >= SizeQ + sizeof(struct bhead)) /* room for an empty buffer */ 600 return pos; 601 602 return -1; 603 } 604 605 /* BGET -- Allocate a buffer. */ 606 607 void *bget(requested_align, hdr_size, requested_size, poolset) 608 bufsize requested_align; 609 bufsize hdr_size; 610 bufsize requested_size; 611 struct bpoolset *poolset; 612 { 613 bufsize align = requested_align; 614 bufsize size = requested_size; 615 bufsize pos; 616 struct bfhead *b; 617 #ifdef BestFit 618 struct bfhead *best; 619 #endif 620 void *buf; 621 #ifdef BECtl 622 int compactseq = 0; 623 #endif 624 625 assert(size > 0); 626 COMPILE_TIME_ASSERT(BGET_HDR_QUANTUM == SizeQ); 627 628 if (align < 0 || (align > 0 && !IS_POWER_OF_TWO((unsigned long)align))) 629 return NULL; 630 if (hdr_size % BGET_HDR_QUANTUM != 0) 631 return NULL; 632 633 if (size < SizeQ) { /* Need at least room for the */ 634 size = SizeQ; /* queue links. 
*/ 635 } 636 if (align < SizeQ) 637 align = SizeQ; 638 #ifdef SizeQuant 639 #if SizeQuant > 1 640 if (ADD_OVERFLOW(size, SizeQuant - 1, &size)) 641 return NULL; 642 643 size = ROUNDDOWN(size, SizeQuant); 644 #endif 645 #endif 646 647 /* Add overhead in allocated buffer to size required. */ 648 if (ADD_OVERFLOW(size, sizeof(struct bhead), &size)) 649 return NULL; 650 if (ADD_OVERFLOW(size, hdr_size, &size)) 651 return NULL; 652 653 #ifdef BECtl 654 /* If a compact function was provided in the call to bectl(), wrap 655 a loop around the allocation process to allow compaction to 656 intervene in case we don't find a suitable buffer in the chain. */ 657 658 while (1) { 659 #endif 660 b = poolset->freelist.ql.flink; 661 #ifdef BestFit 662 best = &poolset->freelist; 663 #endif 664 665 666 /* Scan the free list searching for the first buffer big enough 667 to hold the requested size buffer. */ 668 669 #ifdef BestFit 670 while (b != &poolset->freelist) { 671 assert(b->bh.prevfree == 0); 672 pos = buf_get_pos(b, align, hdr_size, size); 673 if (pos >= 0) { 674 if ((best == &poolset->freelist) || 675 (b->bh.bsize < best->bh.bsize)) { 676 best = b; 677 } 678 } 679 b = b->ql.flink; /* Link to next buffer */ 680 } 681 b = best; 682 #endif /* BestFit */ 683 684 while (b != &poolset->freelist) { 685 pos = buf_get_pos(b, align, hdr_size, size); 686 if (pos >= 0) { 687 struct bhead *b_alloc = BH((char *)b + pos); 688 struct bhead *b_next = BH((char *)b + b->bh.bsize); 689 690 assert(b_next->prevfree == b->bh.bsize); 691 692 /* 693 * Zero the back pointer in the next buffer in memory 694 * to indicate that this buffer is allocated. 695 */ 696 b_next->prevfree = 0; 697 698 assert(b->ql.blink->ql.flink == b); 699 assert(b->ql.flink->ql.blink == b); 700 701 if (pos == 0) { 702 /* 703 * Need to allocate from the beginning of this free block. 704 * Unlink the block and mark it as allocated. 705 */ 706 b->ql.blink->ql.flink = b->ql.flink; 707 b->ql.flink->ql.blink = b->ql.blink; 708 709 /* Negate size to mark buffer allocated. */ 710 b->bh.bsize = -b->bh.bsize; 711 } else { 712 /* 713 * Carve out the memory allocation from the end of this 714 * free block. Negative size to mark buffer allocated. 715 */ 716 b_alloc->bsize = -(b->bh.bsize - pos); 717 b_alloc->prevfree = pos; 718 b->bh.bsize = pos; 719 } 720 721 assert(b_alloc->bsize < 0); 722 /* 723 * At this point is b_alloc pointing to the allocated 724 * buffer and b_next at the buffer following. b might be a 725 * free block or a used block now. 726 */ 727 if (-b_alloc->bsize - size > SizeQ + sizeof(struct bhead)) { 728 /* 729 * b_alloc has too much unused memory at the 730 * end we need to split the block and register that 731 * last part as free. 
732 */ 733 b = BFH((char *)b_alloc + size); 734 b->bh.bsize = -b_alloc->bsize - size; 735 b->bh.prevfree = 0; 736 b_alloc->bsize += b->bh.bsize; 737 738 assert(poolset->freelist.ql.blink->ql.flink == 739 &poolset->freelist); 740 assert(poolset->freelist.ql.flink->ql.blink == 741 &poolset->freelist); 742 b->ql.flink = &poolset->freelist; 743 b->ql.blink = poolset->freelist.ql.blink; 744 poolset->freelist.ql.blink = b; 745 b->ql.blink->ql.flink = b; 746 747 assert(BH((char *)b + b->bh.bsize) == b_next); 748 b_next->prevfree = b->bh.bsize; 749 } 750 751 #ifdef BufStats 752 poolset->totalloc -= b_alloc->bsize; 753 poolset->numget++; /* Increment number of bget() calls */ 754 #endif 755 buf = (char *)b_alloc + sizeof(struct bhead); 756 return buf; 757 } 758 b = b->ql.flink; /* Link to next buffer */ 759 } 760 #ifdef BECtl 761 762 /* We failed to find a buffer. If there's a compact function 763 defined, notify it of the size requested. If it returns 764 TRUE, try the allocation again. */ 765 766 if ((poolset->compfcn == NULL) || 767 (!(poolset->compfcn)(size, ++compactseq))) { 768 break; 769 } 770 } 771 772 /* No buffer available with requested size free. */ 773 774 /* Don't give up yet -- look in the reserve supply. */ 775 776 if (poolset->acqfcn != NULL) { 777 if (size > exp_incr - sizeof(struct bfhead) - align) { 778 779 /* Request is too large to fit in a single expansion 780 block. Try to satisy it by a direct buffer acquisition. */ 781 char *p; 782 783 size += sizeof(struct bdhead) - sizeof(struct bhead); 784 if (align > QLSize) 785 size += align; 786 p = poolset->acqfcn(size); 787 if (p != NULL) { 788 struct bdhead *bdh; 789 790 if (align <= QLSize) { 791 bdh = BDH(p); 792 buf = bdh + 1; 793 } else { 794 unsigned long tp = (unsigned long)p; 795 796 tp += sizeof(*bdh) + hdr_size + align; 797 tp &= ~(align - 1); 798 tp -= hdr_size; 799 buf = (void *)tp; 800 bdh = BDH((char *)buf - sizeof(*bdh)); 801 } 802 803 /* Mark the buffer special by setting the size field 804 of its header to zero. */ 805 bdh->bh.bsize = 0; 806 bdh->bh.prevfree = 0; 807 bdh->tsize = size; 808 bdh->offs = (unsigned long)bdh - (unsigned long)p; 809 #ifdef BufStats 810 poolset->totalloc += size; 811 poolset->numget++; /* Increment number of bget() calls */ 812 poolset->numdget++; /* Direct bget() call count */ 813 #endif 814 return buf; 815 } 816 817 } else { 818 819 /* Try to obtain a new expansion block */ 820 821 void *newpool; 822 823 if ((newpool = poolset->acqfcn((bufsize) exp_incr)) != NULL) { 824 bpool(newpool, exp_incr, poolset); 825 buf = bget(align, hdr_size, requested_size, pool); /* This can't, I say, can't 826 get into a loop. */ 827 return buf; 828 } 829 } 830 } 831 832 /* Still no buffer available */ 833 834 #endif /* BECtl */ 835 836 return NULL; 837 } 838 839 /* BGETZ -- Allocate a buffer and clear its contents to zero. We clear 840 the entire contents of the buffer to zero, not just the 841 region requested by the caller. 
*/ 842 843 void *bgetz(align, hdr_size, size, poolset) 844 bufsize align; 845 bufsize hdr_size; 846 bufsize size; 847 struct bpoolset *poolset; 848 { 849 char *buf = (char *) bget(align, hdr_size, size, poolset); 850 851 if (buf != NULL) { 852 struct bhead *b; 853 bufsize rsize; 854 855 b = BH(buf - sizeof(struct bhead)); 856 rsize = -(b->bsize); 857 if (rsize == 0) { 858 struct bdhead *bd; 859 860 bd = BDH(buf - sizeof(struct bdhead)); 861 rsize = bd->tsize - sizeof(struct bdhead) - bd->offs; 862 } else { 863 rsize -= sizeof(struct bhead); 864 } 865 assert(rsize >= size); 866 V memset_unchecked(buf, 0, (MemSize) rsize); 867 } 868 return ((void *) buf); 869 } 870 871 /* BGETR -- Reallocate a buffer. This is a minimal implementation, 872 simply in terms of brel() and bget(). It could be 873 enhanced to allow the buffer to grow into adjacent free 874 blocks and to avoid moving data unnecessarily. */ 875 876 void *bgetr(buf, align, hdr_size, size, poolset) 877 void *buf; 878 bufsize align; 879 bufsize hdr_size; 880 bufsize size; 881 struct bpoolset *poolset; 882 { 883 void *nbuf; 884 bufsize osize; /* Old size of buffer */ 885 struct bhead *b; 886 887 if ((nbuf = bget(align, hdr_size, size, poolset)) == NULL) { /* Acquire new buffer */ 888 return NULL; 889 } 890 if (buf == NULL) { 891 return nbuf; 892 } 893 b = BH(((char *) buf) - sizeof(struct bhead)); 894 osize = -b->bsize; 895 #ifdef BECtl 896 if (osize == 0) { 897 /* Buffer acquired directly through acqfcn. */ 898 struct bdhead *bd; 899 900 bd = BDH(((char *) buf) - sizeof(struct bdhead)); 901 osize = bd->tsize - sizeof(struct bdhead) - bd->offs; 902 } else 903 #endif 904 osize -= sizeof(struct bhead); 905 assert(osize > 0); 906 V memcpy_unchecked((char *) nbuf, (char *) buf, /* Copy the data */ 907 (MemSize) ((size < osize) ? size : osize)); 908 #ifndef __KERNEL__ 909 /* User space reallocations are always zeroed */ 910 if (size > osize) 911 V memset_unchecked((char *) nbuf + osize, 0, size - osize); 912 #endif 913 brel(buf, poolset, false /* !wipe */); 914 return nbuf; 915 } 916 917 /* BREL -- Release a buffer. */ 918 919 void brel(buf, poolset, wipe) 920 void *buf; 921 struct bpoolset *poolset; 922 int wipe; 923 { 924 struct bfhead *b, *bn; 925 926 b = BFH(((char *) buf) - sizeof(struct bhead)); 927 #ifdef BufStats 928 poolset->numrel++; /* Increment number of brel() calls */ 929 #endif 930 assert(buf != NULL); 931 932 #ifdef FreeWipe 933 wipe = true; 934 #endif 935 #ifdef BECtl 936 if (b->bh.bsize == 0) { /* Directly-acquired buffer? */ 937 struct bdhead *bdh; 938 939 bdh = BDH(((char *) buf) - sizeof(struct bdhead)); 940 assert(b->bh.prevfree == 0); 941 #ifdef BufStats 942 poolset->totalloc -= bdh->tsize; 943 assert(poolset->totalloc >= 0); 944 poolset->numdrel++; /* Number of direct releases */ 945 #endif /* BufStats */ 946 if (wipe) { 947 V memset_unchecked((char *) buf, 0x55, 948 (MemSize) (bdh->tsize - 949 sizeof(struct bdhead))); 950 } 951 assert(poolset->relfcn != NULL); 952 poolset->relfcn((char *)buf - sizeof(struct bdhead) - bdh->offs); /* Release it directly. */ 953 return; 954 } 955 #endif /* BECtl */ 956 957 /* Buffer size must be negative, indicating that the buffer is 958 allocated. 
*/ 959 960 if (b->bh.bsize >= 0) { 961 bn = NULL; 962 } 963 assert(b->bh.bsize < 0); 964 965 /* Back pointer in next buffer must be zero, indicating the 966 same thing: */ 967 968 assert(BH((char *) b - b->bh.bsize)->prevfree == 0); 969 970 #ifdef BufStats 971 poolset->totalloc += b->bh.bsize; 972 assert(poolset->totalloc >= 0); 973 #endif 974 975 /* If the back link is nonzero, the previous buffer is free. */ 976 977 if (b->bh.prevfree != 0) { 978 979 /* The previous buffer is free. Consolidate this buffer with it 980 by adding the length of this buffer to the previous free 981 buffer. Note that we subtract the size in the buffer being 982 released, since it's negative to indicate that the buffer is 983 allocated. */ 984 985 register bufsize size = b->bh.bsize; 986 987 /* Make the previous buffer the one we're working on. */ 988 assert(BH((char *) b - b->bh.prevfree)->bsize == b->bh.prevfree); 989 b = BFH(((char *) b) - b->bh.prevfree); 990 b->bh.bsize -= size; 991 } else { 992 993 /* The previous buffer isn't allocated. Insert this buffer 994 on the free list as an isolated free block. */ 995 996 assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist); 997 assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist); 998 b->ql.flink = &poolset->freelist; 999 b->ql.blink = poolset->freelist.ql.blink; 1000 poolset->freelist.ql.blink = b; 1001 b->ql.blink->ql.flink = b; 1002 b->bh.bsize = -b->bh.bsize; 1003 } 1004 1005 /* Now we look at the next buffer in memory, located by advancing from 1006 the start of this buffer by its size, to see if that buffer is 1007 free. If it is, we combine this buffer with the next one in 1008 memory, dechaining the second buffer from the free list. */ 1009 1010 bn = BFH(((char *) b) + b->bh.bsize); 1011 if (bn->bh.bsize > 0) { 1012 1013 /* The buffer is free. Remove it from the free list and add 1014 its size to that of our buffer. */ 1015 1016 assert(BH((char *) bn + bn->bh.bsize)->prevfree == bn->bh.bsize); 1017 assert(bn->ql.blink->ql.flink == bn); 1018 assert(bn->ql.flink->ql.blink == bn); 1019 bn->ql.blink->ql.flink = bn->ql.flink; 1020 bn->ql.flink->ql.blink = bn->ql.blink; 1021 b->bh.bsize += bn->bh.bsize; 1022 1023 /* Finally, advance to the buffer that follows the newly 1024 consolidated free block. We must set its backpointer to the 1025 head of the consolidated free block. We know the next block 1026 must be an allocated block because the process of recombination 1027 guarantees that two free blocks will never be contiguous in 1028 memory. */ 1029 1030 bn = BFH(((char *) b) + b->bh.bsize); 1031 } 1032 if (wipe) { 1033 V memset_unchecked(((char *) b) + sizeof(struct bfhead), 0x55, 1034 (MemSize) (b->bh.bsize - sizeof(struct bfhead))); 1035 } 1036 assert(bn->bh.bsize < 0); 1037 1038 /* The next buffer is allocated. Set the backpointer in it to point 1039 to this buffer; the previous free buffer in memory. */ 1040 1041 bn->bh.prevfree = b->bh.bsize; 1042 1043 #ifdef BECtl 1044 1045 /* If a block-release function is defined, and this free buffer 1046 constitutes the entire block, release it. Note that pool_len 1047 is defined in such a way that the test will fail unless all 1048 pool blocks are the same size. 
*/ 1049 1050 if (poolset->relfcn != NULL && 1051 ((bufsize) b->bh.bsize) == (pool_len - sizeof(struct bhead))) { 1052 1053 assert(b->bh.prevfree == 0); 1054 assert(BH((char *) b + b->bh.bsize)->bsize == ESent); 1055 assert(BH((char *) b + b->bh.bsize)->prevfree == b->bh.bsize); 1056 /* Unlink the buffer from the free list */ 1057 b->ql.blink->ql.flink = b->ql.flink; 1058 b->ql.flink->ql.blink = b->ql.blink; 1059 1060 poolset->relfcn(b); 1061 #ifdef BufStats 1062 poolset->numprel++; /* Nr of expansion block releases */ 1063 poolset->numpblk--; /* Total number of blocks */ 1064 assert(numpblk == numpget - numprel); 1065 #endif /* BufStats */ 1066 } 1067 #endif /* BECtl */ 1068 } 1069 1070 #ifdef BECtl 1071 1072 /* BECTL -- Establish automatic pool expansion control */ 1073 1074 void bectl(compact, acquire, release, pool_incr, poolset) 1075 int (*compact) _((bufsize sizereq, int sequence)); 1076 void *(*acquire) _((bufsize size)); 1077 void (*release) _((void *buf)); 1078 bufsize pool_incr; 1079 struct bpoolset *poolset; 1080 { 1081 poolset->compfcn = compact; 1082 poolset->acqfcn = acquire; 1083 poolset->relfcn = release; 1084 poolset->exp_incr = pool_incr; 1085 } 1086 #endif 1087 1088 /* BPOOL -- Add a region of memory to the buffer pool. */ 1089 1090 void bpool(buf, len, poolset) 1091 void *buf; 1092 bufsize len; 1093 struct bpoolset *poolset; 1094 { 1095 struct bfhead *b = BFH(buf); 1096 struct bhead *bn; 1097 1098 #ifdef SizeQuant 1099 len &= ~(SizeQuant - 1); 1100 #endif 1101 #ifdef BECtl 1102 if (poolset->pool_len == 0) { 1103 pool_len = len; 1104 } else if (len != poolset->pool_len) { 1105 poolset->pool_len = -1; 1106 } 1107 #ifdef BufStats 1108 poolset->numpget++; /* Number of block acquisitions */ 1109 poolset->numpblk++; /* Number of blocks total */ 1110 assert(poolset->numpblk == poolset->numpget - poolset->numprel); 1111 #endif /* BufStats */ 1112 #endif /* BECtl */ 1113 1114 /* Since the block is initially occupied by a single free buffer, 1115 it had better not be (much) larger than the largest buffer 1116 whose size we can store in bhead.bsize. */ 1117 1118 assert(len - sizeof(struct bhead) <= -((bufsize) ESent + 1)); 1119 1120 /* Clear the backpointer at the start of the block to indicate that 1121 there is no free block prior to this one. That blocks 1122 recombination when the first block in memory is released. */ 1123 1124 b->bh.prevfree = 0; 1125 1126 /* Chain the new block to the free list. */ 1127 1128 assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist); 1129 assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist); 1130 b->ql.flink = &poolset->freelist; 1131 b->ql.blink = poolset->freelist.ql.blink; 1132 poolset->freelist.ql.blink = b; 1133 b->ql.blink->ql.flink = b; 1134 1135 /* Create a dummy allocated buffer at the end of the pool. This dummy 1136 buffer is seen when a buffer at the end of the pool is released and 1137 blocks recombination of the last buffer with the dummy buffer at 1138 the end. The length in the dummy buffer is set to the largest 1139 negative number to denote the end of the pool for diagnostic 1140 routines (this specific value is not counted on by the actual 1141 allocation and release functions). 
*/ 1142 1143 len -= sizeof(struct bhead); 1144 b->bh.bsize = (bufsize) len; 1145 #ifdef FreeWipe 1146 V memset_unchecked(((char *) b) + sizeof(struct bfhead), 0x55, 1147 (MemSize) (len - sizeof(struct bfhead))); 1148 #endif 1149 bn = BH(((char *) b) + len); 1150 bn->prevfree = (bufsize) len; 1151 /* Definition of ESent assumes two's complement! */ 1152 assert((~0) == -1); 1153 bn->bsize = ESent; 1154 } 1155 1156 #ifdef BufStats 1157 1158 /* BSTATS -- Return buffer allocation free space statistics. */ 1159 1160 void bstats(curalloc, totfree, maxfree, nget, nrel, poolset) 1161 bufsize *curalloc, *totfree, *maxfree; 1162 long *nget, *nrel; 1163 struct bpoolset *poolset; 1164 { 1165 struct bfhead *b = poolset->freelist.ql.flink; 1166 1167 *nget = poolset->numget; 1168 *nrel = poolset->numrel; 1169 *curalloc = poolset->totalloc; 1170 *totfree = 0; 1171 *maxfree = -1; 1172 while (b != &poolset->freelist) { 1173 assert(b->bh.bsize > 0); 1174 *totfree += b->bh.bsize; 1175 if (b->bh.bsize > *maxfree) { 1176 *maxfree = b->bh.bsize; 1177 } 1178 b = b->ql.flink; /* Link to next buffer */ 1179 } 1180 } 1181 1182 #ifdef BECtl 1183 1184 /* BSTATSE -- Return extended statistics */ 1185 1186 void bstatse(pool_incr, npool, npget, nprel, ndget, ndrel, poolset) 1187 bufsize *pool_incr; 1188 long *npool, *npget, *nprel, *ndget, *ndrel; 1189 struct bpoolset *poolset; 1190 { 1191 *pool_incr = (poolset->pool_len < 0) ? 1192 -poolset->exp_incr : poolset->exp_incr; 1193 *npool = poolset->numpblk; 1194 *npget = poolset->numpget; 1195 *nprel = poolset->numprel; 1196 *ndget = poolset->numdget; 1197 *ndrel = poolset->numdrel; 1198 } 1199 #endif /* BECtl */ 1200 #endif /* BufStats */ 1201 1202 #ifdef DumpData 1203 1204 /* BUFDUMP -- Dump the data in a buffer. This is called with the user 1205 data pointer, and backs up to the buffer header. It will 1206 dump either a free block or an allocated one. */ 1207 1208 void bufdump(buf) 1209 void *buf; 1210 { 1211 struct bfhead *b; 1212 unsigned char *bdump; 1213 bufsize bdlen; 1214 1215 b = BFH(((char *) buf) - sizeof(struct bhead)); 1216 assert(b->bh.bsize != 0); 1217 if (b->bh.bsize < 0) { 1218 bdump = (unsigned char *) buf; 1219 bdlen = (-b->bh.bsize) - sizeof(struct bhead); 1220 } else { 1221 bdump = (unsigned char *) (((char *) b) + sizeof(struct bfhead)); 1222 bdlen = b->bh.bsize - sizeof(struct bfhead); 1223 } 1224 1225 while (bdlen > 0) { 1226 int i, dupes = 0; 1227 bufsize l = bdlen; 1228 char bhex[50], bascii[20]; 1229 1230 if (l > 16) { 1231 l = 16; 1232 } 1233 1234 for (i = 0; i < l; i++) { 1235 V snprintf(bhex + i * 3, sizeof(bhex) - i * 3, "%02X ", 1236 bdump[i]); 1237 bascii[i] = isprint(bdump[i]) ? bdump[i] : ' '; 1238 } 1239 bascii[i] = 0; 1240 V printf("%-48s %s\n", bhex, bascii); 1241 bdump += l; 1242 bdlen -= l; 1243 while ((bdlen > 16) && (memcmp((char *) (bdump - 16), 1244 (char *) bdump, 16) == 0)) { 1245 dupes++; 1246 bdump += 16; 1247 bdlen -= 16; 1248 } 1249 if (dupes > 1) { 1250 V printf( 1251 " (%d lines [%d bytes] identical to above line skipped)\n", 1252 dupes, dupes * 16); 1253 } else if (dupes == 1) { 1254 bdump -= 16; 1255 bdlen += 16; 1256 } 1257 } 1258 } 1259 #endif 1260 1261 #ifdef BufDump 1262 1263 /* BPOOLD -- Dump a buffer pool. The buffer headers are always listed. 1264 If DUMPALLOC is nonzero, the contents of allocated buffers 1265 are dumped. If DUMPFREE is nonzero, free blocks are 1266 dumped as well. If FreeWipe checking is enabled, free 1267 blocks which have been clobbered will always be dumped. 
*/ 1268 1269 void bpoold(buf, dumpalloc, dumpfree) 1270 void *buf; 1271 int dumpalloc, dumpfree; 1272 { 1273 struct bfhead *b = BFH(buf); 1274 1275 while (b->bh.bsize != ESent) { 1276 bufsize bs = b->bh.bsize; 1277 1278 if (bs < 0) { 1279 bs = -bs; 1280 V printf("Allocated buffer: size %6ld bytes.\n", (long) bs); 1281 if (dumpalloc) { 1282 bufdump((void *) (((char *) b) + sizeof(struct bhead))); 1283 } 1284 } else { 1285 char *lerr = ""; 1286 1287 assert(bs > 0); 1288 if ((b->ql.blink->ql.flink != b) || 1289 (b->ql.flink->ql.blink != b)) { 1290 lerr = " (Bad free list links)"; 1291 } 1292 V printf("Free block: size %6ld bytes.%s\n", 1293 (long) bs, lerr); 1294 #ifdef FreeWipe 1295 lerr = ((char *) b) + sizeof(struct bfhead); 1296 if ((bs > sizeof(struct bfhead)) && ((*lerr != 0x55) || 1297 (memcmp(lerr, lerr + 1, 1298 (MemSize) (bs - (sizeof(struct bfhead) + 1))) != 0))) { 1299 V printf( 1300 "(Contents of above free block have been overstored.)\n"); 1301 bufdump((void *) (((char *) b) + sizeof(struct bhead))); 1302 } else 1303 #endif 1304 if (dumpfree) { 1305 bufdump((void *) (((char *) b) + sizeof(struct bhead))); 1306 } 1307 } 1308 b = BFH(((char *) b) + bs); 1309 } 1310 } 1311 #endif /* BufDump */ 1312 1313 #ifdef BufValid 1314 1315 /* BPOOLV -- Validate a buffer pool. If NDEBUG isn't defined, 1316 any error generates an assertion failure. */ 1317 1318 int bpoolv(buf) 1319 void *buf; 1320 { 1321 struct bfhead *b = BFH(buf); 1322 1323 while (b->bh.bsize != ESent) { 1324 bufsize bs = b->bh.bsize; 1325 1326 if (bs < 0) { 1327 bs = -bs; 1328 } else { 1329 const char *lerr = ""; 1330 1331 assert(bs > 0); 1332 if (bs <= 0) { 1333 return 0; 1334 } 1335 if ((b->ql.blink->ql.flink != b) || 1336 (b->ql.flink->ql.blink != b)) { 1337 V printf("Free block: size %6ld bytes. (Bad free list links)\n", 1338 (long) bs); 1339 assert(0); 1340 return 0; 1341 } 1342 #ifdef FreeWipe 1343 lerr = ((char *) b) + sizeof(struct bfhead); 1344 if ((bs > sizeof(struct bfhead)) && ((*lerr != 0x55) || 1345 (memcmp(lerr, lerr + 1, 1346 (MemSize) (bs - (sizeof(struct bfhead) + 1))) != 0))) { 1347 V printf( 1348 "(Contents of above free block have been overstored.)\n"); 1349 bufdump((void *) (((char *) b) + sizeof(struct bhead))); 1350 assert(0); 1351 return 0; 1352 } 1353 #endif 1354 } 1355 b = BFH(((char *) b) + bs); 1356 } 1357 return 1; 1358 } 1359 #endif /* BufValid */ 1360 1361 /***********************\ 1362 * * 1363 * Built-in test program * 1364 * * 1365 \***********************/ 1366 1367 #if !defined(__KERNEL__) && !defined(__LDELF__) && defined(CFG_TA_BGET_TEST) 1368 1369 #define TestProg 20000 1370 1371 #ifdef BECtl 1372 #define PoolSize 300000 /* Test buffer pool size */ 1373 #else 1374 #define PoolSize 50000 /* Test buffer pool size */ 1375 #endif 1376 #define ExpIncr 32768 /* Test expansion block size */ 1377 #define CompactTries 10 /* Maximum tries at compacting */ 1378 1379 #define dumpAlloc 0 /* Dump allocated buffers ? */ 1380 #define dumpFree 0 /* Dump free buffers ? 
*/ 1381 1382 static char *bchain = NULL; /* Our private buffer chain */ 1383 static char *bp = NULL; /* Our initial buffer pool */ 1384 1385 #ifdef UsingFloat 1386 #include <math.h> 1387 #endif 1388 1389 static unsigned long int next = 1; 1390 1391 static void *(*mymalloc)(size_t size); 1392 static void (*myfree)(void *ptr); 1393 1394 static struct bpoolset mypoolset = { 1395 .freelist = { 1396 .bh = { 0, 0}, 1397 .ql = { &mypoolset.freelist, &mypoolset.freelist}, 1398 } 1399 }; 1400 1401 /* Return next random integer */ 1402 1403 static int myrand(void) 1404 { 1405 next = next * 1103515245L + 12345; 1406 return (unsigned int) (next / 65536L) % 32768L; 1407 } 1408 1409 /* Set seed for random generator */ 1410 1411 static void mysrand(unsigned int seed) 1412 { 1413 next = seed; 1414 } 1415 1416 /* STATS -- Edit statistics returned by bstats() or bstatse(). */ 1417 1418 static void stats(const char *when __maybe_unused, 1419 struct bpoolset *poolset __maybe_unused) 1420 { 1421 #ifdef BufStats 1422 bufsize cural, totfree, maxfree; 1423 long nget, nfree; 1424 #endif 1425 #ifdef BECtl 1426 bufsize pincr; 1427 long totblocks, npget, nprel, ndget, ndrel; 1428 #endif 1429 1430 #ifdef BufStats 1431 bstats(&cural, &totfree, &maxfree, &nget, &nfree, poolset); 1432 V printf( 1433 "%s: %ld gets, %ld releases. %ld in use, %ld free, largest = %ld\n", 1434 when, nget, nfree, (long) cural, (long) totfree, (long) maxfree); 1435 #endif 1436 #ifdef BECtl 1437 bstatse(&pincr, &totblocks, &npget, &nprel, &ndget, &ndrel, poolset); 1438 V printf( 1439 " Blocks: size = %ld, %ld (%ld bytes) in use, %ld gets, %ld frees\n", 1440 (long)pincr, totblocks, pincr * totblocks, npget, nprel); 1441 V printf(" %ld direct gets, %ld direct frees\n", ndget, ndrel); 1442 #endif /* BECtl */ 1443 } 1444 1445 #ifdef BECtl 1446 static int protect = 0; /* Disable compaction during bgetr() */ 1447 1448 /* BCOMPACT -- Compaction call-back function. */ 1449 1450 static int bcompact(bsize, seq) 1451 bufsize bsize; 1452 int seq; 1453 { 1454 #ifdef CompactTries 1455 char *bc = bchain; 1456 int i = myrand() & 0x3; 1457 1458 #ifdef COMPACTRACE 1459 V printf("Compaction requested. %ld bytes needed, sequence %d.\n", 1460 (long) bsize, seq); 1461 #endif 1462 1463 if (protect || (seq > CompactTries)) { 1464 #ifdef COMPACTRACE 1465 V printf("Compaction gave up.\n"); 1466 #endif 1467 return 0; 1468 } 1469 1470 /* Based on a random cast, release a random buffer in the list 1471 of allocated buffers. */ 1472 1473 while (i > 0 && bc != NULL) { 1474 bc = *((char **) bc); 1475 i--; 1476 } 1477 if (bc != NULL) { 1478 char *fb; 1479 1480 fb = *((char **) bc); 1481 if (fb != NULL) { 1482 *((char **) bc) = *((char **) fb); 1483 brel((void *) fb); 1484 return 1; 1485 } 1486 } 1487 1488 #ifdef COMPACTRACE 1489 V printf("Compaction bailed out.\n"); 1490 #endif 1491 #endif /* CompactTries */ 1492 return 0; 1493 } 1494 1495 /* BEXPAND -- Expand pool call-back function. */ 1496 1497 static void *bexpand(size) 1498 bufsize size; 1499 { 1500 void *np = NULL; 1501 bufsize cural, totfree, maxfree; 1502 long nget, nfree; 1503 1504 /* Don't expand beyond the total allocated size given by PoolSize. */ 1505 1506 bstats(&cural, &totfree, &maxfree, &nget, &nfree); 1507 1508 if (cural < PoolSize) { 1509 np = (void *) mymalloc((unsigned) size); 1510 } 1511 #ifdef EXPTRACE 1512 V printf("Expand pool by %ld -- %s.\n", (long) size, 1513 np == NULL ? "failed" : "succeeded"); 1514 #endif 1515 return np; 1516 } 1517 1518 /* BSHRINK -- Shrink buffer pool call-back function. 
*/ 1519 1520 static void bshrink(buf) 1521 void *buf; 1522 { 1523 if (((char *) buf) == bp) { 1524 #ifdef EXPTRACE 1525 V printf("Initial pool released.\n"); 1526 #endif 1527 bp = NULL; 1528 } 1529 #ifdef EXPTRACE 1530 V printf("Shrink pool.\n"); 1531 #endif 1532 myfree((char *) buf); 1533 } 1534 1535 #endif /* BECtl */ 1536 1537 /* Restrict buffer requests to those large enough to contain our pointer and 1538 small enough for the CPU architecture. */ 1539 1540 static bufsize blimit(bufsize bs) 1541 { 1542 if (bs < sizeof(char *)) { 1543 bs = sizeof(char *); 1544 } 1545 1546 /* This is written out in this ugly fashion because the 1547 cool expression in sizeof(int) that auto-configured 1548 to any length int befuddled some compilers. */ 1549 1550 if (sizeof(int) == 2) { 1551 if (bs > 32767) { 1552 bs = 32767; 1553 } 1554 } else { 1555 if (bs > 200000) { 1556 bs = 200000; 1557 } 1558 } 1559 return bs; 1560 } 1561 1562 int bget_main_test(void *(*malloc_func)(size_t), void (*free_func)(void *)) 1563 { 1564 int i; 1565 #ifdef UsingFloat 1566 double x; 1567 #endif 1568 1569 mymalloc = malloc_func; 1570 myfree = free_func; 1571 1572 /* Seed the random number generator. If Repeatable is defined, we 1573 always use the same seed. Otherwise, we seed from the clock to 1574 shake things up from run to run. */ 1575 1576 mysrand(1234); 1577 1578 /* Compute x such that pow(x, p) ranges between 1 and 4*ExpIncr as 1579 p ranges from 0 to ExpIncr-1, with a concentration in the lower 1580 numbers. */ 1581 1582 #ifdef UsingFloat 1583 x = 4.0 * ExpIncr; 1584 x = log(x); 1585 x = exp(log(4.0 * ExpIncr) / (ExpIncr - 1.0)); 1586 #endif 1587 1588 #ifdef BECtl 1589 bectl(bcompact, bexpand, bshrink, (bufsize) ExpIncr, &mypoolset); 1590 bp = mymalloc(ExpIncr); 1591 assert(bp != NULL); 1592 bpool((void *) bp, (bufsize) ExpIncr); 1593 #else 1594 bp = mymalloc(PoolSize); 1595 assert(bp != NULL); 1596 bpool((void *) bp, (bufsize) PoolSize, &mypoolset); 1597 #endif 1598 1599 stats("Create pool", &mypoolset); 1600 #ifdef BufValid 1601 V bpoolv((void *) bp); 1602 #endif 1603 #ifdef BufDump 1604 bpoold((void *) bp, dumpAlloc, dumpFree); 1605 #endif 1606 1607 for (i = 0; i < TestProg; i++) { 1608 char *cb; 1609 #ifdef UsingFloat 1610 bufsize bs = pow(x, (double) (myrand() & (ExpIncr - 1))); 1611 #else 1612 bufsize bs = (myrand() & (ExpIncr * 4 - 1)) / (1 << (myrand() & 0x7)); 1613 #endif 1614 bufsize align = 0; 1615 bufsize hdr_size = 0; 1616 1617 switch (rand() & 0x3) { 1618 case 1: 1619 align = 32; 1620 break; 1621 case 2: 1622 align = 64; 1623 break; 1624 case 3: 1625 align = 128; 1626 break; 1627 default: 1628 break; 1629 } 1630 1631 hdr_size = (rand() & 0x3) * BGET_HDR_QUANTUM; 1632 1633 assert(bs <= (((bufsize) 4) * ExpIncr)); 1634 bs = blimit(bs); 1635 if (myrand() & 0x400) { 1636 cb = (char *) bgetz(align, hdr_size, bs, &mypoolset); 1637 } else { 1638 cb = (char *) bget(align, hdr_size, bs, &mypoolset); 1639 } 1640 if (cb == NULL) { 1641 #ifdef EasyOut 1642 break; 1643 #else 1644 char *bc = bchain; 1645 1646 if (bc != NULL) { 1647 char *fb; 1648 1649 fb = *((char **) bc); 1650 if (fb != NULL) { 1651 *((char **) bc) = *((char **) fb); 1652 brel((void *) fb, &mypoolset, true/*wipe*/); 1653 } 1654 } 1655 continue; 1656 #endif 1657 } 1658 assert(!align || !(((unsigned long)cb + hdr_size) & (align - 1))); 1659 *((char **) cb) = (char *) bchain; 1660 bchain = cb; 1661 1662 /* Based on a random cast, release a random buffer in the list 1663 of allocated buffers. 
*/ 1664 1665 if ((myrand() & 0x10) == 0) { 1666 char *bc = bchain; 1667 int j = myrand() & 0x3; 1668 1669 while (j > 0 && bc != NULL) { 1670 bc = *((char **) bc); 1671 j--; 1672 } 1673 if (bc != NULL) { 1674 char *fb; 1675 1676 fb = *((char **) bc); 1677 if (fb != NULL) { 1678 *((char **) bc) = *((char **) fb); 1679 brel((void *) fb, &mypoolset, true/*wipe*/); 1680 } 1681 } 1682 } 1683 1684 /* Based on a random cast, reallocate a random buffer in the list 1685 to a random size */ 1686 1687 if ((myrand() & 0x20) == 0) { 1688 char *bc = bchain; 1689 int j = myrand() & 0x3; 1690 1691 while (j > 0 && bc != NULL) { 1692 bc = *((char **) bc); 1693 j--; 1694 } 1695 if (bc != NULL) { 1696 char *fb; 1697 1698 fb = *((char **) bc); 1699 if (fb != NULL) { 1700 char *newb; 1701 1702 #ifdef UsingFloat 1703 bs = pow(x, (double) (myrand() & (ExpIncr - 1))); 1704 #else 1705 bs = (rand() & (ExpIncr * 4 - 1)) / (1 << (rand() & 0x7)); 1706 #endif 1707 bs = blimit(bs); 1708 #ifdef BECtl 1709 protect = 1; /* Protect against compaction */ 1710 #endif 1711 newb = (char *) bgetr((void *) fb, align, hdr_size, bs, &mypoolset); 1712 #ifdef BECtl 1713 protect = 0; 1714 #endif 1715 if (newb != NULL) { 1716 assert(!align || !(((unsigned long)newb + hdr_size) & 1717 (align - 1))); 1718 *((char **) bc) = newb; 1719 } 1720 } 1721 } 1722 } 1723 } 1724 stats("\nAfter allocation", &mypoolset); 1725 if (bp != NULL) { 1726 #ifdef BufValid 1727 V bpoolv((void *) bp); 1728 #endif 1729 #ifdef BufDump 1730 bpoold((void *) bp, dumpAlloc, dumpFree); 1731 #endif 1732 } 1733 1734 while (bchain != NULL) { 1735 char *buf = bchain; 1736 1737 bchain = *((char **) buf); 1738 brel((void *) buf, &mypoolset, true/*wipe*/); 1739 } 1740 stats("\nAfter release", &mypoolset); 1741 #ifndef BECtl 1742 if (bp != NULL) { 1743 #ifdef BufValid 1744 V bpoolv((void *) bp); 1745 #endif 1746 #ifdef BufDump 1747 bpoold((void *) bp, dumpAlloc, dumpFree); 1748 #endif 1749 } 1750 #endif 1751 1752 return 0; 1753 } 1754 #endif 1755
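/*
 * Illustration only: one way to drive the built-in test program above.
 * It assumes this file was built outside the kernel and ldelf
 * environments with CFG_TA_BGET_TEST set, so that bget_main_test() is
 * compiled in, and that a hosted malloc()/free() pair is available to
 * supply the test's backing storage.
 *
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		return bget_main_test(malloc, free);
 *	}
 */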