1 /* 2 3 B G E T 4 5 Buffer allocator 6 7 Designed and implemented in April of 1972 by John Walker, based on the 8 Case Algol OPRO$ algorithm implemented in 1966. 9 10 Reimplemented in 1975 by John Walker for the Interdata 70. 11 Reimplemented in 1977 by John Walker for the Marinchip 9900. 12 Reimplemented in 1982 by Duff Kurland for the Intel 8080. 13 14 Portable C version implemented in September of 1990 by an older, wiser 15 instance of the original implementor. 16 17 Souped up and/or weighed down slightly shortly thereafter by Greg 18 Lutz. 19 20 AMIX edition, including the new compaction call-back option, prepared 21 by John Walker in July of 1992. 22 23 Bug in built-in test program fixed, ANSI compiler warnings eradicated, 24 buffer pool validator implemented, and guaranteed repeatable test 25 added by John Walker in October of 1995. 26 27 This program is in the public domain. 28 29 1. This is the book of the generations of Adam. In the day that God 30 created man, in the likeness of God made he him; 31 2. Male and female created he them; and blessed them, and called 32 their name Adam, in the day when they were created. 33 3. And Adam lived an hundred and thirty years, and begat a son in 34 his own likeness, and after his image; and called his name Seth: 35 4. And the days of Adam after he had begotten Seth were eight 36 hundred years: and he begat sons and daughters: 37 5. And all the days that Adam lived were nine hundred and thirty 38 years: and he died. 39 6. And Seth lived an hundred and five years, and begat Enos: 40 7. And Seth lived after he begat Enos eight hundred and seven years, 41 and begat sons and daughters: 42 8. And all the days of Seth were nine hundred and twelve years: and 43 he died. 44 9. And Enos lived ninety years, and begat Cainan: 45 10. And Enos lived after he begat Cainan eight hundred and fifteen 46 years, and begat sons and daughters: 47 11. And all the days of Enos were nine hundred and five years: and 48 he died. 49 12. And Cainan lived seventy years and begat Mahalaleel: 50 13. And Cainan lived after he begat Mahalaleel eight hundred and 51 forty years, and begat sons and daughters: 52 14. And all the days of Cainan were nine hundred and ten years: and 53 he died. 54 15. And Mahalaleel lived sixty and five years, and begat Jared: 55 16. And Mahalaleel lived after he begat Jared eight hundred and 56 thirty years, and begat sons and daughters: 57 17. And all the days of Mahalaleel were eight hundred ninety and 58 five years: and he died. 59 18. And Jared lived an hundred sixty and two years, and he begat 60 Enoch: 61 19. And Jared lived after he begat Enoch eight hundred years, and 62 begat sons and daughters: 63 20. And all the days of Jared were nine hundred sixty and two years: 64 and he died. 65 21. And Enoch lived sixty and five years, and begat Methuselah: 66 22. And Enoch walked with God after he begat Methuselah three 67 hundred years, and begat sons and daughters: 68 23. And all the days of Enoch were three hundred sixty and five 69 years: 70 24. And Enoch walked with God: and he was not; for God took him. 71 25. And Methuselah lived an hundred eighty and seven years, and 72 begat Lamech. 73 26. And Methuselah lived after he begat Lamech seven hundred eighty 74 and two years, and begat sons and daughters: 75 27. And all the days of Methuselah were nine hundred sixty and nine 76 years: and he died. 77 28. And Lamech lived an hundred eighty and two years, and begat a 78 son: 79 29. 
And he called his name Noah, saying, This same shall comfort us 80 concerning our work and toil of our hands, because of the ground 81 which the LORD hath cursed. 82 30. And Lamech lived after he begat Noah five hundred ninety and 83 five years, and begat sons and daughters: 84 31. And all the days of Lamech were seven hundred seventy and seven 85 years: and he died. 86 32. And Noah was five hundred years old: and Noah begat Shem, Ham, 87 and Japheth. 88 89 And buffers begat buffers, and links begat links, and buffer pools 90 begat links to chains of buffer pools containing buffers, and lo the 91 buffers and links and pools of buffers and pools of links to chains of 92 pools of buffers were fruitful and they multiplied and the Operating 93 System looked down upon them and said that it was Good. 94 95 96 INTRODUCTION 97 ============ 98 99 BGET is a comprehensive memory allocation package which is easily 100 configured to the needs of an application. BGET is efficient in 101 both the time needed to allocate and release buffers and in the 102 memory overhead required for buffer pool management. It 103 automatically consolidates contiguous space to minimise 104 fragmentation. BGET is configured by compile-time definitions, 105 Major options include: 106 107 * A built-in test program to exercise BGET and 108 demonstrate how the various functions are used. 109 110 * Allocation by either the "first fit" or "best fit" 111 method. 112 113 * Wiping buffers at release time to catch code which 114 references previously released storage. 115 116 * Built-in routines to dump individual buffers or the 117 entire buffer pool. 118 119 * Retrieval of allocation and pool size statistics. 120 121 * Quantisation of buffer sizes to a power of two to 122 satisfy hardware alignment constraints. 123 124 * Automatic pool compaction, growth, and shrinkage by 125 means of call-backs to user defined functions. 126 127 Applications of BGET can range from storage management in 128 ROM-based embedded programs to providing the framework upon which 129 a multitasking system incorporating garbage collection is 130 constructed. BGET incorporates extensive internal consistency 131 checking using the <assert.h> mechanism; all these checks can be 132 turned off by compiling with NDEBUG defined, yielding a version of 133 BGET with minimal size and maximum speed. 134 135 The basic algorithm underlying BGET has withstood the test of 136 time; more than 25 years have passed since the first 137 implementation of this code. And yet, it is substantially more 138 efficient than the native allocation schemes of many operating 139 systems: the Macintosh and Microsoft Windows to name two, on which 140 programs have obtained substantial speed-ups by layering BGET as 141 an application level memory manager atop the underlying system's. 142 143 BGET has been implemented on the largest mainframes and the lowest 144 of microprocessors. It has served as the core for multitasking 145 operating systems, multi-thread applications, embedded software in 146 data network switching processors, and a host of C programs. And 147 while it has accreted flexibility and additional options over the 148 years, it remains fast, memory efficient, portable, and easy to 149 integrate into your program. 150 151 152 BGET IMPLEMENTATION ASSUMPTIONS 153 =============================== 154 155 BGET is written in as portable a dialect of C as possible. 
    The only fundamental assumption about the underlying hardware
    architecture is that memory is allocated as a linear array which
    can be addressed as a vector of C "char" objects.  On segmented
    address space architectures, this generally means that BGET should
    be used to allocate storage within a single segment (although some
    compilers simulate linear address spaces on segmented
    architectures).  On segmented architectures, then, BGET buffer
    pools may not be larger than a segment, but since BGET allows any
    number of separate buffer pools, there is no limit on the total
    storage which can be managed, only on the largest individual
    object which can be allocated.  Machines with a linear address
    architecture, such as the VAX, 680x0, Sparc, MIPS, or the Intel
    80386 and above in native mode, may use BGET without restriction.


    GETTING STARTED WITH BGET
    =========================

    Although BGET can be configured in a multitude of fashions, there
    are three basic ways of working with BGET.  The functions
    mentioned below are documented in the following section.  Please
    excuse the forward references which are made in the interest of
    providing a roadmap to guide you to the BGET functions you're
    likely to need.

    Embedded Applications
    ---------------------

    Embedded applications typically have a fixed area of memory
    dedicated to buffer allocation (often in a separate RAM address
    space distinct from the ROM that contains the executable code).
    To use BGET in such an environment, simply call bpool() with the
    start address and length of the buffer pool area in RAM, then
    allocate buffers with bget() and release them with brel().
    Embedded applications with very limited RAM but abundant CPU speed
    may benefit by configuring BGET for BestFit allocation (which is
    usually not worth it in other environments).

    Malloc() Emulation
    ------------------

    If the C library malloc() function is too slow, not present in
    your development environment (for example, in a native Windows or
    Macintosh program), or otherwise unsuitable, you can replace it
    with BGET.  Initially define a buffer pool of an appropriate size
    with bpool()--usually obtained by making a call to the operating
    system's low-level memory allocator.  Then allocate buffers with
    bget(), bgetz(), and bgetr() (the last two permit the allocation
    of buffers initialised to zero and [inefficient] re-allocation of
    existing buffers for compatibility with C library functions).
    Release buffers by calling brel().  If a buffer allocation request
    fails, obtain more storage from the underlying operating system,
    add it to the buffer pool by another call to bpool(), and continue
    execution.

    Automatic Storage Management
    ----------------------------

    You can use BGET as your application's native memory manager and
    implement automatic storage pool expansion, contraction, and
    optionally application-specific memory compaction by compiling
    BGET with the BECtl variable defined, then calling bectl() and
    supplying functions for storage compaction, acquisition, and
    release, as well as a standard pool expansion increment.  All of
    these functions are optional (although it doesn't make much sense
    to provide a release function without an acquisition function,
    does it?).
Once the call-back functions have been defined with 223 bectl(), you simply use bget() and brel() to allocate and release 224 storage as before. You can supply an initial buffer pool with 225 bpool() or rely on automatic allocation to acquire the entire 226 pool. When a call on bget() cannot be satisfied, BGET first 227 checks if a compaction function has been supplied. If so, it is 228 called (with the space required to satisfy the allocation request 229 and a sequence number to allow the compaction routine to be called 230 successively without looping). If the compaction function is able 231 to free any storage (it needn't know whether the storage it freed 232 was adequate) it should return a nonzero value, whereupon BGET 233 will retry the allocation request and, if it fails again, call the 234 compaction function again with the next-higher sequence number. 235 236 If the compaction function returns zero, indicating failure to 237 free space, or no compaction function is defined, BGET next tests 238 whether a non-NULL allocation function was supplied to bectl(). 239 If so, that function is called with an argument indicating how 240 many bytes of additional space are required. This will be the 241 standard pool expansion increment supplied in the call to bectl() 242 unless the original bget() call requested a buffer larger than 243 this; buffers larger than the standard pool block can be managed 244 "off the books" by BGET in this mode. If the allocation function 245 succeeds in obtaining the storage, it returns a pointer to the new 246 block and BGET expands the buffer pool; if it fails, the 247 allocation request fails and returns NULL to the caller. If a 248 non-NULL release function is supplied, expansion blocks which 249 become totally empty are released to the global free pool by 250 passing their addresses to the release function. 251 252 Equipped with appropriate allocation, release, and compaction 253 functions, BGET can be used as part of very sophisticated memory 254 management strategies, including garbage collection. (Note, 255 however, that BGET is *not* a garbage collector by itself, and 256 that developing such a system requires much additional logic and 257 careful design of the application's memory allocation strategy.) 258 259 260 BGET FUNCTION DESCRIPTIONS 261 ========================== 262 263 Functions implemented in this file (some are enabled by certain of 264 the optional settings below): 265 266 void bpool(void *buffer, bufsize len); 267 268 Create a buffer pool of <len> bytes, using the storage starting at 269 <buffer>. You can call bpool() subsequently to contribute 270 additional storage to the overall buffer pool. 271 272 void *bget(bufsize size); 273 274 Allocate a buffer of <size> bytes. The address of the buffer is 275 returned, or NULL if insufficient memory was available to allocate 276 the buffer. 277 278 void *bgetz(bufsize size); 279 280 Allocate a buffer of <size> bytes and clear it to all zeroes. The 281 address of the buffer is returned, or NULL if insufficient memory 282 was available to allocate the buffer. 283 284 void *bgetr(void *buffer, bufsize newsize); 285 286 Reallocate a buffer previously allocated by bget(), changing its 287 size to <newsize> and preserving all existing data. NULL is 288 returned if insufficient memory is available to reallocate the 289 buffer, in which case the original buffer remains intact. 290 291 void brel(void *buf); 292 293 Return the buffer <buf>, previously allocated by bget(), to the 294 free space pool. 
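    As a usage sketch (an illustrative addition, not part of the
    original interface description): a program with a dedicated
    storage arena might combine the basic calls above as follows.  The
    names and sizes here are made up, and the calls follow the classic
    single-pool prototypes shown in this section; note that the
    allocation and release functions actually defined later in this
    file take an additional trailing "struct bpoolset *" argument
    identifying the pool to operate on, so adjust accordingly when
    calling that variant.

        static char arena[65536];

        void arena_example(void)
        {
            void *p;

            bpool((void *) arena, (bufsize) sizeof(arena));

            p = bget(1024);
            if (p != NULL) {
                brel(p);
            }
        }

    A failed bget() simply returns NULL; the application can then add
    more storage with another bpool() call (or give up), as described
    under "Malloc() Emulation" above.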
295 296 void bectl(int (*compact)(bufsize sizereq, int sequence), 297 void *(*acquire)(bufsize size), 298 void (*release)(void *buf), 299 bufsize pool_incr); 300 301 Expansion control: specify functions through which the package may 302 compact storage (or take other appropriate action) when an 303 allocation request fails, and optionally automatically acquire 304 storage for expansion blocks when necessary, and release such 305 blocks when they become empty. If <compact> is non-NULL, whenever 306 a buffer allocation request fails, the <compact> function will be 307 called with arguments specifying the number of bytes (total buffer 308 size, including header overhead) required to satisfy the 309 allocation request, and a sequence number indicating the number of 310 consecutive calls on <compact> attempting to satisfy this 311 allocation request. The sequence number is 1 for the first call 312 on <compact> for a given allocation request, and increments on 313 subsequent calls, permitting the <compact> function to take 314 increasingly dire measures in an attempt to free up storage. If 315 the <compact> function returns a nonzero value, the allocation 316 attempt is re-tried. If <compact> returns 0 (as it must if it 317 isn't able to release any space or add storage to the buffer 318 pool), the allocation request fails, which can trigger automatic 319 pool expansion if the <acquire> argument is non-NULL. At the time 320 the <compact> function is called, the state of the buffer 321 allocator is identical to that at the moment the allocation 322 request was made; consequently, the <compact> function may call 323 brel(), bpool(), bstats(), and/or directly manipulate the buffer 324 pool in any manner which would be valid were the application in 325 control. This does not, however, relieve the <compact> function 326 of the need to ensure that whatever actions it takes do not change 327 things underneath the application that made the allocation 328 request. For example, a <compact> function that released a buffer 329 in the process of being reallocated with bgetr() would lead to 330 disaster. Implementing a safe and effective <compact> mechanism 331 requires careful design of an application's memory architecture, 332 and cannot generally be easily retrofitted into existing code. 333 334 If <acquire> is non-NULL, that function will be called whenever an 335 allocation request fails. If the <acquire> function succeeds in 336 allocating the requested space and returns a pointer to the new 337 area, allocation will proceed using the expanded buffer pool. If 338 <acquire> cannot obtain the requested space, it should return NULL 339 and the entire allocation process will fail. <pool_incr> 340 specifies the normal expansion block size. Providing an <acquire> 341 function will cause subsequent bget() requests for buffers too 342 large to be managed in the linked-block scheme (in other words, 343 larger than <pool_incr> minus the buffer overhead) to be satisfied 344 directly by calls to the <acquire> function. Automatic release of 345 empty pool blocks will occur only if all pool blocks in the system 346 are the size given by <pool_incr>. 347 348 void bstats(bufsize *curalloc, bufsize *totfree, 349 bufsize *maxfree, long *nget, long *nrel); 350 351 The amount of space currently allocated is stored into the 352 variable pointed to by <curalloc>. 
        The total free space (sum of
        all free blocks in the pool) is stored into the variable pointed
        to by <totfree>, and the size of the largest single block in the
        free space pool is stored into the variable pointed to by
        <maxfree>.  The variables pointed to by <nget> and <nrel> are
        filled, respectively, with the number of successful (non-NULL
        return) bget() calls and the number of brel() calls.

    void bstatse(bufsize *pool_incr, long *npool,
                 long *npget, long *nprel,
                 long *ndget, long *ndrel);

        Extended statistics: The expansion block size will be stored into
        the variable pointed to by <pool_incr>, or the negative thereof if
        automatic expansion block releases are disabled.  The number of
        currently active pool blocks will be stored into the variable
        pointed to by <npool>.  The variables pointed to by <npget> and
        <nprel> will be filled with, respectively, the number of expansion
        block acquisitions and releases which have occurred.  The
        variables pointed to by <ndget> and <ndrel> will be filled with
        the number of bget() and brel() calls, respectively, managed
        through blocks directly allocated by the acquisition and release
        functions.

    void bufdump(void *buf);

        The buffer pointed to by <buf> is dumped on standard output.

    void bpoold(void *pool, int dumpalloc, int dumpfree);

        All buffers in the buffer pool <pool>, previously initialised by a
        call on bpool(), are listed in ascending memory address order.  If
        <dumpalloc> is nonzero, the contents of allocated buffers are
        dumped; if <dumpfree> is nonzero, the contents of free blocks are
        dumped.

    int bpoolv(void *pool);

        The named buffer pool, previously initialised by a call on
        bpool(), is validated for bad pointers, overwritten data, etc.  If
        compiled with NDEBUG not defined, any error generates an assertion
        failure.  Otherwise 1 is returned if the pool is valid, 0 if an
        error is found.


    BGET CONFIGURATION
    ==================
*/

/*
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ST BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_ALL_OPTIONS
#define TestProg    20000             /* Generate built-in test program
                                         if defined.  The value specifies
                                         how many buffer allocation attempts
                                         the test program should make. */

#define SizeQuant   4                 /* Buffer allocation size quantum:
                                         all buffers allocated are a
                                         multiple of this size.  This
                                         MUST be a power of two. */

#define BufDump     1                 /* Define this symbol to enable the
                                         bpoold() function which dumps the
                                         buffers in a buffer pool. */

#define BufValid    1                 /* Define this symbol to enable the
                                         bpoolv() function for validating
                                         a buffer pool.
*/ 433 434 #define DumpData 1 /* Define this symbol to enable the 435 bufdump() function which allows 436 dumping the contents of an allocated 437 or free buffer. */ 438 439 #define BufStats 1 /* Define this symbol to enable the 440 bstats() function which calculates 441 the total free space in the buffer 442 pool, the largest available 443 buffer, and the total space 444 currently allocated. */ 445 446 #define FreeWipe 1 /* Wipe free buffers to a guaranteed 447 pattern of garbage to trip up 448 miscreants who attempt to use 449 pointers into released buffers. */ 450 451 #define BestFit 1 /* Use a best fit algorithm when 452 searching for space for an 453 allocation request. This uses 454 memory more efficiently, but 455 allocation will be much slower. */ 456 457 #define BECtl 1 /* Define this symbol to enable the 458 bectl() function for automatic 459 pool space control. */ 460 #endif 461 462 #include <stdio.h> 463 464 #ifdef lint 465 #define NDEBUG /* Exits in asserts confuse lint */ 466 /* LINTLIBRARY */ /* Don't complain about def, no ref */ 467 extern char *sprintf(); /* Sun includes don't define sprintf */ 468 #endif 469 470 #include <assert.h> 471 #include <memory.h> 472 473 #ifdef BufDump /* BufDump implies DumpData */ 474 #ifndef DumpData 475 #define DumpData 1 476 #endif 477 #endif 478 479 #ifdef DumpData 480 #include <ctype.h> 481 #endif 482 483 #ifdef __KERNEL__ 484 #ifdef CFG_CORE_BGET_BESTFIT 485 #define BestFit 1 486 #endif 487 #endif 488 489 /* Declare the interface, including the requested buffer size type, 490 bufsize. */ 491 492 #include "bget.h" 493 494 #define MemSize int /* Type for size arguments to memxxx() 495 functions such as memcmp(). */ 496 497 /* Queue links */ 498 499 struct qlinks { 500 struct bfhead *flink; /* Forward link */ 501 struct bfhead *blink; /* Backward link */ 502 }; 503 504 /* Header in allocated and free buffers */ 505 506 struct bhead { 507 bufsize prevfree; /* Relative link back to previous 508 free buffer in memory or 0 if 509 previous buffer is allocated. */ 510 bufsize bsize; /* Buffer size: positive if free, 511 negative if allocated. 
*/ 512 }; 513 #define BH(p) ((struct bhead *) (p)) 514 515 /* Header in directly allocated buffers (by acqfcn) */ 516 517 struct bdhead { 518 bufsize tsize; /* Total size, including overhead */ 519 struct bhead bh; /* Common header */ 520 }; 521 #define BDH(p) ((struct bdhead *) (p)) 522 523 /* Header in free buffers */ 524 525 struct bfhead { 526 struct bhead bh; /* Common allocated/free header */ 527 struct qlinks ql; /* Links on free list */ 528 }; 529 #define BFH(p) ((struct bfhead *) (p)) 530 531 /* Poolset definition */ 532 struct bpoolset { 533 struct bfhead freelist; 534 #ifdef BufStats 535 bufsize totalloc; /* Total space currently allocated */ 536 long numget; /* Number of bget() calls */ 537 long numrel; /* Number of brel() calls */ 538 #ifdef BECtl 539 long numpblk; /* Number of pool blocks */ 540 long numpget; /* Number of block gets and rels */ 541 long numprel; 542 long numdget; /* Number of direct gets and rels */ 543 long numdrel; 544 #endif /* BECtl */ 545 #endif /* BufStats */ 546 547 #ifdef BECtl 548 /* Automatic expansion block management functions */ 549 550 int (*compfcn) _((bufsize sizereq, int sequence)); 551 void *(*acqfcn) _((bufsize size)); 552 void (*relfcn) _((void *buf)); 553 554 bufsize exp_incr; /* Expansion block size */ 555 bufsize pool_len; /* 0: no bpool calls have been made 556 -1: not all pool blocks are 557 the same size 558 >0: (common) block size for all 559 bpool calls made so far 560 */ 561 #endif 562 }; 563 564 /* Minimum allocation quantum: */ 565 566 #define QLSize (sizeof(struct qlinks)) 567 #define SizeQ ((SizeQuant > QLSize) ? SizeQuant : QLSize) 568 569 #define V (void) /* To denote unwanted returned values */ 570 571 /* End sentinel: value placed in bsize field of dummy block delimiting 572 end of pool block. The most negative number which will fit in a 573 bufsize, defined in a way that the compiler will accept. */ 574 575 #define ESent ((bufsize) (-(((1L << (sizeof(bufsize) * 8 - 2)) - 1) * 2) - 2)) 576 577 /* BGET -- Allocate a buffer. */ 578 579 void *bget(requested_size, poolset) 580 bufsize requested_size; 581 struct bpoolset *poolset; 582 { 583 bufsize size = requested_size; 584 struct bfhead *b; 585 #ifdef BestFit 586 struct bfhead *best; 587 #endif 588 void *buf; 589 #ifdef BECtl 590 int compactseq = 0; 591 #endif 592 593 assert(size > 0); 594 595 if (size < SizeQ) { /* Need at least room for the */ 596 size = SizeQ; /* queue links. */ 597 } 598 #ifdef SizeQuant 599 #if SizeQuant > 1 600 size = (size + (SizeQuant - 1)) & (~(SizeQuant - 1)); 601 #endif 602 #endif 603 604 size += sizeof(struct bhead); /* Add overhead in allocated buffer 605 to size required. */ 606 607 #ifdef BECtl 608 /* If a compact function was provided in the call to bectl(), wrap 609 a loop around the allocation process to allow compaction to 610 intervene in case we don't find a suitable buffer in the chain. */ 611 612 while (1) { 613 #endif 614 b = poolset->freelist.ql.flink; 615 #ifdef BestFit 616 best = &poolset->freelist; 617 #endif 618 619 620 /* Scan the free list searching for the first buffer big enough 621 to hold the requested size buffer. 
*/ 622 623 #ifdef BestFit 624 while (b != &poolset->freelist) { 625 if (b->bh.bsize >= size) { 626 if ((best == &poolset->freelist) || 627 (b->bh.bsize < best->bh.bsize)) { 628 best = b; 629 } 630 } 631 b = b->ql.flink; /* Link to next buffer */ 632 } 633 b = best; 634 #endif /* BestFit */ 635 636 while (b != &poolset->freelist) { 637 if ((bufsize) b->bh.bsize >= size) { 638 639 /* Buffer is big enough to satisfy the request. Allocate it 640 to the caller. We must decide whether the buffer is large 641 enough to split into the part given to the caller and a 642 free buffer that remains on the free list, or whether the 643 entire buffer should be removed from the free list and 644 given to the caller in its entirety. We only split the 645 buffer if enough room remains for a header plus the minimum 646 quantum of allocation. */ 647 648 if ((b->bh.bsize - size) > (SizeQ + (sizeof(struct bhead)))) { 649 struct bhead *ba, *bn; 650 651 ba = BH(((char *) b) + (b->bh.bsize - size)); 652 bn = BH(((char *) ba) + size); 653 assert(bn->prevfree == b->bh.bsize); 654 /* Subtract size from length of free block. */ 655 b->bh.bsize -= size; 656 /* Link allocated buffer to the previous free buffer. */ 657 ba->prevfree = b->bh.bsize; 658 /* Plug negative size into user buffer. */ 659 ba->bsize = -(bufsize) size; 660 /* Mark buffer after this one not preceded by free block. */ 661 bn->prevfree = 0; 662 663 #ifdef BufStats 664 poolset->totalloc += size; 665 poolset->numget++; /* Increment number of bget() calls */ 666 #endif 667 buf = (void *) ((((char *) ba) + sizeof(struct bhead))); 668 tag_asan_alloced(buf, size); 669 return buf; 670 } else { 671 struct bhead *ba; 672 673 ba = BH(((char *) b) + b->bh.bsize); 674 assert(ba->prevfree == b->bh.bsize); 675 676 /* The buffer isn't big enough to split. Give the whole 677 shebang to the caller and remove it from the free list. */ 678 679 assert(b->ql.blink->ql.flink == b); 680 assert(b->ql.flink->ql.blink == b); 681 b->ql.blink->ql.flink = b->ql.flink; 682 b->ql.flink->ql.blink = b->ql.blink; 683 684 #ifdef BufStats 685 poolset->totalloc += b->bh.bsize; 686 poolset->numget++; /* Increment number of bget() calls */ 687 #endif 688 /* Negate size to mark buffer allocated. */ 689 b->bh.bsize = -(b->bh.bsize); 690 691 /* Zero the back pointer in the next buffer in memory 692 to indicate that this buffer is allocated. */ 693 ba->prevfree = 0; 694 695 /* Give user buffer starting at queue links. */ 696 buf = (void *) &(b->ql); 697 tag_asan_alloced(buf, size); 698 return buf; 699 } 700 } 701 b = b->ql.flink; /* Link to next buffer */ 702 } 703 #ifdef BECtl 704 705 /* We failed to find a buffer. If there's a compact function 706 defined, notify it of the size requested. If it returns 707 TRUE, try the allocation again. */ 708 709 if ((poolset->compfcn == NULL) || 710 (!(poolset->compfcn)(size, ++compactseq))) { 711 break; 712 } 713 } 714 715 /* No buffer available with requested size free. */ 716 717 /* Don't give up yet -- look in the reserve supply. */ 718 719 if (poolset->acqfcn != NULL) { 720 if (size > exp_incr - sizeof(struct bhead)) { 721 722 /* Request is too large to fit in a single expansion 723 block. Try to satisy it by a direct buffer acquisition. */ 724 725 struct bdhead *bdh; 726 727 size += sizeof(struct bdhead) - sizeof(struct bhead); 728 if ((bdh = BDH((*acqfcn)((bufsize) size))) != NULL) { 729 730 /* Mark the buffer special by setting the size field 731 of its header to zero. 
*/ 732 bdh->bh.bsize = 0; 733 bdh->bh.prevfree = 0; 734 bdh->tsize = size; 735 #ifdef BufStats 736 poolset->totalloc += size; 737 poolset->numget++; /* Increment number of bget() calls */ 738 poolset->numdget++; /* Direct bget() call count */ 739 #endif 740 buf = (void *) (bdh + 1); 741 tag_asan_alloced(buf, size); 742 return buf; 743 } 744 745 } else { 746 747 /* Try to obtain a new expansion block */ 748 749 void *newpool; 750 751 if ((newpool = poolset->acqfcn((bufsize) exp_incr)) != NULL) { 752 bpool(newpool, exp_incr, poolset); 753 buf = bget(requested_size, pool); /* This can't, I say, can't 754 get into a loop. */ 755 return buf; 756 } 757 } 758 } 759 760 /* Still no buffer available */ 761 762 #endif /* BECtl */ 763 764 return NULL; 765 } 766 767 /* BGETZ -- Allocate a buffer and clear its contents to zero. We clear 768 the entire contents of the buffer to zero, not just the 769 region requested by the caller. */ 770 771 void *bgetz(size, poolset) 772 bufsize size; 773 struct bpoolset *poolset; 774 { 775 char *buf = (char *) bget(size, poolset); 776 777 if (buf != NULL) { 778 struct bhead *b; 779 bufsize rsize; 780 781 b = BH(buf - sizeof(struct bhead)); 782 rsize = -(b->bsize); 783 if (rsize == 0) { 784 struct bdhead *bd; 785 786 bd = BDH(buf - sizeof(struct bdhead)); 787 rsize = bd->tsize - sizeof(struct bdhead); 788 } else { 789 rsize -= sizeof(struct bhead); 790 } 791 assert(rsize >= size); 792 V memset_unchecked(buf, 0, (MemSize) rsize); 793 } 794 return ((void *) buf); 795 } 796 797 /* BGETR -- Reallocate a buffer. This is a minimal implementation, 798 simply in terms of brel() and bget(). It could be 799 enhanced to allow the buffer to grow into adjacent free 800 blocks and to avoid moving data unnecessarily. */ 801 802 void *bgetr(buf, size, poolset) 803 void *buf; 804 bufsize size; 805 struct bpoolset *poolset; 806 { 807 void *nbuf; 808 bufsize osize; /* Old size of buffer */ 809 struct bhead *b; 810 811 if ((nbuf = bget(size, poolset)) == NULL) { /* Acquire new buffer */ 812 return NULL; 813 } 814 if (buf == NULL) { 815 return nbuf; 816 } 817 b = BH(((char *) buf) - sizeof(struct bhead)); 818 osize = -b->bsize; 819 #ifdef BECtl 820 if (osize == 0) { 821 /* Buffer acquired directly through acqfcn. */ 822 struct bdhead *bd; 823 824 bd = BDH(((char *) buf) - sizeof(struct bdhead)); 825 osize = bd->tsize - sizeof(struct bdhead); 826 } else 827 #endif 828 osize -= sizeof(struct bhead); 829 assert(osize > 0); 830 V memcpy((char *) nbuf, (char *) buf, /* Copy the data */ 831 (MemSize) ((size < osize) ? size : osize)); 832 #ifndef __KERNEL__ 833 /* User space reallocations are always zeroed */ 834 if (size > osize) 835 V memset((char *) nbuf + osize, 0, size - osize); 836 #endif 837 brel(buf, poolset); 838 return nbuf; 839 } 840 841 /* BREL -- Release a buffer. */ 842 843 void brel(buf, poolset) 844 void *buf; 845 struct bpoolset *poolset; 846 { 847 struct bfhead *b, *bn; 848 bufsize bs; 849 850 b = BFH(((char *) buf) - sizeof(struct bhead)); 851 #ifdef BufStats 852 poolset->numrel++; /* Increment number of brel() calls */ 853 #endif 854 assert(buf != NULL); 855 856 #ifdef BECtl 857 if (b->bh.bsize == 0) { /* Directly-acquired buffer? 
*/ 858 struct bdhead *bdh; 859 860 bdh = BDH(((char *) buf) - sizeof(struct bdhead)); 861 assert(b->bh.prevfree == 0); 862 #ifdef BufStats 863 poolset->totalloc -= bdh->tsize; 864 assert(poolset->totalloc >= 0); 865 poolset->numdrel++; /* Number of direct releases */ 866 #endif /* BufStats */ 867 #ifdef FreeWipe 868 V memset_unchecked((char *) buf, 0x55, 869 (MemSize) (bdh->tsize - sizeof(struct bdhead))); 870 #endif /* FreeWipe */ 871 bs = bdh->tsize - sizeof(struct bdhead); 872 assert(poolset->relfcn != NULL); 873 poolset->relfcn((void *) bdh); /* Release it directly. */ 874 tag_asan_free(buf, bs); 875 return; 876 } 877 #endif /* BECtl */ 878 879 /* Buffer size must be negative, indicating that the buffer is 880 allocated. */ 881 882 if (b->bh.bsize >= 0) { 883 bn = NULL; 884 } 885 assert(b->bh.bsize < 0); 886 bs = -b->bh.bsize; 887 888 /* Back pointer in next buffer must be zero, indicating the 889 same thing: */ 890 891 assert(BH((char *) b - b->bh.bsize)->prevfree == 0); 892 893 #ifdef BufStats 894 poolset->totalloc += b->bh.bsize; 895 assert(poolset->totalloc >= 0); 896 #endif 897 898 /* If the back link is nonzero, the previous buffer is free. */ 899 900 if (b->bh.prevfree != 0) { 901 902 /* The previous buffer is free. Consolidate this buffer with it 903 by adding the length of this buffer to the previous free 904 buffer. Note that we subtract the size in the buffer being 905 released, since it's negative to indicate that the buffer is 906 allocated. */ 907 908 register bufsize size = b->bh.bsize; 909 910 /* Make the previous buffer the one we're working on. */ 911 assert(BH((char *) b - b->bh.prevfree)->bsize == b->bh.prevfree); 912 b = BFH(((char *) b) - b->bh.prevfree); 913 b->bh.bsize -= size; 914 } else { 915 916 /* The previous buffer isn't allocated. Insert this buffer 917 on the free list as an isolated free block. */ 918 919 assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist); 920 assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist); 921 b->ql.flink = &poolset->freelist; 922 b->ql.blink = poolset->freelist.ql.blink; 923 poolset->freelist.ql.blink = b; 924 b->ql.blink->ql.flink = b; 925 b->bh.bsize = -b->bh.bsize; 926 } 927 928 /* Now we look at the next buffer in memory, located by advancing from 929 the start of this buffer by its size, to see if that buffer is 930 free. If it is, we combine this buffer with the next one in 931 memory, dechaining the second buffer from the free list. */ 932 933 bn = BFH(((char *) b) + b->bh.bsize); 934 if (bn->bh.bsize > 0) { 935 936 /* The buffer is free. Remove it from the free list and add 937 its size to that of our buffer. */ 938 939 assert(BH((char *) bn + bn->bh.bsize)->prevfree == bn->bh.bsize); 940 assert(bn->ql.blink->ql.flink == bn); 941 assert(bn->ql.flink->ql.blink == bn); 942 bn->ql.blink->ql.flink = bn->ql.flink; 943 bn->ql.flink->ql.blink = bn->ql.blink; 944 b->bh.bsize += bn->bh.bsize; 945 946 /* Finally, advance to the buffer that follows the newly 947 consolidated free block. We must set its backpointer to the 948 head of the consolidated free block. We know the next block 949 must be an allocated block because the process of recombination 950 guarantees that two free blocks will never be contiguous in 951 memory. */ 952 953 bn = BFH(((char *) b) + b->bh.bsize); 954 } 955 #ifdef FreeWipe 956 V memset_unchecked(((char *) b) + sizeof(struct bfhead), 0x55, 957 (MemSize) (b->bh.bsize - sizeof(struct bfhead))); 958 #endif 959 assert(bn->bh.bsize < 0); 960 961 /* The next buffer is allocated. 
   Set the backpointer in it to point
       to this buffer; the previous free buffer in memory. */

    bn->bh.prevfree = b->bh.bsize;

#ifdef BECtl

    /* If a block-release function is defined, and this free buffer
       constitutes the entire block, release it.  Note that pool_len
       is defined in such a way that the test will fail unless all
       pool blocks are the same size. */

    if (poolset->relfcn != NULL &&
        ((bufsize) b->bh.bsize) ==
        (poolset->pool_len - sizeof(struct bhead))) {

        assert(b->bh.prevfree == 0);
        assert(BH((char *) b + b->bh.bsize)->bsize == ESent);
        assert(BH((char *) b + b->bh.bsize)->prevfree == b->bh.bsize);
        /* Unlink the buffer from the free list */
        b->ql.blink->ql.flink = b->ql.flink;
        b->ql.flink->ql.blink = b->ql.blink;

        poolset->relfcn(b);
#ifdef BufStats
        poolset->numprel++;           /* Nr of expansion block releases */
        poolset->numpblk--;           /* Total number of blocks */
        assert(poolset->numpblk == poolset->numpget - poolset->numprel);
#endif /* BufStats */
    }
#endif /* BECtl */
    tag_asan_free(buf, bs);
}

#ifdef BECtl

/* BECTL -- Establish automatic pool expansion control */

void bectl(compact, acquire, release, pool_incr, poolset)
  int (*compact) _((bufsize sizereq, int sequence));
  void *(*acquire) _((bufsize size));
  void (*release) _((void *buf));
  bufsize pool_incr;
  struct bpoolset *poolset;
{
    poolset->compfcn = compact;
    poolset->acqfcn = acquire;
    poolset->relfcn = release;
    poolset->exp_incr = pool_incr;
}
#endif

/* BPOOL -- Add a region of memory to the buffer pool. */

void bpool(buf, len, poolset)
  void *buf;
  bufsize len;
  struct bpoolset *poolset;
{
    struct bfhead *b = BFH(buf);
    struct bhead *bn;

#ifdef SizeQuant
    len &= ~(SizeQuant - 1);
#endif
#ifdef BECtl
    if (poolset->pool_len == 0) {
        poolset->pool_len = len;
    } else if (len != poolset->pool_len) {
        poolset->pool_len = -1;
    }
#ifdef BufStats
    poolset->numpget++;               /* Number of block acquisitions */
    poolset->numpblk++;               /* Number of blocks total */
    assert(poolset->numpblk == poolset->numpget - poolset->numprel);
#endif /* BufStats */
#endif /* BECtl */

    /* Since the block is initially occupied by a single free buffer,
       it had better not be (much) larger than the largest buffer
       whose size we can store in bhead.bsize. */

    assert(len - sizeof(struct bhead) <= -((bufsize) ESent + 1));

    /* Clear the backpointer at the start of the block to indicate that
       there is no free block prior to this one.  That blocks
       recombination when the first block in memory is released. */

    b->bh.prevfree = 0;

    /* Chain the new block to the free list. */

    assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist);
    assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist);
    b->ql.flink = &poolset->freelist;
    b->ql.blink = poolset->freelist.ql.blink;
    poolset->freelist.ql.blink = b;
    b->ql.blink->ql.flink = b;

    /* Create a dummy allocated buffer at the end of the pool.  This dummy
       buffer is seen when a buffer at the end of the pool is released and
       blocks recombination of the last buffer with the dummy buffer at
       the end.
The length in the dummy buffer is set to the largest 1063 negative number to denote the end of the pool for diagnostic 1064 routines (this specific value is not counted on by the actual 1065 allocation and release functions). */ 1066 1067 len -= sizeof(struct bhead); 1068 b->bh.bsize = (bufsize) len; 1069 #ifdef FreeWipe 1070 V memset_unchecked(((char *) b) + sizeof(struct bfhead), 0x55, 1071 (MemSize) (len - sizeof(struct bfhead))); 1072 #endif 1073 bn = BH(((char *) b) + len); 1074 bn->prevfree = (bufsize) len; 1075 /* Definition of ESent assumes two's complement! */ 1076 assert((~0) == -1); 1077 bn->bsize = ESent; 1078 } 1079 1080 #ifdef BufStats 1081 1082 /* BSTATS -- Return buffer allocation free space statistics. */ 1083 1084 void bstats(curalloc, totfree, maxfree, nget, nrel, poolset) 1085 bufsize *curalloc, *totfree, *maxfree; 1086 long *nget, *nrel; 1087 struct bpoolset *poolset; 1088 { 1089 struct bfhead *b = poolset->freelist.ql.flink; 1090 1091 *nget = poolset->numget; 1092 *nrel = poolset->numrel; 1093 *curalloc = poolset->totalloc; 1094 *totfree = 0; 1095 *maxfree = -1; 1096 while (b != &poolset->freelist) { 1097 assert(b->bh.bsize > 0); 1098 *totfree += b->bh.bsize; 1099 if (b->bh.bsize > *maxfree) { 1100 *maxfree = b->bh.bsize; 1101 } 1102 b = b->ql.flink; /* Link to next buffer */ 1103 } 1104 } 1105 1106 #ifdef BECtl 1107 1108 /* BSTATSE -- Return extended statistics */ 1109 1110 void bstatse(pool_incr, npool, npget, nprel, ndget, ndrel, poolset) 1111 bufsize *pool_incr; 1112 long *npool, *npget, *nprel, *ndget, *ndrel; 1113 struct bpoolset *poolset; 1114 { 1115 *pool_incr = (poolset->pool_len < 0) ? 1116 -poolset->exp_incr : poolset->exp_incr; 1117 *npool = poolset->numpblk; 1118 *npget = poolset->numpget; 1119 *nprel = poolset->numprel; 1120 *ndget = poolset->numdget; 1121 *ndrel = poolset->numdrel; 1122 } 1123 #endif /* BECtl */ 1124 #endif /* BufStats */ 1125 1126 #ifdef DumpData 1127 1128 /* BUFDUMP -- Dump the data in a buffer. This is called with the user 1129 data pointer, and backs up to the buffer header. It will 1130 dump either a free block or an allocated one. */ 1131 1132 void bufdump(buf) 1133 void *buf; 1134 { 1135 struct bfhead *b; 1136 unsigned char *bdump; 1137 bufsize bdlen; 1138 1139 b = BFH(((char *) buf) - sizeof(struct bhead)); 1140 assert(b->bh.bsize != 0); 1141 if (b->bh.bsize < 0) { 1142 bdump = (unsigned char *) buf; 1143 bdlen = (-b->bh.bsize) - sizeof(struct bhead); 1144 } else { 1145 bdump = (unsigned char *) (((char *) b) + sizeof(struct bfhead)); 1146 bdlen = b->bh.bsize - sizeof(struct bfhead); 1147 } 1148 1149 while (bdlen > 0) { 1150 int i, dupes = 0; 1151 bufsize l = bdlen; 1152 char bhex[50], bascii[20]; 1153 1154 if (l > 16) { 1155 l = 16; 1156 } 1157 1158 for (i = 0; i < l; i++) { 1159 V snprintf(bhex + i * 3, sizeof(bhex) - i * 3, "%02X ", 1160 bdump[i]); 1161 bascii[i] = isprint(bdump[i]) ? bdump[i] : ' '; 1162 } 1163 bascii[i] = 0; 1164 V printf("%-48s %s\n", bhex, bascii); 1165 bdump += l; 1166 bdlen -= l; 1167 while ((bdlen > 16) && (memcmp((char *) (bdump - 16), 1168 (char *) bdump, 16) == 0)) { 1169 dupes++; 1170 bdump += 16; 1171 bdlen -= 16; 1172 } 1173 if (dupes > 1) { 1174 V printf( 1175 " (%d lines [%d bytes] identical to above line skipped)\n", 1176 dupes, dupes * 16); 1177 } else if (dupes == 1) { 1178 bdump -= 16; 1179 bdlen += 16; 1180 } 1181 } 1182 } 1183 #endif 1184 1185 #ifdef BufDump 1186 1187 /* BPOOLD -- Dump a buffer pool. The buffer headers are always listed. 
1188 If DUMPALLOC is nonzero, the contents of allocated buffers 1189 are dumped. If DUMPFREE is nonzero, free blocks are 1190 dumped as well. If FreeWipe checking is enabled, free 1191 blocks which have been clobbered will always be dumped. */ 1192 1193 void bpoold(buf, dumpalloc, dumpfree) 1194 void *buf; 1195 int dumpalloc, dumpfree; 1196 { 1197 struct bfhead *b = BFH(buf); 1198 1199 while (b->bh.bsize != ESent) { 1200 bufsize bs = b->bh.bsize; 1201 1202 if (bs < 0) { 1203 bs = -bs; 1204 V printf("Allocated buffer: size %6ld bytes.\n", (long) bs); 1205 if (dumpalloc) { 1206 bufdump((void *) (((char *) b) + sizeof(struct bhead))); 1207 } 1208 } else { 1209 char *lerr = ""; 1210 1211 assert(bs > 0); 1212 if ((b->ql.blink->ql.flink != b) || 1213 (b->ql.flink->ql.blink != b)) { 1214 lerr = " (Bad free list links)"; 1215 } 1216 V printf("Free block: size %6ld bytes.%s\n", 1217 (long) bs, lerr); 1218 #ifdef FreeWipe 1219 lerr = ((char *) b) + sizeof(struct bfhead); 1220 if ((bs > sizeof(struct bfhead)) && ((*lerr != 0x55) || 1221 (memcmp(lerr, lerr + 1, 1222 (MemSize) (bs - (sizeof(struct bfhead) + 1))) != 0))) { 1223 V printf( 1224 "(Contents of above free block have been overstored.)\n"); 1225 bufdump((void *) (((char *) b) + sizeof(struct bhead))); 1226 } else 1227 #endif 1228 if (dumpfree) { 1229 bufdump((void *) (((char *) b) + sizeof(struct bhead))); 1230 } 1231 } 1232 b = BFH(((char *) b) + bs); 1233 } 1234 } 1235 #endif /* BufDump */ 1236 1237 #ifdef BufValid 1238 1239 /* BPOOLV -- Validate a buffer pool. If NDEBUG isn't defined, 1240 any error generates an assertion failure. */ 1241 1242 int bpoolv(buf) 1243 void *buf; 1244 { 1245 struct bfhead *b = BFH(buf); 1246 1247 while (b->bh.bsize != ESent) { 1248 bufsize bs = b->bh.bsize; 1249 1250 if (bs < 0) { 1251 bs = -bs; 1252 } else { 1253 const char *lerr = ""; 1254 1255 assert(bs > 0); 1256 if (bs <= 0) { 1257 return 0; 1258 } 1259 if ((b->ql.blink->ql.flink != b) || 1260 (b->ql.flink->ql.blink != b)) { 1261 V printf("Free block: size %6ld bytes. (Bad free list links)\n", 1262 (long) bs); 1263 assert(0); 1264 return 0; 1265 } 1266 #ifdef FreeWipe 1267 lerr = ((char *) b) + sizeof(struct bfhead); 1268 if ((bs > sizeof(struct bfhead)) && ((*lerr != 0x55) || 1269 (memcmp(lerr, lerr + 1, 1270 (MemSize) (bs - (sizeof(struct bfhead) + 1))) != 0))) { 1271 V printf( 1272 "(Contents of above free block have been overstored.)\n"); 1273 bufdump((void *) (((char *) b) + sizeof(struct bhead))); 1274 assert(0); 1275 return 0; 1276 } 1277 #endif 1278 } 1279 b = BFH(((char *) b) + bs); 1280 } 1281 return 1; 1282 } 1283 #endif /* BufValid */ 1284 1285 /***********************\ 1286 * * 1287 * Built-in test program * 1288 * * 1289 \***********************/ 1290 1291 #ifdef TestProg 1292 1293 #define Repeatable 1 /* Repeatable pseudorandom sequence */ 1294 /* If Repeatable is not defined, a 1295 time-seeded pseudorandom sequence 1296 is generated, exercising BGET with 1297 a different pattern of calls on each 1298 run. */ 1299 #define OUR_RAND /* Use our own built-in version of 1300 rand() to guarantee the test is 1301 100% repeatable. */ 1302 1303 #ifdef BECtl 1304 #define PoolSize 300000 /* Test buffer pool size */ 1305 #else 1306 #define PoolSize 50000 /* Test buffer pool size */ 1307 #endif 1308 #define ExpIncr 32768 /* Test expansion block size */ 1309 #define CompactTries 10 /* Maximum tries at compacting */ 1310 1311 #define dumpAlloc 0 /* Dump allocated buffers ? */ 1312 #define dumpFree 0 /* Dump free buffers ? 
*/ 1313 1314 #ifndef Repeatable 1315 extern long time(); 1316 #endif 1317 1318 extern char *malloc(); 1319 extern int free _((char *)); 1320 1321 static char *bchain = NULL; /* Our private buffer chain */ 1322 static char *bp = NULL; /* Our initial buffer pool */ 1323 1324 #include <math.h> 1325 1326 #ifdef OUR_RAND 1327 1328 static unsigned long int next = 1; 1329 1330 /* Return next random integer */ 1331 1332 int rand() 1333 { 1334 next = next * 1103515245L + 12345; 1335 return (unsigned int) (next / 65536L) % 32768L; 1336 } 1337 1338 /* Set seed for random generator */ 1339 1340 void srand(seed) 1341 unsigned int seed; 1342 { 1343 next = seed; 1344 } 1345 #endif 1346 1347 /* STATS -- Edit statistics returned by bstats() or bstatse(). */ 1348 1349 static void stats(when) 1350 char *when; 1351 { 1352 bufsize cural, totfree, maxfree; 1353 long nget, nfree; 1354 #ifdef BECtl 1355 bufsize pincr; 1356 long totblocks, npget, nprel, ndget, ndrel; 1357 #endif 1358 1359 bstats(&cural, &totfree, &maxfree, &nget, &nfree); 1360 V printf( 1361 "%s: %ld gets, %ld releases. %ld in use, %ld free, largest = %ld\n", 1362 when, nget, nfree, (long) cural, (long) totfree, (long) maxfree); 1363 #ifdef BECtl 1364 bstatse(&pincr, &totblocks, &npget, &nprel, &ndget, &ndrel); 1365 V printf( 1366 " Blocks: size = %ld, %ld (%ld bytes) in use, %ld gets, %ld frees\n", 1367 (long)pincr, totblocks, pincr * totblocks, npget, nprel); 1368 V printf(" %ld direct gets, %ld direct frees\n", ndget, ndrel); 1369 #endif /* BECtl */ 1370 } 1371 1372 #ifdef BECtl 1373 static int protect = 0; /* Disable compaction during bgetr() */ 1374 1375 /* BCOMPACT -- Compaction call-back function. */ 1376 1377 static int bcompact(bsize, seq) 1378 bufsize bsize; 1379 int seq; 1380 { 1381 #ifdef CompactTries 1382 char *bc = bchain; 1383 int i = rand() & 0x3; 1384 1385 #ifdef COMPACTRACE 1386 V printf("Compaction requested. %ld bytes needed, sequence %d.\n", 1387 (long) bsize, seq); 1388 #endif 1389 1390 if (protect || (seq > CompactTries)) { 1391 #ifdef COMPACTRACE 1392 V printf("Compaction gave up.\n"); 1393 #endif 1394 return 0; 1395 } 1396 1397 /* Based on a random cast, release a random buffer in the list 1398 of allocated buffers. */ 1399 1400 while (i > 0 && bc != NULL) { 1401 bc = *((char **) bc); 1402 i--; 1403 } 1404 if (bc != NULL) { 1405 char *fb; 1406 1407 fb = *((char **) bc); 1408 if (fb != NULL) { 1409 *((char **) bc) = *((char **) fb); 1410 brel((void *) fb); 1411 return 1; 1412 } 1413 } 1414 1415 #ifdef COMPACTRACE 1416 V printf("Compaction bailed out.\n"); 1417 #endif 1418 #endif /* CompactTries */ 1419 return 0; 1420 } 1421 1422 /* BEXPAND -- Expand pool call-back function. */ 1423 1424 static void *bexpand(size) 1425 bufsize size; 1426 { 1427 void *np = NULL; 1428 bufsize cural, totfree, maxfree; 1429 long nget, nfree; 1430 1431 /* Don't expand beyond the total allocated size given by PoolSize. */ 1432 1433 bstats(&cural, &totfree, &maxfree, &nget, &nfree); 1434 1435 if (cural < PoolSize) { 1436 np = (void *) malloc((unsigned) size); 1437 } 1438 #ifdef EXPTRACE 1439 V printf("Expand pool by %ld -- %s.\n", (long) size, 1440 np == NULL ? "failed" : "succeeded"); 1441 #endif 1442 return np; 1443 } 1444 1445 /* BSHRINK -- Shrink buffer pool call-back function. 
*/ 1446 1447 static void bshrink(buf) 1448 void *buf; 1449 { 1450 if (((char *) buf) == bp) { 1451 #ifdef EXPTRACE 1452 V printf("Initial pool released.\n"); 1453 #endif 1454 bp = NULL; 1455 } 1456 #ifdef EXPTRACE 1457 V printf("Shrink pool.\n"); 1458 #endif 1459 free((char *) buf); 1460 } 1461 1462 #endif /* BECtl */ 1463 1464 /* Restrict buffer requests to those large enough to contain our pointer and 1465 small enough for the CPU architecture. */ 1466 1467 static bufsize blimit(bs) 1468 bufsize bs; 1469 { 1470 if (bs < sizeof(char *)) { 1471 bs = sizeof(char *); 1472 } 1473 1474 /* This is written out in this ugly fashion because the 1475 cool expression in sizeof(int) that auto-configured 1476 to any length int befuddled some compilers. */ 1477 1478 if (sizeof(int) == 2) { 1479 if (bs > 32767) { 1480 bs = 32767; 1481 } 1482 } else { 1483 if (bs > 200000) { 1484 bs = 200000; 1485 } 1486 } 1487 return bs; 1488 } 1489 1490 int main() 1491 { 1492 int i; 1493 double x; 1494 1495 /* Seed the random number generator. If Repeatable is defined, we 1496 always use the same seed. Otherwise, we seed from the clock to 1497 shake things up from run to run. */ 1498 1499 #ifdef Repeatable 1500 V srand(1234); 1501 #else 1502 V srand((int) time((long *) NULL)); 1503 #endif 1504 1505 /* Compute x such that pow(x, p) ranges between 1 and 4*ExpIncr as 1506 p ranges from 0 to ExpIncr-1, with a concentration in the lower 1507 numbers. */ 1508 1509 x = 4.0 * ExpIncr; 1510 x = log(x); 1511 x = exp(log(4.0 * ExpIncr) / (ExpIncr - 1.0)); 1512 1513 #ifdef BECtl 1514 bectl(bcompact, bexpand, bshrink, (bufsize) ExpIncr); 1515 bp = malloc(ExpIncr); 1516 assert(bp != NULL); 1517 bpool((void *) bp, (bufsize) ExpIncr); 1518 #else 1519 bp = malloc(PoolSize); 1520 assert(bp != NULL); 1521 bpool((void *) bp, (bufsize) PoolSize); 1522 #endif 1523 1524 stats("Create pool"); 1525 V bpoolv((void *) bp); 1526 bpoold((void *) bp, dumpAlloc, dumpFree); 1527 1528 for (i = 0; i < TestProg; i++) { 1529 char *cb; 1530 bufsize bs = pow(x, (double) (rand() & (ExpIncr - 1))); 1531 1532 assert(bs <= (((bufsize) 4) * ExpIncr)); 1533 bs = blimit(bs); 1534 if (rand() & 0x400) { 1535 cb = (char *) bgetz(bs); 1536 } else { 1537 cb = (char *) bget(bs); 1538 } 1539 if (cb == NULL) { 1540 #ifdef EasyOut 1541 break; 1542 #else 1543 char *bc = bchain; 1544 1545 if (bc != NULL) { 1546 char *fb; 1547 1548 fb = *((char **) bc); 1549 if (fb != NULL) { 1550 *((char **) bc) = *((char **) fb); 1551 brel((void *) fb); 1552 } 1553 continue; 1554 } 1555 #endif 1556 } 1557 *((char **) cb) = (char *) bchain; 1558 bchain = cb; 1559 1560 /* Based on a random cast, release a random buffer in the list 1561 of allocated buffers. 
*/ 1562 1563 if ((rand() & 0x10) == 0) { 1564 char *bc = bchain; 1565 int i = rand() & 0x3; 1566 1567 while (i > 0 && bc != NULL) { 1568 bc = *((char **) bc); 1569 i--; 1570 } 1571 if (bc != NULL) { 1572 char *fb; 1573 1574 fb = *((char **) bc); 1575 if (fb != NULL) { 1576 *((char **) bc) = *((char **) fb); 1577 brel((void *) fb); 1578 } 1579 } 1580 } 1581 1582 /* Based on a random cast, reallocate a random buffer in the list 1583 to a random size */ 1584 1585 if ((rand() & 0x20) == 0) { 1586 char *bc = bchain; 1587 int i = rand() & 0x3; 1588 1589 while (i > 0 && bc != NULL) { 1590 bc = *((char **) bc); 1591 i--; 1592 } 1593 if (bc != NULL) { 1594 char *fb; 1595 1596 fb = *((char **) bc); 1597 if (fb != NULL) { 1598 char *newb; 1599 1600 bs = pow(x, (double) (rand() & (ExpIncr - 1))); 1601 bs = blimit(bs); 1602 #ifdef BECtl 1603 protect = 1; /* Protect against compaction */ 1604 #endif 1605 newb = (char *) bgetr((void *) fb, bs); 1606 #ifdef BECtl 1607 protect = 0; 1608 #endif 1609 if (newb != NULL) { 1610 *((char **) bc) = newb; 1611 } 1612 } 1613 } 1614 } 1615 } 1616 stats("\nAfter allocation"); 1617 if (bp != NULL) { 1618 V bpoolv((void *) bp); 1619 bpoold((void *) bp, dumpAlloc, dumpFree); 1620 } 1621 1622 while (bchain != NULL) { 1623 char *buf = bchain; 1624 1625 bchain = *((char **) buf); 1626 brel((void *) buf); 1627 } 1628 stats("\nAfter release"); 1629 #ifndef BECtl 1630 if (bp != NULL) { 1631 V bpoolv((void *) bp); 1632 bpoold((void *) bp, dumpAlloc, dumpFree); 1633 } 1634 #endif 1635 1636 return 0; 1637 } 1638 #endif 1639
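/*  Usage sketch (illustrative, not part of BGET itself): wiring the
    BECtl expansion call-backs to malloc() and free(), as described
    under "Automatic Storage Management" in the header comment.  The
    function names and the 16384-byte increment are made up, and the
    calls follow the classic interface documented above; the bectl()
    defined in this file also takes a trailing struct bpoolset *
    argument.

        #include <stdlib.h>

        static void *grow_pool(bufsize size)
        {
            return malloc((size_t) size);
        }

        static void shrink_pool(void *buf)
        {
            free(buf);
        }

        static void setup_expansion(void)
        {
            bectl(NULL, grow_pool, shrink_pool, (bufsize) 16384);
        }

    With this in place, a failing bget() acquires a 16384-byte
    expansion block through grow_pool(), and an expansion block whose
    buffers have all been released is handed back to shrink_pool()
    (provided all pool blocks share that standard size).  */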