/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain.  Use, modify, and
  redistribute this code without permission or acknowledgement in any
  way you wish.  Send questions, comments, complaints, performance
  data, etc to dl@cs.oswego.edu

  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
        ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  Check before installing!

  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
*/

#include <features.h>
#include <stddef.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <malloc.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <bits/uClibc_mutex.h>
#include <bits/uClibc_page.h>


__UCLIBC_MUTEX_EXTERN(__malloc_lock)
#if defined __UCLIBC_HAS_THREADS__ && !defined __UCLIBC_HAS_LINUXTHREADS__
	attribute_hidden
#endif
	;
#define __MALLOC_LOCK	__UCLIBC_MUTEX_LOCK(__malloc_lock)
#define __MALLOC_UNLOCK	__UCLIBC_MUTEX_UNLOCK(__malloc_lock)


/*
  MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
  It must be a power of two at least 2 * (sizeof(size_t)), even on machines
  for which smaller alignments would suffice. It may be defined as
  larger than this though. Note however that code and data structures
  are optimized for the case of 8-byte alignment.
*/
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT       (2 * (sizeof(size_t)))
#endif

/* The corresponding bit mask value */
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)

/*
  TRIM_FASTBINS controls whether free() of a very small chunk can
  immediately lead to trimming. Setting to true (1) can reduce memory
  footprint, but will almost always slow down programs that use a lot
  of small chunks.

  Define this only if you are willing to give up some speed to more
  aggressively reduce system-level memory footprint when releasing
  memory in programs that use many small chunks.  You can get
  essentially the same effect by setting MXFAST to 0, but this can
  lead to even greater slowdowns in programs using many small chunks.
  TRIM_FASTBINS is an in-between compile-time option, that disables
  only those chunks bordering topmost memory from being placed in
  fastbins.
*/
#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS  0
#endif


/*
  MORECORE-related declarations. By default, rely on sbrk
*/


/*
  MORECORE is the name of the routine to call to obtain more memory
  from the system.  See below for general guidance on writing
  alternative MORECORE functions, as well as a version for WIN32 and a
  sample version for pre-OSX macos.
*/
#ifndef MORECORE
#define MORECORE sbrk
#endif

/*
  MORECORE_FAILURE is the value returned upon failure of MORECORE
  as well as mmap. Since it cannot be an otherwise valid memory address,
  and must reflect values of standard sys calls, you probably ought not
  try to redefine it.
*/
#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif
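
/*
  Example (illustrative only; never compiled): the general pattern for
  calling MORECORE and checking MORECORE_FAILURE when the heap needs to
  grow.  The real call sites live in the allocation code; the wrapper
  name below is made up for this sketch.
*/
#if 0
static void *example_grow_heap(size_t nb)
{
	char *brk = (char *)(MORECORE(nb));      /* ask the system for nb more bytes  */
	if (brk == (char *)(MORECORE_FAILURE))   /* sbrk reports failure as (void*)-1 */
		return NULL;                         /* caller would fall back to mmap    */
	return brk;
}
#endif
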
/*
  If MORECORE_CONTIGUOUS is true, take advantage of fact that
  consecutive calls to MORECORE with positive arguments always return
  contiguous increasing addresses.  This is true of unix sbrk.  Even
  if not defined, when regions happen to be contiguous, malloc will
  permit allocations spanning regions obtained from different
  calls.  But defining this when applicable enables some stronger
  consistency checks and space efficiencies.
*/
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif

/*
  MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
  sbrk fails, and mmap is used as a backup (which is done only if
  HAVE_MMAP).  The value must be a multiple of page size.  This
  backup strategy generally applies only when systems have "holes" in
  address space, so sbrk cannot perform contiguous expansion, but
  there is still space available on system.  On systems for which
  this is known to be useful (i.e. most linux kernels), this occurs
  only when programs allocate huge amounts of memory.  Between this,
  and the fact that mmap regions tend to be limited, the size should
  be large, to avoid too many mmap calls and thus avoid running out
  of kernel resources.
*/
#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif

/*
  The system page size. To the extent possible, this malloc manages
  memory from the system in page-size units.  Note that this value is
  cached during initialization into a field of malloc_state. So even
  if malloc_getpagesize is a function, it is only called once.

  The following mechanics for getpagesize were adapted from bsd/gnu
  getpagesize.h. If none of the system-probes here apply, a value of
  4096 is used, which should be OK: If they don't apply, then using
  the actual value probably doesn't impact performance.
*/
#ifndef malloc_getpagesize
#  include <unistd.h>
#  define malloc_getpagesize getpagesize()
#else /* just guess */
#  define malloc_getpagesize (4096)
#endif


/* mallopt tuning options */

/*
  M_MXFAST is the maximum request size used for "fastbins", special bins
  that hold returned chunks without consolidating their spaces. This
  enables future requests for chunks of the same size to be handled
  very quickly, but can increase fragmentation, and thus increase the
  overall memory footprint of a program.

  This malloc manages fastbins very conservatively yet still
  efficiently, so fragmentation is rarely a problem for values less
  than or equal to the default.  The maximum supported value of MXFAST
  is 80. You wouldn't want it any higher than this anyway.  Fastbins
  are designed especially for use with many small structs, objects or
  strings -- the default handles structs/objects/arrays with sizes up
  to 16 4byte fields, or small strings representing words, tokens,
  etc. Using fastbins for larger objects normally worsens
  fragmentation without improving speed.

  M_MXFAST is set in REQUEST size units. It is internally used in
  chunksize units, which adds padding and alignment.  You can reduce
  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  algorithm to be a closer approximation of fifo-best-fit in all cases,
  not just for larger requests, but will generally cause it to be
  slower.
*/


/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
#ifndef M_MXFAST
#define M_MXFAST            1
#endif

#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST     64
#endif
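
/*
  Example (illustrative only; never compiled), assuming mallopt() is built
  with M_MXFAST support: disabling fastbins entirely, or raising the cutoff
  to its maximum of 80 request bytes.
*/
#if 0
static void example_tune_fastbins(void)
{
	mallopt(M_MXFAST, 0);    /* no fastbins: closer to fifo-best-fit, but slower */
	mallopt(M_MXFAST, 80);   /* largest supported fastbin request size */
}
#endif
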
/*
  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  to keep before releasing via malloc_trim in free().

  Automatic trimming is mainly useful in long-lived programs.
  Because trimming via sbrk can be slow on some systems, and can
  sometimes be wasteful (in cases where programs immediately
  afterward allocate more large chunks) the value should be high
  enough so that your overall system performance would improve by
  releasing this much memory.

  The trim threshold and the mmap control parameters (see below)
  can be traded off with one another. Trimming and mmapping are
  two different ways of releasing unused memory back to the
  system. Between these two, it is often possible to keep
  system-level demands of a long-lived program down to a bare
  minimum. For example, in one test suite of sessions measuring
  the XF86 X server on Linux, using a trim threshold of 128K and a
  mmap threshold of 192K led to near-minimal long term resource
  consumption.

  If you are using this malloc in a long-lived program, it should
  pay to experiment with these values.  As a rough guide, you
  might set to a value close to the average size of a process
  (program) running on your system.  Releasing this much memory
  would allow such a process to run in memory.  Generally, it's
  worth it to tune for trimming rather than memory mapping when a
  program undergoes phases where several large chunks are
  allocated and released in ways that can reuse each other's
  storage, perhaps mixed with phases where there are no such
  chunks at all.  And in well-behaved long-lived programs,
  controlling release of large blocks via trimming versus mapping
  is usually faster.

  However, in most programs, these parameters serve mainly as
  protection against the system-level effects of carrying around
  massive amounts of unneeded memory. Since frequent calls to
  sbrk, mmap, and munmap otherwise degrade performance, the default
  parameters are set to relatively high values that serve only as
  safeguards.

  The trim value must be greater than page size to have any useful
  effect.  To disable trimming completely, you can set to
  (unsigned long)(-1)

  Trim settings interact with fastbin (MXFAST) settings: Unless
  TRIM_FASTBINS is defined, automatic trimming never takes place upon
  freeing a chunk with size less than or equal to MXFAST. Trimming is
  instead delayed until subsequent freeing of larger chunks. However,
  you can still force an attempted trim by calling malloc_trim.

  Also, trimming is not generally possible in cases where
  the main arena is obtained via mmap.

  Note that the trick some people use of mallocing a huge space and
  then freeing it at program startup, in an attempt to reserve system
  memory, doesn't have the intended effect under automatic trimming,
  since that memory will immediately be returned to the system.
*/
#define M_TRIM_THRESHOLD       -1

#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
#endif
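
/*
  Example (illustrative only; never compiled), assuming mallopt() handles
  these options: the long-lived-server tuning described above (trim at
  128K, mmap threshold at 192K), or disabling automatic trimming with -1.
  M_MMAP_THRESHOLD is defined further below.
*/
#if 0
static void example_tune_trimming(void)
{
	mallopt(M_TRIM_THRESHOLD, 128 * 1024);   /* release unused top memory beyond 128K */
	mallopt(M_MMAP_THRESHOLD, 192 * 1024);   /* service requests >= 192K via mmap     */
	mallopt(M_TRIM_THRESHOLD, -1);           /* or: never trim automatically          */
}
#endif
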
/*
  M_TOP_PAD is the amount of extra `padding' space to allocate or
  retain whenever sbrk is called. It is used in two ways internally:

  * When sbrk is called to extend the top of the arena to satisfy
    a new malloc request, this much padding is added to the sbrk
    request.

  * When malloc_trim is called automatically from free(),
    it is used as the `pad' argument.

  In both cases, the actual amount of padding is rounded
  so that the end of the arena is always a system page boundary.

  The main reason for using padding is to avoid calling sbrk so
  often. Having even a small pad greatly reduces the likelihood
  that nearly every malloc request during program start-up (or
  after trimming) will invoke sbrk, which needlessly wastes
  time.

  Automatic rounding-up to page-size units is normally sufficient
  to avoid measurable overhead, so the default is 0.  However, in
  systems where sbrk is relatively slow, it can pay to increase
  this value, at the expense of carrying around more memory than
  the program needs.
*/
#define M_TOP_PAD              -2

#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
  M_MMAP_THRESHOLD is the request size threshold for using mmap()
  to service a request. Requests of at least this size that cannot
  be allocated using already-existing space will be serviced via mmap.
  (If enough normal freed space already exists it is used instead.)

  Using mmap segregates relatively large chunks of memory so that
  they can be individually obtained and released from the host
  system. A request serviced through mmap is never reused by any
  other request (at least not directly; the system may just so
  happen to remap successive requests to the same locations).

  Segregating space in this way has the benefits that:

  1. Mmapped space can ALWAYS be individually released back
     to the system, which helps keep the system level memory
     demands of a long-lived program low.
  2. Mapped memory can never become `locked' between
     other chunks, as can happen with normally allocated chunks, which
     means that even trimming via malloc_trim would not release them.
  3. On some systems with "holes" in address spaces, mmap can obtain
     memory that sbrk cannot.

  However, it has the disadvantages that:

  1. The space cannot be reclaimed, consolidated, and then
     used to service later requests, as happens with normal chunks.
  2. It can lead to more wastage because of mmap page alignment
     requirements.
  3. It causes malloc performance to be more dependent on host
     system memory management support routines which may vary in
     implementation quality and may impose arbitrary
     limitations. Generally, servicing a request via normal
     malloc steps is faster than going through a system's mmap.

  The advantages of mmap nearly always outweigh disadvantages for
  "large" chunks, but the value of "large" varies across systems.  The
  default is an empirically derived value that works well in most
  systems.
*/
#define M_MMAP_THRESHOLD      -3

#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
#endif

/*
  M_MMAP_MAX is the maximum number of requests to simultaneously
  service using mmap. This parameter exists because some systems
  have a limited number of internal tables for use by mmap, and
  using more than a few of them may degrade performance.

  The default is set to a value that serves only as a safeguard.
  Setting to 0 disables use of mmap for servicing large requests.  If
  HAVE_MMAP is not set, the default value is 0, and attempts to set it
  to non-zero values in mallopt will fail.
*/
#define M_MMAP_MAX             -4

#ifndef DEFAULT_MMAP_MAX
#define DEFAULT_MMAP_MAX       (65536)
#endif


/* ------------------ MMAP support ------------------ */
#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __ARCH_USE_MMU__
# define _MAP_UNINITIALIZED 0
#else
# define _MAP_UNINITIALIZED MAP_UNINITIALIZED
#endif

#define MMAP(addr, size, prot) \
 (mmap((addr), (size), (prot), MAP_PRIVATE|MAP_ANONYMOUS|_MAP_UNINITIALIZED, 0, 0))


/* ----------------------- Chunk representations ----------------------- */


/*
  This struct declaration is misleading (but accurate and necessary).
  It declares a "view" into memory allowing access to necessary
  fields at known offsets from a given base. See explanation below.
*/

struct malloc_chunk {

  size_t      prev_size;  /* Size of previous chunk (if free).  */
  size_t      size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};


typedef struct malloc_chunk* mchunkptr;

/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus at least double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory. If
    prev_inuse is set for any given chunk, then you CANNOT determine
    the size of the previous chunk, and might even get a memory
    addressing fault when trying to do so.

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. This makes it easier to
    deal with alignments etc but can be very confusing when trying
    to extend or adapt this code.

    The two exceptions to all this are

     1. The special chunk `top' doesn't bother using the
        trailing size field since there is no next contiguous chunk
        that would have to index off it. After initialization, `top'
        is forced to always exist.  If it would become less than
        MINSIZE bytes long, it is replenished.

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit (IS_MMAPPED) set in their size fields.  Because they are
        allocated one-by-one, each must contain its own trailing size field.

*/
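
/*
  Example (illustrative only; never compiled): how the "view" struct maps
  onto raw memory.  For a chunk at address p, the pointer handed back to
  the user is p plus two size words, so user data overlays the fd/bk
  fields while the chunk is allocated; the low bits of p->size are the
  PREV_INUSE/IS_MMAPPED flags defined below.
*/
#if 0
static void example_chunk_view(mchunkptr p)
{
	void     *mem  = (char *)p + 2 * (sizeof(size_t)); /* what malloc() returns      */
	size_t    size = p->size & ~(size_t)3;             /* size with flag bits cleared */
	mchunkptr nxt  = (mchunkptr)((char *)p + size);    /* next physical chunk         */

	/* When p is free, nxt->prev_size duplicates p's size (the `foot'). */
	(void)mem; (void)nxt;
}
#endif
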
/*
  ---------- Size and alignment checks and conversions ----------
*/

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((void*)((char*)(p) + 2*(sizeof(size_t))))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*(sizeof(size_t))))

/* The smallest possible chunk */
#define MIN_CHUNK_SIZE        (sizeof(struct malloc_chunk))

/* The smallest size we can malloc is an aligned minimal chunk */

#define MINSIZE  \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

/* Check if m has acceptable alignment */

#define aligned_OK(m)  (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)


/* Check if a request is so large that it would wrap around zero when
   padded and aligned. To simplify some other code, the bound is made
   low enough so that adding MINSIZE will also not wrap around zero.
*/

#define REQUEST_OUT_OF_RANGE(req)                                 \
  ((unsigned long)(req) >=                                        \
   (unsigned long)(size_t)(-2 * MINSIZE))

/* pad request bytes into a usable size -- internal version */

#define request2size(req)                                         \
  (((req) + (sizeof(size_t)) + MALLOC_ALIGN_MASK < MINSIZE)  ?    \
   MINSIZE :                                                      \
   ((req) + (sizeof(size_t)) + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
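
/*
  Worked example (assuming a 32-bit size_t, so MALLOC_ALIGNMENT == 8,
  MALLOC_ALIGN_MASK == 7, and MINSIZE == 16): request2size(13) is
  (13 + 4 + 7) & ~7 == 24, while request2size(1) gives 1 + 4 + 7 == 12,
  which is below MINSIZE and is therefore padded up to 16.
  REQUEST_OUT_OF_RANGE rejects anything at or above (size_t)(-32), so the
  additions above can never wrap around zero.
*/
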
/*  Same, except also perform argument check */

#define checked_request2size(req, sz)                             \
  if (REQUEST_OUT_OF_RANGE(req)) {                                \
    __set_errno(ENOMEM);                                          \
    return 0;                                                     \
  }                                                               \
  (sz) = request2size(req);

/*
  --------------- Physical chunk operations ---------------
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1

/* extract inuse bit of previous chunk */
#define prev_inuse(p)       ((p)->size & PREV_INUSE)


/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2

/* check for mmap()'ed chunk */
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* Bits to mask off when extracting size

  Note: IS_MMAPPED is intentionally not masked off from size field in
  macros for which mmapped chunks should never be seen. This should
  cause helpful core dumps to occur if it is tried by accident by
  people extending or adapting this malloc.
*/
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)

/* Get size, ignoring use bits */
#define chunksize(p)         ((p)->size & ~(SIZE_BITS))


/* Ptr to next physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

/* extract p's inuse bit */
#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* set/clear chunk as being inuse without otherwise disturbing */
#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)


/* check/set/clear inuse bits in known places */
#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))


/* Set size at head, without disturbing its use bit */
#define set_head_size(p, s)  ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use field */
#define set_head(p, s)       ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */
#define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
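
/*
  Example (illustrative only; never compiled): walking physical neighbours
  with the macros above.  The checks mirror the boundary-tag rules: a
  chunk's own in-use state lives in the *next* chunk's PREV_INUSE bit, and
  prev_size/prev_chunk may only be used while the previous chunk is free.
*/
#if 0
static void example_neighbours(mchunkptr p)
{
	mchunkptr nxt = next_chunk(p);   /* p + (p->size & ~PREV_INUSE)        */
	int in_use    = inuse(p);        /* reads PREV_INUSE of the next chunk */

	if (!prev_inuse(p)) {            /* only then is p->prev_size valid */
		mchunkptr prv = prev_chunk(p);
		(void)prv;
	}
	(void)nxt; (void)in_use;
}
#endif
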
/* -------------------- Internal data structures -------------------- */

/*
  Bins

    An array of bin headers for free chunks. Each bin is doubly
    linked.  The bins are approximately proportionally (log) spaced.
    There are a lot of these bins (128). This may look excessive, but
    works very well in practice.  Most bins hold sizes that are
    unusual as malloc request sizes, but are more usual for fragments
    and consolidated sets of chunks, which is what these bins hold, so
    they can be found quickly.  All procedures maintain the invariant
    that no consolidated chunk physically borders another one, so each
    chunk in a list is known to be preceded and followed by either
    inuse chunks or the ends of memory.

    Chunks in bins are kept in size order, with ties going to the
    approximately least recently used chunk. Ordering isn't needed
    for the small bins, which all contain the same-sized chunks, but
    facilitates best-fit allocation for larger chunks. These lists
    are just sequential. Keeping them in order almost never requires
    enough traversal to warrant using fancier ordered data
    structures.

    Chunks of the same size are linked with the most
    recently freed at the front, and allocations are taken from the
    back.  This results in LRU (FIFO) allocation order, which tends
    to give each chunk an equal opportunity to be consolidated with
    adjacent freed chunks, resulting in larger free chunks and less
    fragmentation.

    To simplify use in double-linked lists, each bin header acts
    as a malloc_chunk. This avoids special-casing for headers.
    But to conserve space and improve locality, we allocate
    only the fd/bk pointers of bins, and then use repositioning tricks
    to treat these as the fields of a malloc_chunk*.
*/

typedef struct malloc_chunk* mbinptr;

/* addressing -- note that bin_at(0) does not exist */
#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - ((sizeof(size_t))<<1)))

/* analog of ++bin */
#define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))

/* Reminders about list directionality within bins */
#define first(b)     ((b)->fd)
#define last(b)      ((b)->bk)

/* Take a chunk off a bin list */
#define unlink(P, BK, FD) {                                            \
  FD = P->fd;                                                          \
  BK = P->bk;                                                          \
  if (FD->bk != P || BK->fd != P)                                      \
      abort();                                                         \
  FD->bk = BK;                                                         \
  BK->fd = FD;                                                         \
}

/*
  Indexing

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically spaced:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    The bins top out around 1MB because we expect to service large
    requests via mmap.
*/

#define NBINS              96
#define NSMALLBINS         32
#define SMALLBIN_WIDTH      8
#define MIN_LARGE_SIZE    256

#define in_smallbin_range(sz)  \
  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)

#define smallbin_index(sz)     (((unsigned)(sz)) >> 3)

#define bin_index(sz) \
 ((in_smallbin_range(sz)) ? smallbin_index(sz) : __malloc_largebin_index(sz))
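
/*
  Example (illustrative only; never compiled): mapping a chunk size to a
  bin.  A 24-byte chunk is in smallbin range (24 < 256), so its index is
  24 >> 3 == 3, and bin_at(m, 3) returns a header positioned so that its
  fd/bk fields overlay bins[6] and bins[7].
*/
#if 0
static mbinptr example_pick_bin(struct malloc_state *m, size_t chunk_size)
{
	unsigned int idx = bin_index(chunk_size); /* smallbin_index or __malloc_largebin_index */
	return bin_at(m, idx);
}
#endif
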
/*
  FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
  first bin that is maintained in sorted order. This must
  be the smallest size corresponding to a given bin.

  Normally, this should be MIN_LARGE_SIZE. But you can weaken
  best fit guarantees to sometimes speed up malloc by increasing value.
  Doing this means that malloc may choose a chunk that is
  non-best-fitting by up to the width of the bin.

  Some useful cutoff values:
      512 - all bins sorted
     2560 - leaves bins <=     64 bytes wide unsorted
    12288 - leaves bins <=    512 bytes wide unsorted
    65536 - leaves bins <=   4096 bytes wide unsorted
   262144 - leaves bins <=  32768 bytes wide unsorted
       -1 - no bins sorted (not recommended!)
*/

#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
/* #define FIRST_SORTED_BIN_SIZE 65536 */

/*
  Unsorted chunks

    All remainders from chunk splits, as well as all returned chunks,
    are first placed in the "unsorted" bin. They are then placed
    in regular bins after malloc gives them ONE chance to be used before
    binning. So, basically, the unsorted_chunks list acts as a queue,
    with chunks being placed on it in free (and __malloc_consolidate),
    and taken off (to be either used or placed in bins) in malloc.
*/

/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
#define unsorted_chunks(M)          (bin_at(M, 1))

/*
  Top

    The top-most available chunk (i.e., the one bordering the end of
    available memory) is treated specially. It is never included in
    any bin, is used only if no other chunk is available, and is
    released back to the system if it is very large (see
    M_TRIM_THRESHOLD).  Because top initially
    points to its own bin with initial zero size, thus forcing
    extension on the first malloc request, we avoid having any special
    code in malloc to check whether it even exists yet. But we still
    need to do so when getting memory from system, so we make
    initial_top treat the bin as a legal but unusable chunk during the
    interval between initialization and the first call to
    __malloc_alloc. (This is somewhat delicate, since it relies on
    the 2 preceding words to be zero during this interval as well.)
*/

/* Conveniently, the unsorted bin can be used as dummy top on first call */
#define initial_top(M)              (unsorted_chunks(M))

/*
  Binmap

    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching.  `binmap' is a
    bitvector recording whether bins are definitely empty so they can
    be skipped over during traversals.  The bits are NOT always
    cleared as soon as bins are empty, but instead only
    when they are noticed to be empty during traversal in malloc.
*/

/* Conservatively use 32 bits per map word, even if on 64bit system */
#define BINMAPSHIFT      5
#define BITSPERMAP       (1U << BINMAPSHIFT)
#define BINMAPSIZE       (NBINS / BITSPERMAP)

#define idx2block(i)     ((i) >> BINMAPSHIFT)
#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))

#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
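
/*
  Worked example: for bin index 70, idx2block(70) == 70 >> 5 == 2 and
  idx2bit(70) == 1U << (70 & 31) == 0x40, so mark_bin(m, 70) sets bit 6 of
  binmap[2].  A zero result from get_binmap(m, 70) lets the traversal in
  malloc skip that bin without ever touching its list header.
*/
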
/*
  Fastbins

    An array of lists holding recently freed small chunks.  Fastbins
    are not doubly linked.  It is faster to single-link them, and
    since chunks are never removed from the middles of these lists,
    double linking is not necessary. Also, unlike regular bins, they
    are not even processed in FIFO order (they use faster LIFO) since
    ordering doesn't much matter in the transient contexts in which
    fastbins are normally used.

    Chunks in fastbins keep their inuse bit set, so they cannot
    be consolidated with other free chunks. __malloc_consolidate
    releases all chunks in fastbins and consolidates them with
    other free chunks.
*/

typedef struct malloc_chunk* mfastbinptr;

/* offset 2 to use otherwise unindexable first 2 bins */
#define fastbin_index(sz)        ((((unsigned int)(sz)) >> 3) - 2)

/* The maximum fastbin request size we support */
#define MAX_FAST_SIZE     80

#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)

/*
  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
  that triggers automatic consolidation of possibly-surrounding
  fastbin chunks. This is a heuristic, so the exact value should not
  matter too much. It is defined at half the default trim threshold as a
  compromise heuristic to only attempt consolidation if it is likely
  to lead to trimming. However, it is not dynamically tunable, since
  consolidation reduces fragmentation surrounding large chunks even
  if trimming is not used.
*/

#define FASTBIN_CONSOLIDATION_THRESHOLD  \
  ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)

/*
  Since the lowest 2 bits in max_fast don't matter in size comparisons,
  they are used as flags.
*/

/*
  ANYCHUNKS_BIT held in max_fast indicates that there may be any
  freed chunks at all. It is set true when entering a chunk into any
  bin.
*/

#define ANYCHUNKS_BIT        (1U)

#define have_anychunks(M)     (((M)->max_fast &  ANYCHUNKS_BIT))
#define set_anychunks(M)      ((M)->max_fast |=  ANYCHUNKS_BIT)
#define clear_anychunks(M)    ((M)->max_fast &= ~ANYCHUNKS_BIT)

/*
  FASTCHUNKS_BIT held in max_fast indicates that there are probably
  some fastbin chunks. It is set true on entering a chunk into any
  fastbin, and cleared only in __malloc_consolidate.
*/

#define FASTCHUNKS_BIT        (2U)

#define have_fastchunks(M)   (((M)->max_fast &  FASTCHUNKS_BIT))
#define set_fastchunks(M)    ((M)->max_fast |=  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
#define clear_fastchunks(M)  ((M)->max_fast &= ~(FASTCHUNKS_BIT))

/* Set value of max_fast. Use impossibly small value if 0. */
#define set_max_fast(M, s) \
  (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
  ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))

#define get_max_fast(M) \
  ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))

/*
  Safe-Linking:
  Use randomness from ASLR (mmap_base) to protect single-linked lists
  of fastbins. Together with allocation alignment checks, this mechanism
  reduces the risk of pointer hijacking, as was done with Safe-Unlinking
  in the double-linked lists of smallbins.
*/
#define PROTECT_PTR(pos, ptr)  ((mchunkptr)((((size_t)pos) >> PAGE_SHIFT) ^ ((size_t)ptr)))
#define REVEAL_PTR(pos, ptr)   PROTECT_PTR(pos, ptr)
#define PTR_FOR_ALIGNMENT_CHECK(P) \
  (MALLOC_ALIGNMENT == 2*(sizeof(size_t)) ? (P) : chunk2mem(P))

#define CHECK_PTR(P) \
  if (!aligned_OK(PTR_FOR_ALIGNMENT_CHECK(P))) \
      abort();
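
/*
  Example (illustrative only; never compiled): storing and reading one
  fastbin link with Safe-Linking.  Because XOR is its own inverse,
  REVEAL_PTR(&slot, PROTECT_PTR(&slot, p)) == p whenever the same slot
  address is used for both operations; CHECK_PTR then aborts if the
  revealed pointer is misaligned, i.e. the link was tampered with.
*/
#if 0
static void example_safe_link(mchunkptr *slot, mchunkptr p)
{
	mchunkptr q;

	*slot = PROTECT_PTR(slot, p);   /* store the masked pointer */
	q = REVEAL_PTR(slot, *slot);    /* recover the original     */
	CHECK_PTR(q);                   /* abort on a mangled link  */
	(void)q;
}
#endif
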
/*
  morecore_properties is a status word holding dynamically discovered
  or controlled properties of the morecore function
*/

#define MORECORE_CONTIGUOUS_BIT  (1U)

#define contiguous(M) \
        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT))
#define noncontiguous(M) \
        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT) == 0)
#define set_contiguous(M) \
        ((M)->morecore_properties |=  MORECORE_CONTIGUOUS_BIT)
#define set_noncontiguous(M) \
        ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)


/*
   ----------- Internal state representation and initialization -----------
*/

struct malloc_state {

  /* The maximum chunk size to be eligible for fastbin */
  size_t           max_fast;   /* low 2 bits used as flags */

  /* Fastbins */
  mfastbinptr      fastbins[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr        top;

  /* The remainder from the most recent split of a small request */
  mchunkptr        last_remainder;

  /* Normal bins packed as described above */
  mchunkptr        bins[NBINS * 2];

  /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
  unsigned int     binmap[BINMAPSIZE+1];

  /* Tunable parameters */
  unsigned long    trim_threshold;
  size_t           top_pad;
  size_t           mmap_threshold;

  /* Memory map support */
  int              n_mmaps;
  int              n_mmaps_max;
  int              max_n_mmaps;

  /* Cache malloc_getpagesize */
  unsigned int     pagesize;

  /* Track properties of MORECORE */
  unsigned int     morecore_properties;

  /* Statistics */
  size_t           mmapped_mem;
  size_t           sbrked_mem;
  size_t           max_sbrked_mem;
  size_t           max_mmapped_mem;
  size_t           max_total_mem;
};

typedef struct malloc_state *mstate;

/*
   There is exactly one instance of this struct in this malloc.
   If you are adapting this malloc in a way that does NOT use a static
   malloc_state, you MUST explicitly zero-fill it before using. This
   malloc relies on the property that malloc_state is initialized to
   all zeroes (as is true of C statics).
*/
extern struct malloc_state __malloc_state attribute_hidden;  /* never directly referenced */

/*
   All uses of av_ are via get_malloc_state().
   At most one "call" to get_malloc_state is made per invocation of
   the public versions of malloc and free, but other routines
   that in turn invoke malloc and/or free may call more than once.
   Also, it is called in check* routines if __UCLIBC_MALLOC_DEBUGGING__ is set.
*/

#define get_malloc_state() (&(__malloc_state))
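
/*
  Example (illustrative only; never compiled): the pattern the public
  entry points follow -- take the malloc lock once, fetch the single
  malloc_state, do the work, release the lock.  The wrapper name is made
  up for this sketch.
*/
#if 0
static void example_entry_point(void)
{
	mstate av;

	__MALLOC_LOCK;
	av = get_malloc_state();
	if (!have_anychunks(av)) {
		/* nothing freed yet; the real code would go get more memory */
	}
	__MALLOC_UNLOCK;
}
#endif
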
/* External internal utilities operating on mstates */
void __malloc_consolidate(mstate) attribute_hidden;


/* Debugging support */
#ifndef __UCLIBC_MALLOC_DEBUGGING__

#define check_chunk(P)
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)
#define check_malloc_state()
#define assert(x) ((void)0)


#else

#define check_chunk(P)              __do_check_chunk(P)
#define check_free_chunk(P)         __do_check_free_chunk(P)
#define check_inuse_chunk(P)        __do_check_inuse_chunk(P)
#define check_remalloced_chunk(P,N) __do_check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)   __do_check_malloced_chunk(P,N)
#define check_malloc_state()        __do_check_malloc_state()

extern void __do_check_chunk(mchunkptr p) attribute_hidden;
extern void __do_check_free_chunk(mchunkptr p) attribute_hidden;
extern void __do_check_inuse_chunk(mchunkptr p) attribute_hidden;
extern void __do_check_remalloced_chunk(mchunkptr p, size_t s) attribute_hidden;
extern void __do_check_malloced_chunk(mchunkptr p, size_t s) attribute_hidden;
extern void __do_check_malloc_state(void) attribute_hidden;

#include <assert.h>

#endif
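
/*
  Example (illustrative only; never compiled): how the consistency checks
  above are meant to be used inside the allocator.  With
  __UCLIBC_MALLOC_DEBUGGING__ unset, both the check_* macros and assert()
  expand to nothing, so a normal build pays no cost for them.
*/
#if 0
static void example_debug_checks(void *mem)
{
	mchunkptr p = mem2chunk(mem);   /* back from user pointer to chunk header */

	check_inuse_chunk(p);           /* full validation only when debugging    */
	assert(inuse(p));               /* plain no-op in a normal build          */
}
#endif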