root/oshmem/mca/memheap/ptmalloc/malloc.c


DEFINITIONS

This source file includes the following definitions.
  1. win32mmap
  2. win32direct_mmap
  3. win32munmap
  4. pthread_acquire_lock
  5. pthread_release_lock
  6. pthread_try_lock
  7. win32_acquire_lock
  8. win32_release_lock
  9. win32_try_lock
  10. pthread_acquire_lock
  11. pthread_release_lock
  12. pthread_try_lock
  13. pthread_init_lock
  14. pthread_islocked
  15. segment_holding
  16. has_segment_link
  17. init_mparams
  18. change_mparam
  19. do_check_any_chunk
  20. do_check_top_chunk
  21. do_check_mmapped_chunk
  22. do_check_inuse_chunk
  23. do_check_free_chunk
  24. do_check_malloced_chunk
  25. do_check_tree
  26. do_check_treebin
  27. do_check_smallbin
  28. bin_find
  29. traverse_and_check
  30. do_check_malloc_state
  31. internal_mallinfo
  32. internal_malloc_stats
  33. mmap_alloc
  34. mmap_resize
  35. init_top
  36. init_bins
  37. reset_on_error
  38. prepend_alloc
  39. add_segment
  40. sys_alloc
  41. release_unused_segments
  42. sys_trim
  43. tmalloc_large
  44. tmalloc_small
  45. internal_realloc
  46. internal_memalign
  47. ialloc
  48. dlmalloc
  49. dlfree
  50. dlcalloc
  51. dlrealloc
  52. dlmemalign
  53. dlindependent_calloc
  54. dlindependent_comalloc
  55. dlvalloc
  56. dlpvalloc
  57. dlmalloc_trim
  58. dlmalloc_footprint
  59. dlmalloc_max_footprint
  60. dlmallinfo
  61. dlmalloc_stats
  62. dlmalloc_usable_size
  63. dlmallopt
  64. init_user_mstate
  65. create_mspace
  66. create_mspace_with_base
  67. destroy_mspace
  68. mspace_malloc
  69. mspace_free
  70. mspace_calloc
  71. mspace_realloc
  72. mspace_memalign
  73. mspace_independent_calloc
  74. mspace_independent_comalloc
  75. mspace_trim
  76. mspace_malloc_stats
  77. mspace_footprint
  78. mspace_max_footprint
  79. mspace_mallinfo
  80. mspace_usable_size
  81. mspace_mallopt

   1 #include "malloc_defs.h"
   2 /*
   3   $Id: malloc.c,v 1.4 2006/03/30 16:47:29 wg Exp $
   4 
   5   This version of malloc.c was adapted for ptmalloc3 by Wolfram Gloger
   6   <wg@malloc.de>.  Therefore, some of the comments below do not apply
   7   for this modified version.  However, it is the intention to keep
   8   differences to Doug Lea's original version minimal, hence the
   9   comments were mostly left unchanged.
  10 
  11  -----------------------------------------------------------------------
  12 
  13   This is a version (aka dlmalloc) of malloc/free/realloc written by
  14   Doug Lea and released to the public domain, as explained at
  15   http://creativecommons.org/licenses/publicdomain.  Send questions,
  16   comments, complaints, performance data, etc to dl@cs.oswego.edu
  17 
  18 * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
  19 
  20    Note: There may be an updated version of this malloc obtainable at
  21            ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  22          Check before installing!
  23 
  24 * Quickstart
  25 
  26   This library is all in one file to simplify the most common usage:
  27   ftp it, compile it (-O3), and link it into another program. All of
  28   the compile-time options default to reasonable values for use on
  29   most platforms.  You might later want to step through various
  30   compile-time and dynamic tuning options.
  31 
  32   For convenience, an include file for code using this malloc is at:
  33      ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
  34   You don't really need this .h file unless you call functions not
  35   defined in your system include files.  The .h file contains only the
  36   excerpts from this file needed for using this malloc on ANSI C/C++
  37   systems, so long as you haven't changed compile-time options about
  38   naming and tuning parameters.  If you do, then you can create your
  39   own malloc.h that does include all settings by cutting at the point
  40   indicated below. Note that you may already by default be using a C
  41   library containing a malloc that is based on some version of this
  42   malloc (for example in linux). You might still want to use the one
  43   in this file to customize settings or to avoid overheads associated
  44   with library versions.
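  As a rough illustration of the quickstart above (the compile command and
  the request size are only examples, not requirements of this library):

    // build:  cc -O3 -c malloc.c          (add -D options as needed)
    // use, from code linked with the resulting object:
    void* p = malloc(100);                 // dlmalloc(100) if USE_DL_PREFIX is set
    if (p != 0)
      free(p);                             // dlfree(p) if USE_DL_PREFIX is set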
  45 
  46 * Vital statistics:
  47 
  48   Supported pointer/size_t representation:       4 or 8 bytes
  49        size_t MUST be an unsigned type of the same width as
  50        pointers. (If you are using an ancient system that declares
  51        size_t as a signed type, or need it to be a different width
  52        than pointers, you can use a previous release of this malloc
  53        (e.g. 2.7.2) supporting these.)
  54 
  55   Alignment:                                     8 bytes (default)
  56        This suffices for nearly all current machines and C compilers.
  57        However, you can define MALLOC_ALIGNMENT to be wider than this
  58        if necessary (up to 128bytes), at the expense of using more space.
  59 
  60   Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
  61                                           8 or 16 bytes (if 8byte sizes)
  62        Each malloced chunk has a hidden word of overhead holding size
  63        and status information, and additional cross-check word
  64        if FOOTERS is defined.
  65 
  66   Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
  67                           8-byte ptrs:  32 bytes    (including overhead)
  68 
  69        Even a request for zero bytes (i.e., malloc(0)) returns a
  70        pointer to something of the minimum allocatable size.
  71        The maximum overhead wastage (i.e., number of extra bytes
  72        allocated beyond those requested in malloc) is less than or equal
  73        to the minimum size, except for requests >= mmap_threshold that
  74        are serviced via mmap(), where the worst case wastage is about
  75        32 bytes plus the remainder from a system page (the minimal
  76        mmap unit); typically 4096 or 8192 bytes.
  77 
  78   Security: static-safe; optionally more or less
  79        The "security" of malloc refers to the ability of malicious
  80        code to accentuate the effects of errors (for example, freeing
  81        space that is not currently malloc'ed or overwriting past the
  82        ends of chunks) in code that calls malloc.  This malloc
  83        guarantees not to modify any memory locations below the base of
  84        heap, i.e., static variables, even in the presence of usage
  85        errors.  The routines additionally detect most improper frees
  86        and reallocs.  All this holds as long as the static bookkeeping
  87        for malloc itself is not corrupted by some other means.  This
  88        is only one aspect of security -- these checks do not, and
  89        cannot, detect all possible programming errors.
  90 
  91        If FOOTERS is defined nonzero, then each allocated chunk
  92        carries an additional check word to verify that it was malloced
  93        from its space.  These check words are the same within each
  94        execution of a program using malloc, but differ across
  95        executions, so externally crafted fake chunks cannot be
  96        freed. This improves security by rejecting frees/reallocs that
  97        could corrupt heap memory, in addition to the checks preventing
  98        writes to statics that are always on.  This may further improve
  99        security at the expense of time and space overhead.  (Note that
 100        FOOTERS may also be worth using with MSPACES.)
 101 
 102        By default detected errors cause the program to abort (calling
 103        "abort()"). You can override this to instead proceed past
 104        errors by defining PROCEED_ON_ERROR.  In this case, a bad free
 105        has no effect, and a malloc that encounters a bad address
 106        caused by user overwrites will ignore the bad address by
 107        dropping pointers and indices to all known memory. This may
 108        be appropriate for programs that should continue if at all
 109        possible in the face of programming errors, although they may
 110        run out of memory because dropped memory is never reclaimed.
 111 
 112        If you don't like either of these options, you can define
 113        CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
 114   else. And if you are sure that your program using malloc has
 115        no errors or vulnerabilities, you can define INSECURE to 1,
 116        which might (or might not) provide a small performance improvement.
 117 
 118   Thread-safety: NOT thread-safe unless USE_LOCKS defined
 119        When USE_LOCKS is defined, each public call to malloc, free,
 120        etc is surrounded with either a pthread mutex or a win32
 121        spinlock (depending on WIN32). This is not especially fast, and
 122        can be a major bottleneck.  It is designed only to provide
 123        minimal protection in concurrent environments, and to provide a
 124        basis for extensions.  If you are using malloc in a concurrent
 125        program, consider instead using nedmalloc
 126        (http://www.nedprod.com/programs/portable/nedmalloc/) or
 127        ptmalloc (See http://www.malloc.de), which are derived
 128        from versions of this malloc.
 129 
 130   System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
 131        This malloc can use unix sbrk or any emulation (invoked using
 132        the CALL_MORECORE macro) and/or mmap/munmap or any emulation
 133        (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
 134        memory.  On most unix systems, it tends to work best if both
 135        MORECORE and MMAP are enabled.  On Win32, it uses emulations
 136        based on VirtualAlloc. It also uses common C library functions
 137        like memset.
 138 
 139   Compliance: I believe it is compliant with the Single Unix Specification
 140        (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
 141        others as well.
 142 
 143 * Overview of algorithms
 144 
 145   This is not the fastest, most space-conserving, most portable, or
 146   most tunable malloc ever written. However it is among the fastest
 147   while also being among the most space-conserving, portable and
 148   tunable.  Consistent balance across these factors results in a good
 149   general-purpose allocator for malloc-intensive programs.
 150 
 151   In most ways, this malloc is a best-fit allocator. Generally, it
 152   chooses the best-fitting existing chunk for a request, with ties
 153   broken in approximately least-recently-used order. (This strategy
 154   normally maintains low fragmentation.) However, for requests less
 155   than 256bytes, it deviates from best-fit when there is not an
 156   exactly fitting available chunk by preferring to use space adjacent
 157   to that used for the previous small request, as well as by breaking
 158   ties in approximately most-recently-used order. (These enhance
 159   locality of series of small allocations.)  And for very large requests
 160   (>= 256Kb by default), it relies on system memory mapping
 161   facilities, if supported.  (This helps avoid carrying around and
 162   possibly fragmenting memory used only for large chunks.)
 163 
 164   All operations (except malloc_stats and mallinfo) have execution
 165   times that are bounded by a constant factor of the number of bits in
 166   a size_t, not counting any clearing in calloc or copying in realloc,
 167   or actions surrounding MORECORE and MMAP that have times
 168   proportional to the number of non-contiguous regions returned by
 169   system allocation routines, which is often just 1. In real-time
 170   applications, you can optionally suppress segment traversals using
 171   NO_SEGMENT_TRAVERSAL, which assures bounded execution even when
 172   system allocators return non-contiguous spaces, at the typical
 173   expense of carrying around more memory and increased fragmentation.
 174 
 175   The implementation is not very modular and seriously overuses
 176   macros. Perhaps someday all C compilers will do as good a job
 177   inlining modular code as can now be done by brute-force expansion,
 178   but now, enough of them seem not to.
 179 
 180   Some compilers issue a lot of warnings about code that is
 181   dead/unreachable only on some platforms, and also about intentional
 182   uses of negation on unsigned types. All known cases of each can be
 183   ignored.
 184 
 185   For a longer but out of date high-level description, see
 186      http://gee.cs.oswego.edu/dl/html/malloc.html
 187 
 188 * MSPACES
 189   If MSPACES is defined, then in addition to malloc, free, etc.,
 190   this file also defines mspace_malloc, mspace_free, etc. These
 191   are versions of malloc routines that take an "mspace" argument
 192   obtained using create_mspace, to control all internal bookkeeping.
 193   If ONLY_MSPACES is defined, only these versions are compiled.
 194   So if you would like to use this allocator for only some allocations,
 195   and your system malloc for others, you can compile with
 196   ONLY_MSPACES and then do something like...
 197     static mspace mymspace = create_mspace(0,0); // for example
 198     #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
 199 
 200   (Note: If you only need one instance of an mspace, you can instead
 201   use "USE_DL_PREFIX" to relabel the global malloc.)
 202 
 203   You can similarly create thread-local allocators by storing
 204   mspaces as thread-locals. For example:
 205     static __thread mspace tlms = 0;
 206     void*  tlmalloc(size_t bytes) {
 207       if (tlms == 0) tlms = create_mspace(0, 0);
 208       return mspace_malloc(tlms, bytes);
 209     }
 210     void  tlfree(void* mem) { mspace_free(tlms, mem); }
 211 
 212   Unless FOOTERS is defined, each mspace is completely independent.
 213   You cannot allocate from one and free to another (although
 214   conformance is only weakly checked, so usage errors are not always
 215   caught). If FOOTERS is defined, then each chunk carries around a tag
 216   indicating its originating mspace, and frees are directed to their
 217   originating spaces.
 218 
 219  -------------------------  Compile-time options ---------------------------
 220 
 221 Be careful in setting #define values for numerical constants of type
 222 size_t. On some systems, literal values are not automatically extended
 223 to size_t precision unless they are explicitly casted. You can also
 224 use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.
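For example, an explicitly sized 256K constant can be written as follows
(MY_THRESHOLD is just an illustrative name; the defaults later in this
file use the same idiom):

  #define MY_THRESHOLD ((size_t)256U * (size_t)1024U)   // promoted to size_t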
 225 
 226 WIN32                    default: defined if _WIN32 defined
 227   Defining WIN32 sets up defaults for MS environment and compilers.
 228   Otherwise defaults are for unix.
 229 
 230 MALLOC_ALIGNMENT         default: (size_t)8
 231   Controls the minimum alignment for malloc'ed chunks.  It must be a
 232   power of two and at least 8, even on machines for which smaller
 233   alignments would suffice. It may be defined as larger than this
 234   though. Note however that code and data structures are optimized for
 235   the case of 8-byte alignment.
 236 
 237 MSPACES                  default: 0 (false)
 238   If true, compile in support for independent allocation spaces.
 239   This is only supported if DL_HAVE_MMAP is true.
 240 
 241 ONLY_MSPACES             default: 0 (false)
 242   If true, only compile in mspace versions, not regular versions.
 243 
 244 USE_LOCKS                default: 0 (false)
 245   Causes each call to each public routine to be surrounded with
 246   pthread or WIN32 mutex lock/unlock. (If set true, this can be
 247   overridden on a per-mspace basis for mspace versions.) If set to a
 248   non-zero value other than 1, locks are used, but their
 249   implementation is left out, so lock functions must be supplied manually.
 250 
 251 USE_SPIN_LOCKS           default: 1 iff USE_LOCKS and on x86 using gcc or MSC
 252   If true, uses custom spin locks for locking. This is currently
 253   supported only for x86 platforms using gcc or recent MS compilers.
 254   Otherwise, posix locks or win32 critical sections are used.
 255 
 256 FOOTERS                  default: 0
 257   If true, provide extra checking and dispatching by placing
 258   information in the footers of allocated chunks. This adds
 259   space and time overhead.
 260 
 261 INSECURE                 default: 0
 262   If true, omit checks for usage errors and heap space overwrites.
 263 
 264 USE_DL_PREFIX            default: NOT defined
 265   Causes compiler to prefix all public routines with the string 'dl'.
 266   This can be useful when you only want to use this malloc in one part
 267   of a program, using your regular system malloc elsewhere.
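  A minimal sketch of mixing the two allocators this way (the sizes are
  arbitrary; only the dl-prefixed names come from this file):

    // compiled with -DUSE_DL_PREFIX
    char* a = (char*)dlmalloc(128);   // served by this allocator
    char* b = (char*)malloc(128);     // served by the system allocator
    dlfree(a);
    free(b);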
 268 
 269 ABORT                    default: defined as abort()
 270   Defines how to abort on failed checks.  On most systems, a failed
 271   check cannot die with an "assert" or even print an informative
 272   message, because the underlying print routines in turn call malloc,
 273   which will fail again.  Generally, the best policy is to simply call
 274   abort(). It's not very useful to do more than this because many
 275   errors due to overwriting will show up as address faults (null, odd
 276   addresses etc) rather than malloc-triggered checks, so will also
 277   abort.  Also, most compilers know that abort() does not return, so
 278   can better optimize code conditionally calling it.
 279 
 280 PROCEED_ON_ERROR           default: defined as 0 (false)
 281   Controls whether detected bad addresses cause them to be bypassed
 282   rather than aborting. If set, detected bad arguments to free and
 283   realloc are ignored. And all bookkeeping information is zeroed out
 284   upon a detected overwrite of freed heap space, thus losing the
 285   ability to ever return it from malloc again, but enabling the
 286   application to proceed. If PROCEED_ON_ERROR is defined, the
 287   static variable malloc_corruption_error_count is compiled in
 288   and can be examined to see if errors have occurred. This option
 289   generates slower code than the default abort policy.
 290 
 291 DL_DEBUG                    default: NOT defined
 292   The DL_DEBUG setting is mainly intended for people trying to modify
 293   this code or diagnose problems when porting to new platforms.
 294   However, it may also be able to better isolate user errors than just
 295   using runtime checks.  The assertions in the check routines spell
 296   out in more detail the assumptions and invariants underlying the
 297   algorithms.  The checking is fairly extensive, and will slow down
 298   execution noticeably. Calling malloc_stats or mallinfo with DL_DEBUG
 299   set will attempt to check every non-mmapped allocated and free chunk
 300   in the course of computing the summaries.
 301 
 302 ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
 303   Debugging assertion failures can be nearly impossible if your
 304   version of the assert macro causes malloc to be called, which will
 305   lead to a cascade of further failures, blowing the runtime stack.
 306   ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
 307   which will usually make debugging easier.
 308 
 309 MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
 310   The action to take before "return 0" when malloc is unable to
 311   return memory because none is available.
 312 
 313 HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
 314   True if this system supports sbrk or an emulation of it.
 315 
 316 MORECORE                  default: sbrk
 317   The name of the sbrk-style system routine to call to obtain more
 318   memory.  See below for guidance on writing custom MORECORE
 319   functions. The type of the argument to sbrk/MORECORE varies across
 320   systems.  It cannot be size_t, because it supports negative
 321   arguments, so it is normally the signed type of the same width as
 322   size_t (sometimes declared as "intptr_t").  It doesn't much matter
 323   though. Internally, we only call it with arguments less than half
 324   the max value of a size_t, which should work across all reasonable
 325   possibilities, although sometimes generating compiler warnings.  See
 326   near the end of this file for guidelines for creating a custom
 327   version of MORECORE.
 328 
 329 MORECORE_CONTIGUOUS       default: 1 (true) if HAVE_MORECORE
 330   If true, take advantage of fact that consecutive calls to MORECORE
 331   with positive arguments always return contiguous increasing
 332   addresses.  This is true of unix sbrk. It does not hurt too much to
 333   set it true anyway, since malloc copes with non-contiguities.
 334   Setting it false when definitely non-contiguous saves time
 335   and possibly wasted space it would take to discover this though.
 336 
 337 MORECORE_CANNOT_TRIM      default: NOT defined
 338   True if MORECORE cannot release space back to the system when given
 339   negative arguments. This is generally necessary only if you are
 340   using a hand-crafted MORECORE function that cannot handle negative
 341   arguments.
 342 
 343 NO_SEGMENT_TRAVERSAL       default: 0
 344   If non-zero, suppresses traversals of memory segments
 345   returned by either MORECORE or CALL_MMAP. This disables
 346   merging of segments that are contiguous, and selectively
 347   releasing them to the OS if unused, but bounds execution times.
 348 
 349 DL_HAVE_MMAP                 default: 1 (true)
 350   True if this system supports mmap or an emulation of it.  If so, and
 351   HAVE_MORECORE is not true, MMAP is used for all system
 352   allocation. If set and HAVE_MORECORE is true as well, MMAP is
 353   primarily used to directly allocate very large blocks. It is also
 354   used as a backup strategy in cases where MORECORE fails to provide
 355   space from system. Note: A single call to MUNMAP is assumed to be
 356   able to unmap memory that may have been allocated using multiple calls
 357   to MMAP, so long as they are adjacent.
 358 
 359 DL_HAVE_MREMAP               default: 1 on linux, else 0
 360   If true realloc() uses mremap() to re-allocate large blocks and
 361   extend or shrink allocation spaces.
 362 
 363 MMAP_CLEARS               default: 1 except on WINCE.
 364   True if mmap clears memory so calloc doesn't need to. This is true
 365   for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
 366 
 367 USE_BUILTIN_FFS            default: 0 (i.e., not used)
 368   Causes malloc to use the builtin ffs() function to compute indices.
 369   Some compilers may recognize and intrinsify ffs to be faster than the
 370   supplied C version. Also, the case of x86 using gcc is special-cased
 371   to an asm instruction, so is already as fast as it can be, and so
 372   this setting has no effect. Similarly for Win32 under recent MS compilers.
 373   (On most x86s, the asm version is only slightly faster than the C version.)
 374 
 375 malloc_getpagesize         default: derive from system includes, or 4096.
 376   The system page size. To the extent possible, this malloc manages
 377   memory from the system in page-size units.  This may be (and
 378   usually is) a function rather than a constant. This is ignored
 379   if WIN32, where page size is determined using getSystemInfo during
 380   initialization.
 381 
 382 USE_DEV_RANDOM             default: 0 (i.e., not used)
 383   Causes malloc to use /dev/random to initialize secure magic seed for
 384   stamping footers. Otherwise, the current time is used.
 385 
 386 NO_MALLINFO                default: 0
 387   If defined, don't compile "mallinfo". This can be a simple way
 388   of dealing with mismatches between system declarations and
 389   those in this file.
 390 
 391 MALLINFO_FIELD_TYPE        default: size_t
 392   The type of the fields in the mallinfo struct. This was originally
 393   defined as "int" in SVID etc, but is more usefully defined as
 394   size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
 395 
 396 REALLOC_ZERO_BYTES_FREES    default: not defined
 397   This should be set if a call to realloc with zero bytes should
 398   be the same as a call to free. Some people think it should. Otherwise,
 399   since this malloc returns a unique pointer for malloc(0), realloc(p, 0)
 400   does so as well.
 401 
 402 LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
 403 LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
 404 LACKS_STDLIB_H                default: NOT defined unless on WIN32
 405   Define these if your system does not have these header files.
 406   You might need to manually insert some of the declarations they provide.
 407 
 408 DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
 409                                 system_info.dwAllocationGranularity in WIN32,
 410                                 otherwise 64K.
 411       Also settable using mallopt(M_GRANULARITY, x)
 412   The unit for allocating and deallocating memory from the system.  On
 413   most systems with contiguous MORECORE, there is no reason to
 414   make this more than a page. However, systems with MMAP tend to
 415   either require or encourage larger granularities.  You can increase
 416   this value to prevent system allocation functions from being called so
 417   often, especially if they are slow.  The value must be at least one
 418   page and must be a power of two.  Setting to 0 causes initialization
 419   to either page size or win32 region size.  (Note: In previous
 420   versions of malloc, the equivalent of this option was called
 421   "TOP_PAD")
 422 
 423 DEFAULT_TRIM_THRESHOLD    default: 2MB
 424       Also settable using mallopt(M_TRIM_THRESHOLD, x)
 425   The maximum amount of unused top-most memory to keep before
 426   releasing via malloc_trim in free().  Automatic trimming is mainly
 427   useful in long-lived programs using contiguous MORECORE.  Because
 428   trimming via sbrk can be slow on some systems, and can sometimes be
 429   wasteful (in cases where programs immediately afterward allocate
 430   more large chunks) the value should be high enough so that your
 431   overall system performance would improve by releasing this much
 432   memory.  As a rough guide, you might set to a value close to the
 433   average size of a process (program) running on your system.
 434   Releasing this much memory would allow such a process to run in
 435   memory.  Generally, it is worth tuning trim thresholds when a
 436   program undergoes phases where several large chunks are allocated
 437   and released in ways that can reuse each other's storage, perhaps
 438   mixed with phases where there are no such chunks at all. The trim
 439   value must be greater than page size to have any useful effect.  To
 440   disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
 441   some people use of mallocing a huge space and then freeing it at
 442   program startup, in an attempt to reserve system memory, doesn't
 443   have the intended effect under automatic trimming, since that memory
 444   will immediately be returned to the system.
 445 
 446 DEFAULT_MMAP_THRESHOLD       default: 256K
 447       Also settable using mallopt(M_MMAP_THRESHOLD, x)
 448   The request size threshold for using MMAP to directly service a
 449   request. Requests of at least this size that cannot be allocated
 450   using already-existing space will be serviced via mmap.  (If enough
 451   normal freed space already exists it is used instead.)  Using mmap
 452   segregates relatively large chunks of memory so that they can be
 453   individually obtained and released from the host system. A request
 454   serviced through mmap is never reused by any other request (at least
 455   not directly; the system may just so happen to remap successive
 456   requests to the same locations).  Segregating space in this way has
 457   the benefits that: Mmapped space can always be individually released
 458   back to the system, which helps keep the system level memory demands
 459   of a long-lived program low.  Also, mapped memory doesn't become
 460   `locked' between other chunks, as can happen with normally allocated
 461   chunks, which means that even trimming via malloc_trim would not
 462   release them.  However, it has the disadvantage that the space
 463   cannot be reclaimed, consolidated, and then used to service later
 464   requests, as happens with normal chunks.  The advantages of mmap
 465   nearly always outweigh disadvantages for "large" chunks, but the
 466   value of "large" may vary across systems.  The default is an
 467   empirically derived value that works well in most systems. You can
 468   disable mmap by setting to MAX_SIZE_T.
 469 
 470 MAX_RELEASE_CHECK_RATE   default: 255 if DL_HAVE_MMAP, else MAX_SIZE_T
 471   The number of consolidated frees between checks to release
 472   unused segments when freeing. When using non-contiguous segments,
 473   especially with multiple mspaces, checking only for topmost space
 474   doesn't always suffice to trigger trimming. To compensate for this,
 475   free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
 476   current number of segments, if greater) try to release unused
 477   segments to the OS when freeing chunks that result in
 478   consolidation. The best value for this parameter is a compromise
 479   between slowing down frees with relatively costly checks that
 480   rarely trigger versus holding on to unused memory. To effectively
 481   disable, set to MAX_SIZE_T. This may lead to a very slight speed
 482   improvement at the expense of carrying around more memory.
 483 */
 484 
 485 #if defined(DARWIN) || defined(_DARWIN)
 486 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
 487 #ifndef HAVE_MORECORE
 488 #define HAVE_MORECORE 0
 489 #define DL_HAVE_MMAP 1
 490 #endif  /* HAVE_MORECORE */
 491 #endif  /* DARWIN */
 492 
 493 #ifndef LACKS_SYS_TYPES_H
 494 #include <sys/types.h>  /* For size_t */
 495 #endif  /* LACKS_SYS_TYPES_H */
 496 
 497 /* The maximum possible size_t value has all bits set */
 498 #define MAX_SIZE_T           (~(size_t)0)
 499 
 500 #ifndef ONLY_MSPACES
 501 #define ONLY_MSPACES 0
 502 #endif  /* ONLY_MSPACES */
 503 #ifndef MSPACES
 504 #if ONLY_MSPACES
 505 #define MSPACES 1
 506 #else   /* ONLY_MSPACES */
 507 #define MSPACES 0
 508 #endif  /* ONLY_MSPACES */
 509 #endif  /* MSPACES */
 510 #ifndef MALLOC_ALIGNMENT
 511 #define MALLOC_ALIGNMENT ((size_t)8U)
 512 #endif  /* MALLOC_ALIGNMENT */
 513 #ifndef FOOTERS
 514 #define FOOTERS 0
 515 #endif  /* FOOTERS */
 516 #ifndef ABORT
 517 #define ABORT  abort()
 518 #endif  /* ABORT */
 519 #ifndef ABORT_ON_ASSERT_FAILURE
 520 #define ABORT_ON_ASSERT_FAILURE 1
 521 #endif  /* ABORT_ON_ASSERT_FAILURE */
 522 #ifndef PROCEED_ON_ERROR
 523 #define PROCEED_ON_ERROR 0
 524 #endif  /* PROCEED_ON_ERROR */
 525 #ifndef USE_LOCKS
 526 #define USE_LOCKS 0
 527 #endif  /* USE_LOCKS */
 528 #ifndef USE_SPIN_LOCKS
 529 #if USE_LOCKS && ((defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) || (defined(_MSC_VER) && _MSC_VER>=1310))
 530 #define USE_SPIN_LOCKS 1
 531 #else
 532 #define USE_SPIN_LOCKS 0
 533 #endif /* USE_LOCKS && ... */
 534 #endif /* USE_SPIN_LOCKS */
 535 #ifndef INSECURE
 536 #define INSECURE 0
 537 #endif  /* INSECURE */
 538 #ifndef DL_HAVE_MMAP
 539 #define DL_HAVE_MMAP 1
 540 #endif  /* DL_HAVE_MMAP */
 541 #ifndef MMAP_CLEARS
 542 #define MMAP_CLEARS 1
 543 #endif  /* MMAP_CLEARS */
 544 #ifndef DL_HAVE_MREMAP
 545 #ifdef linux
 546 #define DL_HAVE_MREMAP 1
 547 #else   /* linux */
 548 #define DL_HAVE_MREMAP 0
 549 #endif  /* linux */
 550 #endif  /* DL_HAVE_MREMAP */
 551 #ifndef MALLOC_FAILURE_ACTION
 552 #define MALLOC_FAILURE_ACTION  errno = ENOMEM;
 553 #endif  /* MALLOC_FAILURE_ACTION */
 554 #ifndef HAVE_MORECORE
 555 #if ONLY_MSPACES
 556 #define HAVE_MORECORE 0
 557 #else   /* ONLY_MSPACES */
 558 #define HAVE_MORECORE 1
 559 #endif  /* ONLY_MSPACES */
 560 #endif  /* HAVE_MORECORE */
 561 #if !HAVE_MORECORE
 562 #define MORECORE_CONTIGUOUS 0
 563 #else   /* !HAVE_MORECORE */
 564 #ifndef MORECORE
 565 #define MORECORE sbrk
 566 #endif  /* MORECORE */
 567 #ifndef MORECORE_CONTIGUOUS
 568 #define MORECORE_CONTIGUOUS 1
 569 #endif  /* MORECORE_CONTIGUOUS */
 570 #endif  /* HAVE_MORECORE */
 571 #ifndef DEFAULT_GRANULARITY
 572 #if MORECORE_CONTIGUOUS
 573 #define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
 574 #else   /* MORECORE_CONTIGUOUS */
 575 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
 576 #endif  /* MORECORE_CONTIGUOUS */
 577 #endif  /* DEFAULT_GRANULARITY */
 578 #ifndef DEFAULT_TRIM_THRESHOLD
 579 #ifndef MORECORE_CANNOT_TRIM
 580 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
 581 #else   /* MORECORE_CANNOT_TRIM */
 582 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
 583 #endif  /* MORECORE_CANNOT_TRIM */
 584 #endif  /* DEFAULT_TRIM_THRESHOLD */
 585 #ifndef DEFAULT_MMAP_THRESHOLD
 586 #if DL_HAVE_MMAP
 587 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
 588 #else   /* DL_HAVE_MMAP */
 589 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
 590 #endif  /* DL_HAVE_MMAP */
 591 #endif  /* DEFAULT_MMAP_THRESHOLD */
 592 #ifndef MAX_RELEASE_CHECK_RATE
 593 #if DL_HAVE_MMAP
 594 #define MAX_RELEASE_CHECK_RATE 255
 595 #else
 596 #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
 597 #endif /* DL_HAVE_MMAP */
 598 #endif /* MAX_RELEASE_CHECK_RATE */
 599 #ifndef USE_BUILTIN_FFS
 600 #define USE_BUILTIN_FFS 0
 601 #endif  /* USE_BUILTIN_FFS */
 602 #ifndef USE_DEV_RANDOM
 603 #define USE_DEV_RANDOM 0
 604 #endif  /* USE_DEV_RANDOM */
 605 #ifndef NO_MALLINFO
 606 #define NO_MALLINFO 0
 607 #endif  /* NO_MALLINFO */
 608 #ifndef MALLINFO_FIELD_TYPE
 609 #define MALLINFO_FIELD_TYPE size_t
 610 #endif  /* MALLINFO_FIELD_TYPE */
 611 #ifndef NO_SEGMENT_TRAVERSAL
 612 #define NO_SEGMENT_TRAVERSAL 0
 613 #endif /* NO_SEGMENT_TRAVERSAL */
 614 
 615 /*
 616   mallopt tuning options.  SVID/XPG defines four standard parameter
 617   numbers for mallopt, normally defined in malloc.h.  None of these
 618   are used in this malloc, so setting them has no effect. But this
 619   malloc does support the following options.
 620 */
 621 
 622 #define M_TRIM_THRESHOLD     (-1)
 623 #define M_GRANULARITY        (-2)
 624 #define M_MMAP_THRESHOLD     (-3)
 625 
 626 /* ------------------------ Mallinfo declarations ------------------------ */
 627 
 628 #if !NO_MALLINFO
 629 /*
 630   This version of malloc supports the standard SVID/XPG mallinfo
 631   routine that returns a struct containing usage properties and
 632   statistics. It should work on any system that has a
 633   /usr/include/malloc.h defining struct mallinfo.  The main
 634   declaration needed is the mallinfo struct that is returned (by-copy)
 635   by mallinfo().  The mallinfo struct contains a bunch of fields that
 636   are not even meaningful in this version of malloc.  These fields are
 637   instead filled by mallinfo() with other numbers that might be of
 638   interest.
 639 
 640   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
 641   /usr/include/malloc.h file that includes a declaration of struct
 642   mallinfo.  If so, it is included; else a compliant version is
 643   declared below.  These must be precisely the same for mallinfo() to
 644   work.  The original SVID version of this struct, defined on most
 645   systems with mallinfo, declares all fields as ints. But some others
 646   define them as unsigned long. If your system defines the fields using a
 647   type of different width than listed here, you MUST #include your
 648   system version and #define HAVE_USR_INCLUDE_MALLOC_H.
 649 */
 650 
 651 /* #define HAVE_USR_INCLUDE_MALLOC_H */
 652 
 653 #ifdef HAVE_USR_INCLUDE_MALLOC_H
 654 #include "/usr/include/malloc.h"
 655 #else /* HAVE_USR_INCLUDE_MALLOC_H */
 656 
 657 struct mallinfo {
 658   MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
 659   MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
 660   MALLINFO_FIELD_TYPE smblks;   /* always 0 */
 661   MALLINFO_FIELD_TYPE hblks;    /* always 0 */
 662   MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
 663   MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
 664   MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
 665   MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
 666   MALLINFO_FIELD_TYPE fordblks; /* total free space */
 667   MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
 668 };
 669 
 670 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
 671 #endif /* NO_MALLINFO */
 672 
 673 /*
 674   Try to persuade compilers to inline. The most critical functions for
 675   inlining are defined as macros, so these aren't used for them.
 676 */
 677 
 678 #ifndef FORCEINLINE
 679   #if defined(__GNUC__)
 680 #define FORCEINLINE __inline __attribute__ ((always_inline))
 681   #elif defined(_MSC_VER)
 682     #define FORCEINLINE __forceinline
 683   #endif
 684 #endif
 685 #ifndef NOINLINE
 686   #if defined(__GNUC__)
 687     #define NOINLINE __attribute__ ((noinline))
 688   #elif defined(_MSC_VER)
 689     #define NOINLINE __declspec(noinline)
 690   #else
 691     #define NOINLINE
 692   #endif
 693 #endif
 694 
 695 #ifdef __cplusplus
 696 extern "C" {
 697 #ifndef FORCEINLINE
 698  #define FORCEINLINE inline
 699 #endif
 700 #endif /* __cplusplus */
 701 #ifndef FORCEINLINE
 702  #define FORCEINLINE
 703 #endif
 704 
 705 #if !ONLY_MSPACES
 706 
 707 /* ------------------- Declarations of public routines ------------------- */
 708 
 709 #ifndef USE_DL_PREFIX
 710 #define dlcalloc               calloc
 711 #define dlfree                 free
 712 #define dlmalloc               malloc
 713 #define dlmemalign             memalign
 714 #define dlrealloc              realloc
 715 #define dlvalloc               valloc
 716 #define dlpvalloc              pvalloc
 717 #define dlmallinfo             mallinfo
 718 #define dlmallopt              mallopt
 719 #define dlmalloc_trim          malloc_trim
 720 #define dlmalloc_stats         malloc_stats
 721 #define dlmalloc_usable_size   malloc_usable_size
 722 #define dlmalloc_footprint     malloc_footprint
 723 #define dlmalloc_max_footprint malloc_max_footprint
 724 #define dlindependent_calloc   independent_calloc
 725 #define dlindependent_comalloc independent_comalloc
 726 #endif /* USE_DL_PREFIX */
 727 
 728 
 729 /*
 730   malloc(size_t n)
 731   Returns a pointer to a newly allocated chunk of at least n bytes, or
 732   null if no space is available, in which case errno is set to ENOMEM
 733   on ANSI C systems.
 734 
 735   If n is zero, malloc returns a minimum-sized chunk. (The minimum
 736   size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
 737   systems.)  Note that size_t is an unsigned type, so calls with
 738   arguments that would be negative if signed are interpreted as
 739   requests for huge amounts of space, which will often fail. The
 740   maximum supported value of n differs across systems, but is in all
 741   cases less than the maximum representable value of a size_t.
 742 */
 743 void* dlmalloc(size_t);
 744 
 745 /*
 746   free(void* p)
 747   Releases the chunk of memory pointed to by p, that had been previously
 748   allocated using malloc or a related routine such as realloc.
 749   It has no effect if p is null. If p was not malloced or already
 750   freed, free(p) will by default cause the current program to abort.
 751 */
 752 void  dlfree(void*);
 753 
 754 /*
 755   calloc(size_t n_elements, size_t element_size);
 756   Returns a pointer to n_elements * element_size bytes, with all locations
 757   set to zero.
 758 */
 759 void* dlcalloc(size_t, size_t);
 760 
 761 /*
 762   realloc(void* p, size_t n)
 763   Returns a pointer to a chunk of size n that contains the same data
 764   as does chunk p up to the minimum of (n, p's size) bytes, or null
 765   if no space is available.
 766 
 767   The returned pointer may or may not be the same as p. The algorithm
 768   prefers extending p in most cases when possible, otherwise it
 769   employs the equivalent of a malloc-copy-free sequence.
 770 
 771   If p is null, realloc is equivalent to malloc.
 772 
 773   If space is not available, realloc returns null, errno is set (if on
 774   ANSI) and p is NOT freed.
 775 
 776   if n is for fewer bytes than already held by p, the newly unused
 777   space is lopped off and freed if possible.  realloc with a size
 778   argument of zero (re)allocates a minimum-sized chunk.
 779 
 780   The old unix realloc convention of allowing the last-free'd chunk
 781   to be used as an argument to realloc is not supported.
 782 */
 783 
 784 void* dlrealloc(void*, size_t);
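/*
  A minimal usage sketch of the rule above that a failed realloc leaves the
  original block allocated (p, n and handle_oom are illustrative names):

    void* bigger = dlrealloc(p, 2 * n);
    if (bigger != 0)
      p = bigger;        // success: the block may or may not have moved
    else
      handle_oom(p);     // failure: p is still valid and must still be freed
*/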
 785 
 786 /*
 787   memalign(size_t alignment, size_t n);
 788   Returns a pointer to a newly allocated chunk of n bytes, aligned
 789   in accord with the alignment argument.
 790 
 791   The alignment argument should be a power of two. If the argument is
 792   not a power of two, the nearest greater power is used.
 793   8-byte alignment is guaranteed by normal malloc calls, so don't
 794   bother calling memalign with an argument of 8 or less.
 795 
 796   Overreliance on memalign is a sure way to fragment space.
 797 */
 798 void* dlmemalign(size_t, size_t);
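/*
  For illustration, requesting a 64-byte-aligned block (64 and 1000 are
  arbitrary example values; any power of two >= 8 is a valid alignment):

    void* buf = dlmemalign(64, 1000);   // (size_t)buf is a multiple of 64
    if (buf != 0)
      dlfree(buf);
*/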
 799 
 800 /*
 801   valloc(size_t n);
 802   Equivalent to memalign(pagesize, n), where pagesize is the page
 803   size of the system. If the pagesize is unknown, 4096 is used.
 804 */
 805 void* dlvalloc(size_t);
 806 
 807 /*
 808   mallopt(int parameter_number, int parameter_value)
 809   Sets tunable parameters. The format is to provide a
 810   (parameter-number, parameter-value) pair.  mallopt then sets the
 811   corresponding parameter to the argument value if it can (i.e., so
 812   long as the value is meaningful), and returns 1 if successful else
 813   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
 814   normally defined in malloc.h.  None of these are used in this malloc,
 815   so setting them has no effect. But this malloc also supports other
 816   options in mallopt. See below for details.  Briefly, supported
 817   parameters are as follows (listed defaults are for "typical"
 818   configurations).
 819 
 820   Symbol            param #  default    allowed param values
 821   M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
 822   M_GRANULARITY        -2     page size   any power of 2 >= page size
 823   M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
 824 */
 825 int dlmallopt(int, int);
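/*
  An illustrative tuning sequence using the parameter numbers listed above
  (the particular values are examples only):

    dlmallopt(M_TRIM_THRESHOLD, 1024*1024);   // keep at most ~1MB of free top space
    dlmallopt(M_MMAP_THRESHOLD, 512*1024);    // mmap requests of 512K and larger
    if (dlmallopt(M_GRANULARITY, 64*1024) == 0) {
      // value rejected (e.g. not a power of two >= page size); old setting kept
    }
*/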
 826 
 827 /*
 828   malloc_footprint();
 829   Returns the number of bytes obtained from the system.  The total
 830   number of bytes allocated by malloc, realloc etc., is less than this
 831   value. Unlike mallinfo, this function returns only a precomputed
 832   result, so can be called frequently to monitor memory consumption.
 833   Even if locks are otherwise defined, this function does not use them,
 834   so results might not be up to date.
 835 */
 836 size_t dlmalloc_footprint(void);
 837 
 838 /*
 839   malloc_max_footprint();
 840   Returns the maximum number of bytes obtained from the system. This
 841   value will be greater than current footprint if deallocated space
 842   has been reclaimed by the system. The peak number of bytes allocated
 843   by malloc, realloc etc., is less than this value. Unlike mallinfo,
 844   this function returns only a precomputed result, so can be called
 845   frequently to monitor memory consumption.  Even if locks are
 846   otherwise defined, this function does not use them, so results might
 847   not be up to date.
 848 */
 849 size_t dlmalloc_max_footprint(void);
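/*
  Because both calls return precomputed values, they are cheap enough to
  poll; a sketch of logging them (fprintf to stderr is just an example):

    fprintf(stderr, "system bytes: current %lu, peak %lu\n",
            (unsigned long)dlmalloc_footprint(),
            (unsigned long)dlmalloc_max_footprint());
*/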
 850 
 851 #if !NO_MALLINFO
 852 /*
 853   mallinfo()
 854   Returns (by copy) a struct containing various summary statistics:
 855 
 856   arena:     current total non-mmapped bytes allocated from system
 857   ordblks:   the number of free chunks
 858   smblks:    always zero.
 859   hblks:     current number of mmapped regions
 860   hblkhd:    total bytes held in mmapped regions
 861   usmblks:   the maximum total allocated space. This will be greater
 862                 than current total if trimming has occurred.
 863   fsmblks:   always zero
 864   uordblks:  current total allocated space (normal or mmapped)
 865   fordblks:  total free space
 866   keepcost:  the maximum number of bytes that could ideally be released
 867                back to system via malloc_trim. ("ideally" means that
 868                it ignores page restrictions etc.)
 869 
 870   Because these fields are ints, but internal bookkeeping may
 871   be kept as longs, the reported values may wrap around zero and
 872   thus be inaccurate.
 873 */
 874 struct mallinfo dlmallinfo(void);
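/*
  For example, to log a few of the fields described above (the casts are
  shown because MALLINFO_FIELD_TYPE defaults to size_t in this file):

    struct mallinfo mi = dlmallinfo();
    fprintf(stderr, "allocated %lu, free %lu, trimmable %lu\n",
            (unsigned long)mi.uordblks,
            (unsigned long)mi.fordblks,
            (unsigned long)mi.keepcost);
*/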
 875 #endif /* NO_MALLINFO */
 876 
 877 /*
 878   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
 879 
 880   independent_calloc is similar to calloc, but instead of returning a
 881   single cleared space, it returns an array of pointers to n_elements
 882   independent elements that can hold contents of size elem_size, each
 883   of which starts out cleared, and can be independently freed,
 884   realloc'ed etc. The elements are guaranteed to be adjacently
 885   allocated (this is not guaranteed to occur with multiple callocs or
 886   mallocs), which may also improve cache locality in some
 887   applications.
 888 
 889   The "chunks" argument is optional (i.e., may be null, which is
 890   probably the most typical usage). If it is null, the returned array
 891   is itself dynamically allocated and should also be freed when it is
 892   no longer needed. Otherwise, the chunks array must be of at least
 893   n_elements in length. It is filled in with the pointers to the
 894   chunks.
 895 
 896   In either case, independent_calloc returns this pointer array, or
 897   null if the allocation failed.  If n_elements is zero and "chunks"
 898   is null, it returns a chunk representing an array with zero elements
 899   (which should be freed if not wanted).
 900 
 901   Each element must be individually freed when it is no longer
 902   needed. If you'd like to instead be able to free all at once, you
 903   should instead use regular calloc and assign pointers into this
 904   space to represent elements.  (In this case though, you cannot
 905   independently free elements.)
 906 
 907   independent_calloc simplifies and speeds up implementations of many
 908   kinds of pools.  It may also be useful when constructing large data
 909   structures that initially have a fixed number of fixed-sized nodes,
 910   but the number is not known at compile time, and some of the nodes
 911   may later need to be freed. For example:
 912 
 913   struct Node { int item; struct Node* next; };
 914 
 915   struct Node* build_list() {
 916     struct Node** pool;
 917     int n = read_number_of_nodes_needed();
 918     if (n <= 0) return 0;
 919     pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
 920     if (pool == 0) die();
 921     // organize into a linked list...
 922     struct Node* first = pool[0];
 923     for (int i = 0; i < n-1; ++i)
 924       pool[i]->next = pool[i+1];
 925     free(pool);     // Can now free the array (or not, if it is needed later)
 926     return first;
 927   }
 928 */
 929 void** dlindependent_calloc(size_t, size_t, void**);
 930 
 931 /*
 932   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
 933 
 934   independent_comalloc allocates, all at once, a set of n_elements
 935   chunks with sizes indicated in the "sizes" array.    It returns
 936   an array of pointers to these elements, each of which can be
 937   independently freed, realloc'ed etc. The elements are guaranteed to
 938   be adjacently allocated (this is not guaranteed to occur with
 939   multiple callocs or mallocs), which may also improve cache locality
 940   in some applications.
 941 
 942   The "chunks" argument is optional (i.e., may be null). If it is null
 943   the returned array is itself dynamically allocated and should also
 944   be freed when it is no longer needed. Otherwise, the chunks array
 945   must be of at least n_elements in length. It is filled in with the
 946   pointers to the chunks.
 947 
 948   In either case, independent_comalloc returns this pointer array, or
 949   null if the allocation failed.  If n_elements is zero and chunks is
 950   null, it returns a chunk representing an array with zero elements
 951   (which should be freed if not wanted).
 952 
 953   Each element must be individually freed when it is no longer
 954   needed. If you'd like to instead be able to free all at once, you
 955   should instead use a single regular malloc, and assign pointers at
 956   particular offsets in the aggregate space. (In this case though, you
 957   cannot independently free elements.)
 958 
 959   independent_comalloc differs from independent_calloc in that each
 960   element may have a different size, and also that it does not
 961   automatically clear elements.
 962 
 963   independent_comalloc can be used to speed up allocation in cases
 964   where several structs or objects must always be allocated at the
 965   same time.  For example:
 966 
 967   struct Head { ... }
 968   struct Foot { ... }
 969 
 970   void send_message(char* msg) {
 971     int msglen = strlen(msg);
 972     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
 973     void* chunks[3];
 974     if (independent_comalloc(3, sizes, chunks) == 0)
 975       die();
 976     struct Head* head = (struct Head*)(chunks[0]);
 977     char*        body = (char*)(chunks[1]);
 978     struct Foot* foot = (struct Foot*)(chunks[2]);
 979     // ...
 980   }
 981 
 982   In general though, independent_comalloc is worth using only for
 983   larger values of n_elements. For small values, you probably won't
 984   detect enough difference from series of malloc calls to bother.
 985 
 986   Overuse of independent_comalloc can increase overall memory usage,
 987   since it cannot reuse existing noncontiguous small chunks that
 988   might be available for some of the elements.
 989 */
 990 void** dlindependent_comalloc(size_t, size_t*, void**);
 991 
 992 
 993 /*
 994   pvalloc(size_t n);
 995   Equivalent to valloc(minimum-page-that-holds(n)), that is,
 996   round up n to nearest pagesize.
 997  */
 998 void*  dlpvalloc(size_t);
 999 
1000 /*
1001   malloc_trim(size_t pad);
1002 
1003   If possible, gives memory back to the system (via negative arguments
1004   to sbrk) if there is unused memory at the `high' end of the malloc
1005   pool or in unused MMAP segments. You can call this after freeing
1006   large blocks of memory to potentially reduce the system-level memory
1007   requirements of a program. However, it cannot guarantee to reduce
1008   memory. Under some allocation patterns, some large free blocks of
1009   memory will be locked between two used chunks, so they cannot be
1010   given back to the system.
1011 
1012   The `pad' argument to malloc_trim represents the amount of free
1013   trailing space to leave untrimmed. If this argument is zero, only
1014   the minimum amount of memory to maintain internal data structures
1015   will be left. Non-zero arguments can be supplied to maintain enough
1016   trailing space to service future expected allocations without having
1017   to re-obtain memory from the system.
1018 
1019   Malloc_trim returns 1 if it actually released any memory, else 0.
1020 */
1021 int  dlmalloc_trim(size_t);
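/*
  A typical (illustrative) pattern: after freeing a batch of large blocks,
  ask for unused memory to be returned while keeping ~64K of slack
  (blocks and nblocks are hypothetical names):

    for (int i = 0; i < nblocks; ++i)
      dlfree(blocks[i]);
    if (dlmalloc_trim(64 * 1024)) {
      // some memory was actually released back to the system
    }
*/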
1022 
1023 /*
1024   malloc_usable_size(void* p);
1025 
1026   Returns the number of bytes you can actually use in
1027   an allocated chunk, which may be more than you requested (although
1028   often not) due to alignment and minimum size constraints.
1029   You can use this many bytes without worrying about
1030   overwriting other allocated objects. This is not a particularly great
1031   programming practice. malloc_usable_size can be more useful in
1032   debugging and assertions, for example:
1033 
1034   p = malloc(n);
1035   assert(malloc_usable_size(p) >= 256);
1036 */
1037 size_t dlmalloc_usable_size(void*);
1038 
1039 /*
1040   malloc_stats();
1041   Prints on stderr the amount of space obtained from the system (both
1042   via sbrk and mmap), the maximum amount (which may be more than
1043   current if malloc_trim and/or munmap got called), and the current
1044   number of bytes allocated via malloc (or realloc, etc) but not yet
1045   freed. Note that this is the number of bytes allocated, not the
1046   number requested. It will be larger than the number requested
1047   because of alignment and bookkeeping overhead. Because it includes
1048   alignment wastage as being in use, this figure may be greater than
1049   zero even when no user-level chunks are allocated.
1050 
1051   The reported current and maximum system memory can be inaccurate if
1052   a program makes other calls to system memory allocation functions
1053   (normally sbrk) outside of malloc.
1054 
1055   malloc_stats prints only the most commonly interesting statistics.
1056   More information can be obtained by calling mallinfo.
1057 */
1058 void  dlmalloc_stats(void);
1059 
1060 #endif /* ONLY_MSPACES */
1061 
1062 #if MSPACES
1063 
1064 /*
1065   mspace is an opaque type representing an independent
1066   region of space that supports mspace_malloc, etc.
1067 */
1068 typedef void* mspace;
1069 
1070 /*
1071   create_mspace creates and returns a new independent space with the
1072   given initial capacity, or, if 0, the default granularity size.  It
1073   returns null if there is no system memory available to create the
1074   space.  If argument locked is non-zero, the space uses a separate
1075   lock to control access. The capacity of the space will grow
1076   dynamically as needed to service mspace_malloc requests.  You can
1077   control the sizes of incremental increases of this space by
1078   compiling with a different DEFAULT_GRANULARITY or dynamically
1079   setting with mallopt(M_GRANULARITY, value).
1080 */
1081 mspace create_mspace(size_t capacity, int locked);
1082 
1083 /*
1084   destroy_mspace destroys the given space, and attempts to return all
1085   of its memory back to the system, returning the total number of
1086   bytes freed. After destruction, the results of access to all memory
1087   used by the space become undefined.
1088 */
1089 size_t destroy_mspace(mspace msp);
1090 
1091 /*
1092   create_mspace_with_base uses the memory supplied as the initial base
1093   of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1094   space is used for bookkeeping, so the capacity must be at least this
1095   large. (Otherwise 0 is returned.) When this initial space is
1096   exhausted, additional memory will be obtained from the system.
1097   Destroying this space will deallocate all additionally allocated
1098   space (if possible) but not the initial base.
1099 */
1100 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
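/*
  A minimal sketch combining the mspace routines declared in this section
  (the 8MB static arena is an arbitrary illustration):

    static char arena[8 * 1024 * 1024];

    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
    if (ms != 0) {
      void* p = mspace_malloc(ms, 1000);
      mspace_free(ms, p);
      destroy_mspace(ms);   // releases everything obtained beyond the base
    }
*/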
1101 
1102 /*
1103   mspace_malloc behaves as malloc, but operates within
1104   the given space.
1105 */
1106 void* mspace_malloc(mspace msp, size_t bytes);
1107 
1108 /*
1109   mspace_free behaves as free, but operates within
1110   the given space.
1111 
1112   If compiled with FOOTERS==1, mspace_free is not actually needed.
1113   free may be called instead of mspace_free because freed chunks from
1114   any space are handled by their originating spaces.
1115 */
1116 void mspace_free(mspace msp, void* mem);
1117 
1118 /*
1119   mspace_realloc behaves as realloc, but operates within
1120   the given space.
1121 
1122   If compiled with FOOTERS==1, mspace_realloc is not actually
1123   needed.  realloc may be called instead of mspace_realloc because
1124   realloced chunks from any space are handled by their originating
1125   spaces.
1126 */
1127 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
1128 
1129 /*
1130   mspace_calloc behaves as calloc, but operates within
1131   the given space.
1132 */
1133 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1134 
1135 /*
1136   mspace_memalign behaves as memalign, but operates within
1137   the given space.
1138 */
1139 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1140 
1141 /*
1142   mspace_independent_calloc behaves as independent_calloc, but
1143   operates within the given space.
1144 */
1145 void** mspace_independent_calloc(mspace msp, size_t n_elements,
1146                                  size_t elem_size, void* chunks[]);
1147 
1148 /*
1149   mspace_independent_comalloc behaves as independent_comalloc, but
1150   operates within the given space.
1151 */
1152 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
1153                                    size_t sizes[], void* chunks[]);
1154 
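     /*
       Illustrative sketch: co-allocating a header object and its data
       buffer from an existing mspace "msp", in the style of
       independent_comalloc.  "struct node" is a hypothetical caller-side
       type; each element is later freed individually.

         size_t sizes[2] = { sizeof(struct node), 4096 };
         void*  parts[2];
         if (mspace_independent_comalloc(msp, 2, sizes, parts) != 0) {
           struct node* n   = (struct node*) parts[0];
           char*        buf = (char*) parts[1];
           // ... use n and buf ...
           mspace_free(msp, n);
           mspace_free(msp, buf);
         }
     */
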
1155 /*
1156   mspace_footprint() returns the number of bytes obtained from the
1157   system for this space.
1158 */
1159 size_t mspace_footprint(mspace msp);
1160 
1161 /*
1162   mspace_max_footprint() returns the peak number of bytes obtained from the
1163   system for this space.
1164 */
1165 size_t mspace_max_footprint(mspace msp);
1166 
1167 
1168 #if !NO_MALLINFO
1169 /*
1170   mspace_mallinfo behaves as mallinfo, but reports properties of
1171   the given space.
1172 */
1173 struct mallinfo mspace_mallinfo(mspace msp);
1174 #endif /* NO_MALLINFO */
1175 
1176 /*
1177   mspace_malloc_stats behaves as malloc_stats, but reports
1178   properties of the given space.
1179 */
1180 void mspace_malloc_stats(mspace msp);
1181 
1182 /*
1183   mspace_trim behaves as malloc_trim, but
1184   operates within the given space.
1185 */
1186 int mspace_trim(mspace msp, size_t pad);
1187 
1188 /*
1189   An alias for mallopt.
1190 */
1191 int mspace_mallopt(int, int);
1192 
1193 #endif /* MSPACES */
1194 
1195 #ifdef __cplusplus
1196 }  /* end of extern "C" */
1197 #endif /* __cplusplus */
1198 
1199 /*
1200   ========================================================================
1201   To make a fully customizable malloc.h header file, cut everything
1202   above this line, put into file malloc.h, edit to suit, and #include it
1203   on the next line, as well as in programs that use this malloc.
1204   ========================================================================
1205 */
1206 
1207 /* #include "malloc.h" */
1208 
1209 /*------------------------------ internal #includes ---------------------- */
1210 
1211 #ifdef WIN32
1212 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
1213 #endif /* WIN32 */
1214 
1215 #include <stdio.h>       /* for printing in malloc_stats */
1216 
1217 #ifndef LACKS_ERRNO_H
1218 #include <errno.h>       /* for MALLOC_FAILURE_ACTION */
1219 #endif /* LACKS_ERRNO_H */
1220 #if FOOTERS
1221 #include <time.h>        /* for magic initialization */
1222 #endif /* FOOTERS */
1223 #ifndef LACKS_STDLIB_H
1224 #include <stdlib.h>      /* for abort() */
1225 #endif /* LACKS_STDLIB_H */
1226 #ifdef DL_DEBUG
1227 #if ABORT_ON_ASSERT_FAILURE
1228 #define dl_assert(x) if(!(x)) ABORT
1229 #else /* ABORT_ON_ASSERT_FAILURE */
1230 #include <assert.h>
     #define dl_assert(x) assert(x)
1231 #endif /* ABORT_ON_ASSERT_FAILURE */
1232 #else  /* DL_DEBUG */
1233 #define dl_assert(x) assert(x)
1234 #include <assert.h>
1235 #endif /* DL_DEBUG */
1236 #ifndef LACKS_STRING_H
1237 #include <string.h>      /* for memset etc */
1238 #endif  /* LACKS_STRING_H */
1239 #if USE_BUILTIN_FFS
1240 #ifndef LACKS_STRINGS_H
1241 #include <strings.h>     /* for ffs */
1242 #endif /* LACKS_STRINGS_H */
1243 #endif /* USE_BUILTIN_FFS */
1244 #if DL_HAVE_MMAP
1245 #ifndef LACKS_SYS_MMAN_H
1246 #include <sys/mman.h>    /* for mmap */
1247 #endif /* LACKS_SYS_MMAN_H */
1248 #ifndef LACKS_FCNTL_H
1249 #include <fcntl.h>
1250 #endif /* LACKS_FCNTL_H */
1251 #endif /* DL_HAVE_MMAP */
1252 #if HAVE_MORECORE
1253 #ifndef LACKS_UNISTD_H
1254 #include <unistd.h>     /* for sbrk */
1255 #else /* LACKS_UNISTD_H */
1256 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1257 extern void*     sbrk(ptrdiff_t);
1258 #endif /* FreeBSD etc */
1259 #endif /* LACKS_UNISTD_H */
1260 #endif /* HAVE_MORECORE */
1261 
1262 /* Declarations for locking */
1263 #if USE_LOCKS
1264 #ifndef WIN32
1265 #include <pthread.h>
1266 #if defined (__SVR4) && defined (__sun)  /* solaris */
1267 #include <thread.h>
1268 #endif /* solaris */
1269 #else
1270 #ifndef _M_AMD64
1271 /* These are already defined on AMD64 builds */
1272 #ifdef __cplusplus
1273 extern "C" {
1274 #endif /* __cplusplus */
1275 LONG __cdecl _InterlockedCompareExchange(LPLONG volatile Dest, LONG Exchange, LONG Comp);
1276 LONG __cdecl _InterlockedExchange(LPLONG volatile Target, LONG Value);
1277 #ifdef __cplusplus
1278 }
1279 #endif /* __cplusplus */
1280 #endif /* _M_AMD64 */
1281 #pragma intrinsic (_InterlockedCompareExchange)
1282 #pragma intrinsic (_InterlockedExchange)
1283 #define interlockedcompareexchange _InterlockedCompareExchange
1284 #define interlockedexchange _InterlockedExchange
1285 #endif /* WIN32 */
1286 #endif /* USE_LOCKS */
1287 
1288 /* Declarations for bit scanning on win32 */
1289 #if defined(_MSC_VER) && _MSC_VER>=1300
1290 #ifndef BitScanForward  /* Try to avoid pulling in WinNT.h */
1291 #ifdef __cplusplus
1292 extern "C" {
1293 #endif /* __cplusplus */
1294 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
1295 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
1296 #ifdef __cplusplus
1297 }
1298 #endif /* __cplusplus */
1299 
1300 #define BitScanForward _BitScanForward
1301 #define BitScanReverse _BitScanReverse
1302 #pragma intrinsic(_BitScanForward)
1303 #pragma intrinsic(_BitScanReverse)
1304 #endif /* BitScanForward */
1305 #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
1306 
1307 #ifndef WIN32
1308 #ifndef malloc_getpagesize
1309 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
1310 #    ifndef _SC_PAGE_SIZE
1311 #      define _SC_PAGE_SIZE _SC_PAGESIZE
1312 #    endif
1313 #  endif
1314 #  ifdef _SC_PAGE_SIZE
1315 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1316 #  else
1317 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1318        extern size_t getpagesize();
1319 #      define malloc_getpagesize getpagesize()
1320 #    else
1321 #      ifdef WIN32 /* use supplied emulation of getpagesize */
1322 #        define malloc_getpagesize getpagesize()
1323 #      else
1324 #        ifndef LACKS_SYS_PARAM_H
1325 #          include <sys/param.h>
1326 #        endif
1327 #        ifdef EXEC_PAGESIZE
1328 #          define malloc_getpagesize EXEC_PAGESIZE
1329 #        else
1330 #          ifdef NBPG
1331 #            ifndef CLSIZE
1332 #              define malloc_getpagesize NBPG
1333 #            else
1334 #              define malloc_getpagesize (NBPG * CLSIZE)
1335 #            endif
1336 #          else
1337 #            ifdef NBPC
1338 #              define malloc_getpagesize NBPC
1339 #            else
1340 #              ifdef PAGESIZE
1341 #                define malloc_getpagesize PAGESIZE
1342 #              else /* just guess */
1343 #                define malloc_getpagesize ((size_t)4096U)
1344 #              endif
1345 #            endif
1346 #          endif
1347 #        endif
1348 #      endif
1349 #    endif
1350 #  endif
1351 #endif
1352 #endif
1353 
1354 
1355 
1356 /* ------------------- size_t and alignment properties -------------------- */
1357 
1358 /* The byte and bit size of a size_t */
1359 #define SIZE_T_SIZE         (sizeof(size_t))
1360 #define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
1361 
1362 /* Some constants coerced to size_t */
1363 /* Annoying but necessary to avoid errors on some platforms */
1364 #define SIZE_T_ZERO         ((size_t)0)
1365 #define SIZE_T_ONE          ((size_t)1)
1366 #define SIZE_T_TWO          ((size_t)2)
1367 #define SIZE_T_FOUR         ((size_t)4)
1368 #define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
1369 #define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
1370 #define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1371 #define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
1372 
1373 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
1374 #define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
1375 
1376 /* True if address a has acceptable alignment */
1377 #define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
1378 
1379 /* the number of bytes to offset an address to align it */
1380 #define align_offset(A)\
1381  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1382   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
1383 
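     /*
       Worked example (illustrative, assuming MALLOC_ALIGNMENT == 16 and
       hence CHUNK_ALIGN_MASK == 15):
         align_offset((void*)0x1000) == 0    already aligned
         align_offset((void*)0x1008) == 8    pad 8 bytes to reach 0x1010
     */
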
1384 /* -------------------------- MMAP preliminaries ------------------------- */
1385 
1386 /*
1387    If HAVE_MORECORE or DL_HAVE_MMAP is false, we just define the calls and
1388    checks to fail so the compiler optimizer can delete code rather than
1389    using so many "#if"s.
1390 */
1391 
1392 
1393 /* MORECORE and MMAP must return MFAIL on failure */
1394 #define MFAIL                ((void*)(MAX_SIZE_T))
1395 #define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
1396 
1397 #if !DL_HAVE_MMAP
1398 #define IS_MMAPPED_BIT       (SIZE_T_ZERO)
1399 #define USE_MMAP_BIT         (SIZE_T_ZERO)
1400 #define CALL_MMAP(s)         MFAIL
1401 #define CALL_MUNMAP(a, s)    (-1)
1402 #define DIRECT_MMAP(s)       MFAIL
1403 
1404 #else /* DL_HAVE_MMAP */
1405 #define IS_MMAPPED_BIT       (SIZE_T_ONE)
1406 #define USE_MMAP_BIT         (SIZE_T_ONE)
1407 
1408 #ifndef WIN32
1409 #define CALL_MUNMAP(a, s)    munmap((a), (s))
1410 #define MMAP_PROT            (PROT_READ|PROT_WRITE)
1411 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1412 #define MAP_ANONYMOUS        MAP_ANON
1413 #endif /* MAP_ANON */
1414 #ifdef MAP_ANONYMOUS
1415 #define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
1416 #define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1417 #else /* MAP_ANONYMOUS */
1418 /*
1419    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
1420    is unlikely to be needed, but is supplied just in case.
1421 */
1422 #define MMAP_FLAGS           (MAP_PRIVATE)
1423 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1424 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
1425            (dev_zero_fd = open("/dev/zero", O_RDWR), \
1426             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1427             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1428 #endif /* MAP_ANONYMOUS */
1429 
1430 #define DIRECT_MMAP(s)       CALL_MMAP(s)
1431 #else /* WIN32 */
1432 
1433 /* Win32 MMAP via VirtualAlloc */
1434 static FORCEINLINE void* win32mmap(size_t size) {
1435   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
1436   return (ptr != 0)? ptr: MFAIL;
1437 }
1438 
1439 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1440 static FORCEINLINE void* win32direct_mmap(size_t size) {
1441   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
1442                            PAGE_READWRITE);
1443   return (ptr != 0)? ptr: MFAIL;
1444 }
1445 
1446 /* This function supports releasing coalesced segments */
1447 static FORCEINLINE int win32munmap(void* ptr, size_t size) {
1448   MEMORY_BASIC_INFORMATION minfo;
1449   char* cptr = (char*)ptr;
1450   while (size) {
1451     if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1452       return -1;
1453     if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1454         minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1455       return -1;
1456     if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1457       return -1;
1458     cptr += minfo.RegionSize;
1459     size -= minfo.RegionSize;
1460   }
1461   return 0;
1462 }
1463 
1464 #define CALL_MMAP(s)         win32mmap(s)
1465 #define CALL_MUNMAP(a, s)    win32munmap((a), (s))
1466 #define DIRECT_MMAP(s)       win32direct_mmap(s)
1467 #endif /* WIN32 */
1468 #endif /* DL_HAVE_MMAP */
1469 
1470 #if DL_HAVE_MMAP && DL_HAVE_MREMAP
1471 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1472 #else  /* DL_HAVE_MMAP && DL_HAVE_MREMAP */
1473 #define CALL_MREMAP(addr, osz, nsz, mv) ((void)(addr),(void)(osz), \
1474                                          (void)(nsz), (void)(mv),MFAIL)
1475 #endif /* DL_HAVE_MMAP && DL_HAVE_MREMAP */
1476 
1477 #if HAVE_MORECORE
1478 #define CALL_MORECORE(S)     MORECORE(S)
1479 #else  /* HAVE_MORECORE */
1480 #define CALL_MORECORE(S)     MFAIL
1481 #endif /* HAVE_MORECORE */
1482 
1483 /* mstate bit set if contiguous morecore disabled or failed */
1484 #define USE_NONCONTIGUOUS_BIT (4U)
1485 
1486 /* segment bit set in create_mspace_with_base */
1487 #define EXTERN_BIT            (8U)
1488 
1489 
1490 /* --------------------------- Lock preliminaries ------------------------ */
1491 
1492 /*
1493   When locks are defined, there are up to two global locks:
1494 
1495   * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
1496     MORECORE.  In many cases sys_alloc requires two calls, that should
1497     not be interleaved with calls by other threads.  This does not
1498     protect against direct calls to MORECORE by other threads not
1499     using this lock, so there is still code to cope as best we can with
1500     interference.
1501 
1502   * magic_init_mutex ensures that mparams.magic and other
1503     unique mparams values are initialized only once.
1504 
1505    To enable use in layered extensions, locks are reentrant.
1506 
1507    Because lock-protected regions generally have bounded times, we use
1508    the supplied simple spinlocks in the custom versions for x86.
1509 
1510    If USE_LOCKS is > 1, the definitions of lock routines here are
1511    bypassed, in which case you will need to define at least
1512    INITIAL_LOCK, ACQUIRE_LOCK, RELEASE_LOCK, and
1513    NULL_LOCK_INITIALIZER, and possibly TRY_LOCK and IS_LOCKED.
1514    (The latter two are not used in this malloc, but are
1515    commonly needed in extensions.)
1516 */
1517 
1518 #if USE_LOCKS == 1
1519 
1520 #if USE_SPIN_LOCKS
1521 #ifndef WIN32
1522 /* Custom pthread-style spin locks on x86 and x64 for gcc */
1523 struct pthread_mlock_t
1524 {
1525   volatile pthread_t threadid;
1526   volatile unsigned int c;
1527   volatile unsigned int l;
1528 };
1529 #define MLOCK_T struct pthread_mlock_t
1530 #define CURRENT_THREAD        pthread_self()
1531 #define SPINS_PER_YIELD       63
1532 static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
1533   if(CURRENT_THREAD==sl->threadid)
1534     ++sl->c;
1535   else {
1536     int spins = 0;
1537     for (;;) {
1538       int ret;
1539       __asm__ __volatile__ ("lock cmpxchgl %2,(%1)" : "=a" (ret) : "r" (&sl->l), "r" (1), "a" (0));
1540       if(!ret) {
1541         dl_assert(!sl->threadid);
1542         sl->threadid=CURRENT_THREAD;
1543         sl->c=1;
1544         break;
1545       }
1546       if ((++spins & SPINS_PER_YIELD) == 0) {
1547 #if defined (__SVR4) && defined (__sun) /* solaris */
1548         thr_yield();
1549 #else
1550 #ifdef linux
1551         sched_yield();
1552 #else  /* no-op yield on unknown systems */
1553         ;
1554 #endif /* linux */
1555 #endif /* solaris */
1556       }
1557     }
1558   }
1559 
1560   return 0;
1561 }
1562 
1563 static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
1564   int ret;
1565   dl_assert(CURRENT_THREAD==sl->threadid);
1566   if (!--sl->c) {
1567     sl->threadid=0;
1568     __asm__ __volatile__ ("xchgl %2,(%1)" : "=r" (ret) : "r" (&sl->l), "0" (0));
1569   }
1570 }
1571 
1572 static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
1573   int ret;
1574   __asm__ __volatile__ ("lock cmpxchgl %2,(%1)" : "=a" (ret) : "r" (&sl->l), "r" (1), "a" (0));
1575   if(!ret){
1576     dl_assert(!sl->threadid);
1577     sl->threadid=CURRENT_THREAD;
1578     sl->c=1;
1579     return 1;
1580   }
1581   return 0;
1582 }
1583 
1584 #define INITIAL_LOCK(sl)      (memset((sl), 0, sizeof(MLOCK_T)), 0)
1585 #define ACQUIRE_LOCK(sl)      pthread_acquire_lock(sl)
1586 #define RELEASE_LOCK(sl)      pthread_release_lock(sl)
1587 #define TRY_LOCK(sl)          pthread_try_lock(sl)
1588 #define IS_LOCKED(sl)         ((sl)->l)
1589 
1590 static MLOCK_T magic_init_mutex = {0, 0, 0 };
1591 #if HAVE_MORECORE
1592 static MLOCK_T morecore_mutex = {0, 0, 0 };
1593 #endif /* HAVE_MORECORE */
1594 
1595 #else /* WIN32 */
1596 /* Custom win32-style spin locks on x86 and x64 for MSC */
1597 struct win32_mlock_t
1598 {
1599   volatile long threadid;
1600   volatile unsigned int c;
1601   long l;
1602 };
1603 #define MLOCK_T struct win32_mlock_t
1604 #define CURRENT_THREAD        GetCurrentThreadId()
1605 #define SPINS_PER_YIELD    63
1606 static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
1607   long mythreadid=CURRENT_THREAD;
1608   if(mythreadid==sl->threadid)
1609     ++sl->c;
1610   else {
1611     int spins = 0;
1612     for (;;) {
1613       if (!interlockedexchange(&sl->l, 1)) {
1614         dl_assert(!sl->threadid);
1615         sl->threadid=mythreadid;
1616         sl->c=1;
1617         break;
1618       }
1619       if ((++spins & SPINS_PER_YIELD) == 0)
1620         SleepEx(0, FALSE);
1621     }
1622   }
1623   return 0;
1624 }
1625 
1626 static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
1627   dl_assert(CURRENT_THREAD==sl->threadid);
1628   if (!--sl->c) {
1629     sl->threadid=0;
1630     interlockedexchange (&sl->l, 0);
1631   }
1632 }
1633 
1634 static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
1635   if (!interlockedexchange(&sl->l, 1)){
1636     dl_assert(!sl->threadid);
1637     sl->threadid=CURRENT_THREAD;
1638     sl->c=1;
1639     return 1;
1640   }
1641   return 0;
1642 }
1643 
1644 #define INITIAL_LOCK(sl)      (memset(sl, 0, sizeof(MLOCK_T)), 0)
1645 #define ACQUIRE_LOCK(sl)      win32_acquire_lock(sl)
1646 #define RELEASE_LOCK(sl)      win32_release_lock(sl)
1647 #define TRY_LOCK(sl)          win32_try_lock(sl)
1648 #define IS_LOCKED(sl)         ((sl)->l)
1649 
1650 static MLOCK_T magic_init_mutex = {0, 0 };
1651 #if HAVE_MORECORE
1652 static MLOCK_T morecore_mutex = {0, 0 };
1653 #endif /* HAVE_MORECORE */
1654 
1655 #endif /* WIN32 */
1656 #else /* USE_SPIN_LOCKS */
1657 
1658 #ifndef WIN32
1659 /* pthreads-based locks */
1660 struct pthread_mlock_t
1661 {
1662   volatile unsigned int c;
1663   pthread_mutex_t l;
1664 };
1665 #define MLOCK_T struct pthread_mlock_t
1666 #define CURRENT_THREAD        pthread_self()
1667 static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
1668   if(!pthread_mutex_lock(&(sl)->l)){
1669     sl->c++;
1670     return 0;
1671   }
1672   return 1;
1673 }
1674 
1675 static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
1676   --sl->c;
1677   pthread_mutex_unlock(&(sl)->l);
1678 }
1679 
1680 static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
1681   if(!pthread_mutex_trylock(&(sl)->l)){
1682     sl->c++;
1683     return 1;
1684   }
1685   return 0;
1686 }
1687 
1688 static FORCEINLINE int pthread_init_lock (MLOCK_T *sl) {
1689   pthread_mutexattr_t attr;
1690   sl->c=0;
1691   if(pthread_mutexattr_init(&attr)) return 1;
1692   if(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
1693   if(pthread_mutex_init(&sl->l, &attr)) return 1;
1694   pthread_mutexattr_destroy(&attr);
1695   return 0;
1696 }
1697 
1698 static FORCEINLINE int pthread_islocked (MLOCK_T *sl) {
1699   if(pthread_try_lock(sl)){
1700     int ret = (sl->c > 1); /* c exceeds 1 only if we already held it */
1701     pthread_release_lock(sl);
1702     return ret;
1703   }
1704   return 1; /* try_lock failed: held by another thread */
1705 }
1706 
1707 #define INITIAL_LOCK(sl)      pthread_init_lock(sl)
1708 #define ACQUIRE_LOCK(sl)      pthread_acquire_lock(sl)
1709 #define RELEASE_LOCK(sl)      pthread_release_lock(sl)
1710 #define TRY_LOCK(sl)          pthread_try_lock(sl)
1711 #define IS_LOCKED(sl)         pthread_islocked(sl)
1712 
1713 static MLOCK_T magic_init_mutex = {0, PTHREAD_MUTEX_INITIALIZER };
1714 #if HAVE_MORECORE
1715 static MLOCK_T morecore_mutex = {0, PTHREAD_MUTEX_INITIALIZER };
1716 #endif /* HAVE_MORECORE */
1717 
1718 #else /* WIN32 */
1719 /* Win32 critical sections */
1720 #define MLOCK_T         CRITICAL_SECTION
1721 #define CURRENT_THREAD  GetCurrentThreadId()
1722 #define INITIAL_LOCK(s) (!InitializeCriticalSectionAndSpinCount((s), 4000))
1723 #define ACQUIRE_LOCK(s) ( (!((s))->DebugInfo ? INITIAL_LOCK((s)) : 0), EnterCriticalSection((s)), 0)
1724 #define RELEASE_LOCK(s) ( LeaveCriticalSection((s)), 0 )
1725 #define TRY_LOCK(s)     ( TryEnterCriticalSection((s)) )
1726 #define IS_LOCKED(s)    ( (s)->LockCount >= 0 )
1727 #define NULL_LOCK_INITIALIZER
1728 static MLOCK_T magic_init_mutex;
1729 #if HAVE_MORECORE
1730 static MLOCK_T morecore_mutex;
1731 #endif /* HAVE_MORECORE */
1732 #endif /* WIN32 */
1733 #endif /* USE_SPIN_LOCKS */
1734 #endif /* USE_LOCKS == 1 */
1735 
1736 /* -----------------------  User-defined locks ------------------------ */
1737 
1738 #if USE_LOCKS > 1
1739 /* Define your own lock implementation here */
1740 /* #define INITIAL_LOCK(sl)  ... */
1741 /* #define ACQUIRE_LOCK(sl)  ... */
1742 /* #define RELEASE_LOCK(sl)  ... */
1743 /* #define TRY_LOCK(sl) ... */
1744 /* #define IS_LOCKED(sl) ... */
1745 /* #define NULL_LOCK_INITIALIZER ... */
1746 
1747 static MLOCK_T magic_init_mutex = NULL_LOCK_INITIALIZER;
1748 #if HAVE_MORECORE
1749 static MLOCK_T morecore_mutex = NULL_LOCK_INITIALIZER;
1750 #endif /* HAVE_MORECORE */
1751 #endif /* USE_LOCKS > 1 */
1752 
1753 /* -----------------------  Lock-based state ------------------------ */
1754 
1755 
1756 #if USE_LOCKS
1757 #define USE_LOCK_BIT               (2U)
1758 #else  /* USE_LOCKS */
1759 #define USE_LOCK_BIT               (0U)
1760 #define INITIAL_LOCK(l)
1761 #endif /* USE_LOCKS */
1762 
1763 #if USE_LOCKS && HAVE_MORECORE
1764 #define ACQUIRE_MORECORE_LOCK()    ACQUIRE_LOCK(&morecore_mutex);
1765 #define RELEASE_MORECORE_LOCK()    RELEASE_LOCK(&morecore_mutex);
1766 #else /* USE_LOCKS && HAVE_MORECORE */
1767 #define ACQUIRE_MORECORE_LOCK()
1768 #define RELEASE_MORECORE_LOCK()
1769 #endif /* USE_LOCKS && HAVE_MORECORE */
1770 
1771 #if USE_LOCKS
1772 #define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
1773 #define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
1774 #else  /* USE_LOCKS */
1775 #define ACQUIRE_MAGIC_INIT_LOCK()
1776 #define RELEASE_MAGIC_INIT_LOCK()
1777 #endif /* USE_LOCKS */
1778 
1779 
1780 /* -----------------------  Chunk representations ------------------------ */
1781 
1782 /*
1783   (The following includes lightly edited explanations by Colin Plumb.)
1784 
1785   The malloc_chunk declaration below is misleading (but accurate and
1786   necessary).  It declares a "view" into memory allowing access to
1787   necessary fields at known offsets from a given base.
1788 
1789   Chunks of memory are maintained using a `boundary tag' method as
1790   originally described by Knuth.  (See the paper by Paul Wilson
1791   ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
1792   techniques.)  Sizes of free chunks are stored both in the front of
1793   each chunk and at the end.  This makes consolidating fragmented
1794   chunks into bigger chunks fast.  The head fields also hold bits
1795   representing whether chunks are free or in use.
1796 
1797   Here are some pictures to make it clearer.  They are "exploded" to
1798   show that the state of a chunk can be thought of as extending from
1799   the high 31 bits of the head field of its header through the
1800   prev_foot and PINUSE_BIT bit of the following chunk header.
1801 
1802   A chunk that's in use looks like:
1803 
1804    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1805            | Size of previous chunk (if P = 1)                             |
1806            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1807          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1808          | Size of this chunk                                         1| +-+
1809    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1810          |                                                               |
1811          +-                                                             -+
1812          |                                                               |
1813          +-                                                             -+
1814          |                                                               :
1815          +-      size - sizeof(size_t) available payload bytes          -+
1816          :                                                               |
1817  chunk-> +-                                                             -+
1818          |                                                               |
1819          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1820        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
1821        | Size of next chunk (may or may not be in use)               | +-+
1822  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1823 
1824     And if it's free, it looks like this:
1825 
1826    chunk-> +-                                                             -+
1827            | User payload (must be in use, or we would have merged!)       |
1828            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1829          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1830          | Size of this chunk                                         0| +-+
1831    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1832          | Next pointer                                                  |
1833          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1834          | Prev pointer                                                  |
1835          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1836          |                                                               :
1837          +-      size - sizeof(struct chunk) unused bytes               -+
1838          :                                                               |
1839  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1840          | Size of this chunk                                            |
1841          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1842        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
1843        | Size of next chunk (must be in use, or we would have merged)| +-+
1844  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1845        |                                                               :
1846        +- User payload                                                -+
1847        :                                                               |
1848        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1849                                                                      |0|
1850                                                                      +-+
1851   Note that since we always merge adjacent free chunks, the chunks
1852   adjacent to a free chunk must be in use.
1853 
1854   Given a pointer to a chunk (which can be derived trivially from the
1855   payload pointer) we can, in O(1) time, find out whether the adjacent
1856   chunks are free, and if so, unlink them from the lists that they
1857   are on and merge them with the current chunk.
1858 
1859   Chunks always begin on even word boundaries, so the mem portion
1860   (which is returned to the user) is also on an even word boundary, and
1861   thus at least double-word aligned.
1862 
1863   The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
1864   chunk size (which is always a multiple of two words), is an in-use
1865   bit for the *previous* chunk.  If that bit is *clear*, then the
1866   word before the current chunk size contains the previous chunk
1867   size, and can be used to find the front of the previous chunk.
1868   The very first chunk allocated always has this bit set, preventing
1869   access to non-existent (or non-owned) memory. If pinuse is set for
1870   any given chunk, then you CANNOT determine the size of the
1871   previous chunk, and might even get a memory addressing fault when
1872   trying to do so.
1873 
1874   The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
1875   the chunk size redundantly records whether the current chunk is
1876   inuse. This redundancy enables usage checks within free and realloc,
1877   and reduces indirection when freeing and consolidating chunks.
1878 
1879   Each freshly allocated chunk must have both cinuse and pinuse set.
1880   That is, each allocated chunk borders either a previously allocated
1881   and still in-use chunk, or the base of its memory arena. This is
1882   ensured by making all allocations from the `lowest' part of any
1883   found chunk.  Further, no free chunk physically borders another one,
1884   so each free chunk is known to be preceded and followed by either
1885   inuse chunks or the ends of memory.
1886 
1887   Note that the `foot' of the current chunk is actually represented
1888   as the prev_foot of the NEXT chunk. This makes it easier to
1889   deal with alignments etc but can be very confusing when trying
1890   to extend or adapt this code.
1891 
1892   The exceptions to all this are
1893 
1894      1. The special chunk `top' is the top-most available chunk (i.e.,
1895         the one bordering the end of available memory). It is treated
1896         specially.  Top is never included in any bin, is used only if
1897         no other chunk is available, and is released back to the
1898         system if it is very large (see M_TRIM_THRESHOLD).  In effect,
1899         the top chunk is treated as larger (and thus less well
1900         fitting) than any other available chunk.  The top chunk
1901         doesn't update its trailing size field since there is no next
1902         contiguous chunk that would have to index off it. However,
1903         space is still allocated for it (TOP_FOOT_SIZE) to enable
1904         separation or merging when space is extended.
1905 
1906      2. Chunks allocated via mmap, which have the lowest-order bit
1907         (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
1908         PINUSE_BIT in their head fields.  Because they are allocated
1909         one-by-one, each must carry its own prev_foot field, which is
1910         also used to hold the offset this chunk has within its mmapped
1911         region, which is needed to preserve alignment. Each mmapped
1912         chunk is trailed by the first two fields of a fake next-chunk
1913         for sake of usage checks.
1914 
1915 */
1916 
1917 struct malloc_chunk {
1918   size_t               prev_foot;  /* Size of previous chunk (if free).  */
1919   size_t               head;       /* Size and inuse bits. */
1920   struct malloc_chunk* fd;         /* double links -- used only if free. */
1921   struct malloc_chunk* bk;
1922 };
1923 
1924 typedef struct malloc_chunk  mchunk;
1925 typedef struct malloc_chunk* mchunkptr;
1926 typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
1927 typedef unsigned int bindex_t;         /* Described below */
1928 typedef unsigned int binmap_t;         /* Described below */
1929 typedef unsigned int flag_t;           /* The type of various bit flag sets */
1930 
1931 /* ------------------- Chunks sizes and alignments ----------------------- */
1932 
1933 #define MCHUNK_SIZE         (sizeof(mchunk))
1934 
1935 #if FOOTERS
1936 #define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
1937 #else /* FOOTERS */
1938 #define CHUNK_OVERHEAD      (SIZE_T_SIZE)
1939 #endif /* FOOTERS */
1940 
1941 /* MMapped chunks need a second word of overhead ... */
1942 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1943 /* ... and additional padding for fake next-chunk at foot */
1944 #define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
1945 
1946 /* The smallest size we can malloc is an aligned minimal chunk */
1947 #define MIN_CHUNK_SIZE\
1948   ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1949 
1950 /* conversion from malloc headers to user pointers, and back */
1951 #define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
1952 #define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
1953 /* chunk associated with aligned address A */
1954 #define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
1955 
1956 /* Bounds on request (not chunk) sizes. */
1957 #define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
1958 #define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
1959 
1960 /* pad request bytes into a usable size */
1961 #define pad_request(req) \
1962    (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1963 
1964 /* pad request, checking for minimum (but not maximum) */
1965 #define request2size(req) \
1966   (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
1967 
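     /*
       Worked example (illustrative, assuming an 8-byte size_t, FOOTERS==0
       and CHUNK_ALIGN_MASK == 15): CHUNK_OVERHEAD is 8 and MIN_CHUNK_SIZE
       is 32, so
         request2size(100) == (100 + 8 + 15) & ~15 == 112
         request2size(10)  == MIN_CHUNK_SIZE == 32   (below MIN_REQUEST)
       while chunk2mem/mem2chunk shift between a chunk header and its
       payload by TWO_SIZE_T_SIZES (16 bytes here).
     */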
1968 
1969 /* ------------------ Operations on head and foot fields ----------------- */
1970 
1971 /*
1972   The head field of a chunk is or'ed with PINUSE_BIT when the previous
1973   adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
1974   in use. If the chunk was obtained with mmap, the prev_foot field has
1975   IS_MMAPPED_BIT set and also holds the offset from the base of the
1976   mmapped region to the base of the chunk.
1977 
1978   FLAG4_BIT is not used by this malloc, but might be useful in extensions.
1979 */
1980 
1981 #define PINUSE_BIT          (SIZE_T_ONE)
1982 #define CINUSE_BIT          (SIZE_T_TWO)
1983 #define FLAG4_BIT           (SIZE_T_FOUR)
1984 #define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
1985 #define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
1986 
1987 /* Head value for fenceposts */
1988 #define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
1989 
1990 /* extraction of fields from head words */
1991 #define cinuse(p)           ((p)->head & CINUSE_BIT)
1992 #define pinuse(p)           ((p)->head & PINUSE_BIT)
1993 #define chunksize(p)        ((p)->head & ~(FLAG_BITS))
1994 
1995 #define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
1996 #define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
1997 
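     /*
       Worked example (illustrative): an in-use 0x30-byte chunk whose
       previous neighbor is also in use has
         head == 0x30 | CINUSE_BIT | PINUSE_BIT == 0x33
       chunksize() masks the flag bits off again (yielding 0x30), while
       cinuse() and pinuse() test the two low-order bits individually.
     */
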
1998 /* Treat space at ptr +/- offset as a chunk */
1999 #define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
2000 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
2001 
2002 /* Ptr to next or previous physical malloc_chunk. */
2003 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
2004 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
2005 
2006 /* extract next chunk's pinuse bit */
2007 #define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
2008 
2009 /* Get/set size at footer */
2010 #define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
2011 #define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
2012 
2013 /* Set size, pinuse bit, and foot */
2014 #define set_size_and_pinuse_of_free_chunk(p, s)\
2015   ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
2016 
2017 /* Set size, pinuse bit, foot, and clear next pinuse */
2018 #define set_free_with_pinuse(p, s, n)\
2019   (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
2020 
2021 #define is_mmapped(p)\
2022   (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
2023 
2024 /* Get the internal overhead associated with chunk p */
2025 #define overhead_for(p)\
2026  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
2027 
2028 /* Return true if malloced space is not necessarily cleared */
2029 #if MMAP_CLEARS
2030 #define calloc_must_clear(p) (!is_mmapped(p))
2031 #else /* MMAP_CLEARS */
2032 #define calloc_must_clear(p) (1)
2033 #endif /* MMAP_CLEARS */
2034 
2035 /* ---------------------- Overlaid data structures ----------------------- */
2036 
2037 /*
2038   When chunks are not in use, they are treated as nodes of either
2039   lists or trees.
2040 
2041   "Small"  chunks are stored in circular doubly-linked lists, and look
2042   like this:
2043 
2044     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2045             |             Size of previous chunk                            |
2046             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2047     `head:' |             Size of chunk, in bytes                         |P|
2048       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2049             |             Forward pointer to next chunk in list             |
2050             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2051             |             Back pointer to previous chunk in list            |
2052             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2053             |             Unused space (may be 0 bytes long)                .
2054             .                                                               .
2055             .                                                               |
2056 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2057     `foot:' |             Size of chunk, in bytes                           |
2058             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2059 
2060   Larger chunks are kept in a form of bitwise digital trees (aka
2061   tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
2062   free chunks greater than 256 bytes, their size doesn't impose any
2063   constraints on user chunk sizes.  Each node looks like:
2064 
2065     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2066             |             Size of previous chunk                            |
2067             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2068     `head:' |             Size of chunk, in bytes                         |P|
2069       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2070             |             Forward pointer to next chunk of same size        |
2071             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2072             |             Back pointer to previous chunk of same size       |
2073             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2074             |             Pointer to left child (child[0])                  |
2075             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2076             |             Pointer to right child (child[1])                 |
2077             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2078             |             Pointer to parent                                 |
2079             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2080             |             bin index of this chunk                           |
2081             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2082             |             Unused space                                      .
2083             .                                                               |
2084 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2085     `foot:' |             Size of chunk, in bytes                           |
2086             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2087 
2088   Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
2089   of the same size are arranged in a circularly-linked list, with only
2090   the oldest chunk (the next to be used, in our FIFO ordering)
2091   actually in the tree.  (Tree members are distinguished by a non-null
2092   parent pointer.)  If a chunk with the same size as an existing node
2093   is inserted, it is linked off the existing node using pointers that
2094   work in the same way as fd/bk pointers of small chunks.
2095 
2096   Each tree contains a power of 2 sized range of chunk sizes (the
2097   smallest is 0x100 <= x < 0x180), which is divided in half at each
2098   tree level, with the chunks in the smaller half of the range (0x100
2099   <= x < 0x140 for the top node) in the left subtree and the larger
2100   half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
2101   done by inspecting individual bits.
2102 
2103   Using these rules, each node's left subtree contains all smaller
2104   sizes than its right subtree.  However, the node at the root of each
2105   subtree has no particular ordering relationship to either.  (The
2106   dividing line between the subtree sizes is based on trie relation.)
2107   If we remove the last chunk of a given size from the interior of the
2108   tree, we need to replace it with a leaf node.  The tree ordering
2109   rules permit a node to be replaced by any leaf below it.
2110 
2111   The smallest chunk in a tree (a common operation in a best-fit
2112   allocator) can be found by walking a path to the leftmost leaf in
2113   the tree.  Unlike a usual binary tree, where we follow left child
2114   pointers until we reach a null, here we follow the right child
2115   pointer any time the left one is null, until we reach a leaf with
2116   both child pointers null. The smallest chunk in the tree will be
2117   somewhere along that path.
2118 
2119   The worst case number of steps to add, find, or remove a node is
2120   bounded by the number of bits differentiating chunks within
2121   bins. Under current bin calculations, this ranges from 6 up to 21
2122   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
2123   is of course much better.
2124 */
2125 
2126 struct malloc_tree_chunk {
2127   /* The first four fields must be compatible with malloc_chunk */
2128   size_t                    prev_foot;
2129   size_t                    head;
2130   struct malloc_tree_chunk* fd;
2131   struct malloc_tree_chunk* bk;
2132 
2133   struct malloc_tree_chunk* child[2];
2134   struct malloc_tree_chunk* parent;
2135   bindex_t                  index;
2136 };
2137 
2138 typedef struct malloc_tree_chunk  tchunk;
2139 typedef struct malloc_tree_chunk* tchunkptr;
2140 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
2141 
2142 /* A little helper macro for trees */
2143 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
2144 
2145 /* ----------------------------- Segments -------------------------------- */
2146 
2147 /*
2148   Each malloc space may include non-contiguous segments, held in a
2149   list headed by an embedded malloc_segment record representing the
2150   top-most space. Segments also include flags holding properties of
2151   the space. Large chunks that are directly allocated by mmap are not
2152   included in this list. They are instead independently created and
2153   destroyed without otherwise keeping track of them.
2154 
2155   Segment management mainly comes into play for spaces allocated by
2156   MMAP.  Any call to MMAP might or might not return memory that is
2157   adjacent to an existing segment.  MORECORE normally contiguously
2158   extends the current space, so this space is almost always adjacent,
2159   which is simpler and faster to deal with. (This is why MORECORE is
2160   used preferentially to MMAP when both are available -- see
2161   sys_alloc.)  When allocating using MMAP, we don't use any of the
2162   hinting mechanisms (inconsistently) supported in various
2163   implementations of unix mmap, or distinguish reserving from
2164   committing memory. Instead, we just ask for space, and exploit
2165   contiguity when we get it.  It is probably possible to do
2166   better than this on some systems, but no general scheme seems
2167   to be significantly better.
2168 
2169   Management entails a simpler variant of the consolidation scheme
2170   used for chunks to reduce fragmentation -- new adjacent memory is
2171   normally prepended or appended to an existing segment. However,
2172   there are limitations compared to chunk consolidation that mostly
2173   reflect the fact that segment processing is relatively infrequent
2174   (occurring only when getting memory from system) and that we
2175   don't expect to have huge numbers of segments:
2176 
2177   * Segments are not indexed, so traversal requires linear scans.  (It
2178     would be possible to index these, but is not worth the extra
2179     overhead and complexity for most programs on most platforms.)
2180   * New segments are only appended to old ones when holding top-most
2181     memory; if they cannot be prepended to others, they are held in
2182     different segments.
2183 
2184   Except for the top-most segment of an mstate, each segment record
2185   is kept at the tail of its segment. Segments are added by pushing
2186   segment records onto the list headed by &mstate.seg for the
2187   containing mstate.
2188 
2189   Segment flags control allocation/merge/deallocation policies:
2190   * If EXTERN_BIT set, then we did not allocate this segment,
2191     and so should not try to deallocate or merge with others.
2192     (This currently holds only for the initial segment passed
2193     into create_mspace_with_base.)
2194   * If IS_MMAPPED_BIT set, the segment may be merged with
2195     other surrounding mmapped segments and trimmed/de-allocated
2196     using munmap.
2197   * If neither bit is set, then the segment was obtained using
2198     MORECORE so can be merged with surrounding MORECORE'd segments
2199     and deallocated/trimmed using MORECORE with negative arguments.
2200 */
2201 
2202 struct malloc_segment {
2203   char*        base;             /* base address */
2204   size_t       size;             /* allocated size */
2205   struct malloc_segment* next;   /* ptr to next segment */
2206   flag_t       sflags;           /* mmap and extern flag */
2207 };
2208 
2209 #define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
2210 #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
2211 
2212 typedef struct malloc_segment  msegment;
2213 typedef struct malloc_segment* msegmentptr;
2214 
2215 /* ---------------------------- malloc_state ----------------------------- */
2216 
2217 /*
2218    A malloc_state holds all of the bookkeeping for a space.
2219    The main fields are:
2220 
2221   Top
2222     The topmost chunk of the currently active segment. Its size is
2223     cached in topsize.  The actual size of topmost space is
2224     topsize+TOP_FOOT_SIZE, which includes space reserved for adding
2225     fenceposts and segment records if necessary when getting more
2226     space from the system.  The size at which to autotrim top is
2227     cached from mparams in trim_check, except that it is disabled if
2228     an autotrim fails.
2229 
2230   Designated victim (dv)
2231     This is the preferred chunk for servicing small requests that
2232     don't have exact fits.  It is normally the chunk split off most
2233     recently to service another small request.  Its size is cached in
2234     dvsize. The link fields of this chunk are not maintained since it
2235     is not kept in a bin.
2236 
2237   SmallBins
2238     An array of bin headers for free chunks.  These bins hold chunks
2239     with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
2240     chunks of all the same size, spaced 8 bytes apart.  To simplify
2241     use in double-linked lists, each bin header acts as a malloc_chunk
2242     pointing to the real first node, if it exists (else pointing to
2243     itself).  This avoids special-casing for headers.  But to avoid
2244     waste, we allocate only the fd/bk pointers of bins, and then use
2245     repositioning tricks to treat these as the fields of a chunk.
2246 
2247   TreeBins
2248     Treebins are pointers to the roots of trees holding a range of
2249     sizes. There are 2 equally spaced treebins for each power of two
2250     from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
2251     from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
2252 
2253   Bin maps
2254     There is one bit map for small bins ("smallmap") and one for
2255     treebins ("treemap").  Each bin sets its bit when non-empty, and
2256     clears the bit when empty.  Bit operations are then used to avoid
2257     bin-by-bin searching -- nearly all "search" is done without ever
2258     looking at bins that won't be selected.  The bit maps
2259     conservatively use 32 bits per map word, even on a 64-bit system.
2260     For a good description of some of the bit-based techniques used
2261     here, see Henry S. Warren Jr's book "Hacker's Delight" (and
2262     supplement at http://hackersdelight.org/). Many of these are
2263     intended to reduce the branchiness of paths through malloc etc, as
2264     well as to reduce the number of memory locations read or written.
2265 
2266   Segments
2267     A list of segments headed by an embedded malloc_segment record
2268     representing the initial space.
2269 
2270   Address check support
2271     The least_addr field is the least address ever obtained from
2272     MORECORE or MMAP. Attempted frees and reallocs of any address less
2273     than this are trapped (unless INSECURE is defined).
2274 
2275   Magic tag
2276     A cross-check field that should always hold same value as mparams.magic.
2277 
2278   Flags
2279     Bits recording whether to use MMAP, locks, or contiguous MORECORE
2280 
2281   Statistics
2282     Each space keeps track of current and maximum system memory
2283     obtained via MORECORE or MMAP.
2284 
2285   Trim support
2286     Fields holding the amount of unused topmost memory that should trigger
2287     trimming, and a counter to force periodic scanning to release unused
2288     non-topmost segments.
2289 
2290   Locking
2291     If USE_LOCKS is defined, the "mutex" lock is acquired and released
2292     around every public call using this mspace.
2293 
2294   Extension support
2295     A void* pointer and a size_t field that can be used to help implement
2296     extensions to this malloc.
2297 */
2298 
2299 /* Bin types, widths and sizes */
2300 #define NSMALLBINS        (32U)
2301 #define NTREEBINS         (32U)
2302 #define SMALLBIN_SHIFT    (3U)
2303 #define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
2304 #define TREEBIN_SHIFT     (8U)
2305 #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
2306 #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
2307 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2308 
2309 struct malloc_state {
2310   binmap_t   smallmap;
2311   binmap_t   treemap;
2312   size_t     dvsize;
2313   size_t     topsize;
2314   char*      least_addr;
2315   mchunkptr  dv;
2316   mchunkptr  top;
2317   size_t     trim_check;
2318   size_t     release_checks;
2319   size_t     magic;
2320   mchunkptr  smallbins[(NSMALLBINS+1)*2];
2321   tbinptr    treebins[NTREEBINS];
2322   size_t     footprint;
2323   size_t     max_footprint;
2324   flag_t     mflags;
2325 #if USE_LOCKS
2326   MLOCK_T    mutex;     /* locate lock among fields that rarely change */
2327 #endif /* USE_LOCKS */
2328   msegment   seg;
2329   void*      extp;      /* Unused but available for extensions */
2330   size_t     exts;
2331 };
2332 
2333 typedef struct malloc_state*    mstate;
2334 
2335 /* ------------- Global malloc_state and malloc_params ------------------- */
2336 
2337 /*
2338   malloc_params holds global properties, including those that can be
2339   dynamically set using mallopt. There is a single instance, mparams,
2340   initialized in init_mparams.
2341 */
2342 
2343 struct malloc_params {
2344   size_t magic;
2345   size_t page_size;
2346   size_t granularity;
2347   size_t mmap_threshold;
2348   size_t trim_threshold;
2349   flag_t default_mflags;
2350 };
2351 
2352 static struct malloc_params mparams;
2353 
2354 #if !ONLY_MSPACES
2355 
2356 /* The global malloc_state used for all non-"mspace" calls */
2357 static struct malloc_state _gm_;
2358 #define gm                 (&_gm_)
2359 #define is_global(M)       ((M) == &_gm_)
2360 
2361 #endif /* !ONLY_MSPACES */
2362 
2363 #define is_initialized(M)  ((M)->top != 0)
2364 
2365 /* -------------------------- system alloc setup ------------------------- */
2366 
2367 /* Operations on mflags */
2368 
2369 #define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
2370 #define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
2371 #define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
2372 
2373 #define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
2374 #define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
2375 #define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
2376 
2377 #define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
2378 #define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
2379 
2380 #define set_lock(M,L)\
2381  ((M)->mflags = (L)?\
2382   ((M)->mflags | USE_LOCK_BIT) :\
2383   ((M)->mflags & ~USE_LOCK_BIT))
2384 
2385 /* page-align a size */
2386 #define page_align(S)\
2387  (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
2388 
2389 /* granularity-align a size */
2390 #define granularity_align(S)\
2391   (((S) + (mparams.granularity - SIZE_T_ONE))\
2392    & ~(mparams.granularity - SIZE_T_ONE))
2393 
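     /*
       Worked example (illustrative, assuming a 4096-byte page size):
         page_align(5000) == (5000 + 4095) & ~4095 == 8192
       On Windows, where the allocation granularity is typically 65536,
       granularity_align(5000) rounds up to 65536 instead.
     */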
2394 
2395 /* For mmap, use granularity alignment on windows, else page-align */
2396 #ifdef WIN32
2397 #define mmap_align(S) granularity_align(S)
2398 #else
2399 #define mmap_align(S) page_align(S)
2400 #endif
2401 
2402 #define is_page_aligned(S)\
2403    (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2404 #define is_granularity_aligned(S)\
2405    (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2406 
2407 /*  True if segment S holds address A */
2408 #define segment_holds(S, A)\
2409   ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2410 
2411 /* Return segment holding given address */
2412 static msegmentptr segment_holding(mstate m, char* addr) {
2413   msegmentptr sp = &m->seg;
2414   for (;;) {
2415     if (addr >= sp->base && addr < sp->base + sp->size)
2416       return sp;
2417     if ((sp = sp->next) == 0)
2418       return 0;
2419   }
2420 }
2421 
2422 /* Return true if segment contains a segment link */
2423 static int has_segment_link(mstate m, msegmentptr ss) {
2424   msegmentptr sp = &m->seg;
2425   for (;;) {
2426     if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
2427       return 1;
2428     if ((sp = sp->next) == 0)
2429       return 0;
2430   }
2431 }
2432 
2433 #ifndef MORECORE_CANNOT_TRIM
2434 #define should_trim(M,s)  ((s) > (M)->trim_check)
2435 #else  /* MORECORE_CANNOT_TRIM */
2436 #define should_trim(M,s)  (0)
2437 #endif /* MORECORE_CANNOT_TRIM */
2438 
2439 /*
2440   TOP_FOOT_SIZE is padding at the end of a segment, including space
2441   that may be needed to place segment records and fenceposts when new
2442   noncontiguous segments are added.
2443 */
2444 #define TOP_FOOT_SIZE\
2445   (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2446 
2447 
2448 /* -------------------------------  Hooks -------------------------------- */
2449 
2450 /*
2451   PREACTION should be defined to return 0 on success, and nonzero on
2452   failure. If you are not using locking, you can redefine these to do
2453   anything you like.
2454 */
2455 
2456 #if USE_LOCKS
2457 
2458 /* Ensure locks are initialized */
2459 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
2460 
2461 #define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2462 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2463 #else /* USE_LOCKS */
2464 
2465 #ifndef PREACTION
2466 #define PREACTION(M) (0)
2467 #endif  /* PREACTION */
2468 
2469 #ifndef POSTACTION
2470 #define POSTACTION(M)
2471 #endif  /* POSTACTION */
2472 
2473 #endif /* USE_LOCKS */
2474 
2475 /*
2476   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
2477   USAGE_ERROR_ACTION is triggered on detected bad frees and
2478   reallocs. The argument p is an address that might have triggered the
2479   fault. It is ignored by the two predefined actions, but might be
2480   useful in custom actions that try to help diagnose errors.
2481 */
2482 
2483 #if PROCEED_ON_ERROR
2484 
2485 /* A count of the number of corruption errors causing resets */
2486 int malloc_corruption_error_count;
2487 
2488 /* default corruption action */
2489 static void reset_on_error(mstate m);
2490 
2491 #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
2492 #define USAGE_ERROR_ACTION(m, p)
2493 
2494 #else /* PROCEED_ON_ERROR */
2495 
2496 #ifndef CORRUPTION_ERROR_ACTION
2497 #define CORRUPTION_ERROR_ACTION(m) ABORT
2498 #endif /* CORRUPTION_ERROR_ACTION */
2499 
2500 #ifndef USAGE_ERROR_ACTION
2501 #define USAGE_ERROR_ACTION(m,p) ABORT
2502 #endif /* USAGE_ERROR_ACTION */
2503 
2504 #endif /* PROCEED_ON_ERROR */
2505 
2506 /* -------------------------- Debugging setup ---------------------------- */
2507 
2508 #if ! DL_DEBUG
2509 
2510 #define check_free_chunk(M,P)
2511 #define check_inuse_chunk(M,P)
2512 #define check_malloced_chunk(M,P,N)
2513 #define check_mmapped_chunk(M,P)
2514 #define check_malloc_state(M)
2515 #define check_top_chunk(M,P)
2516 
2517 #else /* DL_DEBUG */
2518 #define check_free_chunk(M,P)       do_check_free_chunk(M,P)
2519 #define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
2520 #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
2521 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2522 #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
2523 #define check_malloc_state(M)       do_check_malloc_state(M)
2524 
2525 static void   do_check_any_chunk(mstate m, mchunkptr p);
2526 static void   do_check_top_chunk(mstate m, mchunkptr p);
2527 static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
2528 static void   do_check_inuse_chunk(mstate m, mchunkptr p);
2529 static void   do_check_free_chunk(mstate m, mchunkptr p);
2530 static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
2531 static void   do_check_tree(mstate m, tchunkptr t);
2532 static void   do_check_treebin(mstate m, bindex_t i);
2533 static void   do_check_smallbin(mstate m, bindex_t i);
2534 static void   do_check_malloc_state(mstate m);
2535 static int    bin_find(mstate m, mchunkptr x);
2536 static size_t traverse_and_check(mstate m);
2537 #endif /* DL_DEBUG */
2538 
2539 /* ---------------------------- Indexing Bins ---------------------------- */
2540 
2541 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2542 #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
2543 #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
2544 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
2545 
2546 /* addressing by index. See above about smallbin repositioning */
2547 #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2548 #define treebin_at(M,i)     (&((M)->treebins[i]))
2549 
2550 /* assign tree index for size S to variable I */
2551 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2552 #define compute_tree_index(S, I)\
2553 {\
2554   unsigned int X = S >> TREEBIN_SHIFT;\
2555   if (X == 0)\
2556     I = 0;\
2557   else if (X > 0xFFFF)\
2558     I = NTREEBINS-1;\
2559   else {\
2560     unsigned int K;\
2561     __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g"  (X));\
2562     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2563   }\
2564 }
2565 
2566 #elif defined(_MSC_VER) && _MSC_VER>=1300
2567 #define compute_tree_index(S, I)\
2568 {\
2569   size_t X = S >> TREEBIN_SHIFT;\
2570   if (X == 0)\
2571     I = 0;\
2572   else if (X > 0xFFFF)\
2573     I = NTREEBINS-1;\
2574   else {\
2575     unsigned int K;\
2576     _BitScanReverse((DWORD *) &K, X);\
2577     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2578   }\
2579 }
2580 #else /* GNUC */
2581 #define compute_tree_index(S, I)\
2582 {\
2583   size_t X = S >> TREEBIN_SHIFT;\
2584   if (X == 0)\
2585     I = 0;\
2586   else if (X > 0xFFFF)\
2587     I = NTREEBINS-1;\
2588   else {\
2589     unsigned int Y = (unsigned int)X;\
2590     unsigned int N = ((Y - 0x100) >> 16) & 8;\
2591     unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2592     N += K;\
2593     N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2594     K = 14 - N + ((Y <<= K) >> 15);\
2595     I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2596   }\
2597 }
2598 #endif /* GNUC */
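
/*
  Illustrative sketch (disabled; not part of the allocator): all three
  compute_tree_index variants above produce the same index.  With
  X = S >> TREEBIN_SHIFT, the index is 2*floor(log2(X)) plus the next
  lower bit of S, so every power-of-two size range is split across two
  bins.  The loop below is a slow but obvious reference; it assumes the
  default TREEBIN_SHIFT of 8 and NTREEBINS of 32.
*/
#if 0
#include <assert.h>
#include <stddef.h>

static unsigned ref_tree_index(size_t S) {
  size_t X = S >> 8;                    /* TREEBIN_SHIFT */
  unsigned K = 0;
  if (X == 0) return 0;
  if (X > 0xFFFF) return 31;            /* NTREEBINS-1 */
  while ((X >> (K + 1)) != 0) ++K;      /* K = floor(log2(X)) */
  return (K << 1) + ((S >> (K + 8 - 1)) & 1);
}

int main(void) {
  assert(ref_tree_index(512)  == 2);    /* X=2, K=1, extra bit 0 */
  assert(ref_tree_index(768)  == 3);    /* X=3, K=1, extra bit 1 */
  assert(ref_tree_index(1024) == 4);    /* X=4, K=2, extra bit 0 */
  return 0;
}
#endif /* illustrative sketch */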
2599 
2600 /* Bit representing maximum resolved size in a treebin at i */
2601 #define bit_for_tree_index(i) \
2602    (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2603 
2604 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
2605 #define leftshift_for_tree_index(i) \
2606    ((i == NTREEBINS-1)? 0 : \
2607     ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2608 
2609 /* The size of the smallest chunk held in bin with index i */
2610 #define minsize_for_tree_index(i) \
2611    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
2612    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
2613 
2614 
2615 /* ------------------------ Operations on bin maps ----------------------- */
2616 
2617 /* bit corresponding to given index */
2618 #define idx2bit(i)              ((binmap_t)(1) << (i))
2619 
2620 /* Mark/Clear bits with given index */
2621 #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
2622 #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
2623 #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
2624 
2625 #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
2626 #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
2627 #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
2628 
2629 /* index corresponding to given bit */
2630 
2631 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2632 #define compute_bit2idx(X, I)\
2633 {\
2634   unsigned int J;\
2635   __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
2636   I = (bindex_t)J;\
2637 }
2638 #elif defined(_MSC_VER) && _MSC_VER>=1300
2639 #define compute_bit2idx(X, I)\
2640 {\
2641   unsigned int J;\
2642   _BitScanForward((DWORD *) &J, X);\
2643   I = (bindex_t)J;\
2644 }
2645 
2646 #else /* GNUC */
2647 #if  USE_BUILTIN_FFS
2648 #define compute_bit2idx(X, I) I = ffs(X)-1
2649 
2650 #else /* USE_BUILTIN_FFS */
2651 #define compute_bit2idx(X, I)\
2652 {\
2653   unsigned int Y = X - 1;\
2654   unsigned int K = Y >> (16-4) & 16;\
2655   unsigned int N = K;        Y >>= K;\
2656   N += K = Y >> (8-3) &  8;  Y >>= K;\
2657   N += K = Y >> (4-2) &  4;  Y >>= K;\
2658   N += K = Y >> (2-1) &  2;  Y >>= K;\
2659   N += K = Y >> (1-0) &  1;  Y >>= K;\
2660   I = (bindex_t)(N + Y);\
2661 }
2662 #endif /* USE_BUILTIN_FFS */
2663 #endif /* GNUC */
2664 
2665 /* isolate the least set bit of a bitmap */
2666 #define least_bit(x)         ((x) & -(x))
2667 
2668 /* mask with all bits to left of least bit of x on */
2669 #define left_bits(x)         ((x<<1) | -(x<<1))
2670 
2671 /* mask with all bits to left of or equal to least bit of x on */
2672 #define same_or_left_bits(x) ((x) | -(x))
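
/*
  Illustrative sketch (disabled; not part of the allocator): the binmap
  helpers above are plain two's-complement bit tricks.  For a map with
  bits 3 and 5 set, least_bit() isolates bit 3, left_bits() turns on
  every bit strictly to its left, and the bit's index (3) is what
  compute_bit2idx() would report.  A 32-bit unsigned int is assumed.
*/
#if 0
#include <assert.h>

int main(void) {
  unsigned x = 0x28;                       /* 0b101000: bits 3 and 5 set */
  unsigned least = x & -x;                 /* least_bit(x)  -> 0x08      */
  unsigned left  = (x << 1) | -(x << 1);   /* left_bits(x)               */
  unsigned idx = 0;
  assert(least == 0x08);
  assert((least & (least - 1)) == 0);      /* exactly one bit survives   */
  assert(left == 0xFFFFFFF0u);             /* all bits above bit 3       */
  while ((least >> idx) != 1) ++idx;       /* index compute_bit2idx yields */
  assert(idx == 3);
  return 0;
}
#endif /* illustrative sketch */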
2673 
2674 
2675 /* ----------------------- Runtime Check Support ------------------------- */
2676 
2677 /*
2678   For security, the main invariant is that malloc/free/etc never
2679   writes to a static address other than malloc_state, unless static
2680   malloc_state itself has been corrupted, which cannot occur via
2681   malloc (because of these checks). In essence this means that we
2682   believe all pointers, sizes, maps etc held in malloc_state, but
2683   check all of those linked or offset from other embedded data
2684   structures.  These checks are interspersed with main code in a way
2685   that tends to minimize their run-time cost.
2686 
2687   When FOOTERS is defined, in addition to range checking, we also
2688   verify footer fields of inuse chunks, which can be used to guarantee
2689   that the mstate controlling malloc/free is intact.  This is a
2690   streamlined version of the approach described by William Robertson
2691   et al in "Run-time Detection of Heap-based Overflows" LISA'03
2692   http://www.usenix.org/events/lisa03/tech/robertson.html The footer
2693   of an inuse chunk holds the xor of its mstate and a random seed,
2694   that is checked upon calls to free() and realloc().  This is
2695   (probabilistically) unguessable from outside the program, but can be
2696   computed by any code successfully malloc'ing any chunk, so does not
2697   itself provide protection against code that has already broken
2698   security through some other means.  Unlike Robertson et al, we
2699   always dynamically check addresses of all offset chunks (previous,
2700   next, etc). This turns out to be cheaper than relying on hashes.
2701 */
2702 
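/*
  Illustrative sketch (disabled; not part of the allocator): the
  FOOTERS scheme described above stores (mstate ^ magic) in the footer
  of each inuse chunk, so the owning mstate can be recovered at free()
  time and vetted with an ok_magic()-style test.  The toy_state type
  and the seed value below are hypothetical stand-ins.
*/
#if 0
#include <assert.h>
#include <stddef.h>

struct toy_state { size_t magic; };

int main(void) {
  size_t seed = (size_t)0x5a5a5a58u;       /* plays the role of mparams.magic */
  struct toy_state ms = { seed };

  /* mark_inuse_foot(): footer holds the state pointer xor'ed with the seed */
  size_t footer = (size_t)&ms ^ seed;

  /* get_mstate_for(): xor with the same seed recovers the pointer */
  struct toy_state* owner = (struct toy_state*)(footer ^ seed);
  assert(owner == &ms && owner->magic == seed);   /* passes the magic check */

  /* a corrupted footer decodes to a different address, which the
     magic/range checks would reject before it is ever trusted */
  assert((struct toy_state*)((footer ^ 0x1) ^ seed) != &ms);
  return 0;
}
#endif /* illustrative sketch */
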
2703 #if !INSECURE
2704 /* Check if address a is at least as high as any from MORECORE or MMAP */
2705 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
2706 /* Check if address of next chunk n is higher than base chunk p */
2707 #define ok_next(p, n)    ((char*)(p) < (char*)(n))
2708 /* Check if p has its cinuse bit on */
2709 #define ok_cinuse(p)     cinuse(p)
2710 /* Check if p has its pinuse bit on */
2711 #define ok_pinuse(p)     pinuse(p)
2712 
2713 #else /* !INSECURE */
2714 #define ok_address(M, a) (1)
2715 #define ok_next(b, n)    (1)
2716 #define ok_cinuse(p)     (1)
2717 #define ok_pinuse(p)     (1)
2718 #endif /* !INSECURE */
2719 
2720 #if (FOOTERS && !INSECURE)
2721 /* Check if (alleged) mstate m has expected magic field */
2722 /* Modified by sasha: also check that address is within memheap */
2723 #define ok_magic(M)      ((char *)(M) >= (char *)gm->least_addr && (M)->magic == mparams.magic)
2724 #else  /* (FOOTERS && !INSECURE) */
2725 #define ok_magic(M)      (1)
2726 #endif /* (FOOTERS && !INSECURE) */
2727 
2728 
2729 /* In gcc, use __builtin_expect to minimize impact of checks */
2730 #if !INSECURE
2731 #if defined(__GNUC__) && __GNUC__ >= 3
2732 #define RTCHECK(e)  __builtin_expect(e, 1)
2733 #else /* GNUC */
2734 #define RTCHECK(e)  (e)
2735 #endif /* GNUC */
2736 #else /* !INSECURE */
2737 #define RTCHECK(e)  (1)
2738 #endif /* !INSECURE */
2739 
2740 /* macros to set up inuse chunks with or without footers */
2741 
2742 #if !FOOTERS
2743 
2744 #define mark_inuse_foot(M,p,s)
2745 
2746 /* Set cinuse bit and pinuse bit of next chunk */
2747 #define set_inuse(M,p,s)\
2748   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2749   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2750 
2751 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
2752 #define set_inuse_and_pinuse(M,p,s)\
2753   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2754   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2755 
2756 /* Set size, cinuse and pinuse bit of this chunk */
2757 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2758   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
2759 
2760 #else /* FOOTERS */
2761 
2762 /* Set foot of inuse chunk to be xor of mstate and seed */
2763 #define mark_inuse_foot(M,p,s)\
2764   (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
2765 
2766 #define get_mstate_for(p)\
2767   ((mstate)(((mchunkptr)((char*)(p) +\
2768     (chunksize(p))))->prev_foot ^ mparams.magic))
2769 
2770 #define set_inuse(M,p,s)\
2771   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2772   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
2773   mark_inuse_foot(M,p,s))
2774 
2775 #define set_inuse_and_pinuse(M,p,s)\
2776   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2777   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
2778  mark_inuse_foot(M,p,s))
2779 
2780 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2781   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2782   mark_inuse_foot(M, p, s))
2783 
2784 #endif /* !FOOTERS */
2785 
2786 /* ---------------------------- setting mparams -------------------------- */
2787 
2788 /* Initialize mparams */
2789 static int init_mparams(void) {
2790   if (mparams.page_size == 0) {
2791     size_t s;
2792 
2793     mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2794     mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
2795 #if MORECORE_CONTIGUOUS
2796     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
2797 #else  /* MORECORE_CONTIGUOUS */
2798     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
2799 #endif /* MORECORE_CONTIGUOUS */
2800 
2801 #if (FOOTERS && !INSECURE)
2802     {
2803 #if USE_DEV_RANDOM
2804       int fd;
2805       unsigned char buf[sizeof(size_t)];
2806       /* Try to use /dev/urandom, else fall back on using time */
2807       if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
2808           read(fd, buf, sizeof(buf)) == sizeof(buf)) {
2809         s = *((size_t *) buf);
2810         close(fd);
2811       }
2812       else
2813 #endif /* USE_DEV_RANDOM */
2814         s = (size_t)(time(0) ^ (size_t)0x55555555U);
2815 
2816       s |= (size_t)8U;    /* ensure nonzero */
2817       s &= ~(size_t)7U;   /* improve chances of fault for bad values */
2818 
2819     }
2820 #else /* (FOOTERS && !INSECURE) */
2821     s = (size_t)0x58585858U;
2822 #endif /* (FOOTERS && !INSECURE) */
2823     ACQUIRE_MAGIC_INIT_LOCK();
2824     if (mparams.magic == 0) {
2825       mparams.magic = s;
2826 #if !ONLY_MSPACES
2827       /* Set up lock for main malloc area */
2828       INITIAL_LOCK(&gm->mutex);
2829       gm->mflags = mparams.default_mflags;
2830 #endif
2831     }
2832     RELEASE_MAGIC_INIT_LOCK();
2833 
2834 #ifndef WIN32
2835     mparams.page_size = malloc_getpagesize;
2836     mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
2837                            DEFAULT_GRANULARITY : mparams.page_size);
2838 #else /* WIN32 */
2839     {
2840       SYSTEM_INFO system_info;
2841       GetSystemInfo(&system_info);
2842       mparams.page_size = system_info.dwPageSize;
2843       mparams.granularity = system_info.dwAllocationGranularity;
2844     }
2845 #endif /* WIN32 */
2846 
2847     /* Sanity-check configuration:
2848        size_t must be unsigned and as wide as pointer type.
2849        ints must be at least 4 bytes.
2850        alignment must be at least 8.
2851        Alignment, min chunk size, granularity, and page size must all be powers of 2.
2852     */
2853     if ((sizeof(size_t) != sizeof(char*)) ||
2854         (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
2855         (sizeof(int) < 4)  ||
2856         (MALLOC_ALIGNMENT < (size_t)8U) ||
2857         ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
2858         ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
2859         ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
2860         ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
2861       ABORT;
2862   }
2863   return 0;
2864 }
2865 
2866 /* support for mallopt */
2867 static int change_mparam(int param_number, int value) {
2868   size_t val = (size_t)value;
2869   init_mparams();
2870   switch(param_number) {
2871   case M_TRIM_THRESHOLD:
2872     mparams.trim_threshold = val;
2873     return 1;
2874   case M_GRANULARITY:
2875     if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
2876       mparams.granularity = val;
2877       return 1;
2878     }
2879     else
2880       return 0;
2881   case M_MMAP_THRESHOLD:
2882     mparams.mmap_threshold = val;
2883     return 1;
2884   default:
2885     return 0;
2886   }
2887 }
2888 
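/*
  Illustrative sketch (disabled; not part of the allocator):
  change_mparam() accepts a new granularity only if it is at least a
  page and a power of two, using the usual (v & (v-1)) == 0 test.
  dlmallopt() (defined later in this file) forwards here; the 128K
  value below is just an example, not a recommendation.
*/
#if 0
#include <assert.h>
#include <stddef.h>

static int is_power_of_two(size_t v) {
  return v != 0 && (v & (v - 1)) == 0;     /* exactly one bit set */
}

int main(void) {
  assert(is_power_of_two(131072));         /* 128K: acceptable granularity */
  assert(!is_power_of_two(100000));        /* change_mparam would return 0 */
  /* e.g. dlmallopt(M_GRANULARITY, 128 * 1024) returns 1 on success */
  return 0;
}
#endif /* illustrative sketch */
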
2889 #if DL_DEBUG
2890 /* ------------------------- Debugging Support --------------------------- */
2891 
2892 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
2893 static void do_check_any_chunk(mstate m, mchunkptr p) {
2894   dl_assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2895   dl_assert(ok_address(m, p));
2896 }
2897 
2898 /* Check properties of top chunk */
2899 static void do_check_top_chunk(mstate m, mchunkptr p) {
2900   msegmentptr sp = segment_holding(m, (char*)p);
2901   size_t  sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
2902   dl_assert(sp != 0);
2903   dl_assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2904   dl_assert(ok_address(m, p));
2905   dl_assert(sz == m->topsize);
2906   dl_assert(sz > 0);
2907   dl_assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
2908   dl_assert(pinuse(p));
2909   dl_assert(!pinuse(chunk_plus_offset(p, sz)));
2910 }
2911 
2912 /* Check properties of (inuse) mmapped chunks */
2913 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
2914   size_t  sz = chunksize(p);
2915   size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
2916   dl_assert(is_mmapped(p));
2917   dl_assert(use_mmap(m));
2918   dl_assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2919   dl_assert(ok_address(m, p));
2920   dl_assert(!is_small(sz));
2921   dl_assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
2922   dl_assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
2923   dl_assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
2924 }
2925 
2926 /* Check properties of inuse chunks */
2927 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
2928   do_check_any_chunk(m, p);
2929   dl_assert(cinuse(p));
2930   dl_assert(next_pinuse(p));
2931   /* If not pinuse and not mmapped, previous chunk has OK offset */
2932   dl_assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
2933   if (is_mmapped(p))
2934     do_check_mmapped_chunk(m, p);
2935 }
2936 
2937 /* Check properties of free chunks */
2938 static void do_check_free_chunk(mstate m, mchunkptr p) {
2939   size_t sz = chunksize(p);
2940   mchunkptr next = chunk_plus_offset(p, sz);
2941   do_check_any_chunk(m, p);
2942   dl_assert(!cinuse(p));
2943   dl_assert(!next_pinuse(p));
2944   dl_assert(!is_mmapped(p));
2945   if (p != m->dv && p != m->top) {
2946     if (sz >= MIN_CHUNK_SIZE) {
2947       dl_assert((sz & CHUNK_ALIGN_MASK) == 0);
2948       dl_assert(is_aligned(chunk2mem(p)));
2949       dl_assert(next->prev_foot == sz);
2950       dl_assert(pinuse(p));
2951       dl_assert(next == m->top || cinuse(next));
2952       dl_assert(p->fd->bk == p);
2953       dl_assert(p->bk->fd == p);
2954     }
2955     else  /* markers are always of size SIZE_T_SIZE */
2956       dl_assert(sz == SIZE_T_SIZE);
2957   }
2958 }
2959 
2960 /* Check properties of malloced chunks at the point they are malloced */
2961 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
2962   if (mem != 0) {
2963     mchunkptr p = mem2chunk(mem);
2964     size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
2965     do_check_inuse_chunk(m, p);
2966     dl_assert((sz & CHUNK_ALIGN_MASK) == 0);
2967     dl_assert(sz >= MIN_CHUNK_SIZE);
2968     dl_assert(sz >= s);
2969     /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
2970     dl_assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
2971   }
2972 }
2973 
2974 /* Check a tree and its subtrees.  */
2975 static void do_check_tree(mstate m, tchunkptr t) {
2976   tchunkptr head = 0;
2977   tchunkptr u = t;
2978   bindex_t tindex = t->index;
2979   size_t tsize = chunksize(t);
2980   bindex_t idx;
2981   compute_tree_index(tsize, idx);
2982   dl_assert(tindex == idx);
2983   dl_assert(tsize >= MIN_LARGE_SIZE);
2984   dl_assert(tsize >= minsize_for_tree_index(idx));
2985   dl_assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
2986 
2987   do { /* traverse through chain of same-sized nodes */
2988     do_check_any_chunk(m, ((mchunkptr)u));
2989     dl_assert(u->index == tindex);
2990     dl_assert(chunksize(u) == tsize);
2991     dl_assert(!cinuse(u));
2992     dl_assert(!next_pinuse(u));
2993     dl_assert(u->fd->bk == u);
2994     dl_assert(u->bk->fd == u);
2995     if (u->parent == 0) {
2996       dl_assert(u->child[0] == 0);
2997       dl_assert(u->child[1] == 0);
2998     }
2999     else {
3000       dl_assert(head == 0); /* only one node on chain has parent */
3001       head = u;
3002       dl_assert(u->parent != u);
3003       dl_assert(u->parent->child[0] == u ||
3004                 u->parent->child[1] == u ||
3005                 *((tbinptr*)(u->parent)) == u);
3006       if (u->child[0] != 0) {
3007         dl_assert(u->child[0]->parent == u);
3008         dl_assert(u->child[0] != u);
3009         do_check_tree(m, u->child[0]);
3010       }
3011       if (u->child[1] != 0) {
3012         dl_assert(u->child[1]->parent == u);
3013         dl_assert(u->child[1] != u);
3014         do_check_tree(m, u->child[1]);
3015       }
3016       if (u->child[0] != 0 && u->child[1] != 0) {
3017         dl_assert(chunksize(u->child[0]) < chunksize(u->child[1]));
3018       }
3019     }
3020     u = u->fd;
3021   } while (u != t);
3022   dl_assert(head != 0);
3023 }
3024 
3025 /*  Check all the chunks in a treebin.  */
3026 static void do_check_treebin(mstate m, bindex_t i) {
3027   tbinptr* tb = treebin_at(m, i);
3028   tchunkptr t = *tb;
3029   int empty = (m->treemap & (1U << i)) == 0;
3030   if (t == 0)
3031     dl_assert(empty);
3032   if (!empty)
3033     do_check_tree(m, t);
3034 }
3035 
3036 /*  Check all the chunks in a smallbin.  */
3037 static void do_check_smallbin(mstate m, bindex_t i) {
3038   sbinptr b = smallbin_at(m, i);
3039   mchunkptr p = b->bk;
3040   unsigned int empty = (m->smallmap & (1U << i)) == 0;
3041   if (p == b)
3042     dl_assert(empty);
3043   if (!empty) {
3044     for (; p != b; p = p->bk) {
3045       size_t size = chunksize(p);
3046       mchunkptr q;
3047       /* each chunk claims to be free */
3048       do_check_free_chunk(m, p);
3049       /* chunk belongs in bin */
3050       dl_assert(small_index(size) == i);
3051       dl_assert(p->bk == b || chunksize(p->bk) == chunksize(p));
3052       /* chunk is followed by an inuse chunk */
3053       q = next_chunk(p);
3054       if (q->head != FENCEPOST_HEAD)
3055         do_check_inuse_chunk(m, q);
3056     }
3057   }
3058 }
3059 
3060 /* Find x in a bin. Used in other check functions. */
3061 static int bin_find(mstate m, mchunkptr x) {
3062   size_t size = chunksize(x);
3063   if (is_small(size)) {
3064     bindex_t sidx = small_index(size);
3065     sbinptr b = smallbin_at(m, sidx);
3066     if (smallmap_is_marked(m, sidx)) {
3067       mchunkptr p = b;
3068       do {
3069         if (p == x)
3070           return 1;
3071       } while ((p = p->fd) != b);
3072     }
3073   }
3074   else {
3075     bindex_t tidx;
3076     compute_tree_index(size, tidx);
3077     if (treemap_is_marked(m, tidx)) {
3078       tchunkptr t = *treebin_at(m, tidx);
3079       size_t sizebits = size << leftshift_for_tree_index(tidx);
3080       while (t != 0 && chunksize(t) != size) {
3081         t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3082         sizebits <<= 1;
3083       }
3084       if (t != 0) {
3085         tchunkptr u = t;
3086         do {
3087           if (u == (tchunkptr)x)
3088             return 1;
3089         } while ((u = u->fd) != t);
3090       }
3091     }
3092   }
3093   return 0;
3094 }
3095 
3096 /* Traverse each chunk and check it; return total */
3097 static size_t traverse_and_check(mstate m) {
3098   size_t sum = 0;
3099   if (is_initialized(m)) {
3100     msegmentptr s = &m->seg;
3101     sum += m->topsize + TOP_FOOT_SIZE;
3102     while (s != 0) {
3103       mchunkptr q = align_as_chunk(s->base);
3104       mchunkptr lastq = 0;
3105       dl_assert(pinuse(q));
3106       while (segment_holds(s, q) &&
3107              q != m->top && q->head != FENCEPOST_HEAD) {
3108         sum += chunksize(q);
3109         if (cinuse(q)) {
3110           dl_assert(!bin_find(m, q));
3111           do_check_inuse_chunk(m, q);
3112         }
3113         else {
3114           dl_assert(q == m->dv || bin_find(m, q));
3115           dl_assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
3116           do_check_free_chunk(m, q);
3117         }
3118         lastq = q;
3119         q = next_chunk(q);
3120       }
3121       s = s->next;
3122     }
3123   }
3124   return sum;
3125 }
3126 
3127 /* Check all properties of malloc_state. */
3128 static void do_check_malloc_state(mstate m) {
3129   bindex_t i;
3130   size_t total;
3131   /* check bins */
3132   for (i = 0; i < NSMALLBINS; ++i)
3133     do_check_smallbin(m, i);
3134   for (i = 0; i < NTREEBINS; ++i)
3135     do_check_treebin(m, i);
3136 
3137   if (m->dvsize != 0) { /* check dv chunk */
3138     do_check_any_chunk(m, m->dv);
3139     dl_assert(m->dvsize == chunksize(m->dv));
3140     dl_assert(m->dvsize >= MIN_CHUNK_SIZE);
3141     dl_assert(bin_find(m, m->dv) == 0);
3142   }
3143 
3144   if (m->top != 0) {   /* check top chunk */
3145     do_check_top_chunk(m, m->top);
3146     /*dl_assert(m->topsize == chunksize(m->top)); redundant */
3147     dl_assert(m->topsize > 0);
3148     dl_assert(bin_find(m, m->top) == 0);
3149   }
3150 
3151   total = traverse_and_check(m);
3152   dl_assert(total <= m->footprint);
3153   dl_assert(m->footprint <= m->max_footprint);
3154 }
3155 #endif /* DL_DEBUG */
3156 
3157 /* ----------------------------- statistics ------------------------------ */
3158 
3159 #if !NO_MALLINFO
3160 static struct mallinfo internal_mallinfo(mstate m) {
3161   struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
3162   if (!PREACTION(m)) {
3163     check_malloc_state(m);
3164     if (is_initialized(m)) {
3165       size_t nfree = SIZE_T_ONE; /* top always free */
3166       size_t mfree = m->topsize + TOP_FOOT_SIZE;
3167       size_t sum = mfree;
3168       msegmentptr s = &m->seg;
3169       while (s != 0) {
3170         mchunkptr q = align_as_chunk(s->base);
3171         while (segment_holds(s, q) &&
3172                q != m->top && q->head != FENCEPOST_HEAD) {
3173           size_t sz = chunksize(q);
3174           sum += sz;
3175           if (!cinuse(q)) {
3176             mfree += sz;
3177             ++nfree;
3178           }
3179           q = next_chunk(q);
3180         }
3181         s = s->next;
3182       }
3183 
3184       nm.arena    = sum;
3185       nm.ordblks  = nfree;
3186       nm.hblkhd   = m->footprint - sum;
3187       nm.usmblks  = m->max_footprint;
3188       nm.uordblks = m->footprint - mfree;
3189       nm.fordblks = mfree;
3190       nm.keepcost = m->topsize;
3191     }
3192 
3193     POSTACTION(m);
3194   }
3195   return nm;
3196 }
3197 #endif /* !NO_MALLINFO */
3198 
3199 static void internal_malloc_stats(mstate m) {
3200   if (!PREACTION(m)) {
3201     size_t maxfp = 0;
3202     size_t fp = 0;
3203     size_t used = 0;
3204     check_malloc_state(m);
3205     if (is_initialized(m)) {
3206       msegmentptr s = &m->seg;
3207       maxfp = m->max_footprint;
3208       fp = m->footprint;
3209       used = fp - (m->topsize + TOP_FOOT_SIZE);
3210 
3211       while (s != 0) {
3212         mchunkptr q = align_as_chunk(s->base);
3213         while (segment_holds(s, q) &&
3214                q != m->top && q->head != FENCEPOST_HEAD) {
3215           if (!cinuse(q))
3216             used -= chunksize(q);
3217           q = next_chunk(q);
3218         }
3219         s = s->next;
3220       }
3221     }
3222 
3223     fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
3224     fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
3225     fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
3226 
3227     POSTACTION(m);
3228   }
3229 }
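
/*
  Illustrative sketch (disabled; not part of the allocator): the
  statistics gathered above are surfaced through dlmallinfo() and
  dlmalloc_stats(), both defined later in this file.  It assumes
  NO_MALLINFO is unset so struct mallinfo is available; the field
  meanings follow internal_mallinfo() above.
*/
#if 0
#include <stdio.h>

static void report_heap_usage(void) {
  struct mallinfo mi = dlmallinfo();
  fprintf(stderr, "arena bytes      = %10lu\n", (unsigned long)mi.arena);
  fprintf(stderr, "free chunks      = %10lu\n", (unsigned long)mi.ordblks);
  fprintf(stderr, "free bytes       = %10lu\n", (unsigned long)mi.fordblks);
  fprintf(stderr, "in-use bytes     = %10lu\n", (unsigned long)mi.uordblks);
  dlmalloc_stats();   /* prints max/current footprint and in-use bytes */
}
#endif /* illustrative sketch */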
3230 
3231 /* ----------------------- Operations on smallbins ----------------------- */
3232 
3233 /*
3234   Various forms of linking and unlinking are defined as macros.  Even
3235   the ones for trees, which are very long but have very short typical
3236   paths.  This is ugly but reduces reliance on inlining support of
3237   compilers.
3238 */
3239 
3240 /* Link a free chunk into a smallbin  */
3241 #define insert_small_chunk(M, P, S) {\
3242   bindex_t IDX  = small_index(S);\
3243   mchunkptr B = smallbin_at(M, IDX);\
3244   mchunkptr F = B;\
3245   dl_assert(S >= MIN_CHUNK_SIZE);\
3246   if (!smallmap_is_marked(M, IDX))\
3247     mark_smallmap(M, IDX);\
3248   else if (RTCHECK(ok_address(M, B->fd)))\
3249     F = B->fd;\
3250   else {\
3251     CORRUPTION_ERROR_ACTION(M);\
3252   }\
3253   B->fd = P;\
3254   F->bk = P;\
3255   P->fd = F;\
3256   P->bk = B;\
3257 }
3258 
3259 /* Unlink a chunk from a smallbin  */
3260 #define unlink_small_chunk(M, P, S) {\
3261   mchunkptr F = P->fd;\
3262   mchunkptr B = P->bk;\
3263   bindex_t IDX = small_index(S);\
3264   dl_assert(P != B);\
3265   dl_assert(P != F);\
3266   dl_assert(chunksize(P) == small_index2size(IDX));\
3267   if (F == B)\
3268     clear_smallmap(M, IDX);\
3269   else if (RTCHECK((F == smallbin_at(M,IDX) || ok_address(M, F)) &&\
3270                    (B == smallbin_at(M,IDX) || ok_address(M, B)))) {\
3271     F->bk = B;\
3272     B->fd = F;\
3273   }\
3274   else {\
3275     CORRUPTION_ERROR_ACTION(M);\
3276   }\
3277 }
3278 
3279 /* Unlink the first chunk from a smallbin */
3280 #define unlink_first_small_chunk(M, B, P, IDX) {\
3281   mchunkptr F = P->fd;\
3282   dl_assert(P != B);\
3283   dl_assert(P != F);\
3284   dl_assert(chunksize(P) == small_index2size(IDX));\
3285   if (B == F)\
3286     clear_smallmap(M, IDX);\
3287   else if (RTCHECK(ok_address(M, F))) {\
3288     B->fd = F;\
3289     F->bk = B;\
3290   }\
3291   else {\
3292     CORRUPTION_ERROR_ACTION(M);\
3293   }\
3294 }
3295 
3296 /* Replace dv node, binning the old one */
3297 /* Used only when dvsize known to be small */
3298 #define replace_dv(M, P, S) {\
3299   size_t DVS = M->dvsize;\
3300   if (DVS != 0) {\
3301     mchunkptr DV = M->dv;\
3302     dl_assert(is_small(DVS));\
3303     insert_small_chunk(M, DV, DVS);\
3304   }\
3305   M->dvsize = S;\
3306   M->dv = P;\
3307 }
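
/*
  Illustrative sketch (disabled; not part of the allocator): stripped
  of the RTCHECK corruption tests, insert_small_chunk and
  unlink_small_chunk are ordinary operations on a circular doubly
  linked list whose head is the bin itself.  The toy_node type is
  hypothetical; real chunks reuse their fd/bk fields.
*/
#if 0
#include <assert.h>
#include <stddef.h>

struct toy_node { struct toy_node *fd, *bk; };

int main(void) {
  struct toy_node bin, a, b;
  bin.fd = bin.bk = &bin;              /* empty bin: links point at itself */

  /* insert_small_chunk(): link 'a' right after the bin header */
  a.fd = bin.fd;  a.bk = &bin;  bin.fd->bk = &a;  bin.fd = &a;
  /* then 'b', so the front-to-back order becomes b, a */
  b.fd = bin.fd;  b.bk = &bin;  bin.fd->bk = &b;  bin.fd = &b;
  assert(bin.fd == &b && b.fd == &a && a.fd == &bin);

  /* unlink_small_chunk(): splice 'a' out by joining its neighbours */
  a.fd->bk = a.bk;  a.bk->fd = a.fd;
  assert(bin.fd == &b && b.fd == &bin && bin.bk == &b);
  return 0;
}
#endif /* illustrative sketch */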
3308 
3309 /* ------------------------- Operations on trees ------------------------- */
3310 
3311 /* Insert chunk into tree */
3312 #define insert_large_chunk(M, X, S) {\
3313   tbinptr* H;\
3314   bindex_t IDX;\
3315   compute_tree_index(S, IDX);\
3316   H = treebin_at(M, IDX);\
3317   X->index = IDX;\
3318   X->child[0] = X->child[1] = 0;\
3319   if (!treemap_is_marked(M, IDX)) {\
3320     mark_treemap(M, IDX);\
3321     *H = X;\
3322     X->parent = (tchunkptr)H;\
3323     X->fd = X->bk = X;\
3324   }\
3325   else {\
3326     tchunkptr T = *H;\
3327     size_t K = S << leftshift_for_tree_index(IDX);\
3328     for (;;) {\
3329       if (chunksize(T) != S) {\
3330         tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3331         K <<= 1;\
3332         if (*C != 0)\
3333           T = *C;\
3334         else if (RTCHECK(ok_address(M, C))) {\
3335           *C = X;\
3336           X->parent = T;\
3337           X->fd = X->bk = X;\
3338           break;\
3339         }\
3340         else {\
3341           CORRUPTION_ERROR_ACTION(M);\
3342           break;\
3343         }\
3344       }\
3345       else {\
3346         tchunkptr F = T->fd;\
3347         if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3348           T->fd = F->bk = X;\
3349           X->fd = F;\
3350           X->bk = T;\
3351           X->parent = 0;\
3352           break;\
3353         }\
3354         else {\
3355           CORRUPTION_ERROR_ACTION(M);\
3356           break;\
3357         }\
3358       }\
3359     }\
3360   }\
3361 }
3362 
3363 /*
3364   Unlink steps:
3365 
3366   1. If x is a chained node, unlink it from its same-sized fd/bk links
3367      and choose its bk node as its replacement.
3368   2. If x was the last node of its size, but not a leaf node, it must
3369      be replaced with a leaf node (not merely one with an open left or
3370      right), to make sure that lefts and rights of descendants
3371      correspond properly to bit masks.  We use the rightmost descendant
3372      of x.  We could use any other leaf, but this is easy to locate and
3373      tends to counteract removal of leftmosts elsewhere, and so keeps
3374      paths shorter than minimally guaranteed.  This doesn't loop much
3375      because on average a node in a tree is near the bottom.
3376   3. If x is the base of a chain (i.e., has parent links) relink
3377      x's parent and children to x's replacement (or null if none).
3378 */
3379 
3380 #define unlink_large_chunk(M, X) {\
3381   tchunkptr XP = X->parent;\
3382   tchunkptr R;\
3383   if (X->bk != X) {\
3384     tchunkptr F = X->fd;\
3385     R = X->bk;\
3386     if (RTCHECK(ok_address(M, F))) {\
3387       F->bk = R;\
3388       R->fd = F;\
3389     }\
3390     else {\
3391       CORRUPTION_ERROR_ACTION(M);\
3392     }\
3393   }\
3394   else {\
3395     tchunkptr* RP;\
3396     if (((R = *(RP = &(X->child[1]))) != 0) ||\
3397         ((R = *(RP = &(X->child[0]))) != 0)) {\
3398       tchunkptr* CP;\
3399       while ((*(CP = &(R->child[1])) != 0) ||\
3400              (*(CP = &(R->child[0])) != 0)) {\
3401         R = *(RP = CP);\
3402       }\
3403       if (RTCHECK(ok_address(M, RP)))\
3404         *RP = 0;\
3405       else {\
3406         CORRUPTION_ERROR_ACTION(M);\
3407       }\
3408     }\
3409   }\
3410   if (XP != 0) {\
3411     tbinptr* H = treebin_at(M, X->index);\
3412     if (X == *H) {\
3413       if ((*H = R) == 0) \
3414         clear_treemap(M, X->index);\
3415     }\
3416     else if (RTCHECK(ok_address(M, XP))) {\
3417       if (XP->child[0] == X) \
3418         XP->child[0] = R;\
3419       else \
3420         XP->child[1] = R;\
3421     }\
3422     else\
3423       CORRUPTION_ERROR_ACTION(M);\
3424     if (R != 0) {\
3425       if (RTCHECK(ok_address(M, R))) {\
3426         tchunkptr C0, C1;\
3427         R->parent = XP;\
3428         if ((C0 = X->child[0]) != 0) {\
3429           if (RTCHECK(ok_address(M, C0))) {\
3430             R->child[0] = C0;\
3431             C0->parent = R;\
3432           }\
3433           else\
3434             CORRUPTION_ERROR_ACTION(M);\
3435         }\
3436         if ((C1 = X->child[1]) != 0) {\
3437           if (RTCHECK(ok_address(M, C1))) {\
3438             R->child[1] = C1;\
3439             C1->parent = R;\
3440           }\
3441           else\
3442             CORRUPTION_ERROR_ACTION(M);\
3443         }\
3444       }\
3445       else\
3446         CORRUPTION_ERROR_ACTION(M);\
3447     }\
3448   }\
3449 }
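
/*
  Illustrative sketch (disabled; not part of the allocator): step 2 of
  the unlink procedure described above needs a genuine leaf to replace
  X, and finds one by walking to the rightmost descendant (preferring
  child[1], else child[0]), exactly as the inner while-loop of
  unlink_large_chunk does.  The toy_tnode type is hypothetical.
*/
#if 0
#include <assert.h>
#include <stddef.h>

struct toy_tnode { struct toy_tnode* child[2]; };

static struct toy_tnode* rightmost_descendant(struct toy_tnode* r) {
  for (;;) {
    if (r->child[1] != 0)      r = r->child[1];   /* prefer the right child */
    else if (r->child[0] != 0) r = r->child[0];
    else                       return r;          /* a true leaf */
  }
}

int main(void) {
  struct toy_tnode leaf = { { 0, 0 } };
  struct toy_tnode mid  = { { 0, &leaf } };
  struct toy_tnode root = { { &mid, 0 } };
  assert(rightmost_descendant(&root) == &leaf);
  return 0;
}
#endif /* illustrative sketch */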
3450 
3451 /* Relays to large vs small bin operations */
3452 
3453 #define insert_chunk(M, P, S)\
3454   if (is_small(S)) insert_small_chunk(M, P, S)\
3455   else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
3456 
3457 #define unlink_chunk(M, P, S)\
3458   if (is_small(S)) unlink_small_chunk(M, P, S)\
3459   else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3460 
3461 
3462 /* Relays to internal calls to malloc/free from realloc, memalign etc */
3463 
3464 #if ONLY_MSPACES
3465 #define internal_malloc(m, b) mspace_malloc(m, b)
3466 #define internal_free(m, mem) mspace_free(m,mem);
3467 #else /* ONLY_MSPACES */
3468 #if MSPACES
3469 #define internal_malloc(m, b)\
3470    (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
3471 #define internal_free(m, mem)\
3472    if (m == gm) dlfree(mem); else mspace_free(m,mem);
3473 #else /* MSPACES */
3474 #define internal_malloc(m, b) dlmalloc(b)
3475 #define internal_free(m, mem) dlfree(mem)
3476 #endif /* MSPACES */
3477 #endif /* ONLY_MSPACES */
3478 
3479 /* -----------------------  Direct-mmapping chunks ----------------------- */
3480 
3481 /*
3482   Directly mmapped chunks are set up with an offset to the start of
3483   the mmapped region stored in the prev_foot field of the chunk. This
3484   allows reconstruction of the required argument to MUNMAP when freed,
3485   and also allows adjustment of the returned chunk to meet alignment
3486   requirements (especially in memalign).  There is also enough space
3487   allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
3488   the PINUSE bit so frees can be checked.
3489 */
3490 
3491 /* Malloc using mmap */
3492 static void* mmap_alloc(mstate m, size_t nb) {
3493   size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3494   if (mmsize > nb) {     /* Check for wrap around 0 */
3495     char* mm = (char*)(DIRECT_MMAP(mmsize));
3496     if (mm != CMFAIL) {
3497       size_t offset = align_offset(chunk2mem(mm));
3498       size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3499       mchunkptr p = (mchunkptr)(mm + offset);
3500       p->prev_foot = offset | IS_MMAPPED_BIT;
3501       (p)->head = (psize|CINUSE_BIT);
3502       mark_inuse_foot(m, p, psize);
3503       chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3504       chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
3505 
3506       if (mm < m->least_addr)
3507         m->least_addr = mm;
3508       if ((m->footprint += mmsize) > m->max_footprint)
3509         m->max_footprint = m->footprint;
3510       dl_assert(is_aligned(chunk2mem(p)));
3511       check_mmapped_chunk(m, p);
3512       return chunk2mem(p);
3513     }
3514   }
3515   return 0;
3516 }
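
/*
  Illustrative sketch (disabled; not part of the allocator): because
  prev_foot of a directly mmapped chunk carries the alignment offset
  (tagged with IS_MMAPPED_BIT), the original region base and length can
  be reconstructed for munmap, as mmap_resize() below does.  All of the
  concrete numbers here are hypothetical.
*/
#if 0
#include <assert.h>
#include <stddef.h>

int main(void) {
  /* values as mmap_alloc() above would have produced them */
  char*  region   = (char*)0x100000;        /* hypothetical mmap base      */
  size_t offset   = 16;                     /* align_offset(chunk2mem(mm)) */
  size_t foot_pad = 4 * sizeof(size_t);     /* stands in for MMAP_FOOT_PAD */
  size_t mmsize   = 65536;                  /* length handed to mmap       */
  char*  chunk    = region + offset;        /* the resulting mchunkptr     */
  size_t psize    = mmsize - offset - foot_pad;

  /* recovery at free/realloc time, given only the chunk and its size */
  char*  base = chunk - offset;             /* offset = prev_foot & ~IS_MMAPPED_BIT */
  size_t len  = psize + offset + foot_pad;  /* the (addr, length) pair for munmap   */
  assert(base == region && len == mmsize);
  return 0;
}
#endif /* illustrative sketch */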
3517 
3518 /* Realloc using mmap */
3519 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
3520   size_t oldsize = chunksize(oldp);
3521   if (is_small(nb)) /* Can't shrink mmap regions below small size */
3522     return 0;
3523   /* Keep old chunk if big enough but not too big */
3524   if (oldsize >= nb + SIZE_T_SIZE &&
3525       (oldsize - nb) <= (mparams.granularity << 1))
3526     return oldp;
3527   else {
3528     size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
3529     size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3530     size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3531     char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
3532                                   oldmmsize, newmmsize, 1);
3533     if (cp != CMFAIL) {
3534       mchunkptr newp = (mchunkptr)(cp + offset);
3535       size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3536       newp->head = (psize|CINUSE_BIT);
3537       mark_inuse_foot(m, newp, psize);
3538       chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
3539       chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
3540 
3541       if (cp < m->least_addr)
3542         m->least_addr = cp;
3543       if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3544         m->max_footprint = m->footprint;
3545       check_mmapped_chunk(m, newp);
3546       return newp;
3547     }
3548   }
3549   return 0;
3550 }
3551 
3552 /* -------------------------- mspace management -------------------------- */
3553 
3554 /* Initialize top chunk and its size */
3555 static void init_top(mstate m, mchunkptr p, size_t psize) {
3556   /* Ensure alignment */
3557   size_t offset = align_offset(chunk2mem(p));
3558   p = (mchunkptr)((char*)p + offset);
3559   psize -= offset;
3560 
3561   m->top = p;
3562   m->topsize = psize;
3563   p->head = psize | PINUSE_BIT;
3564   /* set size of fake trailing chunk holding overhead space only once */
3565   chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
3566   m->trim_check = mparams.trim_threshold; /* reset on each update */
3567 }
3568 
3569 /* Initialize bins for a new mstate that is otherwise zeroed out */
3570 static void init_bins(mstate m) {
3571   /* Establish circular links for smallbins */
3572   bindex_t i;
3573   for (i = 0; i < NSMALLBINS; ++i) {
3574     sbinptr bin = smallbin_at(m,i);
3575     bin->fd = bin->bk = bin;
3576   }
3577 }
3578 
3579 #if PROCEED_ON_ERROR
3580 
3581 /* default corruption action */
3582 static void reset_on_error(mstate m) {
3583   int i;
3584   ++malloc_corruption_error_count;
3585   /* Reinitialize fields to forget about all memory */
3586   m->smallbins = m->treebins = 0;
3587   m->dvsize = m->topsize = 0;
3588   m->seg.base = 0;
3589   m->seg.size = 0;
3590   m->seg.next = 0;
3591   m->top = m->dv = 0;
3592   for (i = 0; i < NTREEBINS; ++i)
3593     *treebin_at(m, i) = 0;
3594   init_bins(m);
3595 }
3596 #endif /* PROCEED_ON_ERROR */
3597 
3598 /* Allocate a chunk at newbase; merge the remainder with the first chunk of the old (successor) base. */
3599 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
3600                            size_t nb) {
3601   mchunkptr p = align_as_chunk(newbase);
3602   mchunkptr oldfirst = align_as_chunk(oldbase);
3603   size_t psize = (char*)oldfirst - (char*)p;
3604   mchunkptr q = chunk_plus_offset(p, nb);
3605   size_t qsize = psize - nb;
3606   set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3607 
3608   dl_assert((char*)oldfirst > (char*)q);
3609   dl_assert(pinuse(oldfirst));
3610   dl_assert(qsize >= MIN_CHUNK_SIZE);
3611 
3612   /* consolidate remainder with first chunk of old base */
3613   if (oldfirst == m->top) {
3614     size_t tsize = m->topsize += qsize;
3615     m->top = q;
3616     q->head = tsize | PINUSE_BIT;
3617     check_top_chunk(m, q);
3618   }
3619   else if (oldfirst == m->dv) {
3620     size_t dsize = m->dvsize += qsize;
3621     m->dv = q;
3622     set_size_and_pinuse_of_free_chunk(q, dsize);
3623   }
3624   else {
3625     if (!cinuse(oldfirst)) {
3626       size_t nsize = chunksize(oldfirst);
3627       unlink_chunk(m, oldfirst, nsize);
3628       oldfirst = chunk_plus_offset(oldfirst, nsize);
3629       qsize += nsize;
3630     }
3631     set_free_with_pinuse(q, qsize, oldfirst);
3632     insert_chunk(m, q, qsize);
3633     check_free_chunk(m, q);
3634   }
3635 
3636   check_malloced_chunk(m, chunk2mem(p), nb);
3637   return chunk2mem(p);
3638 }
3639 
3640 /* Add a segment to hold a new noncontiguous region */
3641 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
3642   /* Determine locations and sizes of segment, fenceposts, old top */
3643   char* old_top = (char*)m->top;
3644   msegmentptr oldsp = segment_holding(m, old_top);
3645   char* old_end = oldsp->base + oldsp->size;
3646   size_t ssize = pad_request(sizeof(struct malloc_segment));
3647   char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3648   size_t offset = align_offset(chunk2mem(rawsp));
3649   char* asp = rawsp + offset;
3650   char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
3651   mchunkptr sp = (mchunkptr)csp;
3652   msegmentptr ss = (msegmentptr)(chunk2mem(sp));
3653   mchunkptr tnext = chunk_plus_offset(sp, ssize);
3654   mchunkptr p = tnext;
3655   int nfences = 0;
3656 
3657   /* reset top to new space */
3658   init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
3659 
3660   /* Set up segment record */
3661   dl_assert(is_aligned(ss));
3662   set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
3663   *ss = m->seg; /* Push current record */
3664   m->seg.base = tbase;
3665   m->seg.size = tsize;
3666   m->seg.sflags = mmapped;
3667   m->seg.next = ss;
3668 
3669   /* Insert trailing fenceposts */
3670   for (;;) {
3671     mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
3672     p->head = FENCEPOST_HEAD;
3673     ++nfences;
3674     if ((char*)(&(nextp->head)) < old_end)
3675       p = nextp;
3676     else
3677       break;
3678   }
3679   dl_assert(nfences >= 2);
3680 
3681   /* Insert the rest of old top into a bin as an ordinary free chunk */
3682   if (csp != old_top) {
3683     mchunkptr q = (mchunkptr)old_top;
3684     size_t psize = csp - old_top;
3685     mchunkptr tn = chunk_plus_offset(q, psize);
3686     set_free_with_pinuse(q, psize, tn);
3687     insert_chunk(m, q, psize);
3688   }
3689 
3690   check_top_chunk(m, m->top);
3691 }
3692 
3693 /* -------------------------- System allocation -------------------------- */
3694 
3695 /* Get memory from system using MORECORE or MMAP */
3696 static void* sys_alloc(mstate m, size_t nb) {
3697   char* tbase = CMFAIL;
3698   size_t tsize = 0;
3699   flag_t mmap_flag = 0;
3700 
3701   init_mparams();
3702 
3703   /* Directly map large chunks */
3704   if (use_mmap(m) && nb >= mparams.mmap_threshold) {
3705     void* mem = mmap_alloc(m, nb);
3706     if (mem != 0)
3707       return mem;
3708   }
3709 
3710   /*
3711     Try getting memory in any of three ways (in most-preferred to
3712     least-preferred order):
3713     1. A call to MORECORE that can normally contiguously extend memory.
3714        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
3715        main space is mmapped or a previous contiguous call failed)
3716     2. A call to MMAP new space (disabled if not DL_HAVE_MMAP).
3717        Note that under the default settings, if MORECORE is unable to
3718        fulfill a request, and DL_HAVE_MMAP is true, then mmap is
3719        used as a noncontiguous system allocator. This is a useful backup
3720        strategy for systems with holes in address spaces -- in this case
3721        sbrk cannot contiguously expand the heap, but mmap may be able to
3722        find space.
3723     3. A call to MORECORE that cannot usually contiguously extend memory.
3724        (disabled if not HAVE_MORECORE)
3725   */
3726 
3727   if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
3728     char* br = CMFAIL;
3729     msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
3730     size_t asize = 0;
3731     ACQUIRE_MORECORE_LOCK();
3732 
3733     if (ss == 0) {  /* First time through or recovery */
3734       char* base = (char*)CALL_MORECORE(0);
3735       if (base != CMFAIL) {
3736         asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3737         /* Adjust to end on a page boundary */
3738         if (!is_page_aligned(base))
3739           asize += (page_align((size_t)base) - (size_t)base);
3740         /* Can't call MORECORE if size is negative when treated as signed */
3741         if (asize < HALF_MAX_SIZE_T &&
3742             (br = (char*)(CALL_MORECORE(asize))) == base) {
3743           tbase = base;
3744           tsize = asize;
3745         }
3746       }
3747     }
3748     else {
3749       /* Subtract out existing available top space from MORECORE request. */
3750       asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
3751       /* Use mem here only if it did contiguously extend old space */
3752       if (asize < HALF_MAX_SIZE_T &&
3753           (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
3754         tbase = br;
3755         tsize = asize;
3756       }
3757     }
3758 
3759     if (tbase == CMFAIL) {    /* Cope with partial failure */
3760       if (br != CMFAIL) {    /* Try to use/extend the space we did get */
3761         if (asize < HALF_MAX_SIZE_T &&
3762             asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
3763           size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
3764           if (esize < HALF_MAX_SIZE_T) {
3765             char* end = (char*)CALL_MORECORE(esize);
3766             if (end != CMFAIL)
3767               asize += esize;
3768             else {            /* Can't use; try to release */
3769               (void) CALL_MORECORE(-asize);
3770               br = CMFAIL;
3771             }
3772           }
3773         }
3774       }
3775       if (br != CMFAIL) {    /* Use the space we did get */
3776         tbase = br;
3777         tsize = asize;
3778       }
3779       else
3780         disable_contiguous(m); /* Don't try contiguous path in the future */
3781     }
3782 
3783     RELEASE_MORECORE_LOCK();
3784   }
3785 
3786   if (DL_HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
3787     size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
3788     size_t rsize = granularity_align(req);
3789     if (rsize > nb) { /* Fail if wraps around zero */
3790       char* mp = (char*)(CALL_MMAP(rsize));
3791       if (mp != CMFAIL) {
3792         tbase = mp;
3793         tsize = rsize;
3794         mmap_flag = IS_MMAPPED_BIT;
3795       }
3796     }
3797   }
3798 
3799   if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
3800     size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3801     if (asize < HALF_MAX_SIZE_T) {
3802       char* br = CMFAIL;
3803       char* end = CMFAIL;
3804       ACQUIRE_MORECORE_LOCK();
3805       br = (char*)(CALL_MORECORE(asize));
3806       end = (char*)(CALL_MORECORE(0));
3807       RELEASE_MORECORE_LOCK();
3808       if (br != CMFAIL && end != CMFAIL && br < end) {
3809         size_t ssize = end - br;
3810         if (ssize > nb + TOP_FOOT_SIZE) {
3811           tbase = br;
3812           tsize = ssize;
3813         }
3814       }
3815     }
3816   }
3817 
3818   if (tbase != CMFAIL) {
3819 
3820     if ((m->footprint += tsize) > m->max_footprint)
3821       m->max_footprint = m->footprint;
3822 
3823     if (!is_initialized(m)) { /* first-time initialization */
3824       m->seg.base = m->least_addr = tbase;
3825       m->seg.size = tsize;
3826       m->seg.sflags = mmap_flag;
3827       m->magic = mparams.magic;
3828       m->release_checks = MAX_RELEASE_CHECK_RATE;
3829       init_bins(m);
3830 #if !ONLY_MSPACES
3831       if (is_global(m))
3832         init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
3833       else
3834 #endif
3835       {
3836         /* Offset top by embedded malloc_state */
3837         mchunkptr mn = next_chunk(mem2chunk(m));
3838         init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
3839       }
3840     }
3841 
3842     else {
3843       /* Try to merge with an existing segment */
3844       msegmentptr sp = &m->seg;
3845       /* Only consider most recent segment if traversal suppressed */
3846       while (sp != 0 && tbase != sp->base + sp->size)
3847         sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
3848       if (sp != 0 &&
3849           !is_extern_segment(sp) &&
3850           (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
3851           segment_holds(sp, m->top)) { /* append */
3852         sp->size += tsize;
3853         init_top(m, m->top, m->topsize + tsize);
3854       }
3855       else {
3856         if (tbase < m->least_addr)
3857           m->least_addr = tbase;
3858         sp = &m->seg;
3859         while (sp != 0 && sp->base != tbase + tsize)
3860           sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
3861         if (sp != 0 &&
3862             !is_extern_segment(sp) &&
3863             (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
3864           char* oldbase = sp->base;
3865           sp->base = tbase;
3866           sp->size += tsize;
3867           return prepend_alloc(m, tbase, oldbase, nb);
3868         }
3869         else
3870           add_segment(m, tbase, tsize, mmap_flag);
3871       }
3872     }
3873 
3874     if (nb < m->topsize) { /* Allocate from new or extended top space */
3875       size_t rsize = m->topsize -= nb;
3876       mchunkptr p = m->top;
3877       mchunkptr r = m->top = chunk_plus_offset(p, nb);
3878       r->head = rsize | PINUSE_BIT;
3879       set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3880       check_top_chunk(m, m->top);
3881       check_malloced_chunk(m, chunk2mem(p), nb);
3882       return chunk2mem(p);
3883     }
3884   }
3885 
3886   MALLOC_FAILURE_ACTION;
3887   return 0;
3888 }
3889 
3890 /* -----------------------  system deallocation -------------------------- */
3891 
3892 /* Unmap and unlink any mmapped segments that don't contain used chunks */
3893 static size_t release_unused_segments(mstate m) {
3894   size_t released = 0;
3895   int nsegs = 0;
3896   msegmentptr pred = &m->seg;
3897   msegmentptr sp = pred->next;
3898   while (sp != 0) {
3899     char* base = sp->base;
3900     size_t size = sp->size;
3901     msegmentptr next = sp->next;
3902     ++nsegs;
3903     if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
3904       mchunkptr p = align_as_chunk(base);
3905       size_t psize = chunksize(p);
3906       /* Can unmap if first chunk holds entire segment and not pinned */
3907       if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
3908         tchunkptr tp = (tchunkptr)p;
3909         dl_assert(segment_holds(sp, (char*)sp));
3910         if (p == m->dv) {
3911           m->dv = 0;
3912           m->dvsize = 0;
3913         }
3914         else {
3915           unlink_large_chunk(m, tp);
3916         }
3917         if (CALL_MUNMAP(base, size) == 0) {
3918           released += size;
3919           m->footprint -= size;
3920           /* unlink obsoleted record */
3921           sp = pred;
3922           sp->next = next;
3923         }
3924         else { /* back out if cannot unmap */
3925           insert_large_chunk(m, tp, psize);
3926         }
3927       }
3928     }
3929     if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
3930       break;
3931     pred = sp;
3932     sp = next;
3933   }
3934   /* Reset check counter */
3935   m->release_checks = ((nsegs > (int)MAX_RELEASE_CHECK_RATE)?
3936                        (size_t)nsegs : MAX_RELEASE_CHECK_RATE);
3937   return released;
3938 }
3939 
3940 static int sys_trim(mstate m, size_t pad) {
3941   size_t released = 0;
3942   if (pad < MAX_REQUEST && is_initialized(m)) {
3943     pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
3944 
3945     if (m->topsize > pad) {
3946       /* Shrink top space in granularity-size units, keeping at least one */
3947       size_t unit = mparams.granularity;
3948       size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
3949                       SIZE_T_ONE) * unit;
3950       msegmentptr sp = segment_holding(m, (char*)m->top);
3951 
3952       if (!is_extern_segment(sp)) {
3953         if (is_mmapped_segment(sp)) {
3954           if (DL_HAVE_MMAP &&
3955               sp->size >= extra &&
3956               !has_segment_link(m, sp)) { /* can't shrink if pinned */
3957             size_t newsize = sp->size - extra;
3958             /* Prefer mremap, fall back to munmap */
3959             if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
3960                 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
3961               released = extra;
3962             }
3963           }
3964         }
3965         else if (HAVE_MORECORE) {
3966           if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
3967             extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
3968           ACQUIRE_MORECORE_LOCK();
3969           {
3970             /* Make sure end of memory is where we last set it. */
3971             char* old_br = (char*)(CALL_MORECORE(0));
3972             if (old_br == sp->base + sp->size) {
3973               char* rel_br = (char*)(CALL_MORECORE(-extra));
3974               char* new_br = (char*)(CALL_MORECORE(0));
3975               if (rel_br != CMFAIL && new_br < old_br)
3976                 released = old_br - new_br;
3977             }
3978           }
3979           RELEASE_MORECORE_LOCK();
3980         }
3981       }
3982 
3983       if (released != 0) {
3984         sp->size -= released;
3985         m->footprint -= released;
3986         init_top(m, m->top, m->topsize - released);
3987         check_top_chunk(m, m->top);
3988       }
3989     }
3990 
3991     /* Unmap any unused mmapped segments */
3992     if (DL_HAVE_MMAP)
3993       released += release_unused_segments(m);
3994 
3995     /* On failure, disable autotrim to avoid repeated failed future calls */
3996     if (released == 0 && m->topsize > m->trim_check)
3997       m->trim_check = MAX_SIZE_T;
3998   }
3999 
4000   return (released != 0)? 1 : 0;
4001 }
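
/*
  Illustrative sketch (disabled; not part of the allocator): sys_trim()
  above computes the releasable amount as the largest multiple of the
  granularity that is strictly smaller than the trimmable top space
  (topsize - pad), so the padded top always survives.  The figures
  below are made up purely to show the arithmetic.
*/
#if 0
#include <assert.h>
#include <stddef.h>

int main(void) {
  size_t unit    = 65536;                  /* hypothetical granularity       */
  size_t topsize = 300000;
  size_t pad     = 4096;                   /* request pad plus TOP_FOOT_SIZE */
  size_t extra   = ((topsize - pad + (unit - 1)) / unit - 1) * unit;
  assert(extra == 262144);                 /* four whole units can go back   */
  assert(topsize - extra >= pad);          /* padded top space is preserved  */
  return 0;
}
#endif /* illustrative sketch */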
4002 
4003 /* ---------------------------- malloc support --------------------------- */
4004 
4005 /* allocate a large request from the best fitting chunk in a treebin */
4006 static void* tmalloc_large(mstate m, size_t nb) {
4007   tchunkptr v = 0;
4008   size_t rsize = -nb; /* Unsigned negation */
4009   tchunkptr t;
4010   bindex_t idx;
4011   compute_tree_index(nb, idx);
4012 
4013   if ((t = *treebin_at(m, idx)) != 0) {
4014     /* Traverse tree for this bin looking for node with size == nb */
4015     size_t sizebits = nb << leftshift_for_tree_index(idx);
4016     tchunkptr rst = 0;  /* The deepest untaken right subtree */
4017     for (;;) {
4018       tchunkptr rt;
4019       size_t trem = chunksize(t) - nb;
4020       if (trem < rsize) {
4021         v = t;
4022         if ((rsize = trem) == 0)
4023           break;
4024       }
4025       rt = t->child[1];
4026       t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
4027       if (rt != 0 && rt != t)
4028         rst = rt;
4029       if (t == 0) {
4030         t = rst; /* set t to least subtree holding sizes > nb */
4031         break;
4032       }
4033       sizebits <<= 1;
4034     }
4035   }
4036 
4037   if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
4038     binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
4039     if (leftbits != 0) {
4040       bindex_t i;
4041       binmap_t leastbit = least_bit(leftbits);
4042       compute_bit2idx(leastbit, i);
4043       t = *treebin_at(m, i);
4044     }
4045   }
4046 
4047   while (t != 0) { /* find smallest of tree or subtree */
4048     size_t trem = chunksize(t) - nb;
4049     if (trem < rsize) {
4050       rsize = trem;
4051       v = t;
4052     }
4053     t = leftmost_child(t);
4054   }
4055 
4056   /*  If dv is a better fit, return 0 so malloc will use it */
4057   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
4058     if (RTCHECK(ok_address(m, v))) { /* split */
4059       mchunkptr r = chunk_plus_offset(v, nb);
4060       dl_assert(chunksize(v) == rsize + nb);
4061       if (RTCHECK(ok_next(v, r))) {
4062         unlink_large_chunk(m, v);
4063         if (rsize < MIN_CHUNK_SIZE)
4064           set_inuse_and_pinuse(m, v, (rsize + nb));
4065         else {
4066           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
4067           set_size_and_pinuse_of_free_chunk(r, rsize);
4068           insert_chunk(m, r, rsize);
4069         }
4070         return chunk2mem(v);
4071       }
4072     }
4073     CORRUPTION_ERROR_ACTION(m);
4074   }
4075   return 0;
4076 }
4077 
4078 /* allocate a small request from the best fitting chunk in a treebin */
4079 static void* tmalloc_small(mstate m, size_t nb) {
4080   tchunkptr t, v;
4081   size_t rsize;
4082   bindex_t i;
4083   binmap_t leastbit = least_bit(m->treemap);
4084   compute_bit2idx(leastbit, i);
4085 
4086   v = t = *treebin_at(m, i);
4087   rsize = chunksize(t) - nb;
4088 
4089   while ((t = leftmost_child(t)) != 0) {
4090     size_t trem = chunksize(t) - nb;
4091     if (trem < rsize) {
4092       rsize = trem;
4093       v = t;
4094     }
4095   }
4096 
4097   if (RTCHECK(ok_address(m, v))) {
4098     mchunkptr r = chunk_plus_offset(v, nb);
4099     dl_assert(chunksize(v) == rsize + nb);
4100     if (RTCHECK(ok_next(v, r))) {
4101       unlink_large_chunk(m, v);
4102       if (rsize < MIN_CHUNK_SIZE)
4103         set_inuse_and_pinuse(m, v, (rsize + nb));
4104       else {
4105         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
4106         set_size_and_pinuse_of_free_chunk(r, rsize);
4107         replace_dv(m, r, rsize);
4108       }
4109       return chunk2mem(v);
4110     }
4111   }
4112 
4113   CORRUPTION_ERROR_ACTION(m);
4114   return 0;
4115 }
4116 
4117 /* --------------------------- realloc support --------------------------- */
4118 
4119 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
4120   if (bytes >= MAX_REQUEST) {
4121     MALLOC_FAILURE_ACTION;
4122     return 0;
4123   }
4124   if (!PREACTION(m)) {
4125     mchunkptr oldp = mem2chunk(oldmem);
4126     size_t oldsize = chunksize(oldp);
4127     mchunkptr next = chunk_plus_offset(oldp, oldsize);
4128     mchunkptr newp = 0;
4129     void* extra = 0;
4130 
4131     /* Try to either shrink or extend into top. Else malloc-copy-free */
4132 
4133     if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
4134                 ok_next(oldp, next) && ok_pinuse(next))) {
4135       size_t nb = request2size(bytes);
4136       if (is_mmapped(oldp))
4137         newp = mmap_resize(m, oldp, nb);
4138       else if (oldsize >= nb) { /* already big enough */
4139         size_t rsize = oldsize - nb;
4140         newp = oldp;
4141         if (rsize >= MIN_CHUNK_SIZE) {
4142           mchunkptr remainder = chunk_plus_offset(newp, nb);
4143           set_inuse(m, newp, nb);
4144           set_inuse(m, remainder, rsize);
4145           extra = chunk2mem(remainder);
4146         }
4147       }
4148       else if (next == m->top && oldsize + m->topsize > nb) {
4149         /* Expand into top */
4150         size_t newsize = oldsize + m->topsize;
4151         size_t newtopsize = newsize - nb;
4152         mchunkptr newtop = chunk_plus_offset(oldp, nb);
4153         set_inuse(m, oldp, nb);
4154         newtop->head = newtopsize | PINUSE_BIT;
4155         m->top = newtop;
4156         m->topsize = newtopsize;
4157         newp = oldp;
4158       }
4159     }
4160     else {
4161       USAGE_ERROR_ACTION(m, oldmem);
4162       POSTACTION(m);
4163       return 0;
4164     }
4165 
4166     POSTACTION(m);
4167 
4168     if (newp != 0) {
4169       if (extra != 0) {
4170         internal_free(m, extra);
4171       }
4172       check_inuse_chunk(m, newp);
4173       return chunk2mem(newp);
4174     }
4175     else {
4176       void* newmem = internal_malloc(m, bytes);
4177       if (newmem != 0) {
4178         size_t oc = oldsize - overhead_for(oldp);
4179         memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
4180         internal_free(m, oldmem);
4181       }
4182       return newmem;
4183     }
4184   }
4185   return 0;
4186 }
4187 
4188 /* --------------------------- memalign support -------------------------- */
4189 
4190 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
4191   if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
4192     return internal_malloc(m, bytes);
4193   if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
4194     alignment = MIN_CHUNK_SIZE;
4195   if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
4196     size_t a = MALLOC_ALIGNMENT << 1;
4197     while (a < alignment) a <<= 1;
4198     alignment = a;
4199   }
4200 
4201   if (bytes >= MAX_REQUEST - alignment) {
4202     if (m != 0)  { /* Test isn't needed but avoids compiler warning */
4203       MALLOC_FAILURE_ACTION;
4204     }
4205   }
4206   else {
4207     size_t nb = request2size(bytes);
4208     size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
4209     char* mem = (char*)internal_malloc(m, req);
4210     if (mem != 0) {
4211       void* leader = 0;
4212       void* trailer = 0;
4213       mchunkptr p = mem2chunk(mem);
4214 
4215       if (PREACTION(m)) return 0;
4216       if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
4217         /*
4218           Find an aligned spot inside chunk.  Since we need to give
4219           back leading space in a chunk of at least MIN_CHUNK_SIZE, if
4220           the first calculation places us at a spot with less than
4221           MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
4222           We've allocated enough total room so that this is always
4223           possible.
4224         */
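        /* Hypothetical illustration: with alignment == 0x100 and mem
           ending in ...008, the expression below rounds mem up to the
           next ...100 boundary and maps it back to a chunk address; if
           that address is less than MIN_CHUNK_SIZE past p, pos slides
           forward one more alignment unit, which the padded request
           size computed above always leaves room for. */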
4225         char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
4226                                                        alignment -
4227                                                        SIZE_T_ONE)) &
4228                                              -alignment));
4229         char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
4230           br : br+alignment;
4231         mchunkptr newp = (mchunkptr)pos;
4232         size_t leadsize = pos - (char*)(p);
4233         size_t newsize = chunksize(p) - leadsize;
4234 
4235         if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
4236           newp->prev_foot = p->prev_foot + leadsize;
4237           newp->head = (newsize|CINUSE_BIT);
4238         }
4239         else { /* Otherwise, give back leader, use the rest */
4240           set_inuse(m, newp, newsize);
4241           set_inuse(m, p, leadsize);
4242           leader = chunk2mem(p);
4243         }
4244         p = newp;
4245       }
4246 
4247       /* Give back spare room at the end */
4248       if (!is_mmapped(p)) {
4249         size_t size = chunksize(p);
4250         if (size > nb + MIN_CHUNK_SIZE) {
4251           size_t remainder_size = size - nb;
4252           mchunkptr remainder = chunk_plus_offset(p, nb);
4253           set_inuse(m, p, nb);
4254           set_inuse(m, remainder, remainder_size);
4255           trailer = chunk2mem(remainder);
4256         }
4257       }
4258 
4259       dl_assert(chunksize(p) >= nb);
4260       dl_assert((((size_t)(chunk2mem(p))) % alignment) == 0);
4261       check_inuse_chunk(m, p);
4262       POSTACTION(m);
4263       if (leader != 0) {
4264         internal_free(m, leader);
4265       }
4266       if (trailer != 0) {
4267         internal_free(m, trailer);
4268       }
4269       return chunk2mem(p);
4270     }
4271   }
4272   return 0;
4273 }
4274 
4275 /* ------------------------ comalloc/coalloc support --------------------- */
4276 
4277 static void** ialloc(mstate m,
4278                      size_t n_elements,
4279                      size_t* sizes,
4280                      int opts,
4281                      void* chunks[]) {
4282   /*
4283     This provides common support for independent_X routines, handling
4284     all of the combinations that can result.
4285 
4286     The opts arg has:
4287     bit 0 set if all elements are same size (using sizes[0])
4288     bit 1 set if elements should be zeroed
4289   */
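  /* For reference, the public wrappers further down pass opts == 3
     (same size, zeroed) from the independent_calloc entry points and
     opts == 0 from the independent_comalloc entry points. */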
4290 
4291   size_t    element_size;   /* chunksize of each element, if all same */
4292   size_t    contents_size;  /* total size of elements */
4293   size_t    array_size;     /* request size of pointer array */
4294   void*     mem;            /* malloced aggregate space */
4295   mchunkptr p;              /* corresponding chunk */
4296   size_t    remainder_size; /* remaining bytes while splitting */
4297   void**    marray;         /* either "chunks" or malloced ptr array */
4298   mchunkptr array_chunk;    /* chunk for malloced ptr array */
4299   flag_t    was_enabled;    /* to disable mmap */
4300   size_t    size;
4301   size_t    i;
4302 
4303   /* compute array length, if needed */
4304   if (chunks != 0) {
4305     if (n_elements == 0)
4306       return chunks; /* nothing to do */
4307     marray = chunks;
4308     array_size = 0;
4309   }
4310   else {
4311     /* if empty req, must still return chunk representing empty array */
4312     if (n_elements == 0)
4313       return (void**)internal_malloc(m, 0);
4314     marray = 0;
4315     array_size = request2size(n_elements * (sizeof(void*)));
4316   }
4317 
4318   /* compute total element size */
4319   if (opts & 0x1) { /* all-same-size */
4320     element_size = request2size(*sizes);
4321     contents_size = n_elements * element_size;
4322   }
4323   else { /* add up all the sizes */
4324     element_size = 0;
4325     contents_size = 0;
4326     for (i = 0; i != n_elements; ++i)
4327       contents_size += request2size(sizes[i]);
4328   }
4329 
4330   size = contents_size + array_size;
4331 
4332   /*
4333      Allocate the aggregate chunk.  First disable direct-mmapping so
4334      malloc won't use it, since we would not be able to later
4335      free/realloc space internal to a segregated mmap region.
4336   */
4337   was_enabled = use_mmap(m);
4338   disable_mmap(m);
4339   mem = internal_malloc(m, size - CHUNK_OVERHEAD);
4340   if (was_enabled)
4341     enable_mmap(m);
4342   if (mem == 0)
4343     return 0;
4344 
4345   if (PREACTION(m)) return 0;
4346   p = mem2chunk(mem);
4347   remainder_size = chunksize(p);
4348 
4349   dl_assert(!is_mmapped(p));
4350 
4351   if (opts & 0x2) {       /* optionally clear the elements */
4352     memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4353   }
4354 
4355   /* If not provided, allocate the pointer array as final part of chunk */
4356   if (marray == 0) {
4357     size_t  array_chunk_size;
4358     array_chunk = chunk_plus_offset(p, contents_size);
4359     array_chunk_size = remainder_size - contents_size;
4360     marray = (void**) (chunk2mem(array_chunk));
4361     set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
4362     remainder_size = contents_size;
4363   }
4364 
4365   /* split out elements */
4366   for (i = 0; ; ++i) {
4367     marray[i] = chunk2mem(p);
4368     if (i != n_elements-1) {
4369       if (element_size != 0)
4370         size = element_size;
4371       else
4372         size = request2size(sizes[i]);
4373       remainder_size -= size;
4374       set_size_and_pinuse_of_inuse_chunk(m, p, size);
4375       p = chunk_plus_offset(p, size);
4376     }
4377     else { /* the final element absorbs any overallocation slop */
4378       set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
4379       break;
4380     }
4381   }
4382 
4383 #if DL_DEBUG
4384   if (marray != chunks) {
4385     /* final element must have exactly exhausted chunk */
4386     if (element_size != 0) {
4387       dl_assert(remainder_size == element_size);
4388     }
4389     else {
4390       dl_assert(remainder_size == request2size(sizes[i]));
4391     }
4392     check_inuse_chunk(m, mem2chunk(marray));
4393   }
4394   for (i = 0; i != n_elements; ++i)
4395     check_inuse_chunk(m, mem2chunk(marray[i]));
4396 
4397 #endif /* DL_DEBUG */
4398 
4399   POSTACTION(m);
4400   return marray;
4401 }
4402 
4403 
4404 /* -------------------------- public routines ---------------------------- */
4405 
4406 #if !ONLY_MSPACES
4407 
4408 void* dlmalloc(size_t bytes) {
4409   /*
4410      Basic algorithm:
4411      If a small request (< 256 bytes minus per-chunk overhead):
4412        1. If one exists, use a remainderless chunk in associated smallbin.
4413           (Remainderless means that there are too few excess bytes to
4414           represent as a chunk.)
4415        2. If it is big enough, use the dv chunk, which is normally the
4416           chunk adjacent to the one used for the most recent small request.
4417        3. If one exists, split the smallest available chunk in a bin,
4418           saving remainder in dv.
4419        4. If it is big enough, use the top chunk.
4420        5. If available, get memory from system and use it
4421      Otherwise, for a large request:
4422        1. Find the smallest available binned chunk that fits, and use it
4423           if it is better fitting than dv chunk, splitting if necessary.
4424        2. If better fitting than any binned chunk, use the dv chunk.
4425        3. If it is big enough, use the top chunk.
4426        4. If request size >= mmap threshold, try to directly mmap this chunk.
4427        5. If available, get memory from system and use it
4428 
4429      The ugly gotos here ensure that postaction occurs along all paths.
4430   */
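  /*
     A hypothetical walk-through of the small-request path (assuming
     8-byte size_t, 16-byte alignment and no FOOTERS): a 24-byte request
     pads to nb == 32, so idx == small_index(32); if that smallbin or
     the next one is non-empty, step 1 serves it with no remainder;
     otherwise the remaining steps (dv, other bins, top, then sys_alloc)
     are tried as described above.
  */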
4431 
4432   if (!PREACTION(gm)) {
4433     void* mem;
4434     size_t nb;
4435     if (bytes <= MAX_SMALL_REQUEST) {
4436       bindex_t idx;
4437       binmap_t smallbits;
4438       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4439       idx = small_index(nb);
4440       smallbits = gm->smallmap >> idx;
4441 
4442       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4443         mchunkptr b, p;
4444         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
4445         b = smallbin_at(gm, idx);
4446         p = b->fd;
4447         dl_assert(chunksize(p) == small_index2size(idx));
4448         unlink_first_small_chunk(gm, b, p, idx);
4449         set_inuse_and_pinuse(gm, p, small_index2size(idx));
4450         mem = chunk2mem(p);
4451         check_malloced_chunk(gm, mem, nb);
4452         goto postaction;
4453       }
4454 
4455       else if (nb > gm->dvsize) {
4456         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4457           mchunkptr b, p, r;
4458           size_t rsize;
4459           bindex_t i;
4460           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4461           binmap_t leastbit = least_bit(leftbits);
4462           compute_bit2idx(leastbit, i);
4463           b = smallbin_at(gm, i);
4464           p = b->fd;
4465           dl_assert(chunksize(p) == small_index2size(i));
4466           unlink_first_small_chunk(gm, b, p, i);
4467           rsize = small_index2size(i) - nb;
4468           /* Fit here cannot be remainderless with 4-byte sizes */
4469           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4470             set_inuse_and_pinuse(gm, p, small_index2size(i));
4471           else {
4472             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4473             r = chunk_plus_offset(p, nb);
4474             set_size_and_pinuse_of_free_chunk(r, rsize);
4475             replace_dv(gm, r, rsize);
4476           }
4477           mem = chunk2mem(p);
4478           check_malloced_chunk(gm, mem, nb);
4479           goto postaction;
4480         }
4481 
4482         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
4483           check_malloced_chunk(gm, mem, nb);
4484           goto postaction;
4485         }
4486       }
4487     }
4488     else if (bytes >= MAX_REQUEST)
4489       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4490     else {
4491       nb = pad_request(bytes);
4492       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4493         check_malloced_chunk(gm, mem, nb);
4494         goto postaction;
4495       }
4496     }
4497 
4498     if (nb <= gm->dvsize) {
4499       size_t rsize = gm->dvsize - nb;
4500       mchunkptr p = gm->dv;
4501       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4502         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
4503         gm->dvsize = rsize;
4504         set_size_and_pinuse_of_free_chunk(r, rsize);
4505         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4506       }
4507       else { /* exhaust dv */
4508         size_t dvs = gm->dvsize;
4509         gm->dvsize = 0;
4510         gm->dv = 0;
4511         set_inuse_and_pinuse(gm, p, dvs);
4512       }
4513       mem = chunk2mem(p);
4514       check_malloced_chunk(gm, mem, nb);
4515       goto postaction;
4516     }
4517 
4518     else if (nb < gm->topsize) { /* Split top */
4519       size_t rsize = gm->topsize -= nb;
4520       mchunkptr p = gm->top;
4521       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4522       r->head = rsize | PINUSE_BIT;
4523       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4524       mem = chunk2mem(p);
4525       check_top_chunk(gm, gm->top);
4526       check_malloced_chunk(gm, mem, nb);
4527       goto postaction;
4528     }
4529 
4530     mem = sys_alloc(gm, nb);
4531 
4532   postaction:
4533     POSTACTION(gm);
4534     return mem;
4535   }
4536 
4537   return 0;
4538 }
4539 
4540 void dlfree(void* mem) {
4541   /*
4542      Consolidate freed chunks with preceding or succeeding bordering
4543      free chunks, if they exist, and then place in a bin.  Intermixed
4544      with special cases for top, dv, mmapped chunks, and usage errors.
4545   */
4546 
4547   if (mem != 0) {
4548     mchunkptr p  = mem2chunk(mem);
4549 #if FOOTERS
4550     mstate fm = get_mstate_for(p);
4551     if (!ok_magic(fm)) {
4552       USAGE_ERROR_ACTION(fm, p);
4553       return;
4554     }
4555 #else /* FOOTERS */
4556 #define fm gm
4557 #endif /* FOOTERS */
4558     if (!PREACTION(fm)) {
4559       check_inuse_chunk(fm, p);
4560       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4561         size_t psize = chunksize(p);
4562         mchunkptr next = chunk_plus_offset(p, psize);
4563         if (!pinuse(p)) {
4564           size_t prevsize = p->prev_foot;
4565           if ((prevsize & IS_MMAPPED_BIT) != 0) {
4566             prevsize &= ~IS_MMAPPED_BIT;
4567             psize += prevsize + MMAP_FOOT_PAD;
4568             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4569               fm->footprint -= psize;
4570             goto postaction;
4571           }
4572           else {
4573             mchunkptr prev = chunk_minus_offset(p, prevsize);
4574             psize += prevsize;
4575             p = prev;
4576             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4577               if (p != fm->dv) {
4578                 unlink_chunk(fm, p, prevsize);
4579               }
4580               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4581                 fm->dvsize = psize;
4582                 set_free_with_pinuse(p, psize, next);
4583                 goto postaction;
4584               }
4585             }
4586             else
4587               goto erroraction;
4588           }
4589         }
4590 
4591         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4592           if (!cinuse(next)) {  /* consolidate forward */
4593             if (next == fm->top) {
4594               size_t tsize = fm->topsize += psize;
4595               fm->top = p;
4596               p->head = tsize | PINUSE_BIT;
4597               if (p == fm->dv) {
4598                 fm->dv = 0;
4599                 fm->dvsize = 0;
4600               }
4601               if (should_trim(fm, tsize))
4602                 sys_trim(fm, 0);
4603               goto postaction;
4604             }
4605             else if (next == fm->dv) {
4606               size_t dsize = fm->dvsize += psize;
4607               fm->dv = p;
4608               set_size_and_pinuse_of_free_chunk(p, dsize);
4609               goto postaction;
4610             }
4611             else {
4612               size_t nsize = chunksize(next);
4613               psize += nsize;
4614               unlink_chunk(fm, next, nsize);
4615               set_size_and_pinuse_of_free_chunk(p, psize);
4616               if (p == fm->dv) {
4617                 fm->dvsize = psize;
4618                 goto postaction;
4619               }
4620             }
4621           }
4622           else
4623             set_free_with_pinuse(p, psize, next);
4624 
4625           if (is_small(psize)) {
4626             insert_small_chunk(fm, p, psize);
4627             check_free_chunk(fm, p);
4628           }
4629           else {
4630             tchunkptr tp = (tchunkptr)p;
4631             insert_large_chunk(fm, tp, psize);
4632             check_free_chunk(fm, p);
4633             if (--fm->release_checks == 0)
4634               release_unused_segments(fm);
4635           }
4636           goto postaction;
4637         }
4638       }
4639     erroraction:
4640       USAGE_ERROR_ACTION(fm, p);
4641     postaction:
4642       POSTACTION(fm);
4643     }
4644   }
4645 #if !FOOTERS
4646 #undef fm
4647 #endif /* FOOTERS */
4648 }
4649 
4650 void* dlcalloc(size_t n_elements, size_t elem_size) {
4651   void* mem;
4652   size_t req = 0;
4653   if (n_elements != 0) {
4654     req = n_elements * elem_size;
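    /* Overflow check: when both factors fit in 16 bits their product
       cannot overflow a 32-bit-or-wider size_t, so the division test is
       only made when at least one factor is larger than that. */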
4655     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4656         (req / n_elements != elem_size))
4657       req = MAX_SIZE_T; /* force downstream failure on overflow */
4658   }
4659   mem = dlmalloc(req);
4660   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4661     memset(mem, 0, req);
4662   return mem;
4663 }
4664 
4665 void* dlrealloc(void* oldmem, size_t bytes) {
4667   if (oldmem == 0)
4668     return dlmalloc(bytes);
4669 #ifdef REALLOC_ZERO_BYTES_FREES
4670   if (bytes == 0) {
4671     dlfree(oldmem);
4672     return 0;
4673   }
4674 #endif /* REALLOC_ZERO_BYTES_FREES */
4675   else {
4676 #if ! FOOTERS
4677     mstate m = gm;
4678 #else /* FOOTERS */
4680     mstate m = get_mstate_for(mem2chunk(oldmem));
4685     if (!ok_magic(m)) {
4687       USAGE_ERROR_ACTION(m, oldmem);
4688       return 0;
4689     }
4691 #endif /* FOOTERS */
4693     return internal_realloc(m, oldmem, bytes);
4694   }
4695 }
4696 
4697 void* dlmemalign(size_t alignment, size_t bytes) {
4698   return internal_memalign(gm, alignment, bytes);
4699 }
4700 
4701 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
4702                                  void* chunks[]) {
4703   size_t sz = elem_size; /* serves as 1-element array */
4704   return ialloc(gm, n_elements, &sz, 3, chunks);
4705 }
4706 
4707 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
4708                                    void* chunks[]) {
4709   return ialloc(gm, n_elements, sizes, 0, chunks);
4710 }
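
/*
  Illustrative sketch (not part of the original sources; Node and
  make_node are made-up names): a typical use of dlindependent_comalloc
  is to carve the per-object arrays of a data structure out of one
  underlying allocation while keeping each piece independently freeable
  with dlfree.

    struct Node { int count; double* data; char* name; };

    static struct Node* make_node(int count, int name_len) {
      size_t sizes[2];
      void*  parts[2];
      struct Node* n = (struct Node*)dlmalloc(sizeof(struct Node));
      if (n == 0) return 0;
      sizes[0] = (size_t)count * sizeof(double);
      sizes[1] = (size_t)name_len;
      if (dlindependent_comalloc(2, sizes, parts) == 0) {
        dlfree(n);                       // aggregate allocation failed
        return 0;
      }
      n->count = count;
      n->data  = (double*)parts[0];
      n->name  = (char*)parts[1];
      return n;
    }
*/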
4711 
4712 void* dlvalloc(size_t bytes) {
4713   size_t pagesz;
4714   init_mparams();
4715   pagesz = mparams.page_size;
4716   return dlmemalign(pagesz, bytes);
4717 }
4718 
4719 void* dlpvalloc(size_t bytes) {
4720   size_t pagesz;
4721   init_mparams();
4722   pagesz = mparams.page_size;
4723   return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
4724 }
4725 
4726 int dlmalloc_trim(size_t pad) {
4727   int result = 0;
4728   if (!PREACTION(gm)) {
4729     result = sys_trim(gm, pad);
4730     POSTACTION(gm);
4731   }
4732   return result;
4733 }
4734 
4735 size_t dlmalloc_footprint(void) {
4736   return gm->footprint;
4737 }
4738 
4739 size_t dlmalloc_max_footprint(void) {
4740   return gm->max_footprint;
4741 }
4742 
4743 #if !NO_MALLINFO
4744 struct mallinfo dlmallinfo(void) {
4745   return internal_mallinfo(gm);
4746 }
4747 #endif /* NO_MALLINFO */
4748 
4749 void dlmalloc_stats(void) {
4750   internal_malloc_stats(gm);
4751 }
4752 
4753 size_t dlmalloc_usable_size(void* mem) {
4754   if (mem != 0) {
4755     mchunkptr p = mem2chunk(mem);
4756     if (cinuse(p))
4757       return chunksize(p) - overhead_for(p);
4758   }
4759   return 0;
4760 }
4761 
4762 int dlmallopt(int param_number, int value) {
4763   return change_mparam(param_number, value);
4764 }
4765 
4766 #endif /* !ONLY_MSPACES */
4767 
4768 /* ----------------------------- user mspaces ---------------------------- */
4769 
4770 #if MSPACES
4771 
4772 static mstate init_user_mstate(char* tbase, size_t tsize) {
4773   size_t msize = pad_request(sizeof(struct malloc_state));
4774   mchunkptr mn;
4775   mchunkptr msp = align_as_chunk(tbase);
4776   mstate m = (mstate)(chunk2mem(msp));
4777   memset(m, 0, msize);
4778   INITIAL_LOCK(&m->mutex);
4779   msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
4780   m->seg.base = m->least_addr = tbase;
4781   m->seg.size = m->footprint = m->max_footprint = tsize;
4782   m->magic = mparams.magic;
4783   m->release_checks = MAX_RELEASE_CHECK_RATE;
4784   m->mflags = mparams.default_mflags;
4785   m->extp = 0;
4786   m->exts = 0;
4787   disable_contiguous(m);
4788   init_bins(m);
4789   mn = next_chunk(mem2chunk(m));
4790   init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
4791   check_top_chunk(m, m->top);
4792   return m;
4793 }
4794 
4795 mspace create_mspace(size_t capacity, int locked) {
4796   mstate m = 0;
4797   size_t msize = pad_request(sizeof(struct malloc_state));
4798   init_mparams(); /* Ensure pagesize etc initialized */
4799 
4800   if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4801     size_t rs = ((capacity == 0)? mparams.granularity :
4802                  (capacity + TOP_FOOT_SIZE + msize));
4803     size_t tsize = granularity_align(rs);
4804     char* tbase = (char*)(CALL_MMAP(tsize));
4805     if (tbase != CMFAIL) {
4806       m = init_user_mstate(tbase, tsize);
4807       m->seg.sflags = IS_MMAPPED_BIT;
4808       set_lock(m, locked);
4809     }
4810   }
4811   return (mspace)m;
4812 }
4813 
4814 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
4815   mstate m = 0;
4816   size_t msize = pad_request(sizeof(struct malloc_state));
4817   init_mparams(); /* Ensure pagesize etc initialized */
4818 
4819   if (capacity > msize + TOP_FOOT_SIZE &&
4820       capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4821     m = init_user_mstate((char*)base, capacity);
4822     m->seg.sflags = EXTERN_BIT;
4823     set_lock(m, locked);
4824   }
4825   return (mspace)m;
4826 }
4827 
4828 size_t destroy_mspace(mspace msp) {
4829   size_t freed = 0;
4830   mstate ms = (mstate)msp;
4831   if (ok_magic(ms)) {
4832     msegmentptr sp = &ms->seg;
4833     while (sp != 0) {
4834       char* base = sp->base;
4835       size_t size = sp->size;
4836       flag_t flag = sp->sflags;
4837       sp = sp->next;
4838       if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
4839           CALL_MUNMAP(base, size) == 0)
4840         freed += size;
4841     }
4842   }
4843   else {
4844     USAGE_ERROR_ACTION(ms,ms);
4845   }
4846   return freed;
4847 }
4848 
4849 /*
4850   mspace versions of routines are near-clones of the global
4851   versions. This is not so nice but better than the alternatives.
4852 */
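
/*
  Illustrative sketch (not part of the original sources; the capacity
  and names are arbitrary): a private heap is created with its own lock,
  used for a few allocations, and then torn down in one call.

    void mspace_example(void) {
      mspace ms = create_mspace(0, 1);   // 0 => default capacity; locked
      if (ms != 0) {
        void* p = mspace_malloc(ms, 128);
        if (p != 0)
          mspace_free(ms, p);
        destroy_mspace(ms);              // unmaps the space's mmapped segments
      }
    }
*/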
4853 
4854 
4855 void* mspace_malloc(mspace msp, size_t bytes) {
4856   mstate ms = (mstate)msp;
4857   if (!ok_magic(ms)) {
4858     USAGE_ERROR_ACTION(ms,ms);
4859     return 0;
4860   }
4861   if (!PREACTION(ms)) {
4862     void* mem;
4863     size_t nb;
4864     if (bytes <= MAX_SMALL_REQUEST) {
4865       bindex_t idx;
4866       binmap_t smallbits;
4867       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4868       idx = small_index(nb);
4869       smallbits = ms->smallmap >> idx;
4870 
4871       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4872         mchunkptr b, p;
4873         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
4874         b = smallbin_at(ms, idx);
4875         p = b->fd;
4876         dl_assert(chunksize(p) == small_index2size(idx));
4877         unlink_first_small_chunk(ms, b, p, idx);
4878         set_inuse_and_pinuse(ms, p, small_index2size(idx));
4879         mem = chunk2mem(p);
4880         check_malloced_chunk(ms, mem, nb);
4881         goto postaction;
4882       }
4883 
4884       else if (nb > ms->dvsize) {
4885         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4886           mchunkptr b, p, r;
4887           size_t rsize;
4888           bindex_t i;
4889           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4890           binmap_t leastbit = least_bit(leftbits);
4891           compute_bit2idx(leastbit, i);
4892           b = smallbin_at(ms, i);
4893           p = b->fd;
4894           dl_assert(chunksize(p) == small_index2size(i));
4895           unlink_first_small_chunk(ms, b, p, i);
4896           rsize = small_index2size(i) - nb;
4897           /* Fit here cannot be remainderless with 4-byte sizes */
4898           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4899             set_inuse_and_pinuse(ms, p, small_index2size(i));
4900           else {
4901             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4902             r = chunk_plus_offset(p, nb);
4903             set_size_and_pinuse_of_free_chunk(r, rsize);
4904             replace_dv(ms, r, rsize);
4905           }
4906           mem = chunk2mem(p);
4907           check_malloced_chunk(ms, mem, nb);
4908           goto postaction;
4909         }
4910 
4911         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
4912           check_malloced_chunk(ms, mem, nb);
4913           goto postaction;
4914         }
4915       }
4916     }
4917     else if (bytes >= MAX_REQUEST)
4918       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4919     else {
4920       nb = pad_request(bytes);
4921       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4922         check_malloced_chunk(ms, mem, nb);
4923         goto postaction;
4924       }
4925     }
4926 
4927     if (nb <= ms->dvsize) {
4928       size_t rsize = ms->dvsize - nb;
4929       mchunkptr p = ms->dv;
4930       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4931         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4932         ms->dvsize = rsize;
4933         set_size_and_pinuse_of_free_chunk(r, rsize);
4934         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4935       }
4936       else { /* exhaust dv */
4937         size_t dvs = ms->dvsize;
4938         ms->dvsize = 0;
4939         ms->dv = 0;
4940         set_inuse_and_pinuse(ms, p, dvs);
4941       }
4942       mem = chunk2mem(p);
4943       check_malloced_chunk(ms, mem, nb);
4944       goto postaction;
4945     }
4946 
4947     else if (nb < ms->topsize) { /* Split top */
4948       size_t rsize = ms->topsize -= nb;
4949       mchunkptr p = ms->top;
4950       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4951       r->head = rsize | PINUSE_BIT;
4952       set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4953       mem = chunk2mem(p);
4954       check_top_chunk(ms, ms->top);
4955       check_malloced_chunk(ms, mem, nb);
4956       goto postaction;
4957     }
4958 
4959     mem = sys_alloc(ms, nb);
4960 
4961   postaction:
4962     POSTACTION(ms);
4963     return mem;
4964   }
4965 
4966   return 0;
4967 }
4968 
4969 void mspace_free(mspace msp, void* mem) {
4970   if (mem != 0) {
4971     mchunkptr p  = mem2chunk(mem);
4972 #if FOOTERS
4973     mstate fm = get_mstate_for(p);
4974 #else /* FOOTERS */
4975     mstate fm = (mstate)msp;
4976 #endif /* FOOTERS */
4977     if (!ok_magic(fm)) {
4978       USAGE_ERROR_ACTION(fm, p);
4979       return;
4980     }
4981     if (!PREACTION(fm)) {
4982       check_inuse_chunk(fm, p);
4983       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4984         size_t psize = chunksize(p);
4985         mchunkptr next = chunk_plus_offset(p, psize);
4986         if (!pinuse(p)) {
4987           size_t prevsize = p->prev_foot;
4988           if ((prevsize & IS_MMAPPED_BIT) != 0) {
4989             prevsize &= ~IS_MMAPPED_BIT;
4990             psize += prevsize + MMAP_FOOT_PAD;
4991             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4992               fm->footprint -= psize;
4993             goto postaction;
4994           }
4995           else {
4996             mchunkptr prev = chunk_minus_offset(p, prevsize);
4997             psize += prevsize;
4998             p = prev;
4999             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
5000               if (p != fm->dv) {
5001                 unlink_chunk(fm, p, prevsize);
5002               }
5003               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
5004                 fm->dvsize = psize;
5005                 set_free_with_pinuse(p, psize, next);
5006                 goto postaction;
5007               }
5008             }
5009             else
5010               goto erroraction;
5011           }
5012         }
5013 
5014         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
5015           if (!cinuse(next)) {  /* consolidate forward */
5016             if (next == fm->top) {
5017               size_t tsize = fm->topsize += psize;
5018               fm->top = p;
5019               p->head = tsize | PINUSE_BIT;
5020               if (p == fm->dv) {
5021                 fm->dv = 0;
5022                 fm->dvsize = 0;
5023               }
5024               if (should_trim(fm, tsize))
5025                 sys_trim(fm, 0);
5026               goto postaction;
5027             }
5028             else if (next == fm->dv) {
5029               size_t dsize = fm->dvsize += psize;
5030               fm->dv = p;
5031               set_size_and_pinuse_of_free_chunk(p, dsize);
5032               goto postaction;
5033             }
5034             else {
5035               size_t nsize = chunksize(next);
5036               psize += nsize;
5037               unlink_chunk(fm, next, nsize);
5038               set_size_and_pinuse_of_free_chunk(p, psize);
5039               if (p == fm->dv) {
5040                 fm->dvsize = psize;
5041                 goto postaction;
5042               }
5043             }
5044           }
5045           else
5046             set_free_with_pinuse(p, psize, next);
5047 
5048           if (is_small(psize)) {
5049             insert_small_chunk(fm, p, psize);
5050             check_free_chunk(fm, p);
5051           }
5052           else {
5053             tchunkptr tp = (tchunkptr)p;
5054             insert_large_chunk(fm, tp, psize);
5055             check_free_chunk(fm, p);
5056             if (--fm->release_checks == 0)
5057               release_unused_segments(fm);
5058           }
5059           goto postaction;
5060         }
5061       }
5062     erroraction:
5063       USAGE_ERROR_ACTION(fm, p);
5064     postaction:
5065       POSTACTION(fm);
5066     }
5067   }
5068 }
5069 
5070 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
5071   void* mem;
5072   size_t req = 0;
5073   mstate ms = (mstate)msp;
5074   if (!ok_magic(ms)) {
5075     USAGE_ERROR_ACTION(ms,ms);
5076     return 0;
5077   }
5078   if (n_elements != 0) {
5079     req = n_elements * elem_size;
5080     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
5081         (req / n_elements != elem_size))
5082       req = MAX_SIZE_T; /* force downstream failure on overflow */
5083   }
5084   mem = internal_malloc(ms, req);
5085   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
5086     memset(mem, 0, req);
5087   return mem;
5088 }
5089 
5090 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
5091   if (oldmem == 0)
5092     return mspace_malloc(msp, bytes);
5093 #ifdef REALLOC_ZERO_BYTES_FREES
5094   if (bytes == 0) {
5095     mspace_free(msp, oldmem);
5096     return 0;
5097   }
5098 #endif /* REALLOC_ZERO_BYTES_FREES */
5099   else {
5100 #if FOOTERS
5101     mchunkptr p  = mem2chunk(oldmem);
5102     mstate ms = get_mstate_for(p);
5103 #else /* FOOTERS */
5104     mstate ms = (mstate)msp;
5105 #endif /* FOOTERS */
5106     if (!ok_magic(ms)) {
5107       USAGE_ERROR_ACTION(ms,ms);
5108       return 0;
5109     }
5110     return internal_realloc(ms, oldmem, bytes);
5111   }
5112 }
5113 
5114 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
5115   mstate ms = (mstate)msp;
5116   if (!ok_magic(ms)) {
5117     USAGE_ERROR_ACTION(ms,ms);
5118     return 0;
5119   }
5120   return internal_memalign(ms, alignment, bytes);
5121 }
5122 
5123 void** mspace_independent_calloc(mspace msp, size_t n_elements,
5124                                  size_t elem_size, void* chunks[]) {
5125   size_t sz = elem_size; /* serves as 1-element array */
5126   mstate ms = (mstate)msp;
5127   if (!ok_magic(ms)) {
5128     USAGE_ERROR_ACTION(ms,ms);
5129     return 0;
5130   }
5131   return ialloc(ms, n_elements, &sz, 3, chunks);
5132 }
5133 
5134 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
5135                                    size_t sizes[], void* chunks[]) {
5136   mstate ms = (mstate)msp;
5137   if (!ok_magic(ms)) {
5138     USAGE_ERROR_ACTION(ms,ms);
5139     return 0;
5140   }
5141   return ialloc(ms, n_elements, sizes, 0, chunks);
5142 }
5143 
5144 int mspace_trim(mspace msp, size_t pad) {
5145   int result = 0;
5146   mstate ms = (mstate)msp;
5147   if (ok_magic(ms)) {
5148     if (!PREACTION(ms)) {
5149       result = sys_trim(ms, pad);
5150       POSTACTION(ms);
5151     }
5152   }
5153   else {
5154     USAGE_ERROR_ACTION(ms,ms);
5155   }
5156   return result;
5157 }
5158 
5159 void mspace_malloc_stats(mspace msp) {
5160   mstate ms = (mstate)msp;
5161   if (ok_magic(ms)) {
5162     internal_malloc_stats(ms);
5163   }
5164   else {
5165     USAGE_ERROR_ACTION(ms,ms);
5166   }
5167 }
5168 
5169 size_t mspace_footprint(mspace msp) {
5170   size_t result = 0;
5171   mstate ms = (mstate)msp;
5172   if (ok_magic(ms)) {
5173     result = ms->footprint;
5174   }
5175   else {
5176     USAGE_ERROR_ACTION(ms,ms);
5177   }
5178   return result;
5179 }
5180 
5181 
5182 size_t mspace_max_footprint(mspace msp) {
5183   size_t result = 0;
5184   mstate ms = (mstate)msp;
5185   if (ok_magic(ms)) {
5186     result = ms->max_footprint;
5187   }
5188   else {
5189     USAGE_ERROR_ACTION(ms,ms);
5190   }
5191   return result;
5192 }
5193 
5194 
5195 #if !NO_MALLINFO
5196 struct mallinfo mspace_mallinfo(mspace msp) {
5197   mstate ms = (mstate)msp;
5198   if (!ok_magic(ms)) {
5199     USAGE_ERROR_ACTION(ms,ms);
5200   }
5201   return internal_mallinfo(ms);
5202 }
5203 #endif /* NO_MALLINFO */
5204 
5205 size_t mspace_usable_size(void* mem) {
5206   if (mem != 0) {
5207     mchunkptr p = mem2chunk(mem);
5208     if (cinuse(p))
5209       return chunksize(p) - overhead_for(p);
5210   }
5211   return 0;
5212 }
5213 
5214 int mspace_mallopt(int param_number, int value) {
5215   return change_mparam(param_number, value);
5216 }
5217 
5218 #endif /* MSPACES */
5219 
5220 /* -------------------- Alternative MORECORE functions ------------------- */
5221 
5222 /*
5223   Guidelines for creating a custom version of MORECORE:
5224 
5225   * For best performance, MORECORE should allocate in multiples of pagesize.
5226   * MORECORE may allocate more memory than requested. (Or even less,
5227       but this will usually result in a malloc failure.)
5228   * MORECORE must not allocate memory when given argument zero, but
5229       instead return one past the end address of memory from previous
5230       nonzero call.
5231   * For best performance, consecutive calls to MORECORE with positive
5232       arguments should return increasing addresses, indicating that
5233       space has been contiguously extended.
5234   * Even though consecutive calls to MORECORE need not return contiguous
5235       addresses, it must be OK for malloc'ed chunks to span multiple
5236       regions in those cases where they do happen to be contiguous.
5237   * MORECORE need not handle negative arguments -- it may instead
5238       just return MFAIL when given negative arguments.
5239       Negative arguments are always multiples of pagesize. MORECORE
5240       must not misinterpret negative args as large positive unsigned
5241       args. You can suppress all such calls from even occurring by defining
5242       MORECORE_CANNOT_TRIM.
5243 
5244   As an example alternative MORECORE, here is a custom allocator
5245   kindly contributed for pre-OSX macOS.  It uses virtually but not
5246   necessarily physically contiguous non-paged memory (locked in,
5247   present and won't get swapped out).  You can use it by uncommenting
5248   this section, adding some #includes, and setting up the appropriate
5249   defines above:
5250 
5251       #define MORECORE osMoreCore
5252 
5253   There is also a shutdown routine that should somehow be called for
5254   cleanup upon program exit.
5255 
5256   #define MAX_POOL_ENTRIES 100
5257   #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
5258   static int next_os_pool;
5259   void *our_os_pools[MAX_POOL_ENTRIES];
5260 
5261   void *osMoreCore(int size)
5262   {
5263     void *ptr = 0;
5264     static void *sbrk_top = 0;
5265 
5266     if (size > 0)
5267     {
5268       if (size < MINIMUM_MORECORE_SIZE)
5269          size = MINIMUM_MORECORE_SIZE;
5270       if (CurrentExecutionLevel() == kTaskLevel)
5271          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5272       if (ptr == 0)
5273       {
5274         return (void *) MFAIL;
5275       }
5276       // save ptrs so they can be freed during cleanup
5277       our_os_pools[next_os_pool] = ptr;
5278       next_os_pool++;
5279       ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5280       sbrk_top = (char *) ptr + size;
5281       return ptr;
5282     }
5283     else if (size < 0)
5284     {
5285       // we don't currently support shrink behavior
5286       return (void *) MFAIL;
5287     }
5288     else
5289     {
5290       return sbrk_top;
5291     }
5292   }
5293 
5294   // cleanup any allocated memory pools
5295   // called as last thing before shutting down driver
5296 
5297   void osCleanupMem(void)
5298   {
5299     void **ptr;
5300 
5301     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5302       if (*ptr)
5303       {
5304          PoolDeallocate(*ptr);
5305          *ptr = 0;
5306       }
5307   }
5308 
5309 */
5310 
5311 
5312 /* -----------------------------------------------------------------------
5313 History:
5314     V2.8.4 (not yet released)
5315       * Fix bad error check in mspace_footprint
5316       * Adaptations for ptmalloc, courtesy of Wolfram Gloger.
5317       * Reentrant spin locks, courtesy of Earl Chew and others
5318       * Win32 improvements, courtesy of Niall Douglas and Earl Chew
5319       * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
5320       * Various small adjustments to reduce warnings on some compilers
5321       * Extension hook in malloc_state
5322 
5323     V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
5324       * Add max_footprint functions
5325       * Ensure all appropriate literals are size_t
5326       * Fix conditional compilation problem for some #define settings
5327       * Avoid concatenating segments with the one provided
5328         in create_mspace_with_base
5329       * Rename some variables to avoid compiler shadowing warnings
5330       * Use explicit lock initialization.
5331       * Better handling of sbrk interference.
5332       * Simplify and fix segment insertion, trimming and mspace_destroy
5333       * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
5334       * Thanks especially to Dennis Flanagan for help on these.
5335 
5336     V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
5337       * Fix memalign brace error.
5338 
5339     V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
5340       * Fix improper #endif nesting in C++
5341       * Add explicit casts needed for C++
5342 
5343     V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
5344       * Use trees for large bins
5345       * Support mspaces
5346       * Use segments to unify sbrk-based and mmap-based system allocation,
5347         removing need for emulation on most platforms without sbrk.
5348       * Default safety checks
5349       * Optional footer checks. Thanks to William Robertson for the idea.
5350       * Internal code refactoring
5351       * Incorporate suggestions and platform-specific changes.
5352         Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
5353         Aaron Bachmann,  Emery Berger, and others.
5354       * Speed up non-fastbin processing enough to remove fastbins.
5355       * Remove useless cfree() to avoid conflicts with other apps.
5356       * Remove internal memcpy, memset. Compilers handle builtins better.
5357       * Remove some options that no one ever used and rename others.
5358 
5359     V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
5360       * Fix malloc_state bitmap array misdeclaration
5361 
5362     V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
5363       * Allow tuning of FIRST_SORTED_BIN_SIZE
5364       * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
5365       * Better detection and support for non-contiguousness of MORECORE.
5366         Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
5367       * Bypass most of malloc if no frees. Thanks To Emery Berger.
5368       * Fix freeing of old top non-contiguous chunk in sysmalloc.
5369       * Raised default trim and map thresholds to 256K.
5370       * Fix mmap-related #defines. Thanks to Lubos Lunak.
5371       * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5372       * Branch-free bin calculation
5373       * Default trim and mmap thresholds now 256K.
5374 
5375     V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
5376       * Introduce independent_comalloc and independent_calloc.
5377         Thanks to Michael Pachos for motivation and help.
5378       * Make optional .h file available
5379       * Allow > 2GB requests on 32bit systems.
5380       * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5381         Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5382         and Anonymous.
5383       * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5384         helping test this.)
5385       * memalign: check alignment arg
5386       * realloc: don't try to shift chunks backwards, since this
5387         leads to  more fragmentation in some programs and doesn't
5388         seem to help in any others.
5389       * Collect all cases in malloc requiring system memory into sysmalloc
5390       * Use mmap as backup to sbrk
5391       * Place all internal state in malloc_state
5392       * Introduce fastbins (although similar to 2.5.1)
5393       * Many minor tunings and cosmetic improvements
5394       * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5395       * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5396         Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5397       * Include errno.h to support default failure action.
5398 
5399     V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
5400       * return null for negative arguments
5401       * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5402          * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5403           (e.g. WIN32 platforms)
5404          * Cleanup header file inclusion for WIN32 platforms
5405          * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5406          * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5407            memory allocation routines
5408          * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5409          * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5410            usage of 'assert' in non-WIN32 code
5411          * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5412            avoid infinite loop
5413       * Always call 'fREe()' rather than 'free()'
5414 
5415     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
5416       * Fixed ordering problem with boundary-stamping
5417 
5418     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
5419       * Added pvalloc, as recommended by H.J. Liu
5420       * Added 64bit pointer support mainly from Wolfram Gloger
5421       * Added anonymously donated WIN32 sbrk emulation
5422       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5423       * malloc_extend_top: fix mask error that caused wastage after
5424         foreign sbrks
5425       * Add linux mremap support code from HJ Liu
5426 
5427     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
5428       * Integrated most documentation with the code.
5429       * Add support for mmap, with help from
5430         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5431       * Use last_remainder in more cases.
5432       * Pack bins using idea from  colin@nyx10.cs.du.edu
5433       * Use ordered bins instead of best-fit threshold
5434       * Eliminate block-local decls to simplify tracing and debugging.
5435       * Support another case of realloc via move into top
5436       * Fix error occurring when initial sbrk_base not word-aligned.
5437       * Rely on page size for units instead of SBRK_UNIT to
5438         avoid surprises about sbrk alignment conventions.
5439       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5440         (raymond@es.ele.tue.nl) for the suggestion.
5441       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5442       * More precautions for cases where other routines call sbrk,
5443         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5444       * Added macros etc., allowing use in linux libc from
5445         H.J. Lu (hjl@gnu.ai.mit.edu)
5446       * Inverted this history list
5447 
5448     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
5449       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5450       * Removed all preallocation code since under current scheme
5451         the work required to undo bad preallocations exceeds
5452         the work saved in good cases for most test programs.
5453       * No longer use return list or unconsolidated bins since
5454         no scheme using them consistently outperforms those that don't
5455         given above changes.
5456       * Use best fit for very large chunks to prevent some worst-cases.
5457       * Added some support for debugging
5458 
5459     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
5460       * Removed footers when chunks are in use. Thanks to
5461         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5462 
5463     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
5464       * Added malloc_trim, with help from Wolfram Gloger
5465         (wmglo@Dent.MED.Uni-Muenchen.DE).
5466 
5467     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
5468 
5469     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
5470       * realloc: try to expand in both directions
5471       * malloc: swap order of clean-bin strategy;
5472       * realloc: only conditionally expand backwards
5473       * Try not to scavenge used bins
5474       * Use bin counts as a guide to preallocation
5475       * Occasionally bin return list chunks in first scan
5476       * Add a few optimizations from colin@nyx10.cs.du.edu
5477 
5478     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
5479       * faster bin computation & slightly different binning
5480       * merged all consolidations to one part of malloc proper
5481          (eliminating old malloc_find_space & malloc_clean_bin)
5482       * Scan 2 returns chunks (not just 1)
5483       * Propagate failure in realloc if malloc returns 0
5484       * Add stuff to allow compilation on non-ANSI compilers
5485           from kpv@research.att.com
5486 
5487     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
5488       * removed potential for odd address access in prev_chunk
5489       * removed dependency on getpagesize.h
5490       * misc cosmetics and a bit more internal documentation
5491       * anticosmetics: mangled names in macros to evade debugger strangeness
5492       * tested on sparc, hp-700, dec-mips, rs6000
5493           with gcc & native cc (hp, dec only) allowing
5494           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5495 
5496     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
5497       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5498          structure of old version,  but most details differ.)
5499 
5500 */
5501 
5502 
