ScummVM API documentation
dlmalloc.h
1 /* ScummVM - Graphic Adventure Engine
2  *
3  * ScummVM is the legal property of its developers, whose names
4  * are too numerous to list here. Please refer to the COPYRIGHT
5  * file distributed with this source distribution.
6  *
7  * This program is free software: you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation, either version 3 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program. If not, see <http://www.gnu.org/licenses/>.
19  *
20  */
21 
22 #ifndef PLATFORM_ATARI_DLMALLOC_H
23 #define PLATFORM_ATARI_DLMALLOC_H
24 
/* Atari build configuration for dlmalloc (each option is documented in the
 * large reference comment below): all system memory comes from an sbrk-style
 * MORECORE hook; there is no mmap(), and the regions MORECORE returns are
 * neither contiguous nor returnable to the system. */
25 #define HAVE_MMAP 0 /* no mmap()/munmap() on this platform */
26 #define HAVE_MORECORE 1 /* obtain memory via an sbrk-style MORECORE */
27 #define MORECORE_CONTIGUOUS 0 /* successive MORECORE regions may not adjoin */
28 #define MORECORE_CANNOT_TRIM 1 /* MORECORE cannot take negative (release) args */
29 #define NO_MALLOC_STATS 1 /* skip malloc_stats(): avoids fprintf/stdio deps */
30 #define LACKS_TIME_H /* time(0) calls malloc... */
31 #define MSPACES 1 /* also compile mspace_* (independent heap) routines */
32 #define MALLOC_ALIGNMENT ((size_t)16U) /* 16B cache line */
33 
/* Silence warnings triggered by the (third-party) dlmalloc body below. */
34 #pragma GCC diagnostic push
35 /* warning: 'mallinfo mallinfo()' hides constructor for 'struct mallinfo' [-Wshadow] */
36 #pragma GCC diagnostic ignored "-Wshadow"
37 /* warning: this use of "defined" may not be portable [-Wexpansion-to-defined] */
38 #pragma GCC diagnostic ignored "-Wexpansion-to-defined"
39 
40 /*
41 Copyright 2023 Doug Lea
42 
43 Permission is hereby granted, free of charge, to any person obtaining
44 a copy of this software and associated documentation files (the
45 "Software"), to deal in the Software without restriction, including
46 without limitation the rights to use, copy, modify, merge, publish,
47 distribute, sublicense, and/or sell copies of the Software, and to
48 permit persons to whom the Software is furnished to do so.
49 
50 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
51 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
52 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
53 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
54 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
55 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
56 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
57 
58 * Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
59  Re-licensed 25 Sep 2023 with MIT-0 replacing obsolete CC0
60  See https://opensource.org/license/mit-0/
61 
62 * Quickstart
63 
64  This library is all in one file to simplify the most common usage:
65  ftp it, compile it (-O3), and link it into another program. All of
66  the compile-time options default to reasonable values for use on
67  most platforms. You might later want to step through various
68  compile-time and dynamic tuning options.
69 
70  For convenience, an include file for code using this malloc is at:
71  ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h
72  You don't really need this .h file unless you call functions not
73  defined in your system include files. The .h file contains only the
74  excerpts from this file needed for using this malloc on ANSI C/C++
75  systems, so long as you haven't changed compile-time options about
76  naming and tuning parameters. If you do, then you can create your
77  own malloc.h that does include all settings by cutting at the point
78  indicated below. Note that you may already by default be using a C
79  library containing a malloc that is based on some version of this
80  malloc (for example in linux). You might still want to use the one
81  in this file to customize settings or to avoid overheads associated
82  with library versions.
83 
84 * Vital statistics:
85 
86  Supported pointer/size_t representation: 4 or 8 bytes
87  size_t MUST be an unsigned type of the same width as
88  pointers. (If you are using an ancient system that declares
89  size_t as a signed type, or need it to be a different width
90  than pointers, you can use a previous release of this malloc
91  (e.g. 2.7.2) supporting these.)
92 
93  Alignment: 8 bytes (minimum)
94  This suffices for nearly all current machines and C compilers.
95  However, you can define MALLOC_ALIGNMENT to be wider than this
96  if necessary (up to 128bytes), at the expense of using more space.
97 
98  Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes)
99  8 or 16 bytes (if 8byte sizes)
100  Each malloced chunk has a hidden word of overhead holding size
101  and status information, and additional cross-check word
102  if FOOTERS is defined.
103 
104  Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
105  8-byte ptrs: 32 bytes (including overhead)
106 
107  Even a request for zero bytes (i.e., malloc(0)) returns a
108  pointer to something of the minimum allocatable size.
109  The maximum overhead wastage (i.e., number of extra bytes
110  allocated than were requested in malloc) is less than or equal
111  to the minimum size, except for requests >= mmap_threshold that
112  are serviced via mmap(), where the worst case wastage is about
113  32 bytes plus the remainder from a system page (the minimal
114  mmap unit); typically 4096 or 8192 bytes.
115 
116  Security: static-safe; optionally more or less
117  The "security" of malloc refers to the ability of malicious
118  code to accentuate the effects of errors (for example, freeing
119  space that is not currently malloc'ed or overwriting past the
120  ends of chunks) in code that calls malloc. This malloc
121  guarantees not to modify any memory locations below the base of
122  heap, i.e., static variables, even in the presence of usage
123  errors. The routines additionally detect most improper frees
124  and reallocs. All this holds as long as the static bookkeeping
125  for malloc itself is not corrupted by some other means. This
126  is only one aspect of security -- these checks do not, and
127  cannot, detect all possible programming errors.
128 
129  If FOOTERS is defined nonzero, then each allocated chunk
130  carries an additional check word to verify that it was malloced
131  from its space. These check words are the same within each
132  execution of a program using malloc, but differ across
133  executions, so externally crafted fake chunks cannot be
134  freed. This improves security by rejecting frees/reallocs that
135  could corrupt heap memory, in addition to the checks preventing
136  writes to statics that are always on. This may further improve
137  security at the expense of time and space overhead. (Note that
138  FOOTERS may also be worth using with MSPACES.)
139 
140  By default detected errors cause the program to abort (calling
141  "abort()"). You can override this to instead proceed past
142  errors by defining PROCEED_ON_ERROR. In this case, a bad free
143  has no effect, and a malloc that encounters a bad address
144  caused by user overwrites will ignore the bad address by
145  dropping pointers and indices to all known memory. This may
146  be appropriate for programs that should continue if at all
147  possible in the face of programming errors, although they may
148  run out of memory because dropped memory is never reclaimed.
149 
150  If you don't like either of these options, you can define
151  CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
152  else. And if you are sure that your program using malloc has
153  no errors or vulnerabilities, you can define INSECURE to 1,
154  which might (or might not) provide a small performance improvement.
155 
156  It is also possible to limit the maximum total allocatable
157  space, using malloc_set_footprint_limit. This is not
158  designed as a security feature in itself (calls to set limits
159  are not screened or privileged), but may be useful as one
160  aspect of a secure implementation.
161 
162  Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero
163  When USE_LOCKS is defined, each public call to malloc, free,
164  etc is surrounded with a lock. By default, this uses a plain
165  pthread mutex, win32 critical section, or a spin-lock if
166  available for the platform and not disabled by setting
167  USE_SPIN_LOCKS=0. However, if USE_RECURSIVE_LOCKS is defined,
168  recursive versions are used instead (which are not required for
169  base functionality but may be needed in layered extensions).
170  Using a global lock is not especially fast, and can be a major
171  bottleneck. It is designed only to provide minimal protection
172  in concurrent environments, and to provide a basis for
173  extensions. If you are using malloc in a concurrent program,
174  consider instead using nedmalloc
175  (http://www.nedprod.com/programs/portable/nedmalloc/) or
176  ptmalloc (See http://www.malloc.de), which are derived from
177  versions of this malloc.
178 
179  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
180  This malloc can use unix sbrk or any emulation (invoked using
181  the CALL_MORECORE macro) and/or mmap/munmap or any emulation
182  (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
183  memory. On most unix systems, it tends to work best if both
184  MORECORE and MMAP are enabled. On Win32, it uses emulations
185  based on VirtualAlloc. It also uses common C library functions
186  like memset.
187 
188  Compliance: I believe it is compliant with the Single Unix Specification
189  (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
190  others as well.
191 
192 * Overview of algorithms
193 
194  This is not the fastest, most space-conserving, most portable, or
195  most tunable malloc ever written. However it is among the fastest
196  while also being among the most space-conserving, portable and
197  tunable. Consistent balance across these factors results in a good
198  general-purpose allocator for malloc-intensive programs.
199 
200  In most ways, this malloc is a best-fit allocator. Generally, it
201  chooses the best-fitting existing chunk for a request, with ties
202  broken in approximately least-recently-used order. (This strategy
203  normally maintains low fragmentation.) However, for requests less
204  than 256bytes, it deviates from best-fit when there is not an
205  exactly fitting available chunk by preferring to use space adjacent
206  to that used for the previous small request, as well as by breaking
207  ties in approximately most-recently-used order. (These enhance
208  locality of series of small allocations.) And for very large requests
209  (>= 256Kb by default), it relies on system memory mapping
210  facilities, if supported. (This helps avoid carrying around and
211  possibly fragmenting memory used only for large chunks.)
212 
213  All operations (except malloc_stats and mallinfo) have execution
214  times that are bounded by a constant factor of the number of bits in
215  a size_t, not counting any clearing in calloc or copying in realloc,
216  or actions surrounding MORECORE and MMAP that have times
217  proportional to the number of non-contiguous regions returned by
218  system allocation routines, which is often just 1. In real-time
219  applications, you can optionally suppress segment traversals using
220  NO_SEGMENT_TRAVERSAL, which assures bounded execution even when
221  system allocators return non-contiguous spaces, at the typical
222  expense of carrying around more memory and increased fragmentation.
223 
224  The implementation is not very modular and seriously overuses
225  macros. Perhaps someday all C compilers will do as good a job
226  inlining modular code as can now be done by brute-force expansion,
227  but now, enough of them seem not to.
228 
229  Some compilers issue a lot of warnings about code that is
230  dead/unreachable only on some platforms, and also about intentional
231  uses of negation on unsigned types. All known cases of each can be
232  ignored.
233 
234  For a longer but out of date high-level description, see
235  http://gee.cs.oswego.edu/dl/html/malloc.html
236 
237 * MSPACES
238  If MSPACES is defined, then in addition to malloc, free, etc.,
239  this file also defines mspace_malloc, mspace_free, etc. These
240  are versions of malloc routines that take an "mspace" argument
241  obtained using create_mspace, to control all internal bookkeeping.
242  If ONLY_MSPACES is defined, only these versions are compiled.
243  So if you would like to use this allocator for only some allocations,
244  and your system malloc for others, you can compile with
245  ONLY_MSPACES and then do something like...
246  static mspace mymspace = create_mspace(0,0); // for example
247  #define mymalloc(bytes) mspace_malloc(mymspace, bytes)
248 
249  (Note: If you only need one instance of an mspace, you can instead
250  use "USE_DL_PREFIX" to relabel the global malloc.)
251 
252  You can similarly create thread-local allocators by storing
253  mspaces as thread-locals. For example:
254  static __thread mspace tlms = 0;
255  void* tlmalloc(size_t bytes) {
256  if (tlms == 0) tlms = create_mspace(0, 0);
257  return mspace_malloc(tlms, bytes);
258  }
259  void tlfree(void* mem) { mspace_free(tlms, mem); }
260 
261  Unless FOOTERS is defined, each mspace is completely independent.
262  You cannot allocate from one and free to another (although
263  conformance is only weakly checked, so usage errors are not always
264  caught). If FOOTERS is defined, then each chunk carries around a tag
265  indicating its originating mspace, and frees are directed to their
266  originating spaces. Normally, this requires use of locks.
267 
268  ------------------------- Compile-time options ---------------------------
269 
270 Be careful in setting #define values for numerical constants of type
271 size_t. On some systems, literal values are not automatically extended
272 to size_t precision unless they are explicitly casted. You can also
273 use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.
274 
275 WIN32 default: defined if _WIN32 defined
276  Defining WIN32 sets up defaults for MS environment and compilers.
277  Otherwise defaults are for unix. Beware that there seem to be some
278  cases where this malloc might not be a pure drop-in replacement for
279  Win32 malloc: Random-looking failures from Win32 GDI API's (eg;
280  SetDIBits()) may be due to bugs in some video driver implementations
281  when pixel buffers are malloc()ed, and the region spans more than
282  one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb)
283  default granularity, pixel buffers may straddle virtual allocation
284  regions more often than when using the Microsoft allocator. You can
285  avoid this by using VirtualAlloc() and VirtualFree() for all pixel
286  buffers rather than using malloc(). If this is not possible,
287  recompile this malloc with a larger DEFAULT_GRANULARITY. Note:
288  in cases where MSC and gcc (cygwin) are known to differ on WIN32,
289  conditions use _MSC_VER to distinguish them.
290 
291 DLMALLOC_EXPORT default: extern
292  Defines how public APIs are declared. If you want to export via a
293  Windows DLL, you might define this as
294  #define DLMALLOC_EXPORT extern __declspec(dllexport)
295  If you want a POSIX ELF shared object, you might use
296  #define DLMALLOC_EXPORT extern __attribute__((visibility("default")))
297 
298 MALLOC_ALIGNMENT default: (size_t)(2 * sizeof(void *))
299  Controls the minimum alignment for malloc'ed chunks. It must be a
300  power of two and at least 8, even on machines for which smaller
301  alignments would suffice. It may be defined as larger than this
302  though. Note however that code and data structures are optimized for
303  the case of 8-byte alignment.
304 
305 MSPACES default: 0 (false)
306  If true, compile in support for independent allocation spaces.
307  This is only supported if HAVE_MMAP is true.
308 
309 ONLY_MSPACES default: 0 (false)
310  If true, only compile in mspace versions, not regular versions.
311 
312 USE_LOCKS default: 0 (false)
313  Causes each call to each public routine to be surrounded with
314  pthread or WIN32 mutex lock/unlock. (If set true, this can be
315  overridden on a per-mspace basis for mspace versions.) If set to a
316  non-zero value other than 1, locks are used, but their
317  implementation is left out, so lock functions must be supplied manually,
318  as described below.
319 
320 USE_SPIN_LOCKS default: 1 iff USE_LOCKS and spin locks available
321  If true, uses custom spin locks for locking. This is currently
322  supported only gcc >= 4.1, older gccs on x86 platforms, and recent
323  MS compilers. Otherwise, posix locks or win32 critical sections are
324  used.
325 
326 USE_RECURSIVE_LOCKS default: not defined
327  If defined nonzero, uses recursive (aka reentrant) locks, otherwise
328  uses plain mutexes. This is not required for malloc proper, but may
329  be needed for layered allocators such as nedmalloc.
330 
331 LOCK_AT_FORK default: not defined
332  If defined nonzero, performs pthread_atfork upon initialization
333  to initialize child lock while holding parent lock. The implementation
334  assumes that pthread locks (not custom locks) are being used. In other
335  cases, you may need to customize the implementation.
336 
337 FOOTERS default: 0
338  If true, provide extra checking and dispatching by placing
339  information in the footers of allocated chunks. This adds
340  space and time overhead.
341 
342 INSECURE default: 0
343  If true, omit checks for usage errors and heap space overwrites.
344 
345 USE_DL_PREFIX default: NOT defined
346  Causes compiler to prefix all public routines with the string 'dl'.
347  This can be useful when you only want to use this malloc in one part
348  of a program, using your regular system malloc elsewhere.
349 
350 MALLOC_INSPECT_ALL default: NOT defined
351  If defined, compiles malloc_inspect_all and mspace_inspect_all, that
352  perform traversal of all heap space. Unless access to these
353  functions is otherwise restricted, you probably do not want to
354  include them in secure implementations.
355 
356 ABORT default: defined as abort()
357  Defines how to abort on failed checks. On most systems, a failed
358  check cannot die with an "assert" or even print an informative
359  message, because the underlying print routines in turn call malloc,
360  which will fail again. Generally, the best policy is to simply call
361  abort(). It's not very useful to do more than this because many
362  errors due to overwriting will show up as address faults (null, odd
363  addresses etc) rather than malloc-triggered checks, so will also
364  abort. Also, most compilers know that abort() does not return, so
365  can better optimize code conditionally calling it.
366 
367 PROCEED_ON_ERROR default: defined as 0 (false)
368  Controls whether detected bad addresses cause them to be bypassed
369  rather than aborting. If set, detected bad arguments to free and
370  realloc are ignored. And all bookkeeping information is zeroed out
371  upon a detected overwrite of freed heap space, thus losing the
372  ability to ever return it from malloc again, but enabling the
373  application to proceed. If PROCEED_ON_ERROR is defined, the
374  static variable malloc_corruption_error_count is compiled in
375  and can be examined to see if errors have occurred. This option
376  generates slower code than the default abort policy.
377 
378 DEBUG default: NOT defined
379  The DEBUG setting is mainly intended for people trying to modify
380  this code or diagnose problems when porting to new platforms.
381  However, it may also be able to better isolate user errors than just
382  using runtime checks. The assertions in the check routines spell
383  out in more detail the assumptions and invariants underlying the
384  algorithms. The checking is fairly extensive, and will slow down
385  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
386  set will attempt to check every non-mmapped allocated and free chunk
387  in the course of computing the summaries.
388 
389 ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
390  Debugging assertion failures can be nearly impossible if your
391  version of the assert macro causes malloc to be called, which will
392  lead to a cascade of further failures, blowing the runtime stack.
393  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
394  which will usually make debugging easier.
395 
396 MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
397  The action to take before "return 0" when malloc fails to be able to
398  return memory because there is none available.
399 
400 HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
401  True if this system supports sbrk or an emulation of it.
402 
403 MORECORE default: sbrk
404  The name of the sbrk-style system routine to call to obtain more
405  memory. See below for guidance on writing custom MORECORE
406  functions. The type of the argument to sbrk/MORECORE varies across
407  systems. It cannot be size_t, because it supports negative
408  arguments, so it is normally the signed type of the same width as
409  size_t (sometimes declared as "intptr_t"). It doesn't much matter
410  though. Internally, we only call it with arguments less than half
411  the max value of a size_t, which should work across all reasonable
412  possibilities, although sometimes generating compiler warnings.
413 
414 MORECORE_CONTIGUOUS default: 1 (true) if HAVE_MORECORE
415  If true, take advantage of fact that consecutive calls to MORECORE
416  with positive arguments always return contiguous increasing
417  addresses. This is true of unix sbrk. It does not hurt too much to
418  set it true anyway, since malloc copes with non-contiguities.
419  Setting it false when definitely non-contiguous saves time
420  and possibly wasted space it would take to discover this though.
421 
422 MORECORE_CANNOT_TRIM default: NOT defined
423  True if MORECORE cannot release space back to the system when given
424  negative arguments. This is generally necessary only if you are
425  using a hand-crafted MORECORE function that cannot handle negative
426  arguments.
427 
428 NO_SEGMENT_TRAVERSAL default: 0
429  If non-zero, suppresses traversals of memory segments
430  returned by either MORECORE or CALL_MMAP. This disables
431  merging of segments that are contiguous, and selectively
432  releasing them to the OS if unused, but bounds execution times.
433 
434 HAVE_MMAP default: 1 (true)
435  True if this system supports mmap or an emulation of it. If so, and
436  HAVE_MORECORE is not true, MMAP is used for all system
437  allocation. If set and HAVE_MORECORE is true as well, MMAP is
438  primarily used to directly allocate very large blocks. It is also
439  used as a backup strategy in cases where MORECORE fails to provide
440  space from system. Note: A single call to MUNMAP is assumed to be
441  able to unmap memory that may have been allocated using multiple calls
442  to MMAP, so long as they are adjacent.
443 
444 HAVE_MREMAP default: 1 on linux, else 0
445  If true realloc() uses mremap() to re-allocate large blocks and
446  extend or shrink allocation spaces.
447 
448 MMAP_CLEARS default: 1 except on WINCE.
449  True if mmap clears memory so calloc doesn't need to. This is true
450  for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
451 
452 USE_BUILTIN_FFS default: 0 (i.e., not used)
453  Causes malloc to use the builtin ffs() function to compute indices.
454  Some compilers may recognize and intrinsify ffs to be faster than the
455  supplied C version. Also, the case of x86 using gcc is special-cased
456  to an asm instruction, so is already as fast as it can be, and so
457  this setting has no effect. Similarly for Win32 under recent MS compilers.
458  (On most x86s, the asm version is only slightly faster than the C version.)
459 
460 malloc_getpagesize default: derive from system includes, or 4096.
461  The system page size. To the extent possible, this malloc manages
462  memory from the system in page-size units. This may be (and
463  usually is) a function rather than a constant. This is ignored
464  if WIN32, where page size is determined using getSystemInfo during
465  initialization.
466 
467 USE_DEV_RANDOM default: 0 (i.e., not used)
468  Causes malloc to use /dev/random to initialize secure magic seed for
469  stamping footers. Otherwise, the current time is used.
470 
471 NO_MALLINFO default: 0
472  If defined, don't compile "mallinfo". This can be a simple way
473  of dealing with mismatches between system declarations and
474  those in this file.
475 
476 MALLINFO_FIELD_TYPE default: size_t
477  The type of the fields in the mallinfo struct. This was originally
478  defined as "int" in SVID etc, but is more usefully defined as
479  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set
480 
481 NO_MALLOC_STATS default: 0
482  If defined, don't compile "malloc_stats". This avoids calls to
483  fprintf and bringing in stdio dependencies you might not want.
484 
485 REALLOC_ZERO_BYTES_FREES default: not defined
486  This should be set if a call to realloc with zero bytes should
487  be the same as a call to free. Some people think it should. Otherwise,
488  since this malloc returns a unique pointer for malloc(0), so does
489  realloc(p, 0).
490 
491 LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
492 LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
493 LACKS_STDLIB_H LACKS_SCHED_H LACKS_TIME_H default: NOT defined unless on WIN32
494  Define these if your system does not have these header files.
495  You might need to manually insert some of the declarations they provide.
496 
497 DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
498  system_info.dwAllocationGranularity in WIN32,
499  otherwise 64K.
500  Also settable using mallopt(M_GRANULARITY, x)
501  The unit for allocating and deallocating memory from the system. On
502  most systems with contiguous MORECORE, there is no reason to
503  make this more than a page. However, systems with MMAP tend to
504  either require or encourage larger granularities. You can increase
505  this value to prevent system allocation functions from being called so
506  often, especially if they are slow. The value must be at least one
507  page and must be a power of two. Setting to 0 causes initialization
508  to either page size or win32 region size. (Note: In previous
509  versions of malloc, the equivalent of this option was called
510  "TOP_PAD")
511 
512 DEFAULT_TRIM_THRESHOLD default: 2MB
513  Also settable using mallopt(M_TRIM_THRESHOLD, x)
514  The maximum amount of unused top-most memory to keep before
515  releasing via malloc_trim in free(). Automatic trimming is mainly
516  useful in long-lived programs using contiguous MORECORE. Because
517  trimming via sbrk can be slow on some systems, and can sometimes be
518  wasteful (in cases where programs immediately afterward allocate
519  more large chunks) the value should be high enough so that your
520  overall system performance would improve by releasing this much
521  memory. As a rough guide, you might set to a value close to the
522  average size of a process (program) running on your system.
523  Releasing this much memory would allow such a process to run in
524  memory. Generally, it is worth tuning trim thresholds when a
525  program undergoes phases where several large chunks are allocated
526  and released in ways that can reuse each other's storage, perhaps
527  mixed with phases where there are no such chunks at all. The trim
528  value must be greater than page size to have any useful effect. To
529  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
530  some people use of mallocing a huge space and then freeing it at
531  program startup, in an attempt to reserve system memory, doesn't
532  have the intended effect under automatic trimming, since that memory
533  will immediately be returned to the system.
534 
535 DEFAULT_MMAP_THRESHOLD default: 256K
536  Also settable using mallopt(M_MMAP_THRESHOLD, x)
537  The request size threshold for using MMAP to directly service a
538  request. Requests of at least this size that cannot be allocated
539  using already-existing space will be serviced via mmap. (If enough
540  normal freed space already exists it is used instead.) Using mmap
541  segregates relatively large chunks of memory so that they can be
542  individually obtained and released from the host system. A request
543  serviced through mmap is never reused by any other request (at least
544  not directly; the system may just so happen to remap successive
545  requests to the same locations). Segregating space in this way has
546  the benefits that: Mmapped space can always be individually released
547  back to the system, which helps keep the system level memory demands
548  of a long-lived program low. Also, mapped memory doesn't become
549  `locked' between other chunks, as can happen with normally allocated
550  chunks, which means that even trimming via malloc_trim would not
551  release them. However, it has the disadvantage that the space
552  cannot be reclaimed, consolidated, and then used to service later
553  requests, as happens with normal chunks. The advantages of mmap
554  nearly always outweigh disadvantages for "large" chunks, but the
555  value of "large" may vary across systems. The default is an
556  empirically derived value that works well in most systems. You can
557  disable mmap by setting to MAX_SIZE_T.
558 
559 MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
560  The number of consolidated frees between checks to release
561  unused segments when freeing. When using non-contiguous segments,
562  especially with multiple mspaces, checking only for topmost space
563  doesn't always suffice to trigger trimming. To compensate for this,
564  free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
565  current number of segments, if greater) try to release unused
566  segments to the OS when freeing chunks that result in
567  consolidation. The best value for this parameter is a compromise
568  between slowing down frees with relatively costly checks that
569  rarely trigger versus holding on to unused memory. To effectively
570  disable, set to MAX_SIZE_T. This may lead to a very slight speed
571  improvement at the expense of carrying around more memory.
572 */
573 
574 /* Version identifier to allow people to support multiple versions */
575 #ifndef DLMALLOC_VERSION
576 #define DLMALLOC_VERSION 20806
577 #endif /* DLMALLOC_VERSION */
578 
/* Linkage used for the public entry points; override (e.g. with dllexport
   or ELF visibility attributes, per the option notes above) when building a
   shared library. */
579 #ifndef DLMALLOC_EXPORT
580 #define DLMALLOC_EXPORT extern
581 #endif
582 
/* Windows detection: map _WIN32/_WIN32_WCE onto the WIN32 flag, then switch
   to the VirtualAlloc-based mmap emulation (see the overview comment above)
   and mark the unix-only headers as absent. */
583 #ifndef WIN32
584 #ifdef _WIN32
585 #define WIN32 1
586 #endif /* _WIN32 */
587 #ifdef _WIN32_WCE
588 #define LACKS_FCNTL_H
589 #define WIN32 1
590 #endif /* _WIN32_WCE */
591 #endif /* WIN32 */
592 #ifdef WIN32
593 #define WIN32_LEAN_AND_MEAN
594 #include <windows.h>
595 #include <tchar.h>
596 #define HAVE_MMAP 1
597 #define HAVE_MORECORE 0
598 #define LACKS_UNISTD_H
599 #define LACKS_SYS_PARAM_H
600 #define LACKS_SYS_MMAN_H
601 #define LACKS_STRING_H
602 #define LACKS_STRINGS_H
603 #define LACKS_SYS_TYPES_H
604 #define LACKS_ERRNO_H
605 #define LACKS_SCHED_H
606 #ifndef MALLOC_FAILURE_ACTION
/* Per the option notes above, the win32 failure action is a no-op (no errno) */
607 #define MALLOC_FAILURE_ACTION
608 #endif /* MALLOC_FAILURE_ACTION */
609 #ifndef MMAP_CLEARS
610 #ifdef _WIN32_WCE /* WINCE reportedly does not clear */
611 #define MMAP_CLEARS 0
612 #else
613 #define MMAP_CLEARS 1
614 #endif /* _WIN32_WCE */
615 #endif /*MMAP_CLEARS */
616 #endif /* WIN32 */
617 
/* Darwin: prefer mmap over sbrk — unless a platform section earlier in this
   file already chose MORECORE (as the Atari settings at the top do). */
618 #if defined(DARWIN) || defined(_DARWIN)
619 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
620 #ifndef HAVE_MORECORE
621 #define HAVE_MORECORE 0
622 #define HAVE_MMAP 1
623 /* OSX allocators provide 16 byte alignment */
624 #ifndef MALLOC_ALIGNMENT
625 #define MALLOC_ALIGNMENT ((size_t)16U)
626 #endif
627 #endif /* HAVE_MORECORE */
628 #endif /* DARWIN */
629 
630 #ifndef LACKS_SYS_TYPES_H
631 #include <sys/types.h> /* For size_t */
632 #endif /* LACKS_SYS_TYPES_H */
633 
634 /* The maximum possible size_t value has all bits set */
635 #define MAX_SIZE_T (~(size_t)0)
636 
637 #ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
638 #define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
639  (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
640 #endif /* USE_LOCKS */
641 
642 #if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
643 #if ((defined(__GNUC__) && \
644  ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
645  defined(__i386__) || defined(__x86_64__))) || \
646  (defined(_MSC_VER) && _MSC_VER>=1310))
647 #ifndef USE_SPIN_LOCKS
648 #define USE_SPIN_LOCKS 1
649 #endif /* USE_SPIN_LOCKS */
650 #elif USE_SPIN_LOCKS
651 #error "USE_SPIN_LOCKS defined without implementation"
652 #endif /* ... locks available... */
653 #elif !defined(USE_SPIN_LOCKS)
654 #define USE_SPIN_LOCKS 0
655 #endif /* USE_LOCKS */
656 
657 #ifndef ONLY_MSPACES
658 #define ONLY_MSPACES 0
659 #endif /* ONLY_MSPACES */
660 #ifndef MSPACES
661 #if ONLY_MSPACES
662 #define MSPACES 1
663 #else /* ONLY_MSPACES */
664 #define MSPACES 0
665 #endif /* ONLY_MSPACES */
666 #endif /* MSPACES */
667 #ifndef MALLOC_ALIGNMENT
668 #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
669 #endif /* MALLOC_ALIGNMENT */
670 #ifndef FOOTERS
671 #define FOOTERS 0
672 #endif /* FOOTERS */
673 #ifndef ABORT
674 #define ABORT abort()
675 #endif /* ABORT */
676 #ifndef ABORT_ON_ASSERT_FAILURE
677 #define ABORT_ON_ASSERT_FAILURE 1
678 #endif /* ABORT_ON_ASSERT_FAILURE */
679 #ifndef PROCEED_ON_ERROR
680 #define PROCEED_ON_ERROR 0
681 #endif /* PROCEED_ON_ERROR */
682 
683 #ifndef INSECURE
684 #define INSECURE 0
685 #endif /* INSECURE */
686 #ifndef MALLOC_INSPECT_ALL
687 #define MALLOC_INSPECT_ALL 0
688 #endif /* MALLOC_INSPECT_ALL */
689 #ifndef HAVE_MMAP
690 #define HAVE_MMAP 1
691 #endif /* HAVE_MMAP */
692 #ifndef MMAP_CLEARS
693 #define MMAP_CLEARS 1
694 #endif /* MMAP_CLEARS */
695 #ifndef HAVE_MREMAP
696 #ifdef linux
697 #define HAVE_MREMAP 1
698 #define _GNU_SOURCE /* Turns on mremap() definition */
699 #else /* linux */
700 #define HAVE_MREMAP 0
701 #endif /* linux */
702 #endif /* HAVE_MREMAP */
703 #ifndef MALLOC_FAILURE_ACTION
704 #define MALLOC_FAILURE_ACTION errno = ENOMEM;
705 #endif /* MALLOC_FAILURE_ACTION */
706 #ifndef HAVE_MORECORE
707 #if ONLY_MSPACES
708 #define HAVE_MORECORE 0
709 #else /* ONLY_MSPACES */
710 #define HAVE_MORECORE 1
711 #endif /* ONLY_MSPACES */
712 #endif /* HAVE_MORECORE */
713 #if !HAVE_MORECORE
714 #define MORECORE_CONTIGUOUS 0
715 #else /* !HAVE_MORECORE */
716 #define MORECORE_DEFAULT sbrk
717 #ifndef MORECORE_CONTIGUOUS
718 #define MORECORE_CONTIGUOUS 1
719 #endif /* MORECORE_CONTIGUOUS */
720 #endif /* HAVE_MORECORE */
721 #ifndef DEFAULT_GRANULARITY
722 #if (MORECORE_CONTIGUOUS || defined(WIN32))
723 #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
724 #else /* MORECORE_CONTIGUOUS */
725 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
726 #endif /* MORECORE_CONTIGUOUS */
727 #endif /* DEFAULT_GRANULARITY */
728 #ifndef DEFAULT_TRIM_THRESHOLD
729 #ifndef MORECORE_CANNOT_TRIM
730 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
731 #else /* MORECORE_CANNOT_TRIM */
732 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
733 #endif /* MORECORE_CANNOT_TRIM */
734 #endif /* DEFAULT_TRIM_THRESHOLD */
735 #ifndef DEFAULT_MMAP_THRESHOLD
736 #if HAVE_MMAP
737 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
738 #else /* HAVE_MMAP */
739 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
740 #endif /* HAVE_MMAP */
741 #endif /* DEFAULT_MMAP_THRESHOLD */
742 #ifndef MAX_RELEASE_CHECK_RATE
743 #if HAVE_MMAP
744 #define MAX_RELEASE_CHECK_RATE 4095
745 #else
746 #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
747 #endif /* HAVE_MMAP */
748 #endif /* MAX_RELEASE_CHECK_RATE */
749 #ifndef USE_BUILTIN_FFS
750 #define USE_BUILTIN_FFS 0
751 #endif /* USE_BUILTIN_FFS */
752 #ifndef USE_DEV_RANDOM
753 #define USE_DEV_RANDOM 0
754 #endif /* USE_DEV_RANDOM */
755 #ifndef NO_MALLINFO
756 #define NO_MALLINFO 0
757 #endif /* NO_MALLINFO */
758 #ifndef MALLINFO_FIELD_TYPE
759 #define MALLINFO_FIELD_TYPE size_t
760 #endif /* MALLINFO_FIELD_TYPE */
761 #ifndef NO_MALLOC_STATS
762 #define NO_MALLOC_STATS 0
763 #endif /* NO_MALLOC_STATS */
764 #ifndef NO_SEGMENT_TRAVERSAL
765 #define NO_SEGMENT_TRAVERSAL 0
766 #endif /* NO_SEGMENT_TRAVERSAL */
767 
768 /*
769  mallopt tuning options. SVID/XPG defines four standard parameter
770  numbers for mallopt, normally defined in malloc.h. None of these
771  are used in this malloc, so setting them has no effect. But this
772  malloc does support the following options.
773 */
774 
775 #define M_TRIM_THRESHOLD (-1)
776 #define M_GRANULARITY (-2)
777 #define M_MMAP_THRESHOLD (-3)
778 
779 /* ------------------------ Mallinfo declarations ------------------------ */
780 
781 #if !NO_MALLINFO
782 /*
783  This version of malloc supports the standard SVID/XPG mallinfo
784  routine that returns a struct containing usage properties and
785  statistics. It should work on any system that has a
786  /usr/include/malloc.h defining struct mallinfo. The main
787  declaration needed is the mallinfo struct that is returned (by-copy)
788  by mallinfo(). The mallinfo struct contains a bunch of fields that
789  are not even meaningful in this version of malloc. These fields
790  are instead filled by mallinfo() with other numbers that might be of
791  interest.
792 
793  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
794  /usr/include/malloc.h file that includes a declaration of struct
795  mallinfo. If so, it is included; else a compliant version is
796  declared below. These must be precisely the same for mallinfo() to
797  work. The original SVID version of this struct, defined on most
798  systems with mallinfo, declares all fields as ints. But some others
799  define as unsigned long. If your system defines the fields using a
800  type of different width than listed here, you MUST #include your
801  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
802 */
803 
804 /* #define HAVE_USR_INCLUDE_MALLOC_H */
805 
806 #ifdef HAVE_USR_INCLUDE_MALLOC_H
807 #include "/usr/include/malloc.h"
808 #else /* HAVE_USR_INCLUDE_MALLOC_H */
809 #ifndef STRUCT_MALLINFO_DECLARED
810 /* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */
811 #define _STRUCT_MALLINFO
812 #define STRUCT_MALLINFO_DECLARED 1
813 struct mallinfo { /* usage statistics returned by-copy from mallinfo(); SVID/XPG layout, some fields repurposed by this malloc */
814  MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
815  MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
816  MALLINFO_FIELD_TYPE smblks; /* always 0 */
817  MALLINFO_FIELD_TYPE hblks; /* always 0 */
818  MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
819  MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
820  MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
821  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
822  MALLINFO_FIELD_TYPE fordblks; /* total free space */
823  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
824 };
825 #endif /* STRUCT_MALLINFO_DECLARED */
826 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
827 #endif /* NO_MALLINFO */
828 
829 /*
830  Try to persuade compilers to inline. The most critical functions for
831  inlining are defined as macros, so these aren't used for them.
832 */
833 
834 #ifndef FORCEINLINE
835  #if defined(__GNUC__)
836 #define FORCEINLINE __inline __attribute__ ((always_inline))
837  #elif defined(_MSC_VER)
838  #define FORCEINLINE __forceinline
839  #endif
840 #endif
841 #ifndef NOINLINE
842  #if defined(__GNUC__)
843  #define NOINLINE __attribute__ ((noinline))
844  #elif defined(_MSC_VER)
845  #define NOINLINE __declspec(noinline)
846  #else
847  #define NOINLINE
848  #endif
849 #endif
850 
851 #ifdef __cplusplus
852 extern "C" {
853 #ifndef FORCEINLINE
854  #define FORCEINLINE inline
855 #endif
856 #endif /* __cplusplus */
857 #ifndef FORCEINLINE
858  #define FORCEINLINE
859 #endif
860 
861 #if !ONLY_MSPACES
862 
863 /* ------------------- Declarations of public routines ------------------- */
864 
865 #ifndef USE_DL_PREFIX
866 #define dlcalloc calloc
867 #define dlfree free
868 #define dlmalloc malloc
869 #define dlmemalign memalign
870 #define dlposix_memalign posix_memalign
871 #define dlrealloc realloc
872 #define dlrealloc_in_place realloc_in_place
873 #define dlvalloc valloc
874 #define dlpvalloc pvalloc
875 #define dlmallinfo mallinfo
876 #define dlmallopt mallopt
877 #define dlmalloc_trim malloc_trim
878 #define dlmalloc_stats malloc_stats
879 #define dlmalloc_usable_size malloc_usable_size
880 #define dlmalloc_footprint malloc_footprint
881 #define dlmalloc_max_footprint malloc_max_footprint
882 #define dlmalloc_footprint_limit malloc_footprint_limit
883 #define dlmalloc_set_footprint_limit malloc_set_footprint_limit
884 #define dlmalloc_inspect_all malloc_inspect_all
885 #define dlindependent_calloc independent_calloc
886 #define dlindependent_comalloc independent_comalloc
887 #define dlbulk_free bulk_free
888 #endif /* USE_DL_PREFIX */
889 
890 /*
891  malloc(size_t n)
892  Returns a pointer to a newly allocated chunk of at least n bytes, or
893  null if no space is available, in which case errno is set to ENOMEM
894  on ANSI C systems.
895 
896  If n is zero, malloc returns a minimum-sized chunk. (The minimum
897  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
898  systems.) Note that size_t is an unsigned type, so calls with
899  arguments that would be negative if signed are interpreted as
900  requests for huge amounts of space, which will often fail. The
901  maximum supported value of n differs across systems, but is in all
902  cases less than the maximum representable value of a size_t.
903 */
904 DLMALLOC_EXPORT void* dlmalloc(size_t);
905 
906 /*
907  free(void* p)
908  Releases the chunk of memory pointed to by p, that had been previously
909  allocated using malloc or a related routine such as realloc.
910  It has no effect if p is null. If p was not malloced or already
911  freed, free(p) will by default cause the current program to abort.
912 */
913 DLMALLOC_EXPORT void dlfree(void*);
914 
915 /*
916  calloc(size_t n_elements, size_t element_size);
917  Returns a pointer to n_elements * element_size bytes, with all locations
918  set to zero.
919 */
920 DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);
921 
922 /*
923  realloc(void* p, size_t n)
924  Returns a pointer to a chunk of size n that contains the same data
925  as does chunk p up to the minimum of (n, p's size) bytes, or null
926  if no space is available.
927 
928  The returned pointer may or may not be the same as p. The algorithm
929  prefers extending p in most cases when possible, otherwise it
930  employs the equivalent of a malloc-copy-free sequence.
931 
932  If p is null, realloc is equivalent to malloc.
933 
934  If space is not available, realloc returns null, errno is set (if on
935  ANSI) and p is NOT freed.
936 
937  if n is for fewer bytes than already held by p, the newly unused
938  space is lopped off and freed if possible. realloc with a size
939  argument of zero (re)allocates a minimum-sized chunk.
940 
941  The old unix realloc convention of allowing the last-free'd chunk
942  to be used as an argument to realloc is not supported.
943 */
944 DLMALLOC_EXPORT void* dlrealloc(void*, size_t);
945 
946 /*
947  realloc_in_place(void* p, size_t n)
948  Resizes the space allocated for p to size n, only if this can be
949  done without moving p (i.e., only if there is adjacent space
950  available if n is greater than p's current allocated size, or n is
951  less than or equal to p's size). This may be used instead of plain
952  realloc if an alternative allocation strategy is needed upon failure
953  to expand space; for example, reallocation of a buffer that must be
954  memory-aligned or cleared. You can use realloc_in_place to trigger
955  these alternatives only when needed.
956 
957  Returns p if successful; otherwise null.
958 */
959 DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);
960 
961 /*
962  memalign(size_t alignment, size_t n);
963  Returns a pointer to a newly allocated chunk of n bytes, aligned
964  in accord with the alignment argument.
965 
966  The alignment argument should be a power of two. If the argument is
967  not a power of two, the nearest greater power is used.
968  8-byte alignment is guaranteed by normal malloc calls, so don't
969  bother calling memalign with an argument of 8 or less.
970 
971  Overreliance on memalign is a sure way to fragment space.
972 */
973 DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);
974 
975 /*
976  int posix_memalign(void** pp, size_t alignment, size_t n);
977  Allocates a chunk of n bytes, aligned in accord with the alignment
978  argument. Differs from memalign only in that it (1) assigns the
979  allocated memory to *pp rather than returning it, (2) fails and
980  returns EINVAL if the alignment is not a power of two (3) fails and
981  returns ENOMEM if memory cannot be allocated.
982 */
983 DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);
984 
985 /*
986  valloc(size_t n);
987  Equivalent to memalign(pagesize, n), where pagesize is the page
988  size of the system. If the pagesize is unknown, 4096 is used.
989 */
990 DLMALLOC_EXPORT void* dlvalloc(size_t);
991 
992 /*
993  mallopt(int parameter_number, int parameter_value)
994  Sets tunable parameters The format is to provide a
995  (parameter-number, parameter-value) pair. mallopt then sets the
996  corresponding parameter to the argument value if it can (i.e., so
997  long as the value is meaningful), and returns 1 if successful else
998  0. To workaround the fact that mallopt is specified to use int,
999  not size_t parameters, the value -1 is specially treated as the
1000  maximum unsigned size_t value.
1001 
1002  SVID/XPG/ANSI defines four standard param numbers for mallopt,
1003  normally defined in malloc.h. None of these are used in this malloc,
1004  so setting them has no effect. But this malloc also supports other
1005  options in mallopt. See below for details. Briefly, supported
1006  parameters are as follows (listed defaults are for "typical"
1007  configurations).
1008 
1009  Symbol param # default allowed param values
1010  M_TRIM_THRESHOLD -1 2*1024*1024 any (-1 disables)
1011  M_GRANULARITY -2 page size any power of 2 >= page size
1012  M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
1013 */
1014 DLMALLOC_EXPORT int dlmallopt(int, int);
1015 
1016 /*
1017  malloc_footprint();
1018  Returns the number of bytes obtained from the system. The total
1019  number of bytes allocated by malloc, realloc etc., is less than this
1020  value. Unlike mallinfo, this function returns only a precomputed
1021  result, so can be called frequently to monitor memory consumption.
1022  Even if locks are otherwise defined, this function does not use them,
1023  so results might not be up to date.
1024 */
1025 DLMALLOC_EXPORT size_t dlmalloc_footprint(void);
1026 
1027 /*
1028  malloc_max_footprint();
1029  Returns the maximum number of bytes obtained from the system. This
1030  value will be greater than current footprint if deallocated space
1031  has been reclaimed by the system. The peak number of bytes allocated
1032  by malloc, realloc etc., is less than this value. Unlike mallinfo,
1033  this function returns only a precomputed result, so can be called
1034  frequently to monitor memory consumption. Even if locks are
1035  otherwise defined, this function does not use them, so results might
1036  not be up to date.
1037 */
1038 DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void);
1039 
1040 /*
1041  malloc_footprint_limit();
1042  Returns the number of bytes that the heap is allowed to obtain from
1043  the system, returning the last value returned by
1044  malloc_set_footprint_limit, or the maximum size_t value if
1045  never set. The returned value reflects a permission. There is no
1046  guarantee that this number of bytes can actually be obtained from
1047  the system.
1048 */
1049 DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(void);
1050 
1051 /*
1052  malloc_set_footprint_limit();
1053  Sets the maximum number of bytes to obtain from the system, causing
1054  failure returns from malloc and related functions upon attempts to
1055  exceed this value. The argument value may be subject to page
1056  rounding to an enforceable limit; this actual value is returned.
1057  Using an argument of the maximum possible size_t effectively
1058  disables checks. If the argument is less than or equal to the
1059  current malloc_footprint, then all future allocations that require
1060  additional system memory will fail. However, invocation cannot
1061  retroactively deallocate existing used memory.
1062 */
1063 DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
1064 
1065 #if MALLOC_INSPECT_ALL
1066 /*
1067  malloc_inspect_all(void(*handler)(void *start,
1068  void *end,
1069  size_t used_bytes,
1070  void* callback_arg),
1071  void* arg);
1072  Traverses the heap and calls the given handler for each managed
1073  region, skipping all bytes that are (or may be) used for bookkeeping
1074  purposes. Traversal does not include chunks that have been
1075  directly memory mapped. Each reported region begins at the start
1076  address, and continues up to but not including the end address. The
1077  first used_bytes of the region contain allocated data. If
1078  used_bytes is zero, the region is unallocated. The handler is
1079  invoked with the given callback argument. If locks are defined, they
1080  are held during the entire traversal. It is a bad idea to invoke
1081  other malloc functions from within the handler.
1082 
1083  For example, to count the number of in-use chunks with size greater
1084  than 1000, you could write:
1085  static int count = 0;
1086  void count_chunks(void* start, void* end, size_t used, void* arg) {
1087  if (used >= 1000) ++count;
1088  }
1089  then:
1090  malloc_inspect_all(count_chunks, NULL);
1091 
1092  malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
1093 */
1094 DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
1095  void* arg);
1096 
1097 #endif /* MALLOC_INSPECT_ALL */
1098 
1099 #if !NO_MALLINFO
1100 /*
1101  mallinfo()
1102  Returns (by copy) a struct containing various summary statistics:
1103 
1104  arena: current total non-mmapped bytes allocated from system
1105  ordblks: the number of free chunks
1106  smblks: always zero.
1107  hblks: current number of mmapped regions
1108  hblkhd: total bytes held in mmapped regions
1109  usmblks: the maximum total allocated space. This will be greater
1110  than current total if trimming has occurred.
1111  fsmblks: always zero
1112  uordblks: current total allocated space (normal or mmapped)
1113  fordblks: total free space
1114  keepcost: the maximum number of bytes that could ideally be released
1115  back to system via malloc_trim. ("ideally" means that
1116  it ignores page restrictions etc.)
1117 
1118  Because these fields are ints, but internal bookkeeping may
1119  be kept as longs, the reported values may wrap around zero and
1120  thus be inaccurate.
1121 */
1122 DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
1123 #endif /* NO_MALLINFO */
1124 
1125 /*
1126  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
1127 
1128  independent_calloc is similar to calloc, but instead of returning a
1129  single cleared space, it returns an array of pointers to n_elements
1130  independent elements that can hold contents of size elem_size, each
1131  of which starts out cleared, and can be independently freed,
1132  realloc'ed etc. The elements are guaranteed to be adjacently
1133  allocated (this is not guaranteed to occur with multiple callocs or
1134  mallocs), which may also improve cache locality in some
1135  applications.
1136 
1137  The "chunks" argument is optional (i.e., may be null, which is
1138  probably the most typical usage). If it is null, the returned array
1139  is itself dynamically allocated and should also be freed when it is
1140  no longer needed. Otherwise, the chunks array must be of at least
1141  n_elements in length. It is filled in with the pointers to the
1142  chunks.
1143 
1144  In either case, independent_calloc returns this pointer array, or
1145  null if the allocation failed. If n_elements is zero and "chunks"
1146  is null, it returns a chunk representing an array with zero elements
1147  (which should be freed if not wanted).
1148 
1149  Each element must be freed when it is no longer needed. This can be
1150  done all at once using bulk_free.
1151 
1152  independent_calloc simplifies and speeds up implementations of many
1153  kinds of pools. It may also be useful when constructing large data
1154  structures that initially have a fixed number of fixed-sized nodes,
1155  but the number is not known at compile time, and some of the nodes
1156  may later need to be freed. For example:
1157 
1158  struct Node { int item; struct Node* next; };
1159 
1160  struct Node* build_list() {
1161  struct Node** pool;
1162  int n = read_number_of_nodes_needed();
1163  if (n <= 0) return 0;
1164  pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
1165  if (pool == 0) die();
1166  // organize into a linked list...
1167  struct Node* first = pool[0];
1168  for (i = 0; i < n-1; ++i)
1169  pool[i]->next = pool[i+1];
1170  free(pool); // Can now free the array (or not, if it is needed later)
1171  return first;
1172  }
1173 */
1174 DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);
1175 
1176 /*
1177  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
1178 
1179  independent_comalloc allocates, all at once, a set of n_elements
1180  chunks with sizes indicated in the "sizes" array. It returns
1181  an array of pointers to these elements, each of which can be
1182  independently freed, realloc'ed etc. The elements are guaranteed to
1183  be adjacently allocated (this is not guaranteed to occur with
1184  multiple callocs or mallocs), which may also improve cache locality
1185  in some applications.
1186 
1187  The "chunks" argument is optional (i.e., may be null). If it is null
1188  the returned array is itself dynamically allocated and should also
1189  be freed when it is no longer needed. Otherwise, the chunks array
1190  must be of at least n_elements in length. It is filled in with the
1191  pointers to the chunks.
1192 
1193  In either case, independent_comalloc returns this pointer array, or
1194  null if the allocation failed. If n_elements is zero and chunks is
1195  null, it returns a chunk representing an array with zero elements
1196  (which should be freed if not wanted).
1197 
1198  Each element must be freed when it is no longer needed. This can be
1199  done all at once using bulk_free.
1200 
1201  independent_comalloc differs from independent_calloc in that each
1202  element may have a different size, and also that it does not
1203  automatically clear elements.
1204 
1205  independent_comalloc can be used to speed up allocation in cases
1206  where several structs or objects must always be allocated at the
1207  same time. For example:
1208 
1209  struct Head { ... }
1210  struct Foot { ... }
1211 
1212  void send_message(char* msg) {
1213  int msglen = strlen(msg);
1214  size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1215  void* chunks[3];
1216  if (independent_comalloc(3, sizes, chunks) == 0)
1217  die();
1218  struct Head* head = (struct Head*)(chunks[0]);
1219  char* body = (char*)(chunks[1]);
1220  struct Foot* foot = (struct Foot*)(chunks[2]);
1221  // ...
1222  }
1223 
1224  In general though, independent_comalloc is worth using only for
1225  larger values of n_elements. For small values, you probably won't
1226  detect enough difference from series of malloc calls to bother.
1227 
1228  Overuse of independent_comalloc can increase overall memory usage,
1229  since it cannot reuse existing noncontiguous small chunks that
1230  might be available for some of the elements.
1231 */
1232 DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);
1233 
1234 /*
1235  bulk_free(void* array[], size_t n_elements)
1236  Frees and clears (sets to null) each non-null pointer in the given
1237  array. This is likely to be faster than freeing them one-by-one.
1238  If footers are used, pointers that have been allocated in different
1239  mspaces are not freed or cleared, and the count of all such pointers
1240  is returned. For large arrays of pointers with poor locality, it
1241  may be worthwhile to sort this array before calling bulk_free.
1242 */
1243 DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements);
1244 
1245 /*
1246  pvalloc(size_t n);
1247  Equivalent to valloc(minimum-page-that-holds(n)), that is,
1248  round up n to nearest pagesize.
1249  */
1250 DLMALLOC_EXPORT void* dlpvalloc(size_t);
1251 
1252 /*
1253  malloc_trim(size_t pad);
1254 
1255  If possible, gives memory back to the system (via negative arguments
1256  to sbrk) if there is unused memory at the `high' end of the malloc
1257  pool or in unused MMAP segments. You can call this after freeing
1258  large blocks of memory to potentially reduce the system-level memory
1259  requirements of a program. However, it cannot guarantee to reduce
1260  memory. Under some allocation patterns, some large free blocks of
1261  memory will be locked between two used chunks, so they cannot be
1262  given back to the system.
1263 
1264  The `pad' argument to malloc_trim represents the amount of free
1265  trailing space to leave untrimmed. If this argument is zero, only
1266  the minimum amount of memory to maintain internal data structures
1267  will be left. Non-zero arguments can be supplied to maintain enough
1268  trailing space to service future expected allocations without having
1269  to re-obtain memory from the system.
1270 
1271  Malloc_trim returns 1 if it actually released any memory, else 0.
1272 */
1273 DLMALLOC_EXPORT int dlmalloc_trim(size_t);
1274 
1275 /*
1276  malloc_stats();
1277  Prints on stderr the amount of space obtained from the system (both
1278  via sbrk and mmap), the maximum amount (which may be more than
1279  current if malloc_trim and/or munmap got called), and the current
1280  number of bytes allocated via malloc (or realloc, etc) but not yet
1281  freed. Note that this is the number of bytes allocated, not the
1282  number requested. It will be larger than the number requested
1283  because of alignment and bookkeeping overhead. Because it includes
1284  alignment wastage as being in use, this figure may be greater than
1285  zero even when no user-level chunks are allocated.
1286 
1287  The reported current and maximum system memory can be inaccurate if
1288  a program makes other calls to system memory allocation functions
1289  (normally sbrk) outside of malloc.
1290 
1291  malloc_stats prints only the most commonly interesting statistics.
1292  More information can be obtained by calling mallinfo.
1293 */
1294 DLMALLOC_EXPORT void dlmalloc_stats(void);
1295 
1296 /*
1297  malloc_usable_size(void* p);
1298 
1299  Returns the number of bytes you can actually use in
1300  an allocated chunk, which may be more than you requested (although
1301  often not) due to alignment and minimum size constraints.
1302  You can use this many bytes without worrying about
1303  overwriting other allocated objects. This is not a particularly great
1304  programming practice. malloc_usable_size can be more useful in
1305  debugging and assertions, for example:
1306 
1307  p = malloc(n);
1308  assert(malloc_usable_size(p) >= 256);
1309 */
1310 size_t dlmalloc_usable_size(void*);
1311 
1312 #endif /* ONLY_MSPACES */
1313 
1314 #if MSPACES
1315 
1316 /*
1317  mspace is an opaque type representing an independent
1318  region of space that supports mspace_malloc, etc.
1319 */
1320 typedef void* mspace;
1321 
1322 /*
1323  create_mspace creates and returns a new independent space with the
1324  given initial capacity, or, if 0, the default granularity size. It
1325  returns null if there is no system memory available to create the
1326  space. If argument locked is non-zero, the space uses a separate
1327  lock to control access. The capacity of the space will grow
1328  dynamically as needed to service mspace_malloc requests. You can
1329  control the sizes of incremental increases of this space by
1330  compiling with a different DEFAULT_GRANULARITY or dynamically
1331  setting with mallopt(M_GRANULARITY, value).
1332 */
1333 DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked);
1334 
1335 /*
1336  destroy_mspace destroys the given space, and attempts to return all
1337  of its memory back to the system, returning the total number of
1338  bytes freed. After destruction, the results of access to all memory
1339  used by the space become undefined.
1340 */
1341 DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);
1342 
1343 /*
1344  create_mspace_with_base uses the memory supplied as the initial base
1345  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1346  space is used for bookkeeping, so the capacity must be at least this
1347  large. (Otherwise 0 is returned.) When this initial space is
1348  exhausted, additional memory will be obtained from the system.
1349  Destroying this space will deallocate all additionally allocated
1350  space (if possible) but not the initial base.
1351 */
1352 DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);
1353 
1354 /*
1355  mspace_track_large_chunks controls whether requests for large chunks
1356  are allocated in their own untracked mmapped regions, separate from
1357  others in this mspace. By default large chunks are not tracked,
1358  which reduces fragmentation. However, such chunks are not
1359  necessarily released to the system upon destroy_mspace. Enabling
1360  tracking by setting to true may increase fragmentation, but avoids
1361  leakage when relying on destroy_mspace to release all memory
1362  allocated using this space. The function returns the previous
1363  setting.
1364 */
1365 DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);
1366 
1367 
1368 /*
1369  mspace_malloc behaves as malloc, but operates within
1370  the given space.
1371 */
1372 DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);
1373 
1374 /*
1375  mspace_free behaves as free, but operates within
1376  the given space.
1377 
1378  If compiled with FOOTERS==1, mspace_free is not actually needed.
1379  free may be called instead of mspace_free because freed chunks from
1380  any space are handled by their originating spaces.
1381 */
1382 DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);
1383 
1384 /*
1385  mspace_realloc behaves as realloc, but operates within
1386  the given space.
1387 
1388  If compiled with FOOTERS==1, mspace_realloc is not actually
1389  needed. realloc may be called instead of mspace_realloc because
1390  realloced chunks from any space are handled by their originating
1391  spaces.
1392 */
1393 DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);
1394 
1395 /*
1396  mspace_calloc behaves as calloc, but operates within
1397  the given space.
1398 */
1399 DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1400 
1401 /*
1402  mspace_memalign behaves as memalign, but operates within
1403  the given space.
1404 */
1405 DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1406 
1407 /*
1408  mspace_independent_calloc behaves as independent_calloc, but
1409  operates within the given space.
1410 */
1411 DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
1412  size_t elem_size, void* chunks[]);
1413 
1414 /*
1415  mspace_independent_comalloc behaves as independent_comalloc, but
1416  operates within the given space.
1417 */
1418 DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
1419  size_t sizes[], void* chunks[]);
1420 
1421 /*
1422  mspace_footprint() returns the number of bytes obtained from the
1423  system for this space.
1424 */
1425 DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);
1426 
1427 /*
1428  mspace_max_footprint() returns the peak number of bytes obtained from the
1429  system for this space.
1430 */
1431 DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);
1432 
1433 
1434 #if !NO_MALLINFO
1435 /*
1436  mspace_mallinfo behaves as mallinfo, but reports properties of
1437  the given space.
1438 */
1439 DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);
#endif /* !NO_MALLINFO */
1441 
1442 /*
1443  malloc_usable_size(void* p) behaves the same as malloc_usable_size;
1444 */
1445 DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);
1446 
1447 /*
1448  mspace_malloc_stats behaves as malloc_stats, but reports
1449  properties of the given space.
1450 */
1451 DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp);
1452 
1453 /*
1454  mspace_trim behaves as malloc_trim, but
1455  operates within the given space.
1456 */
1457 DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);
1458 
1459 /*
1460  An alias for mallopt.
1461 */
1462 DLMALLOC_EXPORT int mspace_mallopt(int, int);
1463 
1464 #endif /* MSPACES */
1465 
1466 #ifdef __cplusplus
1467 } /* end of extern "C" */
1468 #endif /* __cplusplus */
1469 
1470 #pragma GCC diagnostic pop
1471 
1472 #endif /* PLATFORM_ATARI_DLMALLOC_H */
/* Definition: dlmalloc.h:813 */