/*************************************************
*     Exim - an Internet mail transport agent    *
*************************************************/

/* Copyright (c) University of Cambridge 1995 - 2018 */
/* Copyright (c) The Exim maintainers 2019 */
/* See the file NOTICE for conditions of use and distribution. */

/* Exim gets and frees all its store through these functions. In the original
implementation there was a lot of mallocing and freeing of small bits of store.
The philosophy has now changed to a scheme which includes the concept of
"stacking pools" of store. For the short-lived processes, there isn't any real
need to do any garbage collection, but the stack concept allows quick resetting
in places where this seems sensible.

Obviously the long-running processes (the daemon, the queue runner, and eximon)
must take care not to eat store.

The following different types of store are recognized:

. Long-lived, large blocks: This is implemented by retaining the original
  malloc/free functions, and it is used for permanent working buffers and for
  getting blocks to cut up for the other types.

. Long-lived, small blocks: This is used for blocks that have to survive until
  the process exits. It is implemented as a stacking pool (POOL_PERM). This is
  functionally the same as store_malloc(), except that the store can't be
  freed, but I expect it to be more efficient for handling small blocks.

. Short-lived, short blocks: Most of the dynamic store falls into this
  category. It is implemented as a stacking pool (POOL_MAIN) which is reset
  after accepting a message when multiple messages are received by a single
  process. Resetting happens at some other times as well, usually fairly
  locally after some specific processing that needs working store.

. There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
  This means it can be freed when search_tidyup() is called to close down all
  the lookup caching.

. Orthogonal to the three pool types, there are two classes of memory: untainted
  and tainted. The latter is used for values derived from untrusted input, and
  the string-expansion mechanism refuses to operate on such values (obviously,
  it can expand an untainted value to return a tainted result). The classes
  are implemented by duplicating the three pool types. Pool resets are requested
  against the untainted sibling and apply to both siblings.
*/

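/* Illustrative layout (the real definitions live in store.h): the tainted
pools shadow the untainted ones at a fixed offset, e.g.

     POOL_MAIN   = 0   POOL_TAINT_BASE = 3   POOL_TAINT_MAIN   = 3
     POOL_PERM   = 1                         POOL_TAINT_PERM   = 4
     POOL_SEARCH = 2                         POOL_TAINT_SEARCH = 5

so the tainted twin of a pool p is p + POOL_TAINT_BASE, and NPOOLS is 6. The
code below relies on this arithmetic to find a pool's tainted sibling. */
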
#include "exim.h"
/* keep config.h before memcheck.h, for NVALGRIND */
#include "memcheck.h"

/* We need to know how to align blocks of data for general use. I'm not sure
how to get an alignment factor in general. In the current world, a value of 8
is probably right, and this is sizeof(double) on some systems and sizeof(void
*) on others, so take the larger of those. Since everything in this expression
is a constant, the compiler should optimize it to a simple constant wherever it
appears (I checked that gcc does do this). */

#define alignment \
  (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))

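/* For example, on LP64 platforms both sizeof(void *) and sizeof(double) are
8, so "alignment" evaluates to 8 and every pool yield is 8-byte aligned. */
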
/* store_reset() will not free the following block if the last used block has
less than this much left in it. */

#define STOREPOOL_MIN_SIZE 256

/* Structure describing the beginning of each big block. */

typedef struct storeblock {
  struct storeblock *next;
  size_t length;
} storeblock;

/* Just in case we find ourselves on a system where the structure above has a
length that is not a multiple of the alignment, set up a macro for the padded
length. */

#define ALIGNED_SIZEOF_STOREBLOCK \
  (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)

/* Size of block to get from malloc to carve up into smaller ones. This
must be a multiple of the alignment. We assume that 8192 is going to be
mapped to a whole number of pages. */

#define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)

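/* Worked example, assuming 8-byte alignment and 8-byte pointers/size_t:
sizeof(storeblock) is 16, already a multiple of the alignment, so
ALIGNED_SIZEOF_STOREBLOCK is 16 and STORE_BLOCK_SIZE is 8192 - 16 = 8176.
A default block plus its header then occupies exactly 8192 bytes, i.e. two
4 kB pages. */
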
/* Variables holding data for the local pools of store. The current pool number
is held in store_pool, which is global so that it can be changed from outside.
Setting the initial length values to -1 forces a malloc for the first call,
even if the length is zero (which is used for getting a point to reset to). */

int store_pool = POOL_MAIN;

static storeblock *chainbase[NPOOLS];
static storeblock *current_block[NPOOLS];
static void *next_yield[NPOOLS];
static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };

/* The limits of the tainted pools. Tracking these on new allocations enables
a fast is_tainted implementation. We assume the kernel only allocates mmaps
using one side or the other of data+heap, not both. */

static void * tainted_base = (void *)-1;
static void * tainted_top = (void *)0;

/* pool_malloc holds the amount of memory used by the store pools; this goes up
and down as store is reset or released. nonpool_malloc is the total got by
malloc from other calls; this doesn't go down because it is just freed by
pointer. */

static int pool_malloc;
static int nonpool_malloc;

/* This variable is set by store_get() to its yield, and by store_reset() to
NULL. This enables string_cat() to optimize its store handling for very long
strings. That's why the variable is global. */

void *store_last_get[NPOOLS];

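/* Illustrative sketch (not the actual string_cat() code) of the optimisation
this enables: when the string being grown was the most recent get from the
current pool, it can be extended in place instead of copied:

     if (store_last_get[store_pool] == g->s
        && store_extend(g->s, FALSE, oldsize, newsize))
       return g;    (grown in place, no copy)

g, oldsize and newsize are hypothetical names for the growable-string state. */
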
/* These are purely for stats-gathering */

static int nbytes[NPOOLS];      /* current bytes allocated */
static int maxbytes[NPOOLS];    /* max number reached */
static int nblocks[NPOOLS];     /* current number of blocks allocated */
static int maxblocks[NPOOLS];   /* max number reached */
static int n_nonpool_blocks;    /* current number of direct store_malloc() blocks */
static int max_nonpool_blocks;
static int max_pool_malloc;     /* max value for pool_malloc */
static int max_nonpool_malloc;  /* max value for nonpool_malloc */

static const uschar * pooluse[NPOOLS] = {
  [POOL_MAIN]         = US"main",
  [POOL_PERM]         = US"perm",
  [POOL_SEARCH]       = US"search",
  [POOL_TAINT_MAIN]   = US"main",
  [POOL_TAINT_PERM]   = US"perm",
  [POOL_TAINT_SEARCH] = US"search",
};
static const uschar * poolclass[NPOOLS] = {
  [POOL_MAIN]         = US"untainted",
  [POOL_PERM]         = US"untainted",
  [POOL_SEARCH]       = US"untainted",
  [POOL_TAINT_MAIN]   = US"tainted",
  [POOL_TAINT_PERM]   = US"tainted",
  [POOL_TAINT_SEARCH] = US"tainted",
};

static void * store_mmap(int, const char *, int);
static void * internal_store_malloc(int, const char *, int);
static void   internal_store_free(void *, const char *, int linenumber);

/******************************************************************************/

/* Predicate: if an address is in a tainted pool.
By extension, a variable pointing to this address is tainted.
*/

BOOL
is_tainted(const void * p)
{
BOOL rc = p >= tainted_base && p < tainted_top;

#ifndef COMPILE_UTILITY
DEBUG(D_memory) if (rc) debug_printf_indent("is_tainted: YES\n");
#endif
return rc;
}

void
die_tainted(const uschar * msg, const uschar * func, int line)
{
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
  msg, func, line);
}

/*************************************************
*       Get a block from the current pool        *
*************************************************/

/* Running out of store is a total disaster. This function is called via the
macro store_get(). It passes back a block of store within the current big
block, getting a new one if necessary. The address is saved in
store_last_get for the pool.

Arguments:
  size        amount wanted
  tainted     class of store required
  func        function from which called
  linenumber  line number in source file

Returns:    pointer to store (panic on malloc failure)
*/

void *
store_get_3(int size, BOOL tainted, const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;

/* Round up the size to a multiple of the alignment. Although this looks
messy, because "alignment" is a constant expression the compiler can do a
reasonable job of optimizing, especially if the value of "alignment" is a
power of two. I checked this with -O2, and gcc did very well, compiling it to 4
instructions on a Sparc (alignment = 8). */

if (size % alignment != 0) size += alignment - (size % alignment);

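/* For example, with alignment 8 a request for 13 bytes is rounded up to 16,
while a request for 16 is left unchanged. */
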
/* If there isn't room in the current block, get a new one. The minimum
size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
these functions are mostly called for small amounts of store. */

if (size > yield_length[pool])
  {
  int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
  int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
  storeblock * newblock;

  /* Sometimes store_reset() may leave a block for us; check if we can use it */

  if (  (newblock = current_block[pool])
     && (newblock = newblock->next)
     && newblock->length < length
     )
    {
    /* Give up on this block, because it's too small */

    nblocks[pool]--;
    if (pool < POOL_TAINT_BASE)
      internal_store_free(newblock, func, linenumber);
    else
      {
#ifndef COMPILE_UTILITY
      DEBUG(D_memory)
        debug_printf("---Unmap %6p %-20s %4d\n", newblock, func, linenumber);
#endif
      munmap(newblock, newblock->length + ALIGNED_SIZEOF_STOREBLOCK);
      }
    newblock = NULL;
    }

  /* If there was no free block, get a new one */

  if (!newblock)
    {
    if ((nbytes[pool] += mlength) > maxbytes[pool])
      maxbytes[pool] = nbytes[pool];
    if ((pool_malloc += mlength) > max_pool_malloc)  /* Used in pools */
      max_pool_malloc = pool_malloc;
    nonpool_malloc -= mlength;                       /* Exclude from overall total */
    if (++nblocks[pool] > maxblocks[pool])
      maxblocks[pool] = nblocks[pool];

    newblock = tainted
      ? store_mmap(mlength, func, linenumber)
      : internal_store_malloc(mlength, func, linenumber);
    newblock->next = NULL;
    newblock->length = length;

    if (!chainbase[pool])
      chainbase[pool] = newblock;
    else
      current_block[pool]->next = newblock;
    }

  current_block[pool] = newblock;
  yield_length[pool] = newblock->length;
  next_yield[pool] =
    (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
  (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
  }

/* There's (now) enough room in the current block; the yield is the next
pointer. */

store_last_get[pool] = next_yield[pool];

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
    store_last_get[pool], size, func, linenumber);
#endif  /* COMPILE_UTILITY */

(void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);

/* Update next pointer and number of bytes left in the current block. */

next_yield[pool] = (void *)(CS next_yield[pool] + size);
yield_length[pool] -= size;
return store_last_get[pool];
}

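/* Illustrative call site: code normally goes through the store_get() macro,
which supplies the caller details, e.g.

     uschar * buf = store_get(len + 1, is_tainted(input));

where "input" is a hypothetical source buffer, so that the taint of the new
store follows the data it will hold. */
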
/*************************************************
*       Get a block from the PERM pool           *
*************************************************/

/* This is just a convenience function, useful when just a single block is to
be obtained.

Arguments:
  size        amount wanted
  tainted     class of store required
  func        function from which called
  linenumber  line number in source file

Returns:    pointer to store (panic on malloc failure)
*/

void *
store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
{
void * yield;
int old_pool = store_pool;
store_pool = POOL_PERM;
yield = store_get_3(size, tainted, func, linenumber);
store_pool = old_pool;
return yield;
}

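/* Illustrative equivalence, assuming the usual store_get_perm() macro
wrapper: the call

     p = store_get_perm(size, tainted);

simply spares callers the save/switch/restore dance on store_pool above. */
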
/*************************************************
*      Extend a block if it is at the top        *
*************************************************/

/* While reading strings of unknown length, it is often the case that the
string is being read into the block at the top of the stack. If it needs to be
extended, it is more efficient just to extend within the top block rather than
allocate a new block and then have to copy the data. This function is provided
for the use of string_cat(), but of course can be used elsewhere too.
The block itself is not expanded; only the top allocation from it.

Arguments:
  ptr         pointer to store block
  tainted     class of store required
  oldsize     current size of the block, as requested by user
  newsize     new size required
  func        function from which called
  linenumber  line number in source file

Returns:    TRUE if the block is at the top of the stack and has been
            extended; FALSE if it isn't at the top of the stack, or cannot
            be extended
*/

BOOL
store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
  const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
int inc = newsize - oldsize;
int rounded_oldsize = oldsize;

/* Check that the block being extended was already of the required taint status;
refuse to extend if not. */

if (is_tainted(ptr) != tainted)
  return FALSE;

if (rounded_oldsize % alignment != 0)
  rounded_oldsize += alignment - (rounded_oldsize % alignment);

if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
    inc > yield_length[pool] + rounded_oldsize - oldsize)
  return FALSE;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
    func, linenumber);
#endif  /* COMPILE_UTILITY */

if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
next_yield[pool] = CS ptr + newsize;
yield_length[pool] -= newsize - rounded_oldsize;
(void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
return TRUE;
}

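/* Illustrative caller pattern: try to grow in place, and fall back to
copying into fresh store (store_newblock(), below) when that fails:

     if (!store_extend(s, tainted, oldsize, newsize))
       s = store_newblock(s, tainted, newsize, oldsize);
*/
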
/*************************************************
*    Back up to a previous point on the stack    *
*************************************************/

/* This function resets the next pointer, freeing any subsequent whole blocks
that are now unused. Call with a cookie obtained from store_mark() only; do
not call with a pointer returned by store_get(). Both the untainted and tainted
pools corresponding to store_pool are reset.

Arguments:
  r           place to back up to
  func        function from which called
  linenumber  line number in source file

Returns:  nothing
*/

static void
internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
{
storeblock * b = current_block[pool];
char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
storeblock * bb;
int newlength, count;
#ifndef COMPILE_UTILITY
int oldmalloc = pool_malloc;
#endif

/* Last store operation was not a get */

store_last_get[pool] = NULL;

/* See if the place is in the current block - as it often will be. Otherwise,
search for the block in which it lies. */

if (CS ptr < bc || CS ptr > bc + b->length)
  {
  for (b = chainbase[pool]; b; b = b->next)
    {
    bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS ptr >= bc && CS ptr <= bc + b->length) break;
    }
  if (!b)
    log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
      "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
  }

/* Back up, rounding to the alignment if necessary. When testing, flatten
the released memory. */

newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
if (debug_store)
  {
  assert_no_variables(ptr, newlength, func, linenumber);
  if (f.running_in_test_harness)
    {
    (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
    memset(ptr, 0xF0, newlength);
    }
  }
#endif
(void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
next_yield[pool] = CS ptr + (newlength % alignment);
count = yield_length[pool];
count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
current_block[pool] = b;

/* Free any subsequent blocks. Do NOT free the first
successor, if our current block has less than 256 bytes left. This should
prevent us from flapping memory. However, keep this block only when it has
the default size. */

if (  yield_length[pool] < STOREPOOL_MIN_SIZE
   && b->next
   && b->next->length == STORE_BLOCK_SIZE)
  {
  b = b->next;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
    b->length - ALIGNED_SIZEOF_STOREBLOCK);
  }

bb = b->next;
b->next = NULL;

while ((b = bb))
  {
  int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  bb = bb->next;
  nbytes[pool] -= siz;
  pool_malloc -= siz;
  nblocks[pool]--;
  if (pool < POOL_TAINT_BASE)
    internal_store_free(b, func, linenumber);
  else
    {
#ifndef COMPILE_UTILITY
    DEBUG(D_memory)
      debug_printf("---Unmap %6p %-20s %4d\n", b, func, linenumber);
#endif
    munmap(b, b->length + ALIGNED_SIZEOF_STOREBLOCK);
    }
  }

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
    count + oldmalloc - pool_malloc,
    func, linenumber, pool_malloc);
#endif  /* COMPILE_UTILITY */
}

rmark
store_reset_3(rmark r, int pool, const char *func, int linenumber)
{
void ** ptr = r;

if (pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
if (!r)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called with bad mark: %s %d\n", func, linenumber);

internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
internal_store_reset(ptr, pool, func, linenumber);
return NULL;
}

/* Free tail-end unused allocation. This lets us allocate a big chunk
early, for cases when we only discover later how much was really needed.

Can be called with a value from store_get(), or an offset after such. Only
the tainted or untainted pool that serviced the store_get() will be affected.

This is mostly a cut-down version of internal_store_reset().
XXX needs rationalising
*/

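/* Illustrative use: over-allocate, then hand back the unused tail, under the
stated constraint that nothing else was allocated from the pool in between
("fill" is a hypothetical reader returning the bytes actually used):

     uschar * buf = store_get(BIG, FALSE);
     int used = fill(buf, BIG);
     store_release_above(buf + used);
*/
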
void
store_release_above_3(void *ptr, const char *func, int linenumber)
{
/* Search all pools' "current" blocks. If it isn't one of those,
ignore it (it usually will be). */

for (int pool = 0; pool < nelem(current_block); pool++)
  {
  storeblock * b = current_block[pool];
  char * bc;
  int count, newlength;

  if (!b)
    continue;

  bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
  if (CS ptr < bc || CS ptr > bc + b->length)
    continue;

  /* Last store operation was not a get */

  store_last_get[pool] = NULL;

  /* Back up, rounding to the alignment if necessary. When testing, flatten
  the released memory. */

  newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
  if (debug_store)
    {
    assert_no_variables(ptr, newlength, func, linenumber);
    if (f.running_in_test_harness)
      {
      (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
      memset(ptr, 0xF0, newlength);
      }
    }
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
  next_yield[pool] = CS ptr + (newlength % alignment);
  count = yield_length[pool];
  count = (yield_length[pool] = newlength - (newlength % alignment)) - count;

  /* Cut out the debugging stuff for utilities, but stop picky compilers from
  giving warnings. */

#ifdef COMPILE_UTILITY
  func = func;
  linenumber = linenumber;
#else
  DEBUG(D_memory)
    debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
      func, linenumber, pool_malloc);
#endif
  return;
  }

#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("non-last memory release try: %s %d\n", func, linenumber);
#endif
}

rmark
store_mark_3(const char *func, int linenumber)
{
void ** p;

if (store_pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);

/* Stash a mark for the tainted-twin release, in the untainted twin. Return
a cookie (actually the address in the untainted pool) to the caller.
Reset uses the cookie to recover the tainted-twin mark, winds back the tainted
pool with it, and then winds back the untainted pool with the cookie. */

p = store_get_3(sizeof(void *), FALSE, func, linenumber);
*p = store_get_3(0, TRUE, func, linenumber);
return p;
}

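/* Illustrative pairing, via the usual macro wrappers: a mark taken before
transient work winds both twins of the current pool back afterwards:

     rmark m = store_mark();
     ... allocations from both the untainted and tainted twins ...
     store_reset(m);
*/
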
/************************************************
*             Release store                     *
************************************************/

/* This function checks that the pointer it is given is the first thing in a
block, and if so, releases that block.

Arguments:
  block       block of store to consider
  pool        pool to check
  func        function from which called
  linenumber  line number in source file

Returns:  nothing
*/

static void
store_release_3(void * block, int pool, const char * func, int linenumber)
{
/* It will never be the first block, so no need to check that. */

for (storeblock * b = chainbase[pool]; b; b = b->next)
  {
  storeblock * bb = b->next;
  if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
    {
    int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
    b->next = bb->next;
    nbytes[pool] -= siz;
    pool_malloc -= siz;
    nblocks[pool]--;

    /* Cut out the debugging stuff for utilities, but stop picky compilers
    from giving warnings. */

#ifdef COMPILE_UTILITY
    func = func;
    linenumber = linenumber;
#else
    DEBUG(D_memory)
      debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
        linenumber, pool_malloc);

    if (f.running_in_test_harness)
      memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
#endif  /* COMPILE_UTILITY */

    free(bb);
    return;
    }
  }
}

/************************************************
*             Move store                        *
************************************************/

/* Allocate a new block big enough to expand to the given size and
copy the current data into it. Free the old one if possible.

This function is specifically provided for use when reading very
long strings, e.g. header lines. When the string gets longer than a
complete block, it gets copied to a new block. It is helpful to free
the old block iff the previous copy of the string is at its start,
and therefore the only thing in it. Otherwise, for very long strings,
dead store can pile up somewhat disastrously. This function checks that
the pointer it is given is the first thing in a block, and that nothing
has been allocated since. If so, releases that block.

Arguments:
  block       the current store block
  tainted     class of store required
  newsize     requested size
  len         amount of data to preserve

Returns:    new location of data
*/

void *
store_newblock_3(void * block, BOOL tainted, int newsize, int len,
  const char * func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
BOOL release_ok = !tainted && store_last_get[pool] == block;
uschar * newtext;

if (is_tainted(block) != tainted)
  die_tainted(US"store_newblock", CUS func, linenumber);

newtext = store_get(newsize, tainted);
memcpy(newtext, block, len);
if (release_ok) store_release_3(block, pool, func, linenumber);
return (void *)newtext;
}

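/* Illustrative call site, as when an accumulated input line outgrows its
block ("count" being a hypothetical number of bytes already stored in s):

     s = store_newblock(s, is_tainted(s), newsize, count);
*/
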
/******************************************************************************/

static void *
store_alloc_tail(void * yield, int size, const char * func, int line,
  const uschar * type)
{
if ((nonpool_malloc += size) > max_nonpool_malloc)
  max_nonpool_malloc = nonpool_malloc;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func; line = line; type = type;
#else

/* If running in a test harness, spend time making sure all the new store
is not filled with zeros so as to catch problems. */

if (f.running_in_test_harness)
  memset(yield, 0xF0, (size_t)size);
DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n",
  type, yield, size, func, line, pool_malloc, nonpool_malloc);
#endif  /* COMPILE_UTILITY */

return yield;
}

/*************************************************
*                Mmap store                      *
*************************************************/

static void *
store_mmap(int size, const char * func, int line)
{
void * yield, * top;

if (size < 16) size = 16;

if ((yield = mmap(NULL, (size_t)size,
      PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)) == MAP_FAILED)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
    "called from line %d of %s", size, line, func);

if (yield < tainted_base) tainted_base = yield;
if ((top = yield + size) > tainted_top) tainted_top = top;

return store_alloc_tail(yield, size, func, line, US"Mmap");
}

/*************************************************
*               Malloc store                     *
*************************************************/

/* Running out of store is a total disaster for Exim. Some malloc functions
do not run happily on very small sizes, nor do they document this fact. This
function is called via the macro store_malloc().

Arguments:
  size        amount of store wanted
  func        function from which called
  linenumber  line number in source file

Returns:    pointer to allocated store (panic on failure)
*/

static void *
internal_store_malloc(int size, const char *func, int linenumber)
{
void * yield;

if (size < 16) size = 16;

if (!(yield = malloc((size_t)size)))
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
    "called from line %d in %s", size, linenumber, func);

return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
}

void *
store_malloc_3(int size, const char *func, int linenumber)
{
if (n_nonpool_blocks++ > max_nonpool_blocks)
  max_nonpool_blocks = n_nonpool_blocks;
return internal_store_malloc(size, func, linenumber);
}

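/* Illustrative use: store_malloc()/store_free() pair up like malloc()/free(),
with the macros adding caller information for the debug trace:

     void * v = store_malloc(1024);
     ...
     store_free(v);
*/
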
/************************************************
*             Free store                        *
************************************************/

/* This function is called by the macro store_free().

Arguments:
  block       block of store to free
  func        function from which called
  linenumber  line number in source file

Returns:  nothing
*/

static void
internal_store_free(void *block, const char *func, int linenumber)
{
#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
#endif  /* COMPILE_UTILITY */
free(block);
}

void
store_free_3(void *block, const char *func, int linenumber)
{
n_nonpool_blocks--;
internal_store_free(block, func, linenumber);
}

863 /* Stats output on process exit */
867 #ifndef COMPILE_UTILITY
870 debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
871 (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
872 debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
873 for (int i = 0; i < NPOOLS; i++)
874 debug_printf("----Exit pool %d max: %3d kB in %d blocks\t%s %s\n",
875 i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);