1 /*************************************************
2 * Exim - an Internet mail transport agent *
3 *************************************************/
5 /* Copyright (c) University of Cambridge 1995 - 2018 */
6 /* Copyright (c) The Exim maintainers 2019 */
7 /* See the file NOTICE for conditions of use and distribution. */
9 /* Exim gets and frees all its store through these functions. In the original
10 implementation there was a lot of mallocing and freeing of small bits of store.
11 The philosophy has now changed to a scheme which includes the concept of
12 "stacking pools" of store. For the short-lived processes, there isn't any real
13 need to do any garbage collection, but the stack concept allows quick resetting
14 in places where this seems sensible.
16 Obviously the long-running processes (the daemon, the queue runner, and eximon)
17 must take care not to eat store.
19 The following different types of store are recognized:
21 . Long-lived, large blocks: This is implemented by retaining the original
malloc/free functions, and it is used for permanent working buffers and for
23 getting blocks to cut up for the other types.
25 . Long-lived, small blocks: This is used for blocks that have to survive until
26 the process exits. It is implemented as a stacking pool (POOL_PERM). This is
27 functionally the same as store_malloc(), except that the store can't be
28 freed, but I expect it to be more efficient for handling small blocks.
30 . Short-lived, short blocks: Most of the dynamic store falls into this
31 category. It is implemented as a stacking pool (POOL_MAIN) which is reset
32 after accepting a message when multiple messages are received by a single
33 process. Resetting happens at some other times as well, usually fairly
34 locally after some specific processing that needs working store.
36 . There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
This means it can be freed when search_tidyup() is called to close down all
the lookup caching.
40 . Orthogonal to the three pool types, there are two classes of memory: untainted
41 and tainted. The latter is used for values derived from untrusted input, and
42 the string-expansion mechanism refuses to operate on such values (obviously,
43 it can expand an untainted value to return a tainted result). The classes
44 are implemented by duplicating the three pool types. Pool resets are requested
against the untainted sibling and apply to both siblings.
47 Only memory blocks requested for tainted use are regarded as tainted; anything
48 else (including stack auto variables) is untainted. Care is needed when coding
to not copy untrusted data into untainted memory, as downstream taint-checks
would be avoided.
Internally we currently use malloc for untainted pools, and mmap for tainted
pools. The disparity is for speed of testing the taintedness of pointers:
Linux appears to use distinct non-overlapping address allocations for mmap
vs. everything else, which means only two pointer-compares suffice for the
test. Other OSes cannot use that optimisation, and a more lengthy test against
the limits of tainted-pool allocations has to be done.
Intermediate layers (eg. the string functions) can test for taint, and use this
for ensuring that results have proper state. For example the
string_vformat_trc() routine supporting the string_sprintf() interface will
recopy a string being built into a tainted allocation if it meets a %s for a
tainted argument. Any intermediate-layer function that (can) return a new
allocation should behave this way, returning a tainted result if any tainted
content is used. Intermediate-layer functions (eg. Ustrncpy) that modify
existing allocations fail if tainted data is written into an untainted area.
Users of functions that modify existing allocations should check if a tainted
source and an untainted destination are used, and fail instead (sprintf() being
the classic case). */
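/* Illustrative sketch (not part of the implementation): typical client use of
the pools described above, via the macro wrappers that supply func/linenumber
automatically. The sizes are arbitrary.

     rmark m = store_mark();
     uschar * buf  = store_get(1024, FALSE);   untainted working store
     uschar * smtp = store_get(1024, TRUE);    tainted: will hold SMTP input
     ... use the buffers ...
     store_reset(m);                           winds back both pool twins
*/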
74 /* keep config.h before memcheck.h, for NVALGRIND */
81 /* We need to know how to align blocks of data for general use. I'm not sure
82 how to get an alignment factor in general. In the current world, a value of 8
83 is probably right, and this is sizeof(double) on some systems and sizeof(void
84 *) on others, so take the larger of those. Since everything in this expression
85 is a constant, the compiler should optimize it to a simple constant wherever it
86 appears (I checked that gcc does do this). */
#define alignment \
  (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))
91 /* store_reset() will not free the following block if the last used block has
92 less than this much left in it. */
94 #define STOREPOOL_MIN_SIZE 256
96 /* Structure describing the beginning of each big block. */
98 typedef struct storeblock {
struct storeblock *next;
size_t length;
} storeblock;
103 /* Just in case we find ourselves on a system where the structure above has a
length that is not a multiple of the alignment, set up a macro for the padded
length. */
107 #define ALIGNED_SIZEOF_STOREBLOCK \
108 (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
110 /* Size of block to get from malloc to carve up into smaller ones. This
must be a multiple of the alignment. We assume that 8192 is going to be
a convenient size. */
114 #define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)
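/* Worked example, assuming 8-byte alignment and a 16-byte storeblock (next
pointer plus length): ALIGNED_SIZEOF_STOREBLOCK is 16, STORE_BLOCK_SIZE is
8192 - 16 = 8176, and a standard block therefore asks malloc for
8176 + 16 = 8192 bytes, a nicely page-sized request. */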
116 /* Variables holding data for the local pools of store. The current pool number
117 is held in store_pool, which is global so that it can be changed from outside.
118 Setting the initial length values to -1 forces a malloc for the first call,
119 even if the length is zero (which is used for getting a point to reset to). */
121 int store_pool = POOL_MAIN;
124 static storeblock *chainbase[NPOOLS];
125 static storeblock *current_block[NPOOLS];
126 static void *next_yield[NPOOLS];
127 static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };
129 /* The limits of the tainted pools. Tracking these on new allocations enables
130 a fast is_tainted implementation. We assume the kernel only allocates mmaps using
131 one side or the other of data+heap, not both. */
133 void * tainted_base = (void *)-1;
134 void * tainted_top = (void *)0;
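/* Minimal sketch of the fast-mode test these bounds enable (the real test is
the is_tainted() macro defined elsewhere; this is illustrative only):

     BOOL fast_is_tainted(const void * p)
     { return US p >= US tainted_base && US p < US tainted_top; }
*/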
136 /* pool_malloc holds the amount of memory used by the store pools; this goes up
137 and down as store is reset or released. nonpool_malloc is the total got by
malloc from other calls; this doesn't go down because it is just freed by
pointer. */
141 static int pool_malloc;
142 static int nonpool_malloc;
144 /* This variable is set by store_get() to its yield, and by store_reset() to
145 NULL. This enables string_cat() to optimize its store handling for very long
146 strings. That's why the variable is global. */
148 void *store_last_get[NPOOLS];
150 /* These are purely for stats-gathering */
152 static int nbytes[NPOOLS]; /* current bytes allocated */
153 static int maxbytes[NPOOLS]; /* max number reached */
154 static int nblocks[NPOOLS]; /* current number of blocks allocated */
155 static int maxblocks[NPOOLS];
156 static int n_nonpool_blocks; /* current number of direct store_malloc() blocks */
157 static int max_nonpool_blocks;
158 static int max_pool_malloc; /* max value for pool_malloc */
159 static int max_nonpool_malloc; /* max value for nonpool_malloc */
162 #ifndef COMPILE_UTILITY
163 static const uschar * pooluse[NPOOLS] = {
164 [POOL_MAIN] = US"main",
165 [POOL_PERM] = US"perm",
166 [POOL_SEARCH] = US"search",
167 [POOL_TAINT_MAIN] = US"main",
168 [POOL_TAINT_PERM] = US"perm",
169 [POOL_TAINT_SEARCH] = US"search",
};
static const uschar * poolclass[NPOOLS] = {
172 [POOL_MAIN] = US"untainted",
173 [POOL_PERM] = US"untainted",
174 [POOL_SEARCH] = US"untainted",
175 [POOL_TAINT_MAIN] = US"tainted",
176 [POOL_TAINT_PERM] = US"tainted",
[POOL_TAINT_SEARCH] = US"tainted",
};
#endif
182 static void * store_mmap(int, const char *, int);
183 static void * internal_store_malloc(int, const char *, int);
184 static void internal_untainted_free(void *, const char *, int linenumber);
185 static void internal_tainted_free(storeblock *, const char *, int linenumber);
187 /******************************************************************************/
189 /* Test if a pointer refers to tainted memory.
Slower version of the check, for use when the platform intermixes malloc and
mmap area addresses. Test against the current block of all tainted pools
first, then against all blocks of all tainted pools.
195 Return: TRUE iff tainted
BOOL
is_tainted_fn(const void * p)
{
storeblock * b;
for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
  if ((b = current_block[pool]))
    {
    uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
    if (US p >= bc && US p <= bc + b->length) return TRUE;
    }

for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
  for (b = chainbase[pool]; b; b = b->next)
    {
    uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
    if (US p >= bc && US p <= bc + b->length) return TRUE;
    }

return FALSE;
}
void
die_tainted(const uschar * msg, const uschar * func, int line)
{
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
  msg, func, line);
}
static void
use_slow_taint_check(const uschar * why)
{
230 #ifndef COMPILE_UTILITY
232 debug_printf("switching to slow-mode taint checking (after %s) "
233 "taint bounds %p %p\n", why, tainted_base, tainted_top);
235 f.taint_check_slow = TRUE;
238 /* If the creation of a new tainted region results in any of the
239 untainted regions appearing to be tainted, using the fast-mode test,
240 we need to switch to safe-but-slow mode. */
static void
verify_all_untainted(void)
{
245 for (int pool = 0; pool < POOL_TAINT_BASE; pool++)
  for (storeblock * b = chainbase[pool]; b; b = b->next)
    {
    uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
    if (is_tainted(bc))
      {
      use_slow_taint_check(US"mmap");
      return;
      }
    }
}
259 /*************************************************
260 * Get a block from the current pool *
261 *************************************************/
263 /* Running out of store is a total disaster. This function is called via the
264 macro store_get(). It passes back a block of store within the current big
block, getting a new one if necessary. The address is saved in
store_last_get for the pool.

Arguments:
269 size amount wanted, bytes
270 tainted class: set to true for untrusted data (eg. from smtp input)
271 func function from which called
272 linenumber line number in source file
274 Returns: pointer to store (panic on malloc failure)
*/

void *
store_get_3(int size, BOOL tainted, const char *func, int linenumber)
{
280 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
282 /* Round up the size to a multiple of the alignment. Although this looks a
283 messy statement, because "alignment" is a constant expression, the compiler can
284 do a reasonable job of optimizing, especially if the value of "alignment" is a
285 power of two. I checked this with -O2, and gcc did very well, compiling it to 4
286 instructions on a Sparc (alignment = 8). */
288 if (size % alignment != 0) size += alignment - (size % alignment);
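/* Example: with alignment 8, a request for 13 bytes has 13 % 8 == 5, so it
grows by 8 - 5 == 3 to 16; an already-aligned request such as 16 is left
unchanged. */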
290 /* If there isn't room in the current block, get a new one. The minimum
291 size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
292 these functions are mostly called for small amounts of store. */
294 if (size > yield_length[pool])
  {
  int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
297 int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
298 storeblock * newblock;
300 /* Sometimes store_reset() may leave a block for us; check if we can use it */
  if (  (newblock = current_block[pool])
     && (newblock = newblock->next)
     && newblock->length < length
     )
    {
    /* Give up on this block, because it's too small */
    nblocks[pool]--;
    if (pool < POOL_TAINT_BASE)
      internal_untainted_free(newblock, func, linenumber);
    else
      internal_tainted_free(newblock, func, linenumber);
    newblock = NULL;
    }
  /* If there was no free block, get a new one */

  if (!newblock)
    {
320 if ((nbytes[pool] += mlength) > maxbytes[pool])
321 maxbytes[pool] = nbytes[pool];
322 if ((pool_malloc += mlength) > max_pool_malloc) /* Used in pools */
323 max_pool_malloc = pool_malloc;
324 nonpool_malloc -= mlength; /* Exclude from overall total */
325 if (++nblocks[pool] > maxblocks[pool])
326 maxblocks[pool] = nblocks[pool];
    newblock = tainted
      ? store_mmap(mlength, func, linenumber)
330 : internal_store_malloc(mlength, func, linenumber);
331 newblock->next = NULL;
332 newblock->length = length;
334 if (!chainbase[pool])
335 chainbase[pool] = newblock;
    else
      current_block[pool]->next = newblock;
    }
340 current_block[pool] = newblock;
341 yield_length[pool] = newblock->length;
  next_yield[pool] =
    (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
  (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
  }

/* There's (now) enough room in the current block; the yield is the next
pointer. */
350 store_last_get[pool] = next_yield[pool];
/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */
355 #ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
360 debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
361 store_last_get[pool], size, func, linenumber);
362 #endif /* COMPILE_UTILITY */
364 (void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);
365 /* Update next pointer and number of bytes left in the current block. */
367 next_yield[pool] = (void *)(CS next_yield[pool] + size);
368 yield_length[pool] -= size;
return store_last_get[pool];
}
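/* Illustrative call, via the store_get() macro wrapper (which supplies the
func/linenumber arguments); the size is arbitrary:

     uschar * s = store_get(256, FALSE);    current untainted pool
     uschar * t = store_get(256, TRUE);     its tainted twin
*/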
374 /*************************************************
375 * Get a block from the PERM pool *
376 *************************************************/
/* This is just a convenience function, useful when just a single block is to
be obtained.

Arguments:
  size        amount wanted
  tainted     class: set to true for untrusted data
383 func function from which called
384 linenumber line number in source file
386 Returns: pointer to store (panic on malloc failure)
*/

void *
store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
{
void * yield;
int old_pool = store_pool;
394 store_pool = POOL_PERM;
395 yield = store_get_3(size, tainted, func, linenumber);
store_pool = old_pool;
return yield;
}
402 /*************************************************
403 * Extend a block if it is at the top *
404 *************************************************/
406 /* While reading strings of unknown length, it is often the case that the
407 string is being read into the block at the top of the stack. If it needs to be
408 extended, it is more efficient just to extend within the top block rather than
409 allocate a new block and then have to copy the data. This function is provided
410 for the use of string_cat(), but of course can be used elsewhere too.
411 The block itself is not expanded; only the top allocation from it.
414 ptr pointer to store block
415 oldsize current size of the block, as requested by user
416 newsize new size required
417 func function from which called
418 linenumber line number in source file
420 Returns: TRUE if the block is at the top of the stack and has been
              extended; FALSE if it isn't at the top of the stack, or cannot
              be extended
*/
BOOL
store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
  const char *func, int linenumber)
{
429 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
430 int inc = newsize - oldsize;
431 int rounded_oldsize = oldsize;
433 /* Check that the block being extended was already of the required taint status;
434 refuse to extend if not. */
if (is_tainted(ptr) != tainted)
  die_tainted(US"store_extend", CUS func, linenumber);
439 if (rounded_oldsize % alignment != 0)
440 rounded_oldsize += alignment - (rounded_oldsize % alignment);
442 if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
    inc > yield_length[pool] + rounded_oldsize - oldsize)
  return FALSE;
/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */
449 #ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
454 debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
456 #endif /* COMPILE_UTILITY */
458 if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
459 next_yield[pool] = CS ptr + newsize;
460 yield_length[pool] -= newsize - rounded_oldsize;
(void) VALGRIND_MAKE_MEM_UNDEFINED(CS ptr + oldsize, inc);
return TRUE;
}
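/* Illustrative pattern (the growth step is arbitrary): extend in place while
the allocation is still on top of the stack, otherwise fall back to copying
into a fresh block via store_newblock():

     if (!store_extend(s, FALSE, size, size + 100))
       s = store_newblock(s, FALSE, size + 100, size);
     size += 100;
*/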
468 /*************************************************
469 * Back up to a previous point on the stack *
470 *************************************************/
472 /* This function resets the next pointer, freeing any subsequent whole blocks
473 that are now unused. Call with a cookie obtained from store_mark() only; do
474 not call with a pointer returned by store_get(). Both the untainted and tainted
pools corresponding to store_pool are reset.
478 r place to back up to
479 func function from which called
480 linenumber line number in source file
*/

static void
internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
{
storeblock * bb;
489 storeblock * b = current_block[pool];
490 char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
491 int newlength, count;
492 #ifndef COMPILE_UTILITY
int oldmalloc = pool_malloc;
#endif
496 /* Last store operation was not a get */
498 store_last_get[pool] = NULL;
500 /* See if the place is in the current block - as it often will be. Otherwise,
501 search for the block in which it lies. */
if (CS ptr < bc || CS ptr > bc + b->length)
  {
  for (b = chainbase[pool]; b; b = b->next)
    {
    bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS ptr >= bc && CS ptr <= bc + b->length) break;
    }
  if (!b)
    log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
      "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
  }
515 /* Back up, rounding to the alignment if necessary. When testing, flatten
516 the released memory. */
518 newlength = bc + b->length - CS ptr;
519 #ifndef COMPILE_UTILITY
assert_no_variables(ptr, newlength, func, linenumber);
if (f.running_in_test_harness)
  {
  (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
  memset(ptr, 0xF0, newlength);
  }
#endif
530 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
531 next_yield[pool] = CS ptr + (newlength % alignment);
532 count = yield_length[pool];
533 count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
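/* The two assignments above leave "count" holding the number of bytes
recovered within this block (the new yield_length minus the old one); it
feeds the reset-size figure in the debug output below. */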
534 current_block[pool] = b;
536 /* Free any subsequent block. Do NOT free the first
537 successor, if our current block has less than 256 bytes left. This should
prevent us from flapping memory. However, keep this block only when it has
the default size. */
541 if ( yield_length[pool] < STOREPOOL_MIN_SIZE
543 && b->next->length == STORE_BLOCK_SIZE)
546 #ifndef COMPILE_UTILITY
  assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
    func, linenumber);
#endif
551 (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
552 b->length - ALIGNED_SIZEOF_STOREBLOCK);
  }

bb = b->next;
b->next = NULL;

while ((b = bb))
  {
  int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
561 #ifndef COMPILE_UTILITY
  assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
    func, linenumber);
#endif
  bb = bb->next;
  nbytes[pool] -= siz;
  pool_malloc -= siz;
  nblocks[pool]--;
570 if (pool < POOL_TAINT_BASE)
571 internal_untainted_free(b, func, linenumber);
  else
    internal_tainted_free(b, func, linenumber);
  }
/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */
579 #ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
584 debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
585 count + oldmalloc - pool_malloc,
586 func, linenumber, pool_malloc);
#endif /* COMPILE_UTILITY */
}
rmark
store_reset_3(rmark r, int pool, const char *func, int linenumber)
{
void ** ptr = r;
596 if (pool >= POOL_TAINT_BASE)
597 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
598 "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
if (!r)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
601 "store_reset called with bad mark: %s %d\n", func, linenumber);
603 internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
internal_store_reset(ptr, pool, func, linenumber);
return NULL;
}
610 /* Free tail-end unused allocation. This lets us allocate a big chunk
611 early, for cases when we only discover later how much was really needed.
613 Can be called with a value from store_get(), or an offset after such. Only
614 the tainted or untainted pool that serviced the store_get() will be affected.
616 This is mostly a cut-down version of internal_store_reset().
617 XXX needs rationalising
*/

void
store_release_above_3(void *ptr, const char *func, int linenumber)
{
623 /* Search all pools' "current" blocks. If it isn't one of those,
624 ignore it (it usually will be). */
626 for (int pool = 0; pool < nelem(current_block); pool++)
  {
  storeblock * b = current_block[pool];
  char * bc;
  int count, newlength;

  if (!b)
    continue;

  bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
  if (CS ptr < bc || CS ptr > bc + b->length)
    continue;
639 /* Last store operation was not a get */
641 store_last_get[pool] = NULL;
643 /* Back up, rounding to the alignment if necessary. When testing, flatten
644 the released memory. */
646 newlength = bc + b->length - CS ptr;
647 #ifndef COMPILE_UTILITY
650 assert_no_variables(ptr, newlength, func, linenumber);
651 if (f.running_in_test_harness)
    {
    (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
    memset(ptr, 0xF0, newlength);
    }
#endif
658 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
659 next_yield[pool] = CS ptr + (newlength % alignment);
660 count = yield_length[pool];
661 count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */
666 #ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
671 debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
      func, linenumber, pool_malloc);
#endif
  return;
  }
676 #ifndef COMPILE_UTILITY
678 debug_printf("non-last memory release try: %s %d\n", func, linenumber);
rmark
store_mark_3(const char *func, int linenumber)
{
void ** p;
689 if (store_pool >= POOL_TAINT_BASE)
690 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
691 "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);
693 /* Stash a mark for the tainted-twin release, in the untainted twin. Return
694 a cookie (actually the address in the untainted pool) to the caller.
695 Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
696 and winds back the untainted pool with the cookie. */
698 p = store_get_3(sizeof(void *), FALSE, func, linenumber);
*p = store_get_3(0, TRUE, func, linenumber);
return p;
}
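/* Sketch of the cookie this builds (illustrative):

     rmark r = store_mark();
       r  - a cell in the current untainted pool
       *r - the matching position in the tainted twin
     store_reset(r);
       winds the tainted pool back to *r, then the untainted pool to r
*/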
706 /************************************************
708 ************************************************/
710 /* This function checks that the pointer it is given is the first thing in a
711 block, and if so, releases that block.
714 block block of store to consider
715 func function from which called
716 linenumber line number in source file
*/

static void
store_release_3(void * block, int pool, const char * func, int linenumber)
{
724 /* It will never be the first block, so no need to check that. */
for (storeblock * b = chainbase[pool]; b; b = b->next)
  {
  storeblock * bb = b->next;
  if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
    {
    int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
    b->next = bb->next;
    nbytes[pool] -= siz;
    pool_malloc -= siz;
    nblocks[pool]--;
737 /* Cut out the debugging stuff for utilities, but stop picky compilers
738 from giving warnings. */
740 #ifdef COMPILE_UTILITY
    func = func;
    linenumber = linenumber;
#else
    DEBUG(D_memory)
745 debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
746 linenumber, pool_malloc);
748 if (f.running_in_test_harness)
749 memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
#endif /* COMPILE_UTILITY */
    free(bb);
    return;
    }
  }
}
759 /************************************************
761 ************************************************/
/* Allocate a new block big enough to expand to the given size and
764 copy the current data into it. Free the old one if possible.
766 This function is specifically provided for use when reading very
767 long strings, e.g. header lines. When the string gets longer than a
768 complete block, it gets copied to a new block. It is helpful to free
769 the old block iff the previous copy of the string is at its start,
770 and therefore the only thing in it. Otherwise, for very long strings,
771 dead store can pile up somewhat disastrously. This function checks that
772 the pointer it is given is the first thing in a block, and that nothing
773 has been allocated since. If so, releases that block.
Arguments:
  block       data to copy
  tainted     taint class for the new allocation
  newsize     size of new allocation
  len         amount of data to copy
  func        function from which called
  linenumber  line number in source file

Returns:    new location of data
*/
void *
store_newblock_3(void * block, BOOL tainted, int newsize, int len,
  const char * func, int linenumber)
{
787 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
BOOL release_ok = !tainted && store_last_get[pool] == block;
uschar * newtext;
791 #if !defined(MACRO_PREDEF) && !defined(COMPILE_UTILITY)
792 if (is_tainted(block) != tainted)
793 die_tainted(US"store_newblock", CUS func, linenumber);
796 newtext = store_get(newsize, tainted);
797 memcpy(newtext, block, len);
798 if (release_ok) store_release_3(block, pool, func, linenumber);
return (void *)newtext;
}
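/* Illustrative use while accumulating a long header line (hdr and len are
hypothetical locals; the doubling policy is arbitrary):

     uschar * hdr = store_get(len, FALSE);
     ... the block fills up ...
     hdr = store_newblock(hdr, FALSE, len * 2, len);
*/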
805 /******************************************************************************/
static void *
store_alloc_tail(void * yield, int size, const char * func, int line,
  const uschar * type)
{
810 if ((nonpool_malloc += size) > max_nonpool_malloc)
811 max_nonpool_malloc = nonpool_malloc;
/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */
816 #ifdef COMPILE_UTILITY
func = func; line = line; type = type;
#else
820 /* If running in test harness, spend time making sure all the new store
821 is not filled with zeros so as to catch problems. */
823 if (f.running_in_test_harness)
824 memset(yield, 0xF0, (size_t)size);
825 DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n",
826 type, yield, size, func, line, pool_malloc, nonpool_malloc);
#endif /* COMPILE_UTILITY */
return yield;
}
832 /*************************************************
834 *************************************************/
static void *
store_mmap(int size, const char * func, int line)
{
void * yield, * top;
841 if (size < 16) size = 16;
if ((yield = mmap(NULL, (size_t)size,
      PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)) == MAP_FAILED)
845 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
846 "called from line %d of %s", size, line, func);
848 if (yield < tainted_base) tainted_base = yield;
849 if ((top = US yield + size) > tainted_top) tainted_top = top;
850 if (!f.taint_check_slow) verify_all_untainted();
852 return store_alloc_tail(yield, size, func, line, US"Mmap");
855 /*************************************************
857 *************************************************/
859 /* Running out of store is a total disaster for exim. Some malloc functions
860 do not run happily on very small sizes, nor do they document this fact. This
861 function is called via the macro store_malloc().
864 size amount of store wanted
865 func function from which called
866 linenumber line number in source file
868 Returns: pointer to gotten store (panic on failure)
*/

static void *
internal_store_malloc(int size, const char *func, int linenumber)
{
void * yield;
876 if (size < 16) size = 16;
878 if (!(yield = malloc((size_t)size)))
879 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
880 "called from line %d in %s", size, linenumber, func);
882 /* If malloc ever returns apparently tainted memory, which glibc
883 malloc will as it uses mmap for larger requests, we must switch to
884 the slower checking for tainting (checking an address against all
885 the tainted pool block spans, rather than just the mmap span) */
887 if (!f.taint_check_slow && is_tainted(yield))
888 use_slow_taint_check(US"malloc");
890 return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
}

void *
store_malloc_3(int size, const char *func, int linenumber)
{
896 if (n_nonpool_blocks++ > max_nonpool_blocks)
897 max_nonpool_blocks = n_nonpool_blocks;
return internal_store_malloc(size, func, linenumber);
}
902 /************************************************
904 ************************************************/
906 /* This function is called by the macro store_free().
909 block block of store to free
910 func function from which called
911 linenumber line number in source file
*/

static void
internal_untainted_free(void * block, const char * func, int linenumber)
{
919 #ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
924 debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
#endif /* COMPILE_UTILITY */
free(block);
}
void
store_free_3(void * block, const char * func, int linenumber)
{
n_nonpool_blocks--;
internal_untainted_free(block, func, linenumber);
}
936 /******************************************************************************/
static void
internal_tainted_free(storeblock * block, const char * func, int linenumber)
{
940 #ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
945 debug_printf("---Unmap %6p %-20s %4d\n", block, func, linenumber);
munmap((void *)block, block->length + ALIGNED_SIZEOF_STOREBLOCK);
}
950 /******************************************************************************/
951 /* Stats output on process exit */
void
store_exit(void)
{
#ifndef COMPILE_UTILITY
958 debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
959 (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
960 debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
961 for (int i = 0; i < NPOOLS; i++)
962 debug_printf("----Exit pool %d max: %3d kB in %d blocks\t%s %s\n",
    i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);
#endif
}