/*************************************************
*     Exim - an Internet mail transport agent    *
*************************************************/

/* Copyright (c) The Exim maintainers 2019 - 2024 */
/* Copyright (c) University of Cambridge 1995 - 2018 */
/* See the file NOTICE for conditions of use and distribution. */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Exim gets and frees all its store through these functions. In the original
implementation there was a lot of mallocing and freeing of small bits of store.
The philosophy has now changed to a scheme which includes the concept of
"stacking pools" of store. For the short-lived processes, there isn't any real
need to do any garbage collection, but the stack concept allows quick resetting
in places where this seems sensible.

Obviously the long-running processes (the daemon, the queue runner, and eximon)
must take care not to eat store.
The following different types of store are recognized:

. Long-lived, large blocks: This is implemented by retaining the original
malloc/free functions, and it is used for permanent working buffers and for
getting blocks to cut up for the other types.

. Long-lived, small blocks: This is used for blocks that have to survive until
the process exits. It is implemented as a stacking pool (POOL_PERM). This is
functionally the same as store_malloc(), except that the store can't be
freed, but I expect it to be more efficient for handling small blocks.

. Short-lived, short blocks: Most of the dynamic store falls into this
category. It is implemented as a stacking pool (POOL_MAIN) which is reset
after accepting a message when multiple messages are received by a single
process. Resetting happens at some other times as well, usually fairly
locally after some specific processing that needs working store.

. There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
This means it can be freed when search_tidyup() is called to close down all
the lookup caching activities.

. There is another pool (POOL_MESSAGE) used for medium-lifetime objects: data
needed within a single message transaction but for longer than the use of the
main pool permits. Currently this means only receive-time DKIM information.

. There is a dedicated pool (POOL_CONFIG) for configuration data read from the
config file(s). Once complete, it is made readonly.

. There are pools for each active combination of lookup-quoting, dynamically
created.
. Orthogonal to the five main pool types, there are two classes of memory:
untainted and tainted. The latter is used for values derived from untrusted
input, and the string-expansion mechanism refuses to operate on such values
(obviously, it can expand an untainted value to return a tainted result). The
classes are implemented by duplicating the five pool types. Pool resets are
requested against the untainted sibling and apply to both siblings.
Only memory blocks requested for tainted use are regarded as tainted; anything
else (including stack auto variables) is untainted. Care is needed when coding
to not copy untrusted data into untainted memory, as downstream taint-checks
would be avoided.

Intermediate layers (eg. the string functions) can test for taint, and use this
for ensuring that results have proper state. For example the
string_vformat_trc() routine supporting the string_sprintf() interface will
recopy a string being built into a tainted allocation if it meets a %s for a
tainted argument. Any intermediate-layer function that (can) return a new
allocation should behave this way; returning a tainted result if any tainted
content is used. Intermediate-layer functions (eg. Ustrncpy) that modify
existing allocations fail if tainted data is written into an untainted area.
Users of functions that modify existing allocations should check if a tainted
source and an untainted destination is used, and fail instead (sprintf() being
the classic case).
*/
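/* Illustrative sketch (not part of the original file): how callers choose a
memory class via the store_get() macro from store.h. GET_UNTAINTED and
GET_TAINTED are the special prototype-memory values; any other pointer
propagates the class of the allocation it points into:

  uschar * cfg  = store_get(64, GET_UNTAINTED);   untainted block
  uschar * in   = store_get(64, GET_TAINTED);     tainted block
  uschar * copy = store_get(64, in);              tainted, inherited from "in"
  BOOL t = is_tainted(copy);                      TRUE
*/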
/* keep config.h before memcheck.h, for NVALGRIND */
#include "config.h"
#include "exim.h"
#include "memcheck.h"
/* We need to know how to align blocks of data for general use. I'm not sure
how to get an alignment factor in general. In the current world, a value of 8
is probably right, and this is sizeof(double) on some systems and sizeof(void
*) on others, so take the larger of those. Since everything in this expression
is a constant, the compiler should optimize it to a simple constant wherever it
appears (I checked that gcc does do this). */

#define alignment \
  (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))
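/* Worked example (illustrative only): with alignment == 8, a request for 13
bytes is rounded up to 16 by the allocator, since
  13 + (8 - 13 % 8) = 13 + 3 = 16
matching the "size += alignment - (size % alignment)" step in pool_get(). */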
/* store_reset() will not free the following block if the last used block has
less than this much left in it. */

#define STOREPOOL_MIN_SIZE 256
/* Structure describing the beginning of each big block. */

typedef struct storeblock {
  struct storeblock *next;
  size_t length;
} storeblock;
/* Pool descriptor struct */

typedef struct pooldesc {
  storeblock * chainbase;	/* list of blocks in pool */
  storeblock * current_block;	/* top block, still with free space */
  void * next_yield;		/* next allocation point */
  int yield_length;		/* remaining space in current block */
  unsigned store_block_order;	/* log2(size) block allocation size */

  /* This value is set by store_get() to its yield, and by store_reset() to
  NULL. This enables string_cat() to optimize its store handling for very long
  strings. That's why it is kept in the pool descriptor rather than being
  local to store_get(). */

  void * store_last_get;

  /* These are purely for stats-gathering */

  int nbytes;			/* current bytes allocated in this pool */
  int maxbytes;			/* high-water mark for nbytes */
  int nblocks;			/* current number of blocks in this pool */
  int maxblocks;		/* high-water mark for nblocks */
  unsigned maxorder;		/* high-water mark for store_block_order */
} pooldesc;
/* Enhanced pool descriptor for quoted pools */

typedef struct quoted_pooldesc {
  pooldesc pool;
  unsigned quoter;
  struct quoted_pooldesc * next;
} quoted_pooldesc;
/* Just in case we find ourselves on a system where the structure above has a
length that is not a multiple of the alignment, set up a macro for the padded
length. */

#define ALIGNED_SIZEOF_STOREBLOCK \
  (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
/* Size of block to get from malloc to carve up into smaller ones. This
must be a multiple of the alignment. We assume that 4096 is going to be
suitably aligned. Double the size per-pool for every malloc, to mitigate
certain denial-of-service attacks. Don't bother to decrease on block frees.
We waste on average half the current alloc size per pool. This could be several
hundred kB now, vs. 4kB with a constant-size block size. But the search time
for is_tainted(), linear in the number of blocks for the pool, is O(n log n)
rather than O(n^2) over the life of the pool, since the block count grows
only logarithmically with the amount allocated.

A test of 2000 RCPTs and a just-accept ACL had 370kB in 21 blocks before,
504kB in 6 blocks now, for the untainted-main (largest) pool.
Builds for restricted-memory systems can disable the expansion by
defining RESTRICTED_MEMORY */
/*XXX should we allow any for malloc's own overhead? But how much? */

/* #define RESTRICTED_MEMORY */
#define STORE_BLOCK_SIZE(order) ((1U << (order)) - ALIGNED_SIZEOF_STOREBLOCK)
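/* Worked example (illustrative only): on a typical LP64 build (8-byte
pointers, alignment 8), sizeof(storeblock) is 16, so ALIGNED_SIZEOF_STOREBLOCK
is 16 and the initial store_block_order of 12 gives
  STORE_BLOCK_SIZE(12) = 4096 - 16 = 4080
usable bytes; the next block for the same pool is taken at order 13,
i.e. 8192 - 16 = 8176 usable bytes, and so on, doubling each time. */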
/* Variables holding data for the local pools of store. The current pool number
is held in store_pool, which is global so that it can be changed from outside.
Setting the initial length values to -1 forces a malloc for the first call,
even if the length is zero (which is used for getting a point to reset to). */

int store_pool = POOL_MAIN;

pooldesc paired_pools[N_PAIRED_POOLS];
quoted_pooldesc * quoted_pools = NULL;
static int n_nonpool_blocks;	/* current number of direct store_malloc() blocks */
static int max_nonpool_blocks;
static int max_pool_malloc;	/* max value for pool_malloc */
static int max_nonpool_malloc;	/* max value for nonpool_malloc */

/* pool_malloc holds the amount of memory used by the store pools; this goes up
and down as store is reset or released. nonpool_malloc is the total got by
malloc from other calls; this doesn't go down because it is just freed by
pointer. */

static int pool_malloc;
static int nonpool_malloc;
#ifndef COMPILE_UTILITY
static const uschar * pooluse[N_PAIRED_POOLS] = {
[POOL_MAIN] = US"main",
[POOL_PERM] = US"perm",
[POOL_CONFIG] = US"config",
[POOL_SEARCH] = US"search",
[POOL_MESSAGE] = US"message",
[POOL_TAINT_MAIN] = US"main",
[POOL_TAINT_PERM] = US"perm",
[POOL_TAINT_CONFIG] = US"config",
[POOL_TAINT_SEARCH] = US"search",
[POOL_TAINT_MESSAGE] = US"message",
};
static const uschar * poolclass[N_PAIRED_POOLS] = {
[POOL_MAIN] = US"untainted",
[POOL_PERM] = US"untainted",
[POOL_CONFIG] = US"untainted",
[POOL_SEARCH] = US"untainted",
[POOL_MESSAGE] = US"untainted",
[POOL_TAINT_MAIN] = US"tainted",
[POOL_TAINT_PERM] = US"tainted",
[POOL_TAINT_CONFIG] = US"tainted",
[POOL_TAINT_SEARCH] = US"tainted",
[POOL_TAINT_MESSAGE] = US"tainted",
};
#endif
static void * internal_store_malloc(size_t, const char *, int);
static void   internal_store_free(void *, const char *, int linenumber);
#ifndef COMPILE_UTILITY
static BOOL   is_real_quoter(int);	/* forward; used by store_get_3() */
#endif

/******************************************************************************/
static void
pool_init(pooldesc * pp)
{
memset(pp, 0, sizeof(*pp));
pp->yield_length = -1;
pp->store_block_order = 12;	/* log2(allocation_size) ie. 4kB */
}

/* Initialisation, for things fragile with parameter changes when using
static initialisers. */

void
store_init(void)
{
for (pooldesc * pp = paired_pools; pp < paired_pools + N_PAIRED_POOLS; pp++)
  pool_init(pp);
}

/******************************************************************************/
/* Locating elements given memory pointer */

static BOOL
is_pointer_in_block(const storeblock * b, const void * p)
{
uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
return US p >= bc && US p < bc + b->length;
}

static pooldesc *
pool_current_for_pointer(const void * p)
{
storeblock * b;

for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
  if ((b = qp->pool.current_block) && is_pointer_in_block(b, p))
    return &qp->pool;

for (pooldesc * pp = paired_pools; pp < paired_pools + N_PAIRED_POOLS; pp++)
  if ((b = pp->current_block) && is_pointer_in_block(b, p))
    return pp;
return NULL;
}

static pooldesc *
pool_for_pointer(const void * p, const char * func, int linenumber)
{
pooldesc * pp;
storeblock * b;

if ((pp = pool_current_for_pointer(p))) return pp;

for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
  for (b = qp->pool.chainbase; b; b = b->next)
    if (is_pointer_in_block(b, p)) return &qp->pool;

for (pp = paired_pools; pp < paired_pools + N_PAIRED_POOLS; pp++)
  for (b = pp->chainbase; b; b = b->next)
    if (is_pointer_in_block(b, p)) return pp;

#ifndef COMPILE_UTILITY
stackdump();
#endif
log_write(0, LOG_MAIN|LOG_PANIC_DIE,
  "bad memory reference; pool not found, at %s %d", func, linenumber);
return NULL;
}
/******************************************************************************/
/* Test if a pointer refers to tainted memory.

Slower version check, for use when platform intermixes malloc and mmap area
addresses. Test against the current-block of all tainted pools first, then all
blocks of all tainted pools.

Return: TRUE iff tainted
*/

BOOL
is_tainted_fn(const void * p)
{
storeblock * b;

if (p == GET_UNTAINTED) return FALSE;
if (p == GET_TAINTED) return TRUE;

for (pooldesc * pp = paired_pools + POOL_TAINT_BASE;
     pp < paired_pools + N_PAIRED_POOLS; pp++)
  if ((b = pp->current_block))
    if (is_pointer_in_block(b, p)) return TRUE;

for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
  if ((b = qp->pool.current_block))
    if (is_pointer_in_block(b, p)) return TRUE;

for (pooldesc * pp = paired_pools + POOL_TAINT_BASE;
     pp < paired_pools + N_PAIRED_POOLS; pp++)
  for (b = pp->chainbase; b; b = b->next)
    if (is_pointer_in_block(b, p)) return TRUE;

for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
  for (b = qp->pool.chainbase; b; b = b->next)
    if (is_pointer_in_block(b, p)) return TRUE;

return FALSE;
}
void
die_tainted(const uschar * msg, const uschar * func, int line)
{
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
  msg, func, line);
}
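/* Illustrative sketch (not part of the original file): the checking wrappers
in the headers use die_tainted() as the common failure path, along the lines
of the Ustrcpy() guard, which dies when a tainted source would be written to
an untainted destination:

  if (!is_tainted(dst) && is_tainted(src))
    die_tainted(US"Ustrcpy", CUS __FUNCTION__, __LINE__);
*/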
#ifndef COMPILE_UTILITY
/* Return the pool for the given quoter, or null */

static pooldesc *
pool_for_quoter(unsigned quoter)
{
for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
  if (qp->quoter == quoter)
    return &qp->pool;
return NULL;
}

/* Allocate/init a new quoted-pool and return the pool */

static pooldesc *
quoted_pool_new(unsigned quoter)
{
// debug_printf("allocating quoted-pool\n");
quoted_pooldesc * qp = store_get_perm(sizeof(quoted_pooldesc), GET_UNTAINTED);

pool_init(&qp->pool);
qp->quoter = quoter;
qp->next = quoted_pools;
quoted_pools = qp;
return &qp->pool;
}
#endif
/******************************************************************************/
void
store_writeprotect(int pool)
{
#if !defined(COMPILE_UTILITY) && !defined(MISSING_POSIX_MEMALIGN)
for (storeblock * b = paired_pools[pool].chainbase; b; b = b->next)
  if (mprotect(b, ALIGNED_SIZEOF_STOREBLOCK + b->length, PROT_READ) != 0)
    DEBUG(D_any) debug_printf("config block mprotect: (%d) %s\n", errno, strerror(errno));
#endif
}

/******************************************************************************/
static void *
pool_get(pooldesc * pp, int size, BOOL align_mem, const char * func, int linenumber)
{
/* Ensure we've been asked to allocate memory.
A negative size is a sign of a security problem.
A zero size might also be suspect, but our internal usage deliberately
does this to return a current watermark value for a later release of
store. */

if (size < 0 || size >= INT_MAX/2)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "bad memory allocation requested (%d bytes) from %s %d",
    size, func, linenumber);

/* Round up the size to a multiple of the alignment. Although this looks a
messy statement, because "alignment" is a constant expression, the compiler can
do a reasonable job of optimizing, especially if the value of "alignment" is a
power of two. I checked this with -O2, and gcc did very well, compiling it to 4
instructions on a Sparc (alignment = 8). */

if (size % alignment != 0) size += alignment - (size % alignment);

/* If there isn't room in the current block, get a new one. The minimum
size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
these functions are mostly called for small amounts of store. */

if (size > pp->yield_length)
  {
  int length = MAX(
    STORE_BLOCK_SIZE(pp->store_block_order) - ALIGNED_SIZEOF_STOREBLOCK,
    size);
  int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
  storeblock * newblock;

  /* Sometimes store_reset() may leave a block for us; check if we can use it */

  if (  (newblock = pp->current_block)
     && (newblock = newblock->next)
     && newblock->length < length
     )
    {
    /* Give up on this block, because it's too small */
    pp->nblocks--;
    internal_store_free(newblock, func, linenumber);
    newblock = NULL;
    }

  /* If there was no free block, get a new one */

  if (!newblock)
    {
    if ((pp->nbytes += mlength) > pp->maxbytes)
      pp->maxbytes = pp->nbytes;
    if ((pool_malloc += mlength) > max_pool_malloc)	/* Used in pools */
      max_pool_malloc = pool_malloc;
    nonpool_malloc -= mlength;			/* Exclude from overall total */
    if (++pp->nblocks > pp->maxblocks)
      pp->maxblocks = pp->nblocks;

#ifndef MISSING_POSIX_MEMALIGN
    if (align_mem)
      {
      long pgsize = sysconf(_SC_PAGESIZE);
      int err = posix_memalign((void **)&newblock,
	pgsize, (mlength + pgsize - 1) & ~(pgsize - 1));
      if (err)
	log_write(0, LOG_MAIN|LOG_PANIC_DIE,
	  "failed to alloc (using posix_memalign) %d bytes of memory: '%s'"
	  "called from line %d in %s",
	  size, strerror(err), linenumber, func);
      }
    else
#endif
      newblock = internal_store_malloc(mlength, func, linenumber);
    newblock->next = NULL;
    newblock->length = length;
#ifndef RESTRICTED_MEMORY
    if (pp->store_block_order++ > pp->maxorder)
      pp->maxorder = pp->store_block_order;
#endif
    }

  if (!pp->chainbase)
    pp->chainbase = newblock;
  else
    pp->current_block->next = newblock;

  pp->current_block = newblock;
  pp->yield_length = newblock->length;
  pp->next_yield =
    (void *)(CS pp->current_block + ALIGNED_SIZEOF_STOREBLOCK);
  (void) VALGRIND_MAKE_MEM_NOACCESS(pp->next_yield, pp->yield_length);
  }

/* There's (now) enough room in the current block; the yield is the next
pointer. */

pp->store_last_get = pp->next_yield;

(void) VALGRIND_MAKE_MEM_UNDEFINED(pp->store_last_get, size);

/* Update next pointer and number of bytes left in the current block. */

pp->next_yield = (void *)(CS pp->next_yield + size);
pp->yield_length -= size;
return pp->store_last_get;
}
/*************************************************
*       Get a block from the current pool        *
*************************************************/

/* Running out of store is a total disaster. This function is called via the
macro store_get(). The current store_pool is used, adjusting for taint.
If the prototype is quoted, use a quoted-pool.
Return a block of store within the current big block of the pool, getting a new
one if necessary. The address is saved in store_last_get for the pool.

Arguments:
  size        amount wanted, bytes
  proto_mem   class: get store conformant to this
              Special values: 0 forces untainted, 1 forces tainted
  func        function from which called
  linenumber  line number in source file

Returns: pointer to store (panic on malloc failure)
*/
void *
store_get_3(int size, const void * proto_mem, const char * func, int linenumber)
{
#ifndef COMPILE_UTILITY
int quoter = quoter_for_address(proto_mem);
#endif
pooldesc * pp;
void * yield;

#ifndef COMPILE_UTILITY
if (!is_real_quoter(quoter))
#endif
  {
  BOOL tainted = is_tainted(proto_mem);
  int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
  pp = paired_pools + pool;
  yield = pool_get(pp, size, (pool == POOL_CONFIG), func, linenumber);

  /* Cut out the debugging stuff for utilities, but stop picky compilers from
  giving warnings. */

#ifndef COMPILE_UTILITY
  DEBUG(D_memory)
    debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
      pp->store_last_get, size, func, linenumber);
#endif
  }
#ifndef COMPILE_UTILITY
else
  {
  DEBUG(D_memory)
    debug_printf("allocating quoted-block for quoter %u (from %s %d)\n",
      quoter, func, linenumber);
  if (!(pp = pool_for_quoter(quoter))) pp = quoted_pool_new(quoter);
  yield = pool_get(pp, size, FALSE, func, linenumber);
  DEBUG(D_memory)
    debug_printf("---QQ Get %6p %5d %-14s %4d\n",
      pp->store_last_get, size, func, linenumber);
  }
#endif
return yield;
}
/*************************************************
*       Get a block from the PERM pool           *
*************************************************/

/* This is just a convenience function, useful when just a single block is to
be obtained.

Arguments:
  size        amount wanted
  proto_mem   class: get store conformant to this
  func        function from which called
  linenumber  line number in source file

Returns: pointer to store (panic on malloc failure)
*/

void *
store_get_perm_3(int size, const void * proto_mem, const char * func, int linenumber)
{
void * yield;
int old_pool = store_pool;
store_pool = POOL_PERM;
yield = store_get_3(size, proto_mem, func, linenumber);
store_pool = old_pool;
return yield;
}
#ifndef COMPILE_UTILITY
/*************************************************
*  Get a block annotated as being lookup-quoted  *
*************************************************/

/* Allocate from a pool consistent with the proto_mem augmented by the
requested quoter type.

XXX currently not handling mark/release

Args:
  size        number of bytes to allocate
  quoter      id for the quoting type
  func        caller, for debug
  linenumber  caller, for debug

Return: allocated memory block
*/

static void *
store_force_get_quoted(int size, unsigned quoter,
  const char * func, int linenumber)
{
pooldesc * pp = pool_for_quoter(quoter);
void * yield;

DEBUG(D_memory)
  debug_printf("allocating quoted-block for quoter %u (from %s %d)\n", quoter, func, linenumber);

if (!pp) pp = quoted_pool_new(quoter);
yield = pool_get(pp, size, FALSE, func, linenumber);

DEBUG(D_memory)
  debug_printf("---QQ Get %6p %5d %-14s %4d\n",
    pp->store_last_get, size, func, linenumber);

return yield;
}
/* Maybe get memory for the specified quoter, but only if the
prototype memory is tainted. Otherwise, get plain memory. */

void *
store_get_quoted_3(int size, const void * proto_mem, unsigned quoter,
  const char * func, int linenumber)
{
// debug_printf("store_get_quoted_3: quoter %u\n", quoter);
return is_tainted(proto_mem)
  ? store_force_get_quoted(size, quoter, func, linenumber)
  : store_get_3(size, proto_mem, func, linenumber);
}
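/* Illustrative sketch (not part of the original file): a caller wanting
lookup-quoted storage goes through the store_get_quoted() macro front-end
(assuming the usual _3-wrapper pattern in store.h), passing the quoter id of
the lookup type; "li->acq_num" is an assumed field name here:

  uschar * quoted = store_get_quoted(len + 1, tainted_src, li->acq_num);

If tainted_src is actually untainted, this falls back to a plain store_get(). */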
/* Return quoter for given address, or -1 if not in a quoted-pool. */

int
quoter_for_address(const void * p)
{
for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
  {
  pooldesc * pp = &qp->pool;
  storeblock * b;

  if ((b = pp->current_block))
    if (is_pointer_in_block(b, p))
      return qp->quoter;

  for (b = pp->chainbase; b; b = b->next)
    if (is_pointer_in_block(b, p))
      return qp->quoter;
  }
return -1;
}
/* Return TRUE iff the given address is quoted for the given type.
There is extra complexity to handle lookup providers with multiple
find variants but shared quote functions. */

BOOL
is_quoted_like(const void * p, unsigned quoter)
{
int pq = quoter_for_address(p);
const lookup_info * p_li = lookup_with_acq_num(pq);
void * p_qfn = p_li ? p_li->quote : NULL;
const lookup_info * q_li = lookup_with_acq_num(quoter);
void * q_qfn = q_li ? q_li->quote : NULL;
BOOL y = is_real_quoter(pq) && p_qfn == q_qfn;
/* debug_printf("is_quoted(%p, %u): %c\n", p, quoter, y?'T':'F'); */
return y;
}
/* Return TRUE if the quoter value indicates an actual quoter */

static BOOL
is_real_quoter(int quoter)
{
return quoter >= 0;
}

/* Return TRUE if the "new" data requires that the "old" data
be recopied to new-class memory. We order the classes as

  2: tainted, not quoted
  1: quoted (which is also tainted)
  0: untainted

If the "new" is higher-order than the "old", they are not compatible
and a copy is needed. If both are quoted, but the quoters differ,
not compatible. Otherwise they are compatible. */

BOOL
is_incompatible_fn(const void * old, const void * new)
{
int oi, ni, oq, nq;

ni = is_real_quoter(nq = quoter_for_address(new)) ? 1 : is_tainted(new) ? 2 : 0;
oi = is_real_quoter(oq = quoter_for_address(old)) ? 1 : is_tainted(old) ? 2 : 0;
return ni > oi || (ni == oi && nq != oq);
}
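/* Worked examples (illustrative only), using the class ordering above:
  old untainted (0), new tainted (2)  -> incompatible: a copy is needed
  old tainted (2),   new untainted (0) -> compatible
  old quoted by A,   new quoted by B  -> same order (1) but quoters differ:
                                         incompatible */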
#endif	/*!COMPILE_UTILITY*/

/*************************************************
*      Extend a block if it is at the top        *
*************************************************/

/* While reading strings of unknown length, it is often the case that the
string is being read into the block at the top of the stack. If it needs to be
extended, it is more efficient just to extend within the top block rather than
allocate a new block and then have to copy the data. This function is provided
for the use of string_cat(), but of course can be used elsewhere too.
The block itself is not expanded; only the top allocation from it.

Arguments:
  ptr         pointer to store block
  oldsize     current size of the block, as requested by user
  newsize     new size required
  func        function from which called
  linenumber  line number in source file

Returns:      TRUE if the block is at the top of the stack and has been
              extended; FALSE if it isn't at the top of the stack, or cannot
              be extended

XXX needs extension for quoted-tracking. This assumes that the global store_pool
is the one to alloc from, which breaks with separated pools.
*/
BOOL
store_extend_3(void * ptr, int oldsize, int newsize,
  const char * func, int linenumber)
{
pooldesc * pp = pool_for_pointer(ptr, func, linenumber);
int inc = newsize - oldsize;
int rounded_oldsize = oldsize;

if (oldsize < 0 || newsize < oldsize || newsize >= INT_MAX/2)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "bad memory extension requested (%d -> %d bytes) at %s %d",
    oldsize, newsize, func, linenumber);

if (rounded_oldsize % alignment != 0)
  rounded_oldsize += alignment - (rounded_oldsize % alignment);

if (CS ptr + rounded_oldsize != CS (pp->next_yield) ||
    inc > pp->yield_length + rounded_oldsize - oldsize)
  return FALSE;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  {
  quoted_pooldesc * qp;
  for (qp = quoted_pools; qp; qp = qp->next)
    if (pp == &qp->pool)
      {
      debug_printf("---Q%d Ext %6p %5d %-14s %4d\n",
	(int)(qp - quoted_pools),
	ptr, newsize, func, linenumber);
      break;
      }
  if (!qp)
    debug_printf("---%d Ext %6p %5d %-14s %4d\n",
      (int)(pp - paired_pools),
      ptr, newsize, func, linenumber);
  }
#endif /* COMPILE_UTILITY */

if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
pp->next_yield = CS ptr + newsize;
pp->yield_length -= newsize - rounded_oldsize;
(void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
return TRUE;
}
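/* Illustrative sketch (not part of the original file): the grow-in-place
pattern this function supports, via the store_extend()/store_newblock()
macro front-ends from store.h:

  int oldsize = 100, newsize = 200;
  uschar * s = store_get(oldsize, GET_UNTAINTED);
  ...
  if (!store_extend(s, oldsize, newsize))       cheap in-place growth
    s = store_newblock(s, newsize, oldsize);    else copy to a bigger block
*/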
static BOOL
is_pwr2_size(int len)
{
int x = len;
return (x & (x - 1)) == 0;
}
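/* Worked example (illustrative only): a default-sized block is 4096 == 2^12
bytes including its header, so is_pwr2_size(4096) is TRUE and store_reset()
keeps it; a custom-inflated block of 4096+64 bytes gives
  4160 & 4159 != 0
so it is freed rather than retained. */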
/*************************************************
*    Back up to a previous point on the stack    *
*************************************************/

/* This function resets the next pointer, freeing any subsequent whole blocks
that are now unused. Call with a cookie obtained from store_mark() only; do
not call with a pointer returned by store_get(). Both the untainted and tainted
pools corresponding to store_pool are reset.

Quoted pools are not handled.

Arguments:
  ptr         place to back up to
  pool        pool holding the pointer
  func        function from which called
  linenumber  line number in source file

Returns: nothing
*/
static void
internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
{
pooldesc * pp = paired_pools + pool;
storeblock * b = pp->current_block;
storeblock * bb;
char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
int newlength, count;
#ifndef COMPILE_UTILITY
int oldmalloc = pool_malloc;
#endif

if (!b) return;	/* exim_dumpdb gets this, because it has never used tainted mem */

/* Last store operation was not a get */

pp->store_last_get = NULL;

/* See if the place is in the current block - as it often will be. Otherwise,
search for the block in which it lies. */

if (CS ptr < bc || CS ptr > bc + b->length)
  {
  for (b = pp->chainbase; b; b = b->next)
    {
    bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS ptr >= bc && CS ptr <= bc + b->length) break;
    }
  if (!b)
    log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
      "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
  }

/* Back up, rounding to the alignment if necessary. When testing, flatten
the released memory. */

newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
if (debug_store)
  {
  assert_no_variables(ptr, newlength, func, linenumber);
  if (f.running_in_test_harness)
    {
    (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
    memset(ptr, 0xF0, newlength);
    }
  }
#endif
(void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
pp->next_yield = CS ptr + (newlength % alignment);
count = pp->yield_length;
count = (pp->yield_length = newlength - (newlength % alignment)) - count;
pp->current_block = b;

/* Free any subsequent block. Do NOT free the first
successor, if our current block has less than 256 bytes left. This should
prevent us from flapping memory. However, keep this block only when it has
a power-of-two size so probably is not a custom inflated one. */

if (  pp->yield_length < STOREPOOL_MIN_SIZE
   && b->next
   && is_pwr2_size(b->next->length + ALIGNED_SIZEOF_STOREBLOCK))
  {
  b = b->next;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
    b->length - ALIGNED_SIZEOF_STOREBLOCK);
  }

bb = b->next;
if (pool != POOL_CONFIG)
  b->next = NULL;

while ((b = bb))
  {
  int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;

#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  bb = bb->next;
  pp->nbytes -= siz;
  pool_malloc -= siz;
  pp->nblocks--;
  if (pool != POOL_CONFIG)
    internal_store_free(b, func, linenumber);

#ifndef RESTRICTED_MEMORY
  if (pp->store_block_order > 13) pp->store_block_order--;
#endif
  }

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("---%d Rst %6p %5d %-14s %4d\tpool %d\n", pool, ptr,
    count + oldmalloc - pool_malloc,
    func, linenumber, pool_malloc);
#endif /* COMPILE_UTILITY */
}
/* Back up the pool pair, untainted and tainted, of the store_pool setting.
Quoted pools are not handled. */

rmark
store_reset_3(rmark r, const char * func, int linenumber)
{
void ** ptr = r;

if (store_pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called for pool %d: %s %d\n", store_pool, func, linenumber);
if (!r)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called with bad mark: %s %d\n", func, linenumber);

internal_store_reset(*ptr, store_pool + POOL_TAINT_BASE, func, linenumber);
internal_store_reset(ptr, store_pool, func, linenumber);
return NULL;
}
/* Free tail-end unused allocation. This lets us allocate a big chunk
early, for cases when we only discover later how much was really needed.

Can be called with a value from store_get(), or an offset after such. Only
the tainted or untainted pool that serviced the store_get() will be affected.

This is mostly a cut-down version of internal_store_reset().
XXX needs rationalising
*/

void
store_release_above_3(void * ptr, const char * func, int linenumber)
{
pooldesc * pp;

/* Search all pools' "current" blocks. If it isn't one of those,
ignore it (it usually will be). */

if ((pp = pool_current_for_pointer(ptr)))
  {
  storeblock * b = pp->current_block;
  int count, newlength;

  /* Last store operation was not a get */

  pp->store_last_get = NULL;

  /* Back up, rounding to the alignment if necessary. When testing, flatten
  the released memory. */

  newlength = (CS b + ALIGNED_SIZEOF_STOREBLOCK) + b->length - CS ptr;
#ifndef COMPILE_UTILITY
  if (debug_store)
    {
    assert_no_variables(ptr, newlength, func, linenumber);
    if (f.running_in_test_harness)
      {
      (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
      memset(ptr, 0xF0, newlength);
      }
    }
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
  pp->next_yield = CS ptr + (newlength % alignment);
  count = pp->yield_length;
  count = (pp->yield_length = newlength - (newlength % alignment)) - count;

  /* Cut out the debugging stuff for utilities, but stop picky compilers from
  giving warnings. */

#ifndef COMPILE_UTILITY
  DEBUG(D_memory)
    {
    quoted_pooldesc * qp;
    for (qp = quoted_pools; qp; qp = qp->next)
      if (pp == &qp->pool)
	{
	debug_printf("---Q%d Rel %6p %5d %-14s %4d\tpool %d\n",
	  (int)(qp - quoted_pools),
	  ptr, count, func, linenumber, pool_malloc);
	break;
	}
    if (!qp)
      debug_printf("---%d Rel %6p %5d %-14s %4d\tpool %d\n",
	(int)(pp - paired_pools), ptr, count,
	func, linenumber, pool_malloc);
    }
#endif
  return;
  }

#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("non-last memory release try: %s %d\n", func, linenumber);
#endif
}
rmark
store_mark_3(const char * func, int linenumber)
{
void ** p;

#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("---%d Mrk %-14s %4d\tpool %d\n",
    store_pool, func, linenumber, pool_malloc);
#endif /* COMPILE_UTILITY */

if (store_pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);

/* Stash a mark for the tainted-twin release, in the untainted twin. Return
a cookie (actually the address in the untainted pool) to the caller.
Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
and winds back the untainted pool with the cookie. */

p = store_get_3(sizeof(void *), GET_UNTAINTED, func, linenumber);
*p = store_get_3(0, GET_TAINTED, func, linenumber);
return p;
}
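/* Illustrative sketch (not part of the original file): the usual
mark/allocate/reset pattern, via the store_mark()/store_get()/store_reset()
macros from store.h:

  rmark reset_point = store_mark();
  uschar * workspace = store_get(1024, GET_UNTAINTED);
  ... use workspace ...
  reset_point = store_reset(reset_point);   winds back both taint siblings
*/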
/************************************************
*             Release store                     *
************************************************/

/* This function checks that the pointer it is given is the first thing in a
block, and if so, releases that block.

Arguments:
  block       block of store to consider
  pp          pool containing the block
  func        function from which called
  linenumber  line number in source file

Returns: nothing
*/

static void
store_release_3(void * block, pooldesc * pp, const char * func, int linenumber)
{
/* It will never be the first block, so no need to check that. */

for (storeblock * b = pp->chainbase; b; b = b->next)
  {
  storeblock * bb = b->next;
  if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
    {
    int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
    b->next = bb->next;
    pp->nbytes -= siz;
    pool_malloc -= siz;
    pp->nblocks--;

    /* Cut out the debugging stuff for utilities, but stop picky compilers
    from giving warnings. */

#ifndef COMPILE_UTILITY
    DEBUG(D_memory)
      debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
	linenumber, pool_malloc);

    if (f.running_in_test_harness)
      memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
#endif /* COMPILE_UTILITY */

    internal_store_free(bb, func, linenumber);
    return;
    }
  }
}
/************************************************
*             Move store                        *
************************************************/

/* Allocate a new block big enough to expand to the given size and
copy the current data into it. Free the old one if possible.

This function is specifically provided for use when reading very
long strings, e.g. header lines. When the string gets longer than a
complete block, it gets copied to a new block. It is helpful to free
the old block iff the previous copy of the string is at its start,
and therefore the only thing in it. Otherwise, for very long strings,
dead store can pile up somewhat disastrously. This function checks that
the pointer it is given is the first thing in a block, and that nothing
has been allocated since. If so, releases that block.

Arguments:
  oldblock    pointer to the current block of data
  newsize     requested size
  len         current size of data in the block

Returns: new location of data
*/

void *
store_newblock_3(void * oldblock, int newsize, int len,
  const char * func, int linenumber)
{
pooldesc * pp = pool_for_pointer(oldblock, func, linenumber);
BOOL release_ok = !is_tainted(oldblock) && pp->store_last_get == oldblock;	/*XXX why tainted not handled? */
uschar * newblock;

if (len < 0 || len > newsize)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "bad memory extension requested (%d -> %d bytes) at %s %d",
    len, newsize, func, linenumber);

newblock = store_get(newsize, oldblock);
memcpy(newblock, oldblock, len);
if (release_ok) store_release_3(oldblock, pp, func, linenumber);
return (void *)newblock;
}
/*************************************************
*                Malloc store                    *
*************************************************/

/* Running out of store is a total disaster for exim. Some malloc functions
do not run happily on very small sizes, nor do they document this fact. This
function is called via the macro store_malloc().

Arguments:
  size        amount of store wanted
  func        function from which called
  line        line number in source file

Returns: pointer to gotten store (panic on failure)
*/
static void *
internal_store_malloc(size_t size, const char * func, int line)
{
void * yield;

/* Check specifically for a possible result of conversion from
a negative int, to the (unsigned, wider) size_t */

if (size >= INT_MAX/2)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "bad internal_store_malloc request (" SIZE_T_FMT " bytes) from %s %d",
    size, func, line);

size += sizeof(size_t);	/* space to store the size, used under debug */
if (size < 16) size = 16;

if (!(yield = malloc(size)))
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc " SIZE_T_FMT " bytes of memory: "
    "called from line %d in %s", size, line, func);

#ifndef COMPILE_UTILITY
DEBUG(D_any) *(size_t *)yield = size;
#endif
yield = US yield + sizeof(size_t);

if ((nonpool_malloc += size) > max_nonpool_malloc)
  max_nonpool_malloc = nonpool_malloc;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifndef COMPILE_UTILITY
/* If running in test harness, spend time making sure all the new store
is not filled with zeros so as to catch problems. */

if (f.running_in_test_harness)
  memset(yield, 0xF0, size - sizeof(size_t));
DEBUG(D_memory) debug_printf("--Malloc %6p %5lu bytes\t%-20s %4d\tpool %5d nonpool %5d\n",
  yield, size, func, line, pool_malloc, nonpool_malloc);
#endif /* COMPILE_UTILITY */

return yield;
}
void *
store_malloc_3(size_t size, const char *func, int linenumber)
{
if (n_nonpool_blocks++ > max_nonpool_blocks)
  max_nonpool_blocks = n_nonpool_blocks;
return internal_store_malloc(size, func, linenumber);
}
/************************************************
*             Free store                        *
************************************************/

/* This function is called by the macro store_free().

Arguments:
  block       block of store to free
  func        function from which called
  linenumber  line number in source file

Returns: nothing
*/

static void
internal_store_free(void * block, const char * func, int linenumber)
{
uschar * p = US block - sizeof(size_t);
#ifndef COMPILE_UTILITY
DEBUG(D_any) nonpool_malloc -= *(size_t *)p;
DEBUG(D_memory) debug_printf("----Free %6p %5ld bytes\t%-20s %4d\n",
  block, *(size_t *)p, func, linenumber);
#endif
free(p);
}
void
store_free_3(void * block, const char * func, int linenumber)
{
n_nonpool_blocks--;
internal_store_free(block, func, linenumber);
}

/******************************************************************************/
/* Stats output on process exit */
void
store_exit(void)
{
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
 {
 int i;
 debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
   (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
 debug_printf("----Exit npools  max: %3d kB\n", max_pool_malloc/1024);

 for (i = 0; i < N_PAIRED_POOLS; i++)
   {
   pooldesc * pp = paired_pools + i;
   debug_printf("----Exit  pool %2d max: %3d kB in %d blocks at order %u\t%s %s\n",
     i, (pp->maxbytes+1023)/1024, pp->maxblocks, pp->maxorder,
     poolclass[i], pooluse[i]);
   }
 for (quoted_pooldesc * qp = quoted_pools; qp; i++, qp = qp->next)
   {
   pooldesc * pp = &qp->pool;
   const lookup_info * li = lookup_with_acq_num(qp->quoter);
   debug_printf("----Exit  pool Q%d max: %3d kB in %d blocks at order %u\ttainted quoted:%s\n",
     i, (pp->maxbytes+1023)/1024, pp->maxblocks, pp->maxorder,
     li ? li->name : US"???");
   }
 }
#endif
}
/******************************************************************************/
/* Per-message pool management */

static rmark message_reset_point = NULL;

void
message_start(void)
{
int oldpool = store_pool;
store_pool = POOL_MESSAGE;
if (!message_reset_point) message_reset_point = store_mark();
store_pool = oldpool;
}

void
message_tidyup(void)
{
int oldpool;
if (!message_reset_point) return;
oldpool = store_pool;
store_pool = POOL_MESSAGE;
message_reset_point = store_reset(message_reset_point);
store_pool = oldpool;
}
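/* Illustrative sketch (not part of the original file): reception code
brackets each message transaction so that POOL_MESSAGE data (e.g. the
receive-time DKIM results mentioned above) lives for exactly one transaction:

  message_start();
  ... receive and process one message ...
  message_tidyup();      resets POOL_MESSAGE back to the saved mark
*/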
/******************************************************************************/
/* Debug analysis of address */

#ifndef COMPILE_UTILITY
void
debug_print_taint(const void * p)
{
int q = quoter_for_address(p);
if (!is_tainted(p)) return;
debug_printf("(tainted");
if (is_real_quoter(q))
  {
  const lookup_info * li = lookup_with_acq_num(q);
  debug_printf(", quoted:%s", li ? li->name : US"???");
  }
debug_printf(")\n");
}
#endif
/* End of store.c */