1 /*************************************************
2 * Exim - an Internet mail transport agent *
3 *************************************************/
5 /* Copyright (c) The Exim maintainers 2019 - 2024 */
6 /* Copyright (c) University of Cambridge 1995 - 2018 */
7 /* See the file NOTICE for conditions of use and distribution. */
8 /* SPDX-License-Identifier: GPL-2.0-or-later */
10 /* Exim gets and frees all its store through these functions. In the original
11 implementation there was a lot of mallocing and freeing of small bits of store.
12 The philosophy has now changed to a scheme which includes the concept of
13 "stacking pools" of store. For the short-lived processes, there isn't any real
14 need to do any garbage collection, but the stack concept allows quick resetting
15 in places where this seems sensible.
17 Obviously the long-running processes (the daemon, the queue runner, and eximon)
18 must take care not to eat store.
20 The following different types of store are recognized:
22 . Long-lived, large blocks: This is implemented by retaining the original
23 malloc/free functions, and it is used for permanent working buffers and for
24 getting blocks to cut up for the other types.
26 . Long-lived, small blocks: This is used for blocks that have to survive until
27 the process exits. It is implemented as a stacking pool (POOL_PERM). This is
28 functionally the same as store_malloc(), except that the store can't be
29 freed, but I expect it to be more efficient for handling small blocks.
31 . Short-lived, short blocks: Most of the dynamic store falls into this
32 category. It is implemented as a stacking pool (POOL_MAIN) which is reset
33 after accepting a message when multiple messages are received by a single
34 process. Resetting happens at some other times as well, usually fairly
35 locally after some specific processing that needs working store.
37 . There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
38 This means it can be freed when search_tidyup() is called to close down all
39 the lookup caching.
41 - There is another pool (POOL_MESSAGE) used for medium-lifetime objects; within
42 a single message transaction but needed for longer than the use of the main
43 pool permits. Currently this means only receive-time DKIM information.
45 - There is a dedicated pool for configuration data read from the config file(s).
46 Once complete, it is made readonly.
48 - There are pools for each active combination of lookup-quoting, dynamically created.
50 . Orthogonal to the five main pool types, there are two classes of memory: untainted
51 and tainted. The latter is used for values derived from untrusted input, and
52 the string-expansion mechanism refuses to operate on such values (obviously,
53 it can expand an untainted value to return a tainted result). The classes
54 are implemented by duplicating the five pool types. Pool resets are requested
55 against the nontainted sibling and apply to both siblings.
57 Only memory blocks requested for tainted use are regarded as tainted; anything
58 else (including stack auto variables) is untainted. Care is needed when coding
59 to not copy untrusted data into untainted memory, as downstream taint-checks
60 would be avoided.
62 Intermediate layers (eg. the string functions) can test for taint, and use this
63 for ensuring that results have proper state. For example the
64 string_vformat_trc() routine supporting the string_sprintf() interface will
65 recopy a string being built into a tainted allocation if it meets a %s for a
66 tainted argument. Any intermediate-layer function that (can) return a new
67 allocation should behave this way; returning a tainted result if any tainted
68 content is used. Intermediate-layer functions (eg. Ustrncpy) that modify
69 existing allocations fail if tainted data is written into an untainted area.
70 Users of functions that modify existing allocations should check if a tainted
71 source and an untainted destination is used, and fail instead (sprintf() being
72 the classic case). */
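
/* Illustrative sketch (not part of this file's API; "input" and "len" are
hypothetical caller values): the typical caller pattern for the stacking
pools described above, using the public macros from store.h.

     rmark reset_point = store_mark();                  // watermark for both siblings
     uschar * scratch = store_get(1024, GET_UNTAINTED); // untainted working space
     uschar * copy = store_get(len, input);             // inherits the taint of "input"
     ...fill and use the buffers...
     reset_point = store_reset(reset_point);            // winds both pools back to the mark

The cookie passed to store_reset() must come from store_mark(), never from
store_get(). */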
77 /* keep config.h before memcheck.h, for NVALGRIND */
84 /* We need to know how to align blocks of data for general use. I'm not sure
85 how to get an alignment factor in general. In the current world, a value of 8
86 is probably right, and this is sizeof(double) on some systems and sizeof(void
87 *) on others, so take the larger of those. Since everything in this expression
88 is a constant, the compiler should optimize it to a simple constant wherever it
89 appears (I checked that gcc does do this). */
91 #define alignment \
92   (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))
94 /* store_reset() will not free the following block if the last used block has
95 less than this much left in it. */
97 #define STOREPOOL_MIN_SIZE 256
99 /* Structure describing the beginning of each big block. */
101 typedef struct storeblock {
102   struct storeblock *next;
103   size_t length;
104 } storeblock;
106 /* Pool descriptor struct */
108 typedef struct pooldesc {
109 storeblock * chainbase; /* list of blocks in pool */
110 storeblock * current_block; /* top block, still with free space */
111 void * next_yield; /* next allocation point */
112 int yield_length; /* remaining space in current block */
113 unsigned store_block_order; /* log2(size) block allocation size */
115 /* This variable is set by store_get() to its yield, and by store_reset() to
116 NULL. This enables string_cat() to optimize its store handling for very long
117 strings. That's why the variable is kept in the pool descriptor. */
119 void * store_last_get;
121 /* These are purely for stats-gathering */
123   int nbytes;                 /* current bytes allocated */
124   int maxbytes;               /* max number reached */
125   int nblocks;                /* current number of blocks allocated */
126   int maxblocks;
127   unsigned maxorder;
128 } pooldesc;
130 /* Enhanced pool descriptor for quoted pools */
132 typedef struct quoted_pooldesc {
133   pooldesc pool;
134   unsigned quoter;
135   const uschar * quoter_name;
136   struct quoted_pooldesc * next;
137 } quoted_pooldesc;
139 /* Just in case we find ourselves on a system where the structure above has a
140 length that is not a multiple of the alignment, set up a macro for the padded
141 length. */
143 #define ALIGNED_SIZEOF_STOREBLOCK \
144 (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
146 /* Size of block to get from malloc to carve up into smaller ones. This
147 must be a multiple of the alignment. We assume that 4096 is going to be
148 suitably aligned. Double the size per-pool for every malloc, to mitigate
149 certain denial-of-service attacks. Don't bother to decrease on block frees.
150 We waste on average half the current alloc size per pool. This could be several
151 hundred kB now, vs. 4kB with a constant-size block size. But the search time
152 for is_tainted(), linear in the number of blocks for the pool, is O(log n) in
153 the total allocation rather than O(n).
154 A test of 2000 RCPTs and just accept ACL had 370kB in 21 blocks before,
155 504kB in 6 blocks now, for the untainted-main (largest) pool.
156 Builds for restricted-memory systems can disable the expansion by
157 defining RESTRICTED_MEMORY */
158 /*XXX should we allow any for malloc's own overhead? But how much? */
160 /* #define RESTRICTED_MEMORY */
161 #define STORE_BLOCK_SIZE(order) ((1U << (order)) - ALIGNED_SIZEOF_STOREBLOCK)
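
/* Worked example (illustrative, assuming 8-byte alignment and a two-field
storeblock header, so ALIGNED_SIZEOF_STOREBLOCK == 16 on a 64-bit build): a
pool at the initial order of 12 mallocs 4096 bytes, of which
STORE_BLOCK_SIZE(12) == 4080 are carved up for yields; after one doubling,
order 13 mallocs 8192 bytes with 8176 usable. The malloc sizes stay exact
powers of two, which is what is_pwr2_size() later relies on to recognise a
standard (non-inflated) block. */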
163 /* Variables holding data for the local pools of store. The current pool number
164 is held in store_pool, which is global so that it can be changed from outside.
165 Setting the initial length values to -1 forces a malloc for the first call,
166 even if the length is zero (which is used for getting a point to reset to). */
168 int store_pool = POOL_MAIN;
170 pooldesc paired_pools[N_PAIRED_POOLS];
171 quoted_pooldesc * quoted_pools = NULL;
173 static int n_nonpool_blocks; /* current number of direct store_malloc() blocks */
174 static int max_nonpool_blocks;
175 static int max_pool_malloc; /* max value for pool_malloc */
176 static int max_nonpool_malloc; /* max value for nonpool_malloc */
178 /* pool_malloc holds the amount of memory used by the store pools; this goes up
179 and down as store is reset or released. nonpool_malloc is the total got by
180 malloc from other calls; this doesn't go down because it is just freed by
181 pointer.
183 static int pool_malloc;
184 static int nonpool_malloc;
187 #ifndef COMPILE_UTILITY
188 static const uschar * pooluse[N_PAIRED_POOLS] = {
189 [POOL_MAIN] = US"main",
190 [POOL_PERM] = US"perm",
191 [POOL_CONFIG] = US"config",
192 [POOL_SEARCH] = US"search",
193 [POOL_MESSAGE] = US"message",
194 [POOL_TAINT_MAIN] = US"main",
195 [POOL_TAINT_PERM] = US"perm",
196 [POOL_TAINT_CONFIG] = US"config",
197 [POOL_TAINT_SEARCH] = US"search",
198 [POOL_TAINT_MESSAGE] = US"message",
200 static const uschar * poolclass[N_PAIRED_POOLS] = {
201 [POOL_MAIN] = US"untainted",
202 [POOL_PERM] = US"untainted",
203 [POOL_CONFIG] = US"untainted",
204 [POOL_SEARCH] = US"untainted",
205 [POOL_MESSAGE] = US"untainted",
206 [POOL_TAINT_MAIN] = US"tainted",
207 [POOL_TAINT_PERM] = US"tainted",
208 [POOL_TAINT_CONFIG] = US"tainted",
209 [POOL_TAINT_SEARCH] = US"tainted",
210 [POOL_TAINT_MESSAGE] = US"tainted",
215 static void * internal_store_malloc(size_t, const char *, int);
216 static void internal_store_free(void *, const char *, int linenumber);
218 /******************************************************************************/
221 pool_init(pooldesc * pp)
223 memset(pp, 0, sizeof(*pp));
224 pp->yield_length = -1;
225 pp->store_block_order = 12; /* log2(allocation_size) ie. 4kB */
228 /* Initialisation, for things fragile with parameter changes when using
229 static initialisers. */
234 for (pooldesc * pp = paired_pools; pp < paired_pools + N_PAIRED_POOLS; pp++)
238 /******************************************************************************/
239 /* Locating elements given memory pointer */
242 is_pointer_in_block(const storeblock * b, const void * p)
244 uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
245 return US p >= bc && US p < bc + b->length;
249 pool_current_for_pointer(const void * p)
253 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
254 if ((b = qp->pool.current_block) && is_pointer_in_block(b, p))
257 for (pooldesc * pp = paired_pools; pp < paired_pools + N_PAIRED_POOLS; pp++)
258 if ((b = pp->current_block) && is_pointer_in_block(b, p))
264 pool_for_pointer(const void * p, const char * func, int linenumber)
269 if ((pp = pool_current_for_pointer(p))) return pp;
271 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
272 for (b = qp->pool.chainbase; b; b = b->next)
273 if (is_pointer_in_block(b, p)) return &qp->pool;
275 for (pp = paired_pools; pp < paired_pools + N_PAIRED_POOLS; pp++)
276 for (b = pp->chainbase; b; b = b->next)
277 if (is_pointer_in_block(b, p)) return pp;
279 #ifndef COMPILE_UTILITY
282 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
283 "bad memory reference; pool not found, at %s %d", func, linenumber);
287 /******************************************************************************/
288 /* Test if a pointer refers to tainted memory.
290 Slower version check, for use when platform intermixes malloc and mmap area
291 addresses. Test against the current-block of all tainted pools first, then all
292 blocks of all tainted pools.
294 Return: TRUE iff tainted
298 is_tainted_fn(const void * p)
302 if (p == GET_UNTAINTED) return FALSE;
303 if (p == GET_TAINTED) return TRUE;
305 for (pooldesc * pp = paired_pools + POOL_TAINT_BASE;
306 pp < paired_pools + N_PAIRED_POOLS; pp++)
307 if ((b = pp->current_block))
308 if (is_pointer_in_block(b, p)) return TRUE;
310 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
311 if ((b = qp->pool.current_block))
312 if (is_pointer_in_block(b, p)) return TRUE;
314 for (pooldesc * pp = paired_pools + POOL_TAINT_BASE;
315 pp < paired_pools + N_PAIRED_POOLS; pp++)
316 for (b = pp->chainbase; b; b = b->next)
317 if (is_pointer_in_block(b, p)) return TRUE;
319 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
320 for (b = qp->pool.chainbase; b; b = b->next)
321 if (is_pointer_in_block(b, p)) return TRUE;
328 die_tainted(const uschar * msg, const uschar * func, int line)
330 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
331   msg, func, line);
335 #ifndef COMPILE_UTILITY
336 /* Return the pool for the given quoter, or null */
339 pool_for_quoter(unsigned quoter, const uschar ** namep)
341 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
342 if (qp->quoter == quoter)
344 if (namep) *namep = qp->quoter_name;
350 /* Allocate/init a new quoted-pool and return the pool */
353 quoted_pool_new(unsigned quoter, const uschar * quoter_name)
355 quoted_pooldesc * qp = store_get_perm(sizeof(quoted_pooldesc), GET_UNTAINTED);
357 pool_init(&qp->pool);
358 qp->quoter = quoter;
359 qp->quoter_name = quoter_name;
360 qp->next = quoted_pools;
361 quoted_pools = qp;
362 return &qp->pool;
367 /******************************************************************************/
369 store_writeprotect(int pool)
371 #if !defined(COMPILE_UTILITY) && !defined(MISSING_POSIX_MEMALIGN)
372 for (storeblock * b = paired_pools[pool].chainbase; b; b = b->next)
373 if (mprotect(b, ALIGNED_SIZEOF_STOREBLOCK + b->length, PROT_READ) != 0)
374 DEBUG(D_any) debug_printf("config block mprotect: (%d) %s\n", errno, strerror(errno));
378 /******************************************************************************/
381 pool_get(pooldesc * pp, int size, BOOL align_mem, const char * func, int linenumber)
383 /* Ensure we've been asked to allocate memory.
384 A negative size is a sign of a security problem.
385 A zero size might also be suspect, but our internal usage deliberately
386 does this to return a current watermark value for a later release of
387 allocated store. */
389 if (size < 0 || size >= INT_MAX/2)
390 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
391 "bad memory allocation requested (%d bytes) from %s %d",
392 size, func, linenumber);
394 /* Round up the size to a multiple of the alignment. Although this looks a
395 messy statement, because "alignment" is a constant expression, the compiler can
396 do a reasonable job of optimizing, especially if the value of "alignment" is a
397 power of two. I checked this with -O2, and gcc did very well, compiling it to 4
398 instructions on a Sparc (alignment = 8). */
400 if (size % alignment != 0) size += alignment - (size % alignment);
402 /* If there isn't room in the current block, get a new one. The minimum
403 size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
404 these functions are mostly called for small amounts of store. */
406 if (size > pp->yield_length)
409 STORE_BLOCK_SIZE(pp->store_block_order) - ALIGNED_SIZEOF_STOREBLOCK,
411 int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
412 storeblock * newblock;
414 /* Sometimes store_reset() may leave a block for us; check if we can use it */
416 if ( (newblock = pp->current_block)
417 && (newblock = newblock->next)
418 && newblock->length < length
421 /* Give up on this block, because it's too small */
423 internal_store_free(newblock, func, linenumber);
427 /* If there was no free block, get a new one */
431 if ((pp->nbytes += mlength) > pp->maxbytes)
432 pp->maxbytes = pp->nbytes;
433 if ((pool_malloc += mlength) > max_pool_malloc) /* Used in pools */
434 max_pool_malloc = pool_malloc;
435 nonpool_malloc -= mlength; /* Exclude from overall total */
436 if (++pp->nblocks > pp->maxblocks)
437 pp->maxblocks = pp->nblocks;
439 #ifndef MISSING_POSIX_MEMALIGN
442 long pgsize = sysconf(_SC_PAGESIZE);
443 int err = posix_memalign((void **)&newblock,
444 pgsize, (mlength + pgsize - 1) & ~(pgsize - 1));
445 if (err)
446   log_write(0, LOG_MAIN|LOG_PANIC_DIE,
447     "failed to alloc (using posix_memalign) %d bytes of memory: '%s' "
448     "called from line %d in %s",
449 size, strerror(err), linenumber, func);
453 newblock = internal_store_malloc(mlength, func, linenumber);
454 newblock->next = NULL;
455 newblock->length = length;
456 #ifndef RESTRICTED_MEMORY
457 if (pp->store_block_order++ > pp->maxorder)
458 pp->maxorder = pp->store_block_order;
462 pp->chainbase = newblock;
464 pp->current_block->next = newblock;
467 pp->current_block = newblock;
468 pp->yield_length = newblock->length;
469 pp->next_yield =
470   (void *)(CS pp->current_block + ALIGNED_SIZEOF_STOREBLOCK);
471 (void) VALGRIND_MAKE_MEM_NOACCESS(pp->next_yield, pp->yield_length);
474 /* There's (now) enough room in the current block; the yield is the next
477 pp->store_last_get = pp->next_yield;
479 (void) VALGRIND_MAKE_MEM_UNDEFINED(pp->store_last_get, size);
480 /* Update next pointer and number of bytes left in the current block. */
482 pp->next_yield = (void *)(CS pp->next_yield + size);
483 pp->yield_length -= size;
484 return pp->store_last_get;
487 /*************************************************
488 * Get a block from the current pool *
489 *************************************************/
491 /* Running out of store is a total disaster. This function is called via the
492 macro store_get(). The current store_pool is used, adjusting for taint.
493 If the prototype is quoted, use a quoted-pool.
494 Return a block of store within the current big block of the pool, getting a new
495 one if necessary. The address is saved in store_last_get for the pool.
498 size amount wanted, bytes
499 proto_mem class: get store conformant to this
500 Special values: 0 forces untainted, 1 forces tainted
501 func function from which called
502 linenumber line number in source file
504 Returns: pointer to store (panic on malloc failure)
505 */
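
/* Example (illustrative): callers normally go through the store_get() macro,
which supplies func/linenumber. The proto_mem argument is only a taint
prototype; its contents are never read, only its address is classified.

     uschar * ubuf = store_get(64, GET_UNTAINTED);   // always untainted
     uschar * tbuf = store_get(64, sender_address);  // tainted iff sender_address is
*/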
508 store_get_3(int size, const void * proto_mem, const char * func, int linenumber)
510 #ifndef COMPILE_UTILITY
511 const uschar * quoter_name;
512 int quoter = quoter_for_address(proto_mem, &quoter_name);
513 #endif
514 pooldesc * pp;
515 void * yield;
517 #ifndef COMPILE_UTILITY
521 BOOL tainted = is_tainted(proto_mem);
522 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
523 pp = paired_pools + pool;
524 yield = pool_get(pp, size, (pool == POOL_CONFIG), func, linenumber);
526 /* Cut out the debugging stuff for utilities, but stop picky compilers from
527 giving warnings. */
529 #ifndef COMPILE_UTILITY
531 debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
532 pp->store_last_get, size, func, linenumber);
535 #ifndef COMPILE_UTILITY
539 debug_printf("allocating quoted-block for quoter %u (from %s %d)\n",
540 quoter, func, linenumber);
541 if (!(pp = pool_for_quoter(quoter, NULL)))
542 pp = quoted_pool_new(quoter, quoter_name);
543 yield = pool_get(pp, size, FALSE, func, linenumber);
545 debug_printf("---QQ Get %6p %5d %-14s %4d\n",
546 pp->store_last_get, size, func, linenumber);
554 /*************************************************
555 * Get a block from the PERM pool *
556 *************************************************/
558 /* This is just a convenience function, useful when just a single block is to
559 be obtained.
560
561 Arguments:
562   size        amount wanted, bytes
563 proto_mem class: get store conformant to this
564 func function from which called
565 linenumber line number in source file
567 Returns: pointer to store (panic on malloc failure)
568 */
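
/* Example (illustrative): a small allocation that must survive any
store_reset(), regardless of the pool currently selected by store_pool:

     uschar * keep = store_get_perm(40, GET_UNTAINTED);
*/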
571 store_get_perm_3(int size, const void * proto_mem, const char * func, int linenumber)
573 void * yield;
574 int old_pool = store_pool;
575 store_pool = POOL_PERM;
576 yield = store_get_3(size, proto_mem, func, linenumber);
577 store_pool = old_pool;
578 return yield;
582 #ifndef COMPILE_UTILITY
583 /*************************************************
584 * Get a block annotated as being lookup-quoted *
585 *************************************************/
587 /* Allocate from a pool consistent with the proto_mem augmented by the
588 requested quoter type.
590 XXX currently not handling mark/release
592 Args: size number of bytes to allocate
593 quoter id for the quoting type
594 func caller, for debug
595 linenumber caller, for debug
597 Return: allocated memory block
601 store_force_get_quoted(int size, unsigned quoter, const uschar * quoter_name,
602 const char * func, int linenumber)
604 pooldesc * pp = pool_for_quoter(quoter, NULL);
605 void * yield;
608 debug_printf("allocating quoted-block for quoter %u (from %s %d)\n", quoter, func, linenumber);
610 if (!pp) pp = quoted_pool_new(quoter, quoter_name);
611 yield = pool_get(pp, size, FALSE, func, linenumber);
614 debug_printf("---QQ Get %6p %5d %-14s %4d\n",
615     pp->store_last_get, size, func, linenumber);
616 return yield;
620 /* Maybe get memory for the specified quoter, but only if the
621 prototype memory is tainted. Otherwise, get plain memory.
624 store_get_quoted_3(int size, const void * proto_mem, unsigned quoter,
625 const uschar * quoter_name, const char * func, int linenumber)
627 return is_tainted(proto_mem)
628 ? store_force_get_quoted(size, quoter, quoter_name, func, linenumber)
629 : store_get_3(size, proto_mem, func, linenumber);
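
/* Example (illustrative): a lookup's quoting function might copy a tainted
string "s" into memory annotated with its quoter id "idx" and registered
name "name", assuming the corresponding store_get_quoted() wrapper macro:

     uschar * q = store_get_quoted(Ustrlen(s) + 1, s, idx, name);

Were "s" untainted, plain unquoted memory would be returned instead. */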
632 /* Return quoter for given address, or -1 if not in a quoted-pool. */
634 quoter_for_address(const void * p, const uschar ** namep)
636 const quoted_pooldesc * qp;
637 for (qp = quoted_pools; qp; qp = qp->next)
639   const pooldesc * pp = &qp->pool;
640   storeblock * b;
642   if ((b = pp->current_block))
643 if (is_pointer_in_block(b, p))
646 for (b = pp->chainbase; b; b = b->next)
647 if (is_pointer_in_block(b, p))
650 if (namep) *namep = NULL;
654 if (namep) *namep = qp->quoter_name;
658 /* Return TRUE iff the given address is quoted for the given type.
659 There is extra complexity to handle lookup providers with multiple
660 find variants but shared quote functions. */
662 is_quoted_like(const void * p, unsigned quoter)
664 const uschar * p_name, * q_name;
665 const lookup_info * p_li, * q_li;
666 void * p_qfn, * q_qfn;
668 (void) quoter_for_address(p, &p_name);
669 (void) pool_for_quoter(quoter, &q_name);
671 if (!p_name || !q_name) return FALSE;
673 p_li = search_findtype(p_name, Ustrlen(p_name));
674 p_qfn = p_li ? p_li->quote : NULL;
675 q_li = search_findtype(q_name, Ustrlen(q_name));
676 q_qfn = q_li ? q_li->quote : NULL;
678 BOOL y = p_qfn == q_qfn;
680 /* debug_printf("is_quoted(%p, %u): %c\n", p, quoter, y?'T':'F'); */
684 /* Return TRUE if the quoter value indicates an actual quoter */
686 is_real_quoter(int quoter)
687 {
688 return quoter >= 0;
689 }
692 /* Return TRUE if the "new" data requires that the "old" data
693 be recopied to new-class memory. We order the classes as
695 2: tainted, not quoted
696 1: quoted (which is also tainted)
697 0: untainted
699 If the "new" is higher-order than the "old", they are not compatible
700 and a copy is needed. If both are quoted, but the quoters differ,
701 not compatible. Otherwise they are compatible.
704 is_incompatible_fn(const void * old, const void * new)
705 {
706 int ni, oi, nq, oq;
709 ni = is_real_quoter(nq = quoter_for_address(new, NULL)) ? 1 : is_tainted(new) ? 2 : 0;
710 oi = is_real_quoter(oq = quoter_for_address(old, NULL)) ? 1 : is_tainted(old) ? 2 : 0;
711 return ni > oi || (ni == oi && nq != oq);
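
/* Worked cases (illustrative): appending tainted data (class 2) to an
untainted string (class 0) gives ni > oi, so the old string must be recopied
into tainted memory; appending untainted data to a tainted string (ni=0,
oi=2) needs no copy; two quoted values (both class 1) are compatible only
when their quoters match. */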
714 #endif /*!COMPILE_UTILITY*/
716 /*************************************************
717 * Extend a block if it is at the top *
718 *************************************************/
720 /* While reading strings of unknown length, it is often the case that the
721 string is being read into the block at the top of the stack. If it needs to be
722 extended, it is more efficient just to extend within the top block rather than
723 allocate a new block and then have to copy the data. This function is provided
724 for the use of string_cat(), but of course can be used elsewhere too.
725 The block itself is not expanded; only the top allocation from it.
728 ptr pointer to store block
729 oldsize current size of the block, as requested by user
730 newsize new size required
731 func function from which called
732 linenumber line number in source file
734 Returns: TRUE if the block is at the top of the stack and has been
735 extended; FALSE if it isn't at the top of the stack, or cannot
736 be extended
738 XXX needs extension for quoted-tracking. This assumes that the global store_pool
739 is the one to alloc from, which breaks with separated pools.
740 */
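
/* Illustrative growth pattern, as used when building strings incrementally
(a sketch; "s" must still be the newest allocation in its pool):

     int oldsize = 64, newsize = 128;
     uschar * s = store_get(oldsize, GET_UNTAINTED);
     if (!store_extend(s, oldsize, newsize))      // grow in place if s is on top
       s = store_newblock(s, newsize, oldsize);   // otherwise copy to a new block
*/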
743 store_extend_3(void * ptr, int oldsize, int newsize,
744 const char * func, int linenumber)
746 pooldesc * pp = pool_for_pointer(ptr, func, linenumber);
747 int inc = newsize - oldsize;
748 int rounded_oldsize = oldsize;
750 if (oldsize < 0 || newsize < oldsize || newsize >= INT_MAX/2)
751 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
752 "bad memory extension requested (%d -> %d bytes) at %s %d",
753 oldsize, newsize, func, linenumber);
755 if (rounded_oldsize % alignment != 0)
756 rounded_oldsize += alignment - (rounded_oldsize % alignment);
758 if (CS ptr + rounded_oldsize != CS (pp->next_yield) ||
759     inc > pp->yield_length + rounded_oldsize - oldsize)
760   return FALSE;
762 /* Cut out the debugging stuff for utilities, but stop picky compilers from
763 giving warnings. */
765 #ifndef COMPILE_UTILITY
768 quoted_pooldesc * qp;
769 for (qp = quoted_pools; qp; qp = qp->next)
772 debug_printf("---Q%d Ext %6p %5d %-14s %4d\n",
773 (int)(qp - quoted_pools),
774 ptr, newsize, func, linenumber);
778 debug_printf("---%d Ext %6p %5d %-14s %4d\n",
779 (int)(pp - paired_pools),
780 ptr, newsize, func, linenumber);
782 #endif /* COMPILE_UTILITY */
784 if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
785 pp->next_yield = CS ptr + newsize;
786 pp->yield_length -= newsize - rounded_oldsize;
787 (void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
788 return TRUE;
794 static BOOL
795 is_pwr2_size(int len)
796 {
797 int x = len;
798 return (x & (x - 1)) == 0;
799 }
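
/* Examples: 8192 (an order-13 block) yields TRUE; 12288 has two bits set
and yields FALSE, marking it as a custom inflated allocation. */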
802 /*************************************************
803 * Back up to a previous point on the stack *
804 *************************************************/
806 /* This function resets the next pointer, freeing any subsequent whole blocks
807 that are now unused. Call with a cookie obtained from store_mark() only; do
808 not call with a pointer returned by store_get(). Both the untainted and tainted
809 pools corresponding to store_pool are reset.
811 Quoted pools are not handled.
814 ptr place to back up to
815 pool pool holding the pointer
816 func function from which called
817 linenumber line number in source file
823 internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
826 pooldesc * pp = paired_pools + pool;
827 storeblock * b = pp->current_block;
828 char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
829 int newlength, count;
830 #ifndef COMPILE_UTILITY
831 int oldmalloc = pool_malloc;
834 if (!b) return; /* exim_dumpdb gets this, because it has never used tainted mem */
836 /* Last store operation was not a get */
838 pp->store_last_get = NULL;
840 /* See if the place is in the current block - as it often will be. Otherwise,
841 search for the block in which it lies. */
843 if (CS ptr < bc || CS ptr > bc + b->length)
845 for (b = pp->chainbase; b; b = b->next)
847 bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
848 if (CS ptr >= bc && CS ptr <= bc + b->length) break;
850 if (!b)
851   log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
852 "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
855 /* Back up, rounding to the alignment if necessary. When testing, flatten
856 the released memory. */
858 newlength = bc + b->length - CS ptr;
859 #ifndef COMPILE_UTILITY
862 assert_no_variables(ptr, newlength, func, linenumber);
863 if (f.running_in_test_harness)
865 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
866 memset(ptr, 0xF0, newlength);
870 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
871 pp->next_yield = CS ptr + (newlength % alignment);
872 count = pp->yield_length;
873 count = (pp->yield_length = newlength - (newlength % alignment)) - count;
874 pp->current_block = b;
876 /* Free any subsequent block. Do NOT free the first
877 successor, if our current block has less than 256 bytes left. This should
878 prevent us from flapping memory. However, keep this block only when it has
879 a power-of-two size, so it probably is not a custom-inflated one. */
881 if (  pp->yield_length < STOREPOOL_MIN_SIZE
882    && b->next
883    && is_pwr2_size(b->next->length + ALIGNED_SIZEOF_STOREBLOCK))
886 #ifndef COMPILE_UTILITY
888     assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
889       func, linenumber);
891 (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
892 b->length - ALIGNED_SIZEOF_STOREBLOCK);
896 if (pool != POOL_CONFIG)
901 int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
903 #ifndef COMPILE_UTILITY
905     assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
906       func, linenumber);
912 if (pool != POOL_CONFIG)
913 internal_store_free(b, func, linenumber);
915 #ifndef RESTRICTED_MEMORY
916 if (pp->store_block_order > 13) pp->store_block_order--;
920 /* Cut out the debugging stuff for utilities, but stop picky compilers from
921 giving warnings. */
923 #ifndef COMPILE_UTILITY
925 debug_printf("---%d Rst %6p %5d %-14s %4d\tpool %d\n", pool, ptr,
926 count + oldmalloc - pool_malloc,
927 func, linenumber, pool_malloc);
928 #endif /* COMPILE_UTILITY */
932 /* Back up the pool pair, untainted and tainted, of the store_pool setting.
933 Quoted pools are not handled.
937 store_reset_3(rmark r, const char * func, int linenumber)
938 {
939 void ** ptr = r;
941 if (store_pool >= POOL_TAINT_BASE)
942 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
943 "store_reset called for pool %d: %s %d\n", store_pool, func, linenumber);
944 if (!r)
945   log_write(0, LOG_MAIN|LOG_PANIC_DIE,
946 "store_reset called with bad mark: %s %d\n", func, linenumber);
948 internal_store_reset(*ptr, store_pool + POOL_TAINT_BASE, func, linenumber);
949 internal_store_reset(ptr, store_pool, func, linenumber);
950 return NULL;
956 /* Free tail-end unused allocation. This lets us allocate a big chunk
957 early, for cases when we only discover later how much was really needed.
959 Can be called with a value from store_get(), or an offset after such. Only
960 the tainted or untainted pool that serviced the store_get() will be affected.
962 This is mostly a cut-down version of internal_store_reset().
963 XXX needs rationalising
967 store_release_above_3(void * ptr, const char * func, int linenumber)
968 {
969 pooldesc * pp;
971 /* Search all pools' "current" blocks. If it isn't one of those,
972 ignore it (it usually will be). */
974 if ((pp = pool_current_for_pointer(ptr)))
976 storeblock * b = pp->current_block;
977 int count, newlength;
979 /* Last store operation was not a get */
981 pp->store_last_get = NULL;
983 /* Back up, rounding to the alignment if necessary. When testing, flatten
984 the released memory. */
986 newlength = (CS b + ALIGNED_SIZEOF_STOREBLOCK) + b->length - CS ptr;
987 #ifndef COMPILE_UTILITY
990 assert_no_variables(ptr, newlength, func, linenumber);
991 if (f.running_in_test_harness)
993 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
994 memset(ptr, 0xF0, newlength);
998 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
999 pp->next_yield = CS ptr + (newlength % alignment);
1000 count = pp->yield_length;
1001 count = (pp->yield_length = newlength - (newlength % alignment)) - count;
1003 /* Cut out the debugging stuff for utilities, but stop picky compilers from
1004 giving warnings. */
1006 #ifndef COMPILE_UTILITY
1009 quoted_pooldesc * qp;
1010 for (qp = quoted_pools; qp; qp = qp->next)
1011 if (pp == &qp->pool)
1012 debug_printf("---Q%d Rel %6p %5d %-14s %4d\tpool %d\n",
1013 (int)(qp - quoted_pools),
1014 ptr, count, func, linenumber, pool_malloc);
1016 debug_printf("---%d Rel %6p %5d %-14s %4d\tpool %d\n",
1017 (int)(pp - paired_pools), ptr, count,
1018 func, linenumber, pool_malloc);
1023 #ifndef COMPILE_UTILITY
1025 debug_printf("non-last memory release try: %s %d\n", func, linenumber);
1032 store_mark_3(const char * func, int linenumber)
1033 {
1034 void ** p;
1036 #ifndef COMPILE_UTILITY
1038 debug_printf("---%d Mrk %-14s %4d\tpool %d\n",
1039 store_pool, func, linenumber, pool_malloc);
1040 #endif /* COMPILE_UTILITY */
1042 if (store_pool >= POOL_TAINT_BASE)
1043 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
1044 "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);
1046 /* Stash a mark for the tainted-twin release, in the untainted twin. Return
1047 a cookie (actually the address in the untainted pool) to the caller.
1048 Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
1049 and winds back the untainted pool with the cookie. */
1051 p = store_get_3(sizeof(void *), GET_UNTAINTED, func, linenumber);
1052 *p = store_get_3(0, GET_TAINTED, func, linenumber);
1053 return p;
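
/* Illustrative picture of the cookie mechanics (a sketch):

     untainted pool:  ...earlier data... [ p: one void* cell ]   <- returned cookie
     tainted pool:    ...earlier data... [ t ]                   <- *p == t

store_reset(p) first winds the tainted sibling back to *p, then winds the
untainted sibling back to p itself, releasing the cell too. */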
1059 /************************************************
1060 *             Release store                     *
1061 ************************************************/
1063 /* This function checks that the pointer it is given is the first thing in a
1064 block, and if so, releases that block.
1067 block block of store to consider
1068 pp pool containing the block
1069 func function from which called
1070 linenumber line number in source file
1076 store_release_3(void * block, pooldesc * pp, const char * func, int linenumber)
1078 /* It will never be the first block, so no need to check that. */
1080 for (storeblock * b = pp->chainbase; b; b = b->next)
1082 storeblock * bb = b->next;
1083 if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
1085 int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
1091 /* Cut out the debugging stuff for utilities, but stop picky compilers
1092 from giving warnings. */
1094 #ifndef COMPILE_UTILITY
1096 debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
1097 linenumber, pool_malloc);
1099 if (f.running_in_test_harness)
1100 memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
1101 #endif /* COMPILE_UTILITY */
1103 internal_store_free(bb, func, linenumber);
1110 /************************************************
1111 *               Move store                      *
1112 ************************************************/
1114 /* Allocate a new block big enough to expand to the given size and
1115 copy the current data into it. Free the old one if possible.
1117 This function is specifically provided for use when reading very
1118 long strings, e.g. header lines. When the string gets longer than a
1119 complete block, it gets copied to a new block. It is helpful to free
1120 the old block iff the previous copy of the string is at its start,
1121 and therefore the only thing in it. Otherwise, for very long strings,
1122 dead store can pile up somewhat disastrously. This function checks that
1123 the pointer it is given is the first thing in a block, and that nothing
1124 has been allocated since. If so, releases that block.
1127   oldblock    the current allocation
1128   newsize     requested size
1129   len         number of data bytes to copy across
1130
1131 Returns: new location of data
1135 store_newblock_3(void * oldblock, int newsize, int len,
1136 const char * func, int linenumber)
1138 pooldesc * pp = pool_for_pointer(oldblock, func, linenumber);
1139 BOOL release_ok = !is_tainted(oldblock) && pp->store_last_get == oldblock; /*XXX why tainted not handled? */
1140 uschar * newblock;
1142 if (len < 0 || len > newsize)
1143 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
1144 "bad memory extension requested (%d -> %d bytes) at %s %d",
1145 len, newsize, func, linenumber);
1147 newblock = store_get(newsize, oldblock);
1148 memcpy(newblock, oldblock, len);
1149 if (release_ok) store_release_3(oldblock, pp, func, linenumber);
1150 return (void *)newblock;
1156 /*************************************************
1157 *                Malloc store                    *
1158 *************************************************/
1160 /* Running out of store is a total disaster for exim. Some malloc functions
1161 do not run happily on very small sizes, nor do they document this fact. This
1162 function is called via the macro store_malloc().
1165 size amount of store wanted
1166 func function from which called
1167 line line number in source file
1169 Returns: pointer to gotten store (panic on failure)
1173 internal_store_malloc(size_t size, const char * func, int line)
1177 /* Check specifically for a possible result of conversion from
1178 a negative int, to the (unsigned, wider) size_t */
1180 if (size >= INT_MAX/2)
1181 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
1182 "bad internal_store_malloc request (" SIZE_T_FMT " bytes) from %s %d",
1185 size += sizeof(size_t); /* space to store the size, used under debug */
1186 if (size < 16) size = 16;
1188 if (!(yield = malloc(size)))
1189 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc " SIZE_T_FMT " bytes of memory: "
1190 "called from line %d in %s", size, line, func);
1192 #ifndef COMPILE_UTILITY
1193 DEBUG(D_any) *(size_t *)yield = size;
1194 #endif
1195 yield = US yield + sizeof(size_t);
1197 if ((nonpool_malloc += size) > max_nonpool_malloc)
1198 max_nonpool_malloc = nonpool_malloc;
1200 /* Cut out the debugging stuff for utilities, but stop picky compilers from
1201 giving warnings. */
1203 #ifndef COMPILE_UTILITY
1204 /* If running in test harness, spend time making sure all the new store
1205 is not filled with zeros so as to catch problems. */
1207 if (f.running_in_test_harness)
1208 memset(yield, 0xF0, size - sizeof(size_t));
1209 DEBUG(D_memory) debug_printf("--Malloc %6p %5lu bytes\t%-20s %4d\tpool %5d nonpool %5d\n",
1210 yield, size, func, line, pool_malloc, nonpool_malloc);
1211 #endif /* COMPILE_UTILITY */
1217 store_malloc_3(size_t size, const char *func, int linenumber)
1219 if (n_nonpool_blocks++ > max_nonpool_blocks)
1220 max_nonpool_blocks = n_nonpool_blocks;
1221 return internal_store_malloc(size, func, linenumber);
1225 /************************************************
1226 *                Free store                     *
1227 ************************************************/
1229 /* This function is called by the macro store_free().
1232 block block of store to free
1233 func function from which called
1234 linenumber line number in source file
1240 internal_store_free(void * block, const char * func, int linenumber)
1242 uschar * p = US block - sizeof(size_t);
1243 #ifndef COMPILE_UTILITY
1244 DEBUG(D_any) nonpool_malloc -= *(size_t *)p;
1245 DEBUG(D_memory) debug_printf("----Free %6p %5ld bytes\t%-20s %4d\n",
1246   block, *(size_t *)p, func, linenumber);
1247 #endif
1248 free(p);
1252 store_free_3(void * block, const char * func, int linenumber)
1254 n_nonpool_blocks--;
1255 internal_store_free(block, func, linenumber);
1258 /******************************************************************************/
1259 /* Stats output on process exit */
1260 void
1261 store_exit(void)
1262 {
1263 #ifndef COMPILE_UTILITY
1266 int i;
1267 debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
1268 (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
1269 debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
1271 for (i = 0; i < N_PAIRED_POOLS; i++)
1273 pooldesc * pp = paired_pools + i;
1274 debug_printf("----Exit pool %2d max: %3d kB in %d blocks at order %u\t%s %s\n",
1275 i, (pp->maxbytes+1023)/1024, pp->maxblocks, pp->maxorder,
1276 poolclass[i], pooluse[i]);
1279 for (quoted_pooldesc * qp = quoted_pools; qp; i++, qp = qp->next)
1281 pooldesc * pp = &qp->pool;
1282 debug_printf("----Exit pool Q%d max: %3d kB in %d blocks at order %u\ttainted quoted:%s\n",
1283 i, (pp->maxbytes+1023)/1024, pp->maxblocks, pp->maxorder,
1291 /******************************************************************************/
1292 /* Per-message pool management */
1294 static rmark message_reset_point = NULL;
1296 void
1297 message_start(void)
1298 {
1299 int oldpool = store_pool;
1300 store_pool = POOL_MESSAGE;
1301 if (!message_reset_point) message_reset_point = store_mark();
1302 store_pool = oldpool;
1305 void
1306 message_tidyup(void)
1307 {
1308 int oldpool;
1309 if (!message_reset_point) return;
1310 oldpool = store_pool;
1311 store_pool = POOL_MESSAGE;
1312 message_reset_point = store_reset(message_reset_point);
1313 store_pool = oldpool;
1316 /******************************************************************************/
1317 /* Debug analysis of address */
1319 #ifndef COMPILE_UTILITY
1320 void
1321 debug_print_taint(const void * p)
1322 {
1323 const uschar * quoter_name;
1324 if (!is_tainted(p)) return;
1325 debug_printf("(tainted");
1326 (void) quoter_for_address(p, &quoter_name);
1327 if (quoter_name)
1328   debug_printf(", quoted:%s", quoter_name);
1329 debug_printf(")\n");
1333 /* End of store.c */