1 /*************************************************
2 * Exim - an Internet mail transport agent *
3 *************************************************/
5 /* Copyright (c) The Exim maintainers 2019 - 2023 */
6 /* Copyright (c) University of Cambridge 1995 - 2018 */
7 /* See the file NOTICE for conditions of use and distribution. */
8 /* SPDX-License-Identifier: GPL-2.0-or-later */
10 /* Exim gets and frees all its store through these functions. In the original
11 implementation there was a lot of mallocing and freeing of small bits of store.
12 The philosophy has now changed to a scheme which includes the concept of
13 "stacking pools" of store. For the short-lived processes, there isn't any real
14 need to do any garbage collection, but the stack concept allows quick resetting
15 in places where this seems sensible.
17 Obviously the long-running processes (the daemon, the queue runner, and eximon)
18 must take care not to eat store.
20 The following different types of store are recognized:
22 . Long-lived, large blocks: This is implemented by retaining the original
23 malloc/free functions, and it is used for permanent working buffers and for
24 getting blocks to cut up for the other types.
26 . Long-lived, small blocks: This is used for blocks that have to survive until
27 the process exits. It is implemented as a stacking pool (POOL_PERM). This is
28 functionally the same as store_malloc(), except that the store can't be
29 freed, but I expect it to be more efficient for handling small blocks.
31 . Short-lived, short blocks: Most of the dynamic store falls into this
32 category. It is implemented as a stacking pool (POOL_MAIN) which is reset
33 after accepting a message when multiple messages are received by a single
34 process. Resetting happens at some other times as well, usually fairly
35 locally after some specific processing that needs working store.
37 . There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
38 This means it can be freed when search_tidyup() is called to close down all
the lookups.
41 - There is another pool (POOL_MESSAGE) used for medium-lifetime objects; within
42 a single message transaction but needed for longer than the use of the main
43 pool permits. Currently this means only receive-time DKIM information.
45 - There is a dedicated pool for configuration data read from the config file(s).
46 Once complete, it is made readonly.
48 - There are dynamically-created pools, one for each active lookup-quoting combination.
50 . Orthogonal to the pool types above, there are two classes of memory: untainted
51 and tainted. The latter is used for values derived from untrusted input, and
52 the string-expansion mechanism refuses to operate on such values (obviously,
53 it can expand an untainted value to return a tainted result). The classes
54 are implemented by giving each pool type a tainted sibling. Pool resets are
55 requested against the untainted sibling and apply to both siblings.
57 Only memory blocks requested for tainted use are regarded as tainted; anything
58 else (including stack auto variables) is untainted. Care is needed when coding
59 to not copy untrusted data into untainted memory, as downstream taint-checks would otherwise be bypassed.
62 Intermediate layers (e.g. the string functions) can test for taint, and use this
63 for ensuring that results have proper state. For example, the
64 string_vformat_trc() routine supporting the string_sprintf() interface will
65 recopy a string being built into a tainted allocation if it meets a %s for a
66 tainted argument. Any intermediate-layer function that (can) return a new
67 allocation should behave this way; returning a tainted result if any tainted
68 content is used. Intermediate-layer functions (eg. Ustrncpy) that modify
69 existing allocations fail if tainted data is written into an untainted area.
70 Users of functions that modify existing allocations should check if a tainted
71 source and an untainted destination is used, and fail instead (sprintf() being the classic case). */
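/* Illustrative usage sketch (not part of the implementation; the wrapper
macros in store.h supply the caller/line arguments).  Typical code brackets
transient work with a mark/reset pair on the current pool:

     rmark reset_point = store_mark();
     uschar * buf = store_get(1024, GET_UNTAINTED);     (untainted request)
     uschar * dup = store_get(len, source);             (taint copied from source)
     ...
     reset_point = store_reset(reset_point);            (winds back both siblings)
*/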
77 /* keep config.h before memcheck.h, for NVALGRIND */
84 /* We need to know how to align blocks of data for general use. I'm not sure
85 how to get an alignment factor in general. In the current world, a value of 8
86 is probably right, and this is sizeof(double) on some systems and sizeof(void
87 *) on others, so take the larger of those. Since everything in this expression
88 is a constant, the compiler should optimize it to a simple constant wherever it
89 appears (I checked that gcc does do this). */

#define alignment \
92 (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))
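/* (On a typical LP64 build both sizes are 8, so the alignment is 8 and every
pool request is rounded up to a multiple of 8 bytes.) */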
94 /* store_reset() will not free the following block if the last used block has
95 less than this much left in it. */
97 #define STOREPOOL_MIN_SIZE 256
99 /* Structure describing the beginning of each big block. */
101 typedef struct storeblock {
102 struct storeblock *next;
size_t length;
} storeblock;
106 /* Pool descriptor struct */
108 typedef struct pooldesc {
109 storeblock * chainbase; /* list of blocks in pool */
110 storeblock * current_block; /* top block, still with free space */
111 void * next_yield; /* next allocation point */
112 int yield_length; /* remaining space in current block */
113 unsigned store_block_order; /* log2(size) block allocation size */
115 /* This variable is set by store_get() to its yield, and by store_reset() to
116 NULL. This enables string_cat() to optimize its store handling for very long
117 strings. That's why the variable is global. */
119 void * store_last_get;
121 /* These are purely for stats-gathering */

int nbytes;		/* current bytes allocated in this pool */
int maxbytes;		/* high-water mark for nbytes */
int nblocks;		/* current number of blocks in this pool */
int maxblocks;		/* high-water mark for nblocks */
unsigned maxorder;	/* highest block order reached */
} pooldesc;
130 /* Enhanced pool descriptor for quoted pools */
132 typedef struct quoted_pooldesc {
pooldesc pool;
unsigned quoter;
135 struct quoted_pooldesc * next;
} quoted_pooldesc;
138 /* Just in case we find ourselves on a system where the structure above has a
139 length that is not a multiple of the alignment, set up a macro for the padded
length. */
142 #define ALIGNED_SIZEOF_STOREBLOCK \
143 (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
145 /* Size of block to get from malloc to carve up into smaller ones. This
146 must be a multiple of the alignment. We assume that 4096 is going to be
147 suitably aligned. Double the size per-pool for every malloc, to mitigate
148 certain denial-of-service attacks. Don't bother to decrease on block frees.
149 We waste on average half the current alloc size per pool. This could be several
150 hundred kB now, vs. 4kB with a constant-size block size. But the search time
151 for is_tainted(), linear in the number of blocks for the pool, becomes O(n log n)
over the life of a process rather than O(n^2), since doubling keeps the block
count logarithmic in the total pool size.
153 A test of 2000 RCPTs and just accept ACL had 370kB in 21 blocks before,
154 504kB in 6 blocks now, for the untainted-main (largest) pool.
155 Builds for restricted-memory systems can disable the expansion by
156 defining RESTRICTED_MEMORY */
157 /*XXX should we allow any for malloc's own overhead? But how much? */
159 /* #define RESTRICTED_MEMORY */
160 #define STORE_BLOCK_SIZE(order) ((1U << (order)) - ALIGNED_SIZEOF_STOREBLOCK)
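/* Worked example, assuming a typical LP64 platform where
ALIGNED_SIZEOF_STOREBLOCK is 16: a pool starting at order 12 carves each
4096-byte malloc into 4080 usable bytes; after one doubling, order 13 gives
8192-byte mallocs with 8176 usable bytes, and so on. */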
162 /* Variables holding data for the local pools of store. The current pool number
163 is held in store_pool, which is global so that it can be changed from outside.
164 Setting the initial length values to -1 forces a malloc for the first call,
165 even if the length is zero (which is used for getting a point to reset to). */
167 int store_pool = POOL_MAIN;
169 pooldesc paired_pools[N_PAIRED_POOLS];
170 quoted_pooldesc * quoted_pools = NULL;
172 static int n_nonpool_blocks; /* current number of direct store_malloc() blocks */
173 static int max_nonpool_blocks;
174 static int max_pool_malloc; /* max value for pool_malloc */
175 static int max_nonpool_malloc; /* max value for nonpool_malloc */
177 /* pool_malloc holds the amount of memory used by the store pools; this goes up
178 and down as store is reset or released. nonpool_malloc is the total got by
179 malloc from other calls; this doesn't go down because it is just freed by
pointer. */
182 static int pool_malloc;
183 static int nonpool_malloc;
186 #ifndef COMPILE_UTILITY
187 static const uschar * pooluse[N_PAIRED_POOLS] = {
188 [POOL_MAIN] = US"main",
189 [POOL_PERM] = US"perm",
190 [POOL_CONFIG] = US"config",
191 [POOL_SEARCH] = US"search",
192 [POOL_MESSAGE] = US"message",
193 [POOL_TAINT_MAIN] = US"main",
194 [POOL_TAINT_PERM] = US"perm",
195 [POOL_TAINT_CONFIG] = US"config",
196 [POOL_TAINT_SEARCH] = US"search",
197 [POOL_TAINT_MESSAGE] = US"message",
};
199 static const uschar * poolclass[N_PAIRED_POOLS] = {
200 [POOL_MAIN] = US"untainted",
201 [POOL_PERM] = US"untainted",
202 [POOL_CONFIG] = US"untainted",
203 [POOL_SEARCH] = US"untainted",
204 [POOL_MESSAGE] = US"untainted",
205 [POOL_TAINT_MAIN] = US"tainted",
206 [POOL_TAINT_PERM] = US"tainted",
207 [POOL_TAINT_CONFIG] = US"tainted",
208 [POOL_TAINT_SEARCH] = US"tainted",
209 [POOL_TAINT_MESSAGE] = US"tainted",
};
#endif
214 static void * internal_store_malloc(size_t, const char *, int);
215 static void internal_store_free(void *, const char *, int linenumber);
217 /******************************************************************************/
220 pool_init(pooldesc * pp)
222 memset(pp, 0, sizeof(*pp));
223 pp->yield_length = -1;
224 pp->store_block_order = 12; /* log2(allocation_size) ie. 4kB */
227 /* Initialisation, for things fragile with parameter changes when using
228 static initialisers. */
233 for (pooldesc * pp = paired_pools; pp < paired_pools + N_PAIRED_POOLS; pp++)
237 /******************************************************************************/
238 /* Locating elements given memory pointer */
241 is_pointer_in_block(const storeblock * b, const void * p)
243 uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
244 return US p >= bc && US p < bc + b->length;
248 pool_current_for_pointer(const void * p)
252 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
253 if ((b = qp->pool.current_block) && is_pointer_in_block(b, p))
256 for (pooldesc * pp = paired_pools; pp < paired_pools + N_PAIRED_POOLS; pp++)
257 if ((b = pp->current_block) && is_pointer_in_block(b, p))
263 pool_for_pointer(const void * p, const char * func, int linenumber)
268 if ((pp = pool_current_for_pointer(p))) return pp;
270 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
271 for (b = qp->pool.chainbase; b; b = b->next)
272 if (is_pointer_in_block(b, p)) return &qp->pool;
274 for (pp = paired_pools; pp < paired_pools + N_PAIRED_POOLS; pp++)
275 for (b = pp->chainbase; b; b = b->next)
276 if (is_pointer_in_block(b, p)) return pp;
278 #ifndef COMPILE_UTILITY
281 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
282 "bad memory reference; pool not found, at %s %d", func, linenumber);
286 /******************************************************************************/
287 /* Test if a pointer refers to tainted memory.
289 Slower version check, for use when platform intermixes malloc and mmap area
290 addresses. Test against the current-block of all tainted pools first, then all
291 blocks of all tainted pools.
293 Return: TRUE iff tainted
297 is_tainted_fn(const void * p)
301 if (p == GET_UNTAINTED) return FALSE;
302 if (p == GET_TAINTED) return TRUE;
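/* (GET_UNTAINTED and GET_TAINTED are the distinguished prototype values -
0 forces untainted, 1 forces tainted - not real heap addresses, so they are
dealt with before any block search.) */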
304 for (pooldesc * pp = paired_pools + POOL_TAINT_BASE;
305 pp < paired_pools + N_PAIRED_POOLS; pp++)
306 if ((b = pp->current_block))
307 if (is_pointer_in_block(b, p)) return TRUE;
309 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
310 if (b = qp->pool.current_block)
311 if (is_pointer_in_block(b, p)) return TRUE;
313 for (pooldesc * pp = paired_pools + POOL_TAINT_BASE;
314 pp < paired_pools + N_PAIRED_POOLS; pp++)
315 for (b = pp->chainbase; b; b = b->next)
316 if (is_pointer_in_block(b, p)) return TRUE;
318 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
319 for (b = qp->pool.chainbase; b; b = b->next)
320 if (is_pointer_in_block(b, p)) return TRUE;
327 die_tainted(const uschar * msg, const uschar * func, int line)
329 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
334 #ifndef COMPILE_UTILITY
335 /* Return the pool for the given quoter, or null */
338 pool_for_quoter(unsigned quoter)
340 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
341 if (qp->quoter == quoter)
346 /* Allocate/init a new quoted-pool and return the pool */
349 quoted_pool_new(unsigned quoter)
351 // debug_printf("allocating quoted-pool\n");
352 quoted_pooldesc * qp = store_get_perm(sizeof(quoted_pooldesc), GET_UNTAINTED);
354 pool_init(&qp->pool);
356 qp->next = quoted_pools;
363 /******************************************************************************/
365 store_writeprotect(int pool)
367 #if !defined(COMPILE_UTILITY) && !defined(MISSING_POSIX_MEMALIGN)
368 for (storeblock * b = paired_pools[pool].chainbase; b; b = b->next)
369 if (mprotect(b, ALIGNED_SIZEOF_STOREBLOCK + b->length, PROT_READ) != 0)
370 DEBUG(D_any) debug_printf("config block mprotect: (%d) %s\n", errno, strerror(errno));
374 /******************************************************************************/
377 pool_get(pooldesc * pp, int size, BOOL align_mem, const char * func, int linenumber)
379 /* Ensure we've been asked to allocate memory.
380 A negative size is a sign of a security problem.
381 A zero size might also be suspect, but our internal usage deliberately
382 does this to return a current watermark value for a later release of store. */
385 if (size < 0 || size >= INT_MAX/2)
386 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
387 "bad memory allocation requested (%d bytes) from %s %d",
388 size, func, linenumber);
390 /* Round up the size to a multiple of the alignment. Although this looks a
391 messy statement, because "alignment" is a constant expression, the compiler can
392 do a reasonable job of optimizing, especially if the value of "alignment" is a
393 power of two. I checked this with -O2, and gcc did very well, compiling it to 4
394 instructions on a Sparc (alignment = 8). */
396 if (size % alignment != 0) size += alignment - (size % alignment);
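/* For example, with alignment == 8 a request for 13 bytes is rounded up to 16,
while a request that is already a multiple of 8 is left unchanged. */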
398 /* If there isn't room in the current block, get a new one. The minimum
399 size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
400 these functions are mostly called for small amounts of store. */
402 if (size > pp->yield_length)
405 STORE_BLOCK_SIZE(pp->store_block_order) - ALIGNED_SIZEOF_STOREBLOCK,
407 int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
408 storeblock * newblock;
410 /* Sometimes store_reset() may leave a block for us; check if we can use it */
412 if ( (newblock = pp->current_block)
413 && (newblock = newblock->next)
414 && newblock->length < length
417 /* Give up on this block, because it's too small */
419 internal_store_free(newblock, func, linenumber);
423 /* If there was no free block, get a new one */
427 if ((pp->nbytes += mlength) > pp->maxbytes)
428 pp->maxbytes = pp->nbytes;
429 if ((pool_malloc += mlength) > max_pool_malloc) /* Used in pools */
430 max_pool_malloc = pool_malloc;
431 nonpool_malloc -= mlength; /* Exclude from overall total */
432 if (++pp->nblocks > pp->maxblocks)
433 pp->maxblocks = pp->nblocks;
435 #ifndef MISSING_POSIX_MEMALIGN
438 long pgsize = sysconf(_SC_PAGESIZE);
439 int err = posix_memalign((void **)&newblock,
440 pgsize, (mlength + pgsize - 1) & ~(pgsize - 1));
442 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
443 "failed to alloc (using posix_memalign) %d bytes of memory: '%s'"
444 "called from line %d in %s",
445 size, strerror(err), linenumber, func);
449 newblock = internal_store_malloc(mlength, func, linenumber);
450 newblock->next = NULL;
451 newblock->length = length;
452 #ifndef RESTRICTED_MEMORY
453 if (pp->store_block_order++ > pp->maxorder)
454 pp->maxorder = pp->store_block_order;
458 pp->chainbase = newblock;
460 pp->current_block->next = newblock;
463 pp->current_block = newblock;
464 pp->yield_length = newblock->length;
466 (void *)(CS pp->current_block + ALIGNED_SIZEOF_STOREBLOCK);
467 (void) VALGRIND_MAKE_MEM_NOACCESS(pp->next_yield, pp->yield_length);
470 /* There's (now) enough room in the current block; the yield is the next
pointer. */
473 pp->store_last_get = pp->next_yield;
475 (void) VALGRIND_MAKE_MEM_UNDEFINED(pp->store_last_get, size);
476 /* Update next pointer and number of bytes left in the current block. */
478 pp->next_yield = (void *)(CS pp->next_yield + size);
479 pp->yield_length -= size;
480 return pp->store_last_get;
483 /*************************************************
484 * Get a block from the current pool *
485 *************************************************/
487 /* Running out of store is a total disaster. This function is called via the
488 macro store_get(). The current store_pool is used, adjusting for taint.
489 If the prototype is quoted, use a quoted-pool.
490 Return a block of store within the current big block of the pool, getting a new
491 one if necessary. The address is saved in store_last_get for the pool.
494 size amount wanted, bytes
495 proto_mem class: get store conformant to this
496 Special values: 0 forces untainted, 1 forces tainted
497 func function from which called
498 linenumber line number in source file
500 Returns: pointer to store (panic on malloc failure)
*/
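/* Example (a sketch, using the store_get() wrapper macro from store.h):

     uschar * copy = store_get(Ustrlen(s) + 1, s);      (copy inherits s's taint)
     uschar * tmp  = store_get(64, GET_UNTAINTED);      (explicitly untainted)
*/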
504 store_get_3(int size, const void * proto_mem, const char * func, int linenumber)
506 #ifndef COMPILE_UTILITY
507 int quoter = quoter_for_address(proto_mem);
512 #ifndef COMPILE_UTILITY
513 if (!is_real_quoter(quoter))
516 BOOL tainted = is_tainted(proto_mem);
517 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
518 pp = paired_pools + pool;
519 yield = pool_get(pp, size, (pool == POOL_CONFIG), func, linenumber);
521 /* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */
524 #ifndef COMPILE_UTILITY
526 debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
527 pp->store_last_get, size, func, linenumber);
530 #ifndef COMPILE_UTILITY
534 debug_printf("allocating quoted-block for quoter %u (from %s %d)\n",
535 quoter, func, linenumber);
536 if (!(pp = pool_for_quoter(quoter))) pp = quoted_pool_new(quoter);
537 yield = pool_get(pp, size, FALSE, func, linenumber);
539 debug_printf("---QQ Get %6p %5d %-14s %4d\n",
540 pp->store_last_get, size, func, linenumber);
548 /*************************************************
549 * Get a block from the PERM pool *
550 *************************************************/
552 /* This is just a convenience function, useful when just a single block is to
be obtained.

Arguments:
  size        amount wanted, bytes
557 proto_mem class: get store conformant to this
558 func function from which called
559 linenumber line number in source file
561 Returns: pointer to store (panic on malloc failure)
565 store_get_perm_3(int size, const void * proto_mem, const char * func, int linenumber)
568 int old_pool = store_pool;
569 store_pool = POOL_PERM;
570 yield = store_get_3(size, proto_mem, func, linenumber);
571 store_pool = old_pool;
576 #ifndef COMPILE_UTILITY
577 /*************************************************
578 * Get a block annotated as being lookup-quoted *
579 *************************************************/
581 /* Allocate from a pool consistent with the proto_mem augmented by the
582 requested quoter type.
584 XXX currently not handling mark/release
586 Args: size number of bytes to allocate
587 quoter id for the quoting type
588 func caller, for debug
589 linenumber caller, for debug
591 Return: allocated memory block
595 store_force_get_quoted(int size, unsigned quoter,
596 const char * func, int linenumber)
598 pooldesc * pp = pool_for_quoter(quoter);
602 debug_printf("allocating quoted-block for quoter %u (from %s %d)\n", quoter, func, linenumber);
604 if (!pp) pp = quoted_pool_new(quoter);
605 yield = pool_get(pp, size, FALSE, func, linenumber);
608 debug_printf("---QQ Get %6p %5d %-14s %4d\n",
609 pp->store_last_get, size, func, linenumber);
614 /* Maybe get memory for the specified quoter, but only if the
615 prototype memory is tainted. Otherwise, get plain memory.
618 store_get_quoted_3(int size, const void * proto_mem, unsigned quoter,
619 const char * func, int linenumber)
621 // debug_printf("store_get_quoted_3: quoter %u\n", quoter);
622 return is_tainted(proto_mem)
623 ? store_force_get_quoted(size, quoter, func, linenumber)
624 : store_get_3(size, proto_mem, func, linenumber);
627 /* Return quoter for given address, or -1 if not in a quoted-pool. */
629 quoter_for_address(const void * p)
631 for (quoted_pooldesc * qp = quoted_pools; qp; qp = qp->next)
633 pooldesc * pp = &qp->pool;
636 if (b = pp->current_block)
637 if (is_pointer_in_block(b, p))
640 for (b = pp->chainbase; b; b = b->next)
641 if (is_pointer_in_block(b, p))
647 /* Return TRUE iff the given address is quoted for the given type.
648 There is extra complexity to handle lookup providers with multiple
649 find variants but shared quote functions. */
651 is_quoted_like(const void * p, unsigned quoter)
653 int pq = quoter_for_address(p);
655 is_real_quoter(pq) && lookup_list[pq]->quote == lookup_list[quoter]->quote;
656 /* debug_printf("is_quoted(%p, %u): %c\n", p, quoter, y?'T':'F'); */
660 /* Return TRUE if the quoter value indicates an actual quoter */
662 is_real_quoter(int quoter)
667 /* Return TRUE if the "new" data requires that the "old" data
668 be recopied to new-class memory. We order the classes as
670 2: tainted, not quoted
671 1: quoted (which is also tainted)
0: untainted
674 If the "new" is higher-order than the "old", they are not compatible
675 and a copy is needed. If both are quoted, but the quoters differ,
676 not compatible. Otherwise they are compatible.
679 is_incompatible_fn(const void * old, const void * new)
684 ni = is_real_quoter(nq = quoter_for_address(new)) ? 1 : is_tainted(new) ? 2 : 0;
685 oi = is_real_quoter(oq = quoter_for_address(old)) ? 1 : is_tainted(old) ? 2 : 0;
686 return ni > oi || ni == oi && nq != oq;
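/* Examples: tainted data copied over an untainted allocation (new order 2, old
order 0) is incompatible and forces a recopy, as are two quoted values whose
quoters differ.  Untainted data written into tainted store (new 0, old 2) is
compatible, so no copy is needed. */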
689 #endif /*!COMPILE_UTILITY*/
691 /*************************************************
692 * Extend a block if it is at the top *
693 *************************************************/
695 /* While reading strings of unknown length, it is often the case that the
696 string is being read into the block at the top of the stack. If it needs to be
697 extended, it is more efficient just to extend within the top block rather than
698 allocate a new block and then have to copy the data. This function is provided
699 for the use of string_cat(), but of course can be used elsewhere too.
700 The block itself is not expanded; only the top allocation from it.
703 ptr pointer to store block
704 oldsize current size of the block, as requested by user
705 newsize new size required
706 func function from which called
707 linenumber line number in source file
709 Returns: TRUE if the block is at the top of the stack and has been
710 extended; FALSE if it isn't at the top of the stack, or cannot
be extended
713 XXX needs extension for quoted-tracking. This assumes that the global store_pool
714 is the one to alloc from, which breaks with separated pools.
*/
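/* Typical growth pattern (a sketch; store_extend()/store_newblock() are the
wrapper macros defined in store.h): try to grow in place, falling back to a
copy into a fresh allocation when the block cannot be extended:

     if (!store_extend(s, oldsize, newsize))
       s = store_newblock(s, newsize, oldsize);
*/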
718 store_extend_3(void * ptr, int oldsize, int newsize,
719 const char * func, int linenumber)
721 pooldesc * pp = pool_for_pointer(ptr, func, linenumber);
722 int inc = newsize - oldsize;
723 int rounded_oldsize = oldsize;
725 if (oldsize < 0 || newsize < oldsize || newsize >= INT_MAX/2)
726 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
727 "bad memory extension requested (%d -> %d bytes) at %s %d",
728 oldsize, newsize, func, linenumber);
730 if (rounded_oldsize % alignment != 0)
731 rounded_oldsize += alignment - (rounded_oldsize % alignment);
733 if (CS ptr + rounded_oldsize != CS (pp->next_yield) ||
734 inc > pp->yield_length + rounded_oldsize - oldsize)
  return FALSE;
737 /* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */
740 #ifndef COMPILE_UTILITY
743 quoted_pooldesc * qp;
744 for (qp = quoted_pools; qp; qp = qp->next)
747 debug_printf("---Q%d Ext %6p %5d %-14s %4d\n",
748 (int)(qp - quoted_pools),
749 ptr, newsize, func, linenumber);
753 debug_printf("---%d Ext %6p %5d %-14s %4d\n",
754 (int)(pp - paired_pools),
755 ptr, newsize, func, linenumber);
757 #endif /* COMPILE_UTILITY */
759 if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
760 pp->next_yield = CS ptr + newsize;
761 pp->yield_length -= newsize - rounded_oldsize;
762 (void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
return TRUE;
}
770 is_pwr2_size(int len)
{
int x = len;
773 return (x & (x - 1)) == 0;
}
777 /*************************************************
778 * Back up to a previous point on the stack *
779 *************************************************/
781 /* This function resets the next pointer, freeing any subsequent whole blocks
782 that are now unused. Call with a cookie obtained from store_mark() only; do
783 not call with a pointer returned by store_get(). Both the untainted and tainted
784 pools corresponding to store_pool are reset.
786 Quoted pools are not handled.
789 ptr place to back up to
790 pool pool holding the pointer
791 func function from which called
792 linenumber line number in source file
798 internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
801 pooldesc * pp = paired_pools + pool;
802 storeblock * b = pp->current_block;
803 char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
804 int newlength, count;
805 #ifndef COMPILE_UTILITY
806 int oldmalloc = pool_malloc;
809 if (!b) return; /* exim_dumpdb gets this, because it has never used tainted mem */
811 /* Last store operation was not a get */
813 pp->store_last_get = NULL;
815 /* See if the place is in the current block - as it often will be. Otherwise,
816 search for the block in which it lies. */
818 if (CS ptr < bc || CS ptr > bc + b->length)
820 for (b = pp->chainbase; b; b = b->next)
822 bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
823 if (CS ptr >= bc && CS ptr <= bc + b->length) break;
826 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
827 "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
830 /* Back up, rounding to the alignment if necessary. When testing, flatten
831 the released memory. */
833 newlength = bc + b->length - CS ptr;
834 #ifndef COMPILE_UTILITY
837 assert_no_variables(ptr, newlength, func, linenumber);
838 if (f.running_in_test_harness)
840 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
841 memset(ptr, 0xF0, newlength);
845 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
846 pp->next_yield = CS ptr + (newlength % alignment);
847 count = pp->yield_length;
848 count = (pp->yield_length = newlength - (newlength % alignment)) - count;
849 pp->current_block = b;
851 /* Free any subsequent block. Do NOT free the first
852 successor, if our current block has less than 256 bytes left. This should
853 prevent us from flapping memory. However, keep this block only when it has
854 a power-of-two size, so it is probably not a custom-inflated one. */
856 if ( pp->yield_length < STOREPOOL_MIN_SIZE
   && b->next
858 && is_pwr2_size(b->next->length + ALIGNED_SIZEOF_STOREBLOCK))
861 #ifndef COMPILE_UTILITY
863 assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
866 (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
867 b->length - ALIGNED_SIZEOF_STOREBLOCK);
871 if (pool != POOL_CONFIG)
876 int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
878 #ifndef COMPILE_UTILITY
880 assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
887 if (pool != POOL_CONFIG)
888 internal_store_free(b, func, linenumber);
890 #ifndef RESTRICTED_MEMORY
891 if (pp->store_block_order > 13) pp->store_block_order--;
895 /* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */
898 #ifndef COMPILE_UTILITY
900 debug_printf("---%d Rst %6p %5d %-14s %4d\tpool %d\n", pool, ptr,
901 count + oldmalloc - pool_malloc,
902 func, linenumber, pool_malloc);
903 #endif /* COMPILE_UTILITY */
907 /* Back up the pool pair, untainted and tainted, of the store_pool setting.
908 Quoted pools are not handled.
912 store_reset_3(rmark r, const char * func, int linenumber)
{
void ** ptr = r;
916 if (store_pool >= POOL_TAINT_BASE)
917 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
918 "store_reset called for pool %d: %s %d\n", store_pool, func, linenumber);
920 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
921 "store_reset called with bad mark: %s %d\n", func, linenumber);
923 internal_store_reset(*ptr, store_pool + POOL_TAINT_BASE, func, linenumber);
924 internal_store_reset(ptr, store_pool, func, linenumber);
return NULL;
931 /* Free tail-end unused allocation. This lets us allocate a big chunk
932 early, for cases when we only discover later how much was really needed.
934 Can be called with a value from store_get(), or an offset after such. Only
935 the tainted or untainted pool that serviced the store_get() will be affected.
937 This is mostly a cut-down version of internal_store_reset().
938 XXX needs rationalising
942 store_release_above_3(void * ptr, const char * func, int linenumber)
946 /* Search all pools' "current" blocks. If it isn't one of those,
947 ignore it (it usually will be). */
949 if ((pp = pool_current_for_pointer(ptr)))
951 storeblock * b = pp->current_block;
952 int count, newlength;
954 /* Last store operation was not a get */
956 pp->store_last_get = NULL;
958 /* Back up, rounding to the alignment if necessary. When testing, flatten
959 the released memory. */
961 newlength = (CS b + ALIGNED_SIZEOF_STOREBLOCK) + b->length - CS ptr;
962 #ifndef COMPILE_UTILITY
965 assert_no_variables(ptr, newlength, func, linenumber);
966 if (f.running_in_test_harness)
968 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
969 memset(ptr, 0xF0, newlength);
973 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
974 pp->next_yield = CS ptr + (newlength % alignment);
975 count = pp->yield_length;
976 count = (pp->yield_length = newlength - (newlength % alignment)) - count;
978 /* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */
981 #ifndef COMPILE_UTILITY
984 quoted_pooldesc * qp;
985 for (qp = quoted_pools; qp; qp = qp->next)
987 debug_printf("---Q%d Rel %6p %5d %-14s %4d\tpool %d\n",
988 (int)(qp - quoted_pools),
989 ptr, count, func, linenumber, pool_malloc);
991 debug_printf("---%d Rel %6p %5d %-14s %4d\tpool %d\n",
992 (int)(pp - paired_pools), ptr, count,
993 func, linenumber, pool_malloc);
998 #ifndef COMPILE_UTILITY
1000 debug_printf("non-last memory release try: %s %d\n", func, linenumber);
1007 store_mark_3(const char * func, int linenumber)
1011 #ifndef COMPILE_UTILITY
1013 debug_printf("---%d Mrk %-14s %4d\tpool %d\n",
1014 store_pool, func, linenumber, pool_malloc);
1015 #endif /* COMPILE_UTILITY */
1017 if (store_pool >= POOL_TAINT_BASE)
1018 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
1019 "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);
1021 /* Stash a mark for the tainted-twin release, in the untainted twin. Return
1022 a cookie (actually the address in the untainted pool) to the caller.
1023 Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
1024 and winds back the untainted pool with the cookie. */
1026 p = store_get_3(sizeof(void *), GET_UNTAINTED, func, linenumber);
1027 *p = store_get_3(0, GET_TAINTED, func, linenumber);
1034 /************************************************
1036 ************************************************/
1038 /* This function checks that the pointer it is given is the first thing in a
1039 block, and if so, releases that block.
1042 block block of store to consider
1043 pp pool containing the block
1044 func function from which called
1045 linenumber line number in source file
1051 store_release_3(void * block, pooldesc * pp, const char * func, int linenumber)
1053 /* It will never be the first block, so no need to check that. */
1055 for (storeblock * b = pp->chainbase; b; b = b->next)
1057 storeblock * bb = b->next;
1058 if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
1060 int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
1066 /* Cut out the debugging stuff for utilities, but stop picky compilers
1067 from giving warnings. */
1069 #ifndef COMPILE_UTILITY
1071 debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
1072 linenumber, pool_malloc);
1074 if (f.running_in_test_harness)
1075 memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
1076 #endif /* COMPILE_UTILITY */
1078 internal_store_free(bb, func, linenumber);
1085 /************************************************
1087 ************************************************/
1089 /* Allocate a new block big enough to expand to the given size and
1090 copy the current data into it. Free the old one if possible.
1092 This function is specifically provided for use when reading very
1093 long strings, e.g. header lines. When the string gets longer than a
1094 complete block, it gets copied to a new block. It is helpful to free
1095 the old block iff the previous copy of the string is at its start,
1096 and therefore the only thing in it. Otherwise, for very long strings,
1097 dead store can pile up somewhat disastrously. This function checks that
1098 the pointer it is given is the first thing in a block, and that nothing
1099 has been allocated since. If so, releases that block.
1103 newsize requested size
1106 Returns: new location of data
1110 store_newblock_3(void * oldblock, int newsize, int len,
1111 const char * func, int linenumber)
1113 pooldesc * pp = pool_for_pointer(oldblock, func, linenumber);
1114 BOOL release_ok = !is_tainted(oldblock) && pp->store_last_get == oldblock; /*XXX why tainted not handled? */
1117 if (len < 0 || len > newsize)
1118 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
1119 "bad memory extension requested (%d -> %d bytes) at %s %d",
1120 len, newsize, func, linenumber);
1122 newblock = store_get(newsize, oldblock);
1123 memcpy(newblock, oldblock, len);
1124 if (release_ok) store_release_3(oldblock, pp, func, linenumber);
1125 return (void *)newblock;
1131 /*************************************************
1133 *************************************************/
1135 /* Running out of store is a total disaster for exim. Some malloc functions
1136 do not run happily on very small sizes, nor do they document this fact. This
1137 function is called via the macro store_malloc().
1140 size amount of store wanted
1141 func function from which called
1142 line line number in source file
1144 Returns: pointer to gotten store (panic on failure)
1148 internal_store_malloc(size_t size, const char * func, int line)
1152 /* Check specifically for a possible result of conversion from
1153 a negative int, to the (unsigned, wider) size_t */
1155 if (size >= INT_MAX/2)
1156 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
1157 "bad internal_store_malloc request (" SIZE_T_FMT " bytes) from %s %d",
1160 size += sizeof(size_t); /* space to store the size, used under debug */
1161 if (size < 16) size = 16;
1163 if (!(yield = malloc(size)))
1164 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc " SIZE_T_FMT " bytes of memory: "
1165 "called from line %d in %s", size, line, func);
1167 #ifndef COMPILE_UTILITY
1168 DEBUG(D_any) *(size_t *)yield = size;
#endif
1170 yield = US yield + sizeof(size_t);
1172 if ((nonpool_malloc += size) > max_nonpool_malloc)
1173 max_nonpool_malloc = nonpool_malloc;
1175 /* Cut out the debugging stuff for utilities, but stop picky compilers from
1178 #ifndef COMPILE_UTILITY
1179 /* If running in the test harness, spend time filling all the new store with a
1180 non-zero pattern, so that code which wrongly assumes zeroed memory is caught. */
1182 if (f.running_in_test_harness)
1183 memset(yield, 0xF0, size - sizeof(size_t));
1184 DEBUG(D_memory) debug_printf("--Malloc %6p %5lu bytes\t%-20s %4d\tpool %5d nonpool %5d\n",
1185 yield, size, func, line, pool_malloc, nonpool_malloc);
1186 #endif /* COMPILE_UTILITY */
1192 store_malloc_3(size_t size, const char *func, int linenumber)
1194 if (n_nonpool_blocks++ > max_nonpool_blocks)
1195 max_nonpool_blocks = n_nonpool_blocks;
1196 return internal_store_malloc(size, func, linenumber);
1200 /************************************************
1202 ************************************************/
1204 /* This function is called by the macro store_free().
1207 block block of store to free
1208 func function from which called
1209 linenumber line number in source file
1215 internal_store_free(void * block, const char * func, int linenumber)
1217 uschar * p = US block - sizeof(size_t);
1218 #ifndef COMPILE_UTILITY
1219 DEBUG(D_any) nonpool_malloc -= *(size_t *)p;
1220 DEBUG(D_memory) debug_printf("----Free %6p %5ld bytes\t%-20s %4d\n",
1221 block, *(size_t *)p, func, linenumber);
#endif
free(p);
1227 store_free_3(void * block, const char * func, int linenumber)
1230 internal_store_free(block, func, linenumber);
1233 /******************************************************************************/
1234 /* Stats output on process exit */
1238 #ifndef COMPILE_UTILITY
1242 debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
1243 (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
1244 debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
1246 for (i = 0; i < N_PAIRED_POOLS; i++)
1248 pooldesc * pp = paired_pools + i;
1249 debug_printf("----Exit pool %2d max: %3d kB in %d blocks at order %u\t%s %s\n",
1250 i, (pp->maxbytes+1023)/1024, pp->maxblocks, pp->maxorder,
1251 poolclass[i], pooluse[i]);
1254 for (quoted_pooldesc * qp = quoted_pools; qp; i++, qp = qp->next)
1256 pooldesc * pp = &qp->pool;
1257 debug_printf("----Exit pool Q%d max: %3d kB in %d blocks at order %u\ttainted quoted:%s\n",
1258 i, (pp->maxbytes+1023)/1024, pp->maxblocks, pp->maxorder, lookup_list[qp->quoter]->name);
1265 /******************************************************************************/
1266 /* Per-message pool management */
1268 static rmark message_reset_point = NULL;
1273 int oldpool = store_pool;
1274 store_pool = POOL_MESSAGE;
1275 if (!message_reset_point) message_reset_point = store_mark();
1276 store_pool = oldpool;
1280 message_tidyup(void)
1283 if (!message_reset_point) return;
1284 oldpool = store_pool;
1285 store_pool = POOL_MESSAGE;
1286 message_reset_point = store_reset(message_reset_point);
1287 store_pool = oldpool;
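/* Intended pattern (sketch): the start-of-message hook above is called before
each message so that POOL_MESSAGE allocations (e.g. receive-time DKIM data)
share one mark; message_tidyup() then releases them once the message
transaction is finished. */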
1290 /******************************************************************************/
1291 /* Debug analysis of address */
1293 #ifndef COMPILE_UTILITY
1295 debug_print_taint(const void * p)
1297 int q = quoter_for_address(p);
1298 if (!is_tainted(p)) return;
1299 debug_printf("(tainted");
1300 if (is_real_quoter(q)) debug_printf(", quoted:%s", lookup_list[q]->name);
1301 debug_printf(")\n");
1305 /* End of store.c */