X-Git-Url: https://git.exim.org/users/jgh/exim.git/blobdiff_plain/f3ebb786e451da973560f1c9d8cdb151d25108b5..4381d60bc96bed88d96e8cc6b534dd0dcd48163f:/src/src/store.c

diff --git a/src/src/store.c b/src/src/store.c
index 7871d0ccc..8e776568a 100644
--- a/src/src/store.c
+++ b/src/src/store.c
@@ -41,8 +41,32 @@ The following different types of store are recognized:
 and tainted. The latter is used for values derived from untrusted input, and
 the string-expansion mechanism refuses to operate on such values (obviously,
 it can expand an untainted value to return a tainted result). The classes
-are implemented by duplicating the three pool types. Pool resets are requested
+are implemented by duplicating the three pool types.  Pool resets are requested
 against the nontainted sibling and apply to both siblings.
+
+Only memory blocks requested for tainted use are regarded as tainted; anything
+else (including stack auto variables) is untainted.  Care is needed when
+coding not to copy untrusted data into untainted memory, as downstream
+taint-checks would then be bypassed.
+
+Internally we currently use malloc for nontainted pools, and mmap for tainted
+pools.  The disparity is for speed of testing the taintedness of pointers:
+Linux appears to use distinct non-overlapping address ranges for mmap
+vs. everything else, which means only two pointer-compares suffice for the
+test.  Other OSes cannot use that optimisation, and a more lengthy test
+against the limits of tainted-pool allocations has to be done.
+
+Intermediate layers (eg. the string functions) can test for taint, and use
+this to ensure that results have the proper state.  For example the
+string_vformat_trc() routine supporting the string_sprintf() interface will
+recopy a string being built into a tainted allocation if it meets a %s for a
+tainted argument.  Any intermediate-layer function that (can) return a new
+allocation should behave this way, returning a tainted result if any tainted
+content is used.  Intermediate-layer functions (eg. Ustrncpy) that modify
+existing allocations fail if tainted data is written into an untainted area.
+Users of functions that modify existing allocations should check whether a
+tainted source is being written to an untainted destination, and fail instead
+(sprintf() being the classic case).
 */
 
 
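The two-pointer-compare fast path described in the header comment is not visible in this hunk; the following is a minimal sketch of the idea only, assuming a Linux-style layout where mmap results never interleave with the malloc heap. It mirrors the tainted_base/tainted_top bounds-tracking this patch removes below; note_tainted_region() and is_tainted_fast() are illustrative names, not Exim interfaces.

    /* Sketch only: taint test as two pointer compares, assuming all
    tainted-pool mmap regions fall in one address range disjoint from
    the malloc heap. */

    #include <stdbool.h>
    #include <stddef.h>

    static const char * tainted_base = (const char *)-1; /* lowest tainted address seen */
    static const char * tainted_top  = NULL;             /* one past the highest */

    /* Widen the tracked bounds after each successful tainted-pool mmap() */
    static void
    note_tainted_region(const void * yield, size_t size)
    {
    const char * p = yield;
    if (p < tainted_base) tainted_base = p;
    if (p + size > tainted_top) tainted_top = p + size;
    }

    /* The whole taint test is then just two compares */
    static bool
    is_tainted_fast(const void * p)
    {
    const char * cp = p;
    return cp >= tainted_base && cp < tainted_top;
    }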
@@ -102,13 +126,6 @@ static storeblock *current_block[NPOOLS];
 static void *next_yield[NPOOLS];
 static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };
 
-/* The limits of the tainted pools. Tracking these on new allocations enables
-a fast is_tainted implementation. We assume the kernel only allocates mmaps using
-one side or the other of data+heap, not both. */
-
-static void * tainted_base = (void *)-1;
-static void * tainted_top = (void *)0;
-
 /* pool_malloc holds the amount of memory used by the store pools; this goes up
 and down as store is reset or released. nonpool_malloc is the total got by
 malloc from other calls; this doesn't go down because it is just freed by
@@ -135,6 +152,7 @@ static int max_pool_malloc;	/* max value for pool_malloc */
 static int max_nonpool_malloc;	/* max value for nonpool_malloc */
 
 
+#ifndef COMPILE_UTILITY
 static const uschar * pooluse[NPOOLS] = {
 [POOL_MAIN] =		US"main",
 [POOL_PERM] =		US"perm",
@@ -151,29 +169,47 @@ static const uschar * poolclass[NPOOLS] = {
 [POOL_TAINT_PERM] =	US"tainted",
 [POOL_TAINT_SEARCH] =	US"tainted",
 };
+#endif
 
 
 static void * store_mmap(int, const char *, int);
 static void * internal_store_malloc(int, const char *, int);
-static void internal_store_free(void *, const char *, int linenumber);
+static void internal_untainted_free(void *, const char *, int linenumber);
+static void internal_tainted_free(storeblock *, const char *, int linenumber);
 
 /******************************************************************************/
 
-/* Predicate: if an address is in a tainted pool.
-By extension, a variable pointing to this address is tainted.
+/* Test if a pointer refers to tainted memory.
+
+Slower check, for use when the platform intermixes malloc and mmap area
+addresses.  Test against the current block of each tainted pool first, then
+against all blocks of all tainted pools.
+
+Return: TRUE iff tainted
 */
 
 BOOL
-is_tainted(const void * p)
+is_tainted_fn(const void * p)
 {
-BOOL rc = p >= tainted_base && p < tainted_top;
+storeblock * b;
 
-#ifndef COMPILE_UTILITY
-DEBUG(D_memory) if (rc) debug_printf_indent("is_tainted: YES\n");
-#endif
-return rc;
+for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
+  if ((b = current_block[pool]))
+    {
+    uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
+    if (US p >= bc && US p <= bc + b->length) return TRUE;
+    }
+
+for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
+  for (b = chainbase[pool]; b; b = b->next)
+    {
+    uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
+    if (US p >= bc && US p <= bc + b->length) return TRUE;
+    }
+return FALSE;
 }
 
+
 void
 die_tainted(const uschar * msg, const uschar * func, int line)
 {
@@ -182,6 +218,7 @@ log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
 }
 
 
+
 /*************************************************
 *       Get a block from the current pool        *
 *************************************************/
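As a usage illustration of the interfaces above, here is a hedged sketch of the copy-and-propagate pattern the header comment requires of intermediate layers: a new allocation takes the taint of its source. string_copy_example() is a hypothetical name; uschar, BOOL, is_tainted() and the store_get(size, tainted) interface are the ones used in this file, and the usual exim.h environment (which supplies memcpy via string.h) is assumed.

    /* Sketch only: an intermediate-layer copy routine honouring the
    taint rules above. */

    static uschar *
    string_copy_example(const uschar * s, int len)
    {
    /* Allocate from the tainted pools iff the source is tainted, so
    downstream taint checks still see the copy as untrusted data. */
    BOOL tainted = is_tainted(s);
    uschar * yield = store_get(len + 1, tainted);

    memcpy(yield, s, len);
    yield[len] = '\0';
    return yield;
    }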
@@ -192,7 +229,8 @@ block, getting a new one if necessary. The address is saved in
 store_last_was_get.
 
 Arguments:
-  size        amount wanted
+  size        amount wanted, bytes
+  tainted     class: set to true for untrusted data (eg. from smtp input)
   func        function from which called
   linenumber  line number in source file
 
@@ -232,15 +270,9 @@ if (size > yield_length[pool])
     /* Give up on this block, because it's too small */
     nblocks[pool]--;
     if (pool < POOL_TAINT_BASE)
-      internal_store_free(newblock, func, linenumber);
+      internal_untainted_free(newblock, func, linenumber);
     else
-      {
-#ifndef COMPILE_UTILITY
-      DEBUG(D_memory)
-        debug_printf("---Unmap %6p %-20s %4d\n", newblock, func, linenumber);
-#endif
-      munmap(newblock, newblock->length + ALIGNED_SIZEOF_STOREBLOCK);
-      }
+      internal_tainted_free(newblock, func, linenumber);
     newblock = NULL;
     }
 
@@ -499,15 +531,9 @@ while ((b = bb))
   pool_malloc -= siz;
   nblocks[pool]--;
   if (pool < POOL_TAINT_BASE)
-    internal_store_free(b, func, linenumber);
+    internal_untainted_free(b, func, linenumber);
   else
-    {
-#ifndef COMPILE_UTILITY
-    DEBUG(D_memory)
-      debug_printf("---Unmap %6p %-20s %4d\n", b, func, linenumber);
-#endif
-    munmap(b, b->length + ALIGNED_SIZEOF_STOREBLOCK);
-    }
+    internal_tainted_free(b, func, linenumber);
   }
 
 /* Cut out the debugging stuff for utilities, but stop picky compilers from
@@ -725,8 +751,10 @@ int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
 BOOL release_ok = !tainted && store_last_get[pool] == block;
 uschar * newtext;
 
+#if !defined(MACRO_PREDEF) && !defined(COMPILE_UTILITY)
 if (is_tainted(block) != tainted)
   die_tainted(US"store_newblock", CUS func, linenumber);
+#endif
 
 newtext = store_get(newsize, tainted);
 memcpy(newtext, block, len);
@@ -780,9 +808,6 @@ if (!(yield = mmap(NULL, (size_t)size,
   log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
     "called from line %d of %s", size, line, func);
 
-if (yield < tainted_base) tainted_base = yield;
-if ((top = yield + size) > tainted_top) tainted_top = top;
-
 return store_alloc_tail(yield, size, func, line, US"Mmap");
 }
 
@@ -840,7 +865,7 @@ Returns:    nothing
 */
 
 static void
-internal_store_free(void *block, const char *func, int linenumber)
+internal_untainted_free(void * block, const char * func, int linenumber)
 {
 #ifdef COMPILE_UTILITY
 func = func;
@@ -853,10 +878,24 @@ free(block);
 }
 
 void
-store_free_3(void *block, const char *func, int linenumber)
+store_free_3(void * block, const char * func, int linenumber)
 {
 n_nonpool_blocks--;
-internal_store_free(block, func, linenumber);
+internal_untainted_free(block, func, linenumber);
+}
+
+/******************************************************************************/
+static void
+internal_tainted_free(storeblock * block, const char * func, int linenumber)
+{
+#ifdef COMPILE_UTILITY
+func = func;
+linenumber = linenumber;
+#else
+DEBUG(D_memory)
+  debug_printf("---Unmap %6p %-20s %4d\n", block, func, linenumber);
+#endif
+munmap((void *)block, block->length + ALIGNED_SIZEOF_STOREBLOCK);
 }
 
 /******************************************************************************/
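Finally, a hedged sketch of the write-guard pattern the header comment asks of callers that modify an existing allocation: refuse to write tainted data into an untainted destination, panicking via die_tainted() on a mismatch. ustrncpy_guarded() is a hypothetical wrapper, not part of this patch; is_tainted(), die_tainted() and the Ustrncpy(dst, src, n) macro are the interfaces used in this file.

    /* Sketch only: guard an in-place copy per the taint rules above.
    die_tainted() logs and panic-dies, so it does not return. */

    static void
    ustrncpy_guarded(uschar * dst, const uschar * src, size_t n,
      const char * func, int linenumber)
    {
    if (is_tainted(src) && !is_tainted(dst))
      die_tainted(US"ustrncpy_guarded", CUS func, linenumber);
    Ustrncpy(dst, src, n);
    }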