X-Git-Url: https://git.exim.org/exim.git/blobdiff_plain/f096bcccb8e4c9ba57d128c2f08c52f7dc94e07d..4381d60bc96bed88d96e8cc6b534dd0dcd48163f:/src/src/store.c

diff --git a/src/src/store.c b/src/src/store.c
index a8f5a07fc..8e776568a 100644
--- a/src/src/store.c
+++ b/src/src/store.c
@@ -49,18 +49,24 @@ The following different types of store are recognized:
   to not copy untrusted data into untainted memory, as downstream taint-checks
   would be avoided.
 
-  Intermediate layers (eg. the string functions) can test for taint, and use this
-  for ensuringn that results have proper state.  For example the
-  string_vformat_trc() routing supporting the string_sprintf() interface will
-  recopy a string being built into a tainted allocation if it meets a %s for a
-  tainted argument.
-
   Internally we currently use malloc for nontainted pools, and mmap for tainted
   pools.  The disparity is for speed of testing the taintedness of pointers;
   because Linux appears to use distinct non-overlapping address allocations for
   mmap vs. everything else, which means only two pointer-compares suffice for the
   test.  Other OS' cannot use that optimisation, and a more lengthy test against
   the limits of tainted-pool allocations has to be done.
+
+  Intermediate layers (eg. the string functions) can test for taint, and use this
+  for ensuring that results have proper state.  For example the
+  string_vformat_trc() routine supporting the string_sprintf() interface will
+  recopy a string being built into a tainted allocation if it meets a %s for a
+  tainted argument.  Any intermediate-layer function that (can) return a new
+  allocation should behave this way, returning a tainted result if any tainted
+  content is used.  Intermediate-layer functions (eg. Ustrncpy) that modify
+  existing allocations fail if tainted data is written into an untainted area.
+  Users of functions that modify existing allocations should check if a tainted
+  source and an untainted destination are used, and fail instead (sprintf() being
+  the classic case).
 */
@@ -120,13 +126,6 @@ static storeblock *current_block[NPOOLS];
 static void *next_yield[NPOOLS];
 static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };
 
-/* The limits of the tainted pools.  Tracking these on new allocations enables
-a fast is_tainted implementation.  We assume the kernel only allocates mmaps using
-one side or the other of data+heap, not both. */
-
-void * tainted_base = (void *)-1;
-void * tainted_top = (void *)0;
-
 /* pool_malloc holds the amount of memory used by the store pools; this goes up
 and down as store is reset or released.  nonpool_malloc is the total got by
 malloc from other calls; this doesn't go down because it is just freed by
@@ -180,35 +179,35 @@ static void internal_tainted_free(storeblock *, const char *, int linenumber);
 
 /******************************************************************************/
 
-#ifndef TAINT_CHECK_FAST
-/* Slower version check, for use when platform intermixes malloc and mmap area
-addresses. */
+/* Test if a pointer refers to tainted memory.
+
+Slower version of the check, for use when the platform intermixes malloc and
+mmap area addresses.  Test against the current block of each tainted pool
+first, then all blocks of all tainted pools.
+
+Return: TRUE iff tainted
+*/
 
 BOOL
 is_tainted_fn(const void * p)
 {
 storeblock * b;
-int pool;
 
-for (pool = 0; pool < nelem(chainbase); pool++)
+for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
   if ((b = current_block[pool]))
     {
-    char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
-    if (CS p >= bc && CS p <= bc + b->length) goto hit;
+    uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
+    if (US p >= bc && US p <= bc + b->length) return TRUE;
    }
 
-for (pool = 0; pool < nelem(chainbase); pool++)
+for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
   for (b = chainbase[pool]; b; b = b->next)
     {
-    char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
-    if (CS p >= bc && CS p <= bc + b->length) goto hit;
+    uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
+    if (US p >= bc && US p <= bc + b->length) return TRUE;
     }
 return FALSE;
-
-hit:
-return pool >= POOL_TAINT_BASE;
 }
-#endif
 
 
 void
@@ -219,6 +218,7 @@ log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
 }
 
 
+
 /*************************************************
 *       Get a block from the current pool        *
 *************************************************/
@@ -751,7 +751,7 @@ int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
 BOOL release_ok = !tainted && store_last_get[pool] == block;
 uschar * newtext;
 
-#ifndef MACRO_PREDEF
+#if !defined(MACRO_PREDEF) && !defined(COMPILE_UTILITY)
 if (is_tainted(block) != tainted)
   die_tainted(US"store_newblock", CUS func, linenumber);
 #endif
@@ -808,9 +808,6 @@ if (!(yield = mmap(NULL, (size_t)size,
 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
   "called from line %d of %s", size, line, func);
 
-if (yield < tainted_base) tainted_base = yield;
-if ((top = US yield + size) > tainted_top) tainted_top = top;
-
 return store_alloc_tail(yield, size, func, line, US"Mmap");
 }
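
The pool walk that the new is_tainted_fn() performs can be seen in a minimal,
self-contained sketch.  The block layout, pool count and all names here
(block, t_chainbase, t_current_block, is_tainted_demo) are simplified
stand-ins, not the store.c definitions; the real code locates the usable
store after the block header via ALIGNED_SIZEOF_STOREBLOCK.

#include <stdbool.h>
#include <stddef.h>

/* Simplified analogues of store.c's storeblock, chainbase and current_block.
The data[] member stands in for the usable store following the header. */

typedef struct block {
  struct block * next;
  size_t length;
  char data[];
} block;

#define NTAINTPOOLS 3                           /* hypothetical pool count */
static block * t_chainbase[NTAINTPOOLS];        /* every block, per pool */
static block * t_current_block[NTAINTPOOLS];    /* hottest block, per pool */

/* Return true iff p points within any tainted-pool block.  Cheapest
candidates first: the current block of each pool, then every block on
every chain - the same order as is_tainted_fn() above. */

static bool
is_tainted_demo(const void * p)
{
const char * cp = p;

for (int pool = 0; pool < NTAINTPOOLS; pool++)
  {
  block * b = t_current_block[pool];
  if (b && cp >= b->data && cp <= b->data + b->length)
    return true;
  }

for (int pool = 0; pool < NTAINTPOOLS; pool++)
  for (block * b = t_chainbase[pool]; b; b = b->next)
    if (cp >= b->data && cp <= b->data + b->length)
      return true;

return false;
}

The two-pass order matters only for speed: most lookups hit a pool's current
block, which avoids walking the whole chain.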
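The taint-propagation rules described in the first hunk's comment can
likewise be sketched with a toy allocator.  Here each allocation carries an
explicit taint flag rather than living in a distinct pool, and every name
(tstr, tstr_alloc, tstr_cat, tstr_copy_into) is hypothetical; this
illustrates the policy only, not Exim's store API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy string carrying an explicit taint flag.  Exim instead derives taint
from which pool an allocation lives in. */

typedef struct { bool tainted; char text[]; } tstr;

static tstr *
tstr_alloc(bool tainted, size_t len)
{
tstr * t = malloc(sizeof(tstr) + len + 1);
if (!t) { perror("malloc"); exit(EXIT_FAILURE); }
t->tainted = tainted;
return t;
}

/* Returns a new allocation: per the comment above, the result is tainted
if any tainted content is used. */

static tstr *
tstr_cat(const tstr * a, const tstr * b)
{
size_t la = strlen(a->text), lb = strlen(b->text);
tstr * res = tstr_alloc(a->tainted || b->tainted, la + lb);
memcpy(res->text, a->text, la);
memcpy(res->text + la, b->text, lb + 1);      /* includes the NUL */
return res;
}

/* Modifies an existing allocation: writing a tainted source into an
untainted destination must fail, the analogue of die_tainted().  dst must
have room for max+1 bytes of text. */

static void
tstr_copy_into(tstr * dst, const tstr * src, size_t max)
{
if (src->tainted && !dst->tainted)
  {
  fprintf(stderr, "Taint mismatch, tstr_copy_into\n");
  exit(EXIT_FAILURE);
  }
strncpy(dst->text, src->text, max);
dst->text[max] = '\0';                        /* strncpy need not terminate */
}

A caller wrapping sprintf() would make the same source/destination check
before formatting into an existing buffer, which is the classic case the
comment names.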