a fast is_tainted implementation. We assume the kernel only allocates mmaps using
one side or the other of data+heap, not both. */
/* Bounds of the region used for tainted store, consulted by the fast
is_tainted() range check.  Deliberately non-static: they are referenced from
outside this compilation unit (the is_tainted macro/inline in the header).
The initial values (-1 base, 0 top) describe an empty range, so nothing is
considered tainted before any tainted allocation has been made. */
void * tainted_base = (void *)-1;
void * tainted_top = (void *)0;
/* pool_malloc holds the amount of memory used by the store pools; this goes up
and down as store is reset or released. nonpool_malloc is the total got by
/******************************************************************************/
-/* Predicate: if an address is in a tainted pool.
-By extension, a variable pointing to this address is tainted.
-*/
+/* Slower version check, for use when platform intermixes malloc and mmap area
+addresses. */
BOOL
-is_tainted(const void * p)
+is_tainted_fn(const void * p)
{
-BOOL rc = p >= tainted_base && p < tainted_top;
+storeblock * b;
+int pool;
-#ifndef COMPILE_UTILITY
-DEBUG(D_memory) if (rc) debug_printf_indent("is_tainted: YES\n");
-#endif
-return rc;
+for (pool = 0; pool < nelem(chainbase); pool++)
+ if ((b = current_block[pool]))
+ {
+ char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
+ if (CS p >= bc && CS p <= bc + b->length) goto hit;
+ }
+
+for (pool = 0; pool < nelem(chainbase); pool++)
+ for (b = chainbase[pool]; b; b = b->next)
+ {
+ char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
+ if (CS p >= bc && CS p <= bc + b->length) goto hit;
+ }
+return FALSE;
+
+hit:
+return pool >= POOL_TAINT_BASE;
}
+
void
die_tainted(const uschar * msg, const uschar * func, int line)
{
BOOL release_ok = !tainted && store_last_get[pool] == block;
uschar * newtext;
+#ifndef MACRO_PREDEF
if (is_tainted(block) != tainted)
die_tainted(US"store_newblock", CUS func, linenumber);
+#endif
newtext = store_get(newsize, tainted);
memcpy(newtext, block, len);