buffer was in use at the time. Change to a compile-time increase in the
buffer size, when this authenticator is compiled into exim.
-JH/22 Taint checking: move to a hybrid approach for checking. Previously, one
- of two ways was used, depending on a build-time flag. The fast method
- relied on assumptions about the OS and libc malloc, which were known to
- not hold for the BSD-derived platforms, and discovered to not hold for
- 32-bit Linux either. In fact the glibc documentation describes cases
- where these assumptions do not hold. The new implementation tests for
- the situation arising and actively switches over from fast to safe mode.
+JH/22 Taint-checking: move to safe-mode taint checking on all platforms. The
+ previous fast-mode was untenable in the face of glibc using mmap to
+ support larger malloc requests.
PP/01 Update the openssl_options possible values through OpenSSL 1.1.1c.
New values supported, if defined on system where compiled:
static void *next_yield[NPOOLS];
static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };
-/* The limits of the tainted pools. Tracking these on new allocations enables
-a fast is_tainted implementation. We assume the kernel only allocates mmaps using
-one side or the other of data+heap, not both. */
-
-void * tainted_base = (void *)-1;
-void * tainted_top = (void *)0;
-
/* pool_malloc holds the amount of memory used by the store pools; this goes up
and down as store is reset or released. nonpool_malloc is the total got by
malloc from other calls; this doesn't go down because it is just freed by
msg, func, line);
}
-static void
-use_slow_taint_check(const uschar * why)
-{
-#ifndef COMPILE_UTILITY
-DEBUG(D_any)
- debug_printf("switching to slow-mode taint checking (after %s) "
- "taint bounds %p %p\n", why, tainted_base, tainted_top);
-#endif
-f.taint_check_slow = TRUE;
-}
-
-/* If the creation of a new tainted region results in any of the
-untainted regions appearing to be tainted, using the fast-mode test,
-we need to switch to safe-but-slow mode. */
-
-static void
-verify_all_untainted(void)
-{
-for (int pool = 0; pool < POOL_TAINT_BASE; pool++)
- for (storeblock * b = chainbase[pool]; b; b = b->next)
- {
- uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
- if (is_tainted(bc))
- {
- use_slow_taint_check(US"mmap");
- return;
- }
- }
-}
-
/*************************************************
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
"called from line %d of %s", size, line, func);
-if (yield < tainted_base) tainted_base = yield;
-if ((top = US yield + size) > tainted_top) tainted_top = top;
-if (!f.taint_check_slow) verify_all_untainted();
-
return store_alloc_tail(yield, size, func, line, US"Mmap");
}
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
"called from line %d in %s", size, linenumber, func);
-/* If malloc ever returns apparently tainted memory, which glibc
-malloc will as it uses mmap for larger requests, we must switch to
-the slower checking for tainting (checking an address against all
-the tainted pool block spans, rather than just the mmap span) */
-
-if (!f.taint_check_slow && is_tainted(yield))
- use_slow_taint_check(US"malloc");
-
return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
}