MFV: libb2: use native calls for secure memory clearance

Drop our local patch and restore the full vanilla upstream code in
contrib/libb2.

No functional change intended: on FreeBSD, explicit_bzero() continues to be
used, now selected via HAVE_EXPLICIT_BZERO in the config header rather than
a local #ifdef __FreeBSD__ patch.

Obtained from:	libb2 b4b241a34824b51956a7866606329a065d397525
Sponsored by:	Dell EMC Isilon
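
Background for the change: a plain memset() of a buffer that is never read
again is a dead store, and an optimizing compiler may legally delete it,
leaving key material in memory. That is the problem both the local patch and
the upstream code address. A minimal illustration (the handle_secret() and
use_key() names are hypothetical, not part of this commit):

  #include <string.h>
  #include <strings.h>  /* explicit_bzero() on FreeBSD and glibc */

  static void use_key(const char *k, size_t n) { (void)k; (void)n; }

  void handle_secret(void)
  {
    char key[32] = { 0 };
    use_key(key, sizeof(key));

    /* Dead store: 'key' is never read again, so the compiler may
     * remove this call entirely under optimization. */
    memset(key, 0, sizeof(key));

    /* explicit_bzero() guarantees the buffer is actually cleared. */
    explicit_bzero(key, sizeof(key));
  }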
Author:	Conrad Meyer
Date:	2018-03-27 14:55:01 +00:00
Commit:	2cb2ba6df8 (parent ac663598df)

2 changed files with 15 additions and 4 deletions


@@ -131,11 +131,20 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c )
 /* prevents compiler optimizing out memset() */
 static inline void secure_zero_memory(void *v, size_t n)
 {
-#ifdef __FreeBSD__
-  explicit_bzero(v, n);
+#if defined(_WIN32) || defined(WIN32)
+  SecureZeroMemory(v, n);
 #else
-  static void *(*const volatile memset_v)(void *, int, size_t) = &memset;
-  memset_v(v, 0, n);
+// prioritize first the general C11 call
+#if defined(HAVE_MEMSET_S)
+  memset_s(v, n, 0, n);
+#elif defined(HAVE_EXPLICIT_BZERO)
+  explicit_bzero(v, n);
+#elif defined(HAVE_EXPLICIT_MEMSET)
+  explicit_memset(v, 0, n);
+#else
+  memset(v, 0, n);
+  __asm__ __volatile__("" :: "r"(v) : "memory");
+#endif
 #endif
 }
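
Two of the branches above deserve a note. The final fallback pairs memset()
with an empty inline-asm statement that takes the pointer as an input and
clobbers memory, forcing the compiler to treat the store as observable. The
dropped FreeBSD-local patch attacked the same problem differently, calling
memset() through a volatile function pointer. Both techniques, extracted into
standalone form for comparison (a sketch, not part of the diff):

  #include <string.h>

  /* Upstream fallback: a compiler barrier after the store.  The empty
   * asm consumes 'v' and clobbers memory, so the preceding memset()
   * counts as observable and cannot be elided. */
  static void zero_with_barrier(void *v, size_t n)
  {
    memset(v, 0, n);
    __asm__ __volatile__("" :: "r"(v) : "memory");
  }

  /* Dropped local patch: calling through a volatile function pointer
   * prevents the compiler from recognizing the call as memset() and
   * deleting it as a dead store. */
  static void *(*const volatile memset_v)(void *, int, size_t) = &memset;

  static void zero_with_volatile_ptr(void *v, size_t n)
  {
    memset_v(v, 0, n);
  }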


@@ -17,3 +17,5 @@
 #else
 #define HAVE_ALIGNED_ACCESS_REQUIRED 1
 #endif
+
+#define HAVE_EXPLICIT_BZERO 1
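
With HAVE_EXPLICIT_BZERO defined here, the #elif chain in secure_zero_memory()
above selects the explicit_bzero() branch, so on FreeBSD the function still
reduces to the same call the local patch made; this is why no functional
change is expected. After preprocessing, the FreeBSD build effectively gets:

  static inline void secure_zero_memory(void *v, size_t n)
  {
    explicit_bzero(v, n);
  }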