From a52ff0080fd2b91159df5276baf55cab9dd0b0ec Mon Sep 17 00:00:00 2001
From: Tamir Duberstein
Date: Mon, 24 Nov 2025 16:13:19 -0500
Subject: [PATCH] Unify lazy atomics in entropy backends

Replace ad-hoc atomic lazy caches with shared lazy helpers in the EFI,
Linux/Android fallback, NetBSD, RDRAND, and RNDR backends. Add a generic
`LazyNonNull` alongside `LazyBool` (folding the old `LazyUsize` into
`LazyBool`) so pointer and boolean caches use the same initialization
contract. This reduces duplicated cache logic and keeps backend
probing/fallback semantics aligned while preserving the existing
retry-until-cached behavior.
---
 src/backends/efi_rng.rs                     |  18 ++-
 src/backends/linux_android_with_fallback.rs |  37 ++-----
 src/backends/netbsd.rs                      |  41 +++----
 src/backends/rdrand.rs                      |  24 ++--
 src/backends/rndr.rs                        |   1 +
 src/utils/lazy.rs                           | 117 ++++++++++++--------
 6 files changed, 123 insertions(+), 115 deletions(-)

diff --git a/src/backends/efi_rng.rs b/src/backends/efi_rng.rs
index 9d5888402..de7c25b49 100644
--- a/src/backends/efi_rng.rs
+++ b/src/backends/efi_rng.rs
@@ -2,8 +2,7 @@
 use crate::Error;
 use core::{
     mem::MaybeUninit,
-    ptr::{self, NonNull, null_mut},
-    sync::atomic::{AtomicPtr, Ordering::Relaxed},
+    ptr::{self, NonNull},
 };
 use r_efi::{
     efi::{BootServices, Handle},
@@ -17,8 +16,6 @@ pub use crate::util::{inner_u32, inner_u64};
 #[cfg(not(target_os = "uefi"))]
 compile_error!("`efi_rng` backend can be enabled only for UEFI targets!");
 
-static RNG_PROTOCOL: AtomicPtr<rng::Protocol> = AtomicPtr::new(null_mut());
-
 #[cold]
 #[inline(never)]
 fn init() -> Result<NonNull<rng::Protocol>, Error> {
@@ -36,7 +33,7 @@ fn init() -> Result<NonNull<rng::Protocol>, Error> {
         ((*boot_services.as_ptr()).locate_handle)(
             r_efi::efi::BY_PROTOCOL,
             &mut guid,
-            null_mut(),
+            ptr::null_mut(),
             &mut buf_size,
             handles.as_mut_ptr(),
         )
@@ -88,7 +85,6 @@ fn init() -> Result<NonNull<rng::Protocol>, Error> {
             continue;
         }
 
-        RNG_PROTOCOL.store(protocol.as_ptr(), Relaxed);
         return Ok(protocol);
     }
     Err(Error::NO_RNG_HANDLE)
@@ -96,10 +92,12 @@
 
 #[inline]
 pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    let protocol = match NonNull::new(RNG_PROTOCOL.load(Relaxed)) {
-        Some(p) => p,
-        None => init()?,
-    };
+    #[path = "../utils/lazy.rs"]
+    mod lazy;
+
+    static RNG_PROTOCOL: lazy::LazyNonNull<rng::Protocol> = lazy::LazyNonNull::new();
+
+    let protocol = RNG_PROTOCOL.try_unsync_init(init)?;
 
     let mut alg_guid = rng::ALGORITHM_RAW;
     let ret = unsafe {
diff --git a/src/backends/linux_android_with_fallback.rs b/src/backends/linux_android_with_fallback.rs
index 48ce62886..9ae11649f 100644
--- a/src/backends/linux_android_with_fallback.rs
+++ b/src/backends/linux_android_with_fallback.rs
@@ -4,8 +4,7 @@ use crate::Error;
 use core::{
     ffi::c_void,
     mem::{MaybeUninit, transmute},
-    ptr::NonNull,
-    sync::atomic::{AtomicPtr, Ordering},
+    ptr::{self, NonNull},
 };
 use use_file::utils;
 
 type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;
 
 /// Sentinel value which indicates that `libc::getrandom` either not available,
 /// or not supported by kernel.
-const NOT_AVAILABLE: NonNull<c_void> = unsafe { NonNull::new_unchecked(usize::MAX as *mut c_void) };
-
-static GETRANDOM_FN: AtomicPtr<c_void> = AtomicPtr::new(core::ptr::null_mut());
+const NOT_AVAILABLE: NonNull<c_void> = NonNull::dangling();
 
 #[cold]
 #[inline(never)]
 fn init() -> NonNull<c_void> {
     // Use static linking to `libc::getrandom` on MUSL targets and `dlsym` everywhere else
     #[cfg(not(target_env = "musl"))]
-    let raw_ptr = {
-        static NAME: &[u8] = b"getrandom\0";
-        let name_ptr = NAME.as_ptr().cast::<libc::c_char>();
-        unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }
-    };
+    let raw_ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) };
     #[cfg(target_env = "musl")]
     let raw_ptr = {
         let fptr: GetRandomFn = libc::getrandom;
@@ -37,10 +30,9 @@ fn init() -> NonNull<c_void> {
 
     let res_ptr = match NonNull::new(raw_ptr) {
         Some(fptr) => {
-            let getrandom_fn = unsafe { transmute::<NonNull<c_void>, GetRandomFn>(fptr) };
-            let dangling_ptr = NonNull::dangling().as_ptr();
+            let getrandom_fn = unsafe { transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
             // Check that `getrandom` syscall is supported by kernel
-            let res = unsafe { getrandom_fn(dangling_ptr, 0, 0) };
+            let res = unsafe { getrandom_fn(ptr::dangling_mut(), 0, 0) };
             if cfg!(getrandom_test_linux_fallback) {
                 NOT_AVAILABLE
             } else if res.is_negative() {
@@ -65,7 +57,6 @@ fn init() -> NonNull<c_void> {
         panic!("Fallback is triggered with enabled `getrandom_test_linux_without_fallback`")
     }
 
-    GETRANDOM_FN.store(res_ptr.as_ptr(), Ordering::Release);
     res_ptr
 }
 
@@ -77,23 +68,17 @@ fn use_file_fallback(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
 
 #[inline]
 pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    // Despite being only a single atomic variable, we still cannot always use
-    // Ordering::Relaxed, as we need to make sure a successful call to `init`
-    // is "ordered before" any data read through the returned pointer (which
-    // occurs when the function is called). Our implementation mirrors that of
-    // the one in libstd, meaning that the use of non-Relaxed operations is
-    // probably unnecessary.
-    let raw_ptr = GETRANDOM_FN.load(Ordering::Acquire);
-    let fptr = match NonNull::new(raw_ptr) {
-        Some(p) => p,
-        None => init(),
-    };
+    #[path = "../utils/lazy.rs"]
+    mod lazy;
+
+    static GETRANDOM_FN: lazy::LazyNonNull<c_void> = lazy::LazyNonNull::new();
+    let fptr = GETRANDOM_FN.unsync_init(init);
 
     if fptr == NOT_AVAILABLE {
         use_file_fallback(dest)
     } else {
         // note: `transmute` is currently the only way to convert a pointer into a function reference
-        let getrandom_fn = unsafe { transmute::<NonNull<c_void>, GetRandomFn>(fptr) };
+        let getrandom_fn = unsafe { transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
         utils::sys_fill_exact(dest, |buf| unsafe {
             getrandom_fn(buf.as_mut_ptr().cast(), buf.len(), 0)
         })
diff --git a/src/backends/netbsd.rs b/src/backends/netbsd.rs
index 0da0d492c..8affa0348 100644
--- a/src/backends/netbsd.rs
+++ b/src/backends/netbsd.rs
@@ -8,8 +8,7 @@ use core::{
     cmp,
     ffi::c_void,
     mem::{self, MaybeUninit},
-    ptr,
-    sync::atomic::{AtomicPtr, Ordering},
+    ptr::{self, NonNull},
 };
 
 pub use crate::util::{inner_u32, inner_u64};
@@ -42,36 +41,28 @@ unsafe extern "C" fn polyfill_using_kern_arand(
 
 type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;
 
-static GETRANDOM: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
-
 #[cold]
 #[inline(never)]
-fn init() -> *mut c_void {
-    static NAME: &[u8] = b"getrandom\0";
-    let name_ptr = NAME.as_ptr().cast::<libc::c_char>();
-    let mut ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) };
-    if ptr.is_null() || cfg!(getrandom_test_netbsd_fallback) {
-        // Verify `polyfill_using_kern_arand` has the right signature.
-        const POLYFILL: GetRandomFn = polyfill_using_kern_arand;
-        ptr = POLYFILL as *mut c_void;
+fn init() -> NonNull<c_void> {
+    let ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) };
+    if !cfg!(getrandom_test_netbsd_fallback) {
+        if let Some(ptr) = NonNull::new(ptr) {
+            return ptr;
+        }
     }
-    GETRANDOM.store(ptr, Ordering::Release);
-    ptr
+    const POLYFILL: GetRandomFn = polyfill_using_kern_arand;
+    unsafe { NonNull::new_unchecked(POLYFILL as *mut c_void) }
 }
 
 #[inline]
 pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    // Despite being only a single atomic variable, we still cannot always use
-    // Ordering::Relaxed, as we need to make sure a successful call to `init`
-    // is "ordered before" any data read through the returned pointer (which
-    // occurs when the function is called). Our implementation mirrors that of
-    // the one in libstd, meaning that the use of non-Relaxed operations is
-    // probably unnecessary.
-    let mut fptr = GETRANDOM.load(Ordering::Acquire);
-    if fptr.is_null() {
-        fptr = init();
-    }
-    let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr) };
+    #[path = "../utils/lazy.rs"]
+    mod lazy;
+
+    static GETRANDOM_FN: lazy::LazyNonNull<c_void> = lazy::LazyNonNull::new();
+
+    let fptr = GETRANDOM_FN.unsync_init(init);
+    let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
     utils::sys_fill_exact(dest, |buf| unsafe {
         fptr(buf.as_mut_ptr().cast::<c_void>(), buf.len(), 0)
     })
diff --git a/src/backends/rdrand.rs b/src/backends/rdrand.rs
index d39c31512..b2bb95970 100644
--- a/src/backends/rdrand.rs
+++ b/src/backends/rdrand.rs
@@ -2,9 +2,6 @@
 use crate::{Error, util::slice_as_uninit};
 use core::mem::{MaybeUninit, size_of};
 
-#[path = "../utils/lazy.rs"]
-mod lazy;
-
 #[cfg(not(any(target_arch = "x86_64", target_arch = "x86")))]
 compile_error!("`rdrand` backend can be enabled only for x86 and x86-64 targets!");
 
@@ -20,8 +17,6 @@ cfg_if! {
     }
 }
 
-static RDRAND_GOOD: lazy::LazyBool = lazy::LazyBool::new();
-
 // Recommendation from "Intel® Digital Random Number Generator (DRNG) Software
 // Implementation Guide" - Section 5.2.1 and "Intel® 64 and IA-32 Architectures
 // Software Developer’s Manual" - Volume 1 - Section 7.3.17.1.
@@ -72,7 +67,9 @@ fn self_test() -> bool {
     fails <= 2
 }
 
-fn is_rdrand_good() -> bool {
+#[cold]
+#[inline(never)]
+fn init() -> bool {
     #[cfg(not(target_feature = "rdrand"))]
     {
         // SAFETY: All Rust x86 targets are new enough to have CPUID, and we
@@ -115,6 +112,15 @@ fn is_rdrand_good() -> bool {
     unsafe { self_test() }
 }
 
+fn is_rdrand_good() -> bool {
+    #[path = "../utils/lazy.rs"]
+    mod lazy;
+
+    static RDRAND_GOOD: lazy::LazyBool = lazy::LazyBool::new();
+
+    RDRAND_GOOD.unsync_init(init)
+}
+
 #[target_feature(enable = "rdrand")]
 fn rdrand_exact(dest: &mut [MaybeUninit<u8>]) -> Option<()> {
     // We use chunks_exact_mut instead of chunks_mut as it allows almost all
@@ -162,7 +168,7 @@ fn rdrand_u64() -> Option<u64> {
 
 #[inline]
 pub fn inner_u32() -> Result<u32, Error> {
-    if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
+    if !is_rdrand_good() {
         return Err(Error::NO_RDRAND);
     }
     // SAFETY: After this point, we know rdrand is supported.
@@ -171,7 +177,7 @@ pub fn inner_u32() -> Result<u32, Error> {
 
 #[inline]
 pub fn inner_u64() -> Result<u64, Error> {
-    if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
+    if !is_rdrand_good() {
         return Err(Error::NO_RDRAND);
     }
     // SAFETY: After this point, we know rdrand is supported.
@@ -180,7 +186,7 @@ pub fn inner_u64() -> Result<u64, Error> {
 
 #[inline]
 pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
+    if !is_rdrand_good() {
         return Err(Error::NO_RDRAND);
     }
     // SAFETY: After this point, we know rdrand is supported.
diff --git a/src/backends/rndr.rs b/src/backends/rndr.rs
index 1a24b49f7..83e43bc04 100644
--- a/src/backends/rndr.rs
+++ b/src/backends/rndr.rs
@@ -71,6 +71,7 @@ fn is_rndr_available() -> bool {
 fn is_rndr_available() -> bool {
     #[path = "../utils/lazy.rs"]
     mod lazy;
+
     static RNDR_GOOD: lazy::LazyBool = lazy::LazyBool::new();
 
     cfg_if::cfg_if! {
diff --git a/src/utils/lazy.rs b/src/utils/lazy.rs
index b191aa6d7..5ff1bde42 100644
--- a/src/utils/lazy.rs
+++ b/src/utils/lazy.rs
@@ -1,64 +1,91 @@
-//! Helpers built around pointer-sized atomics.
-use core::sync::atomic::{AtomicUsize, Ordering};
+//! Lazy caches backed by a single atomic value.
+//!
+//! Each cache starts in an "uninitialized" sentinel state. Initialization is
+//! intentionally unsynchronized: concurrent callers may race and run `init`
+//! more than once. Once a non-sentinel value is produced, it is cached and
+//! reused by subsequent calls.
+//!
+//! For fallible initialization (`try_unsync_init`), only successful values are
+//! cached; errors are returned to the caller and are not cached.
+//!
+//! These helpers use `Ordering::Relaxed` because they are only intended to
+//! publish the cached value itself. Callers must not rely on this mechanism to
+//! synchronize unrelated memory side effects performed by `init`.
 
-// This structure represents a lazily initialized static usize value. Useful
-// when it is preferable to just rerun initialization instead of locking.
-// unsync_init will invoke an init() function until it succeeds, then return the
-// cached value for future calls.
-//
-// unsync_init supports init() "failing". If the init() method returns UNINIT,
-// that value will be returned as normal, but will not be cached.
-
-// Users should only depend on the _value_ returned by init() functions.
-// Specifically, for the following init() function:
-//     fn init() -> usize {
-//         a();
-//         let v = b();
-//         c();
-//         v
-//     }
-// the effects of c() or writes to shared memory will not necessarily be
-// observed and additional synchronization methods may be needed.
-struct LazyUsize(AtomicUsize);
+#![allow(dead_code)]
 
-impl LazyUsize {
-    // The initialization is not completed.
-    const UNINIT: usize = usize::MAX;
+use core::{
+    ptr::{self, NonNull},
+    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
+};
 
-    const fn new() -> Self {
-        Self(AtomicUsize::new(Self::UNINIT))
+pub(crate) struct LazyNonNull<T>(AtomicPtr<T>);
+
+impl<T> LazyNonNull<T> {
+    pub const fn new() -> Self {
+        Self(AtomicPtr::new(ptr::null_mut()))
+    }
+
+    #[cold]
+    fn do_init(&self, init: impl FnOnce() -> NonNull<T>) -> NonNull<T> {
+        let val = init();
+        self.0.store(val.as_ptr(), Ordering::Relaxed);
+        val
     }
 
-    // Runs the init() function at most once, returning the value of some run of
-    // init(). Multiple callers can run their init() functions in parallel.
-    // init() should always return the same value, if it succeeds.
-    fn unsync_init(&self, init: impl FnOnce() -> usize) -> usize {
-        #[cold]
-        fn do_init(this: &LazyUsize, init: impl FnOnce() -> usize) -> usize {
-            let val = init();
-            this.0.store(val, Ordering::Relaxed);
-            val
+    #[cold]
+    fn try_do_init<E>(
+        &self,
+        init: impl FnOnce() -> Result<NonNull<T>, E>,
+    ) -> Result<NonNull<T>, E> {
+        let val = init()?;
+        self.0.store(val.as_ptr(), Ordering::Relaxed);
+        Ok(val)
+    }
+
+    #[inline]
+    pub fn unsync_init(&self, init: impl FnOnce() -> NonNull<T>) -> NonNull<T> {
+        match NonNull::new(self.0.load(Ordering::Relaxed)) {
+            Some(val) => val,
+            None => self.do_init(init),
         }
+    }
 
-        // Relaxed ordering is fine, as we only have a single atomic variable.
-        let val = self.0.load(Ordering::Relaxed);
-        if val != Self::UNINIT {
-            val
-        } else {
-            do_init(self, init)
+    #[inline]
+    pub fn try_unsync_init<E>(
+        &self,
+        init: impl FnOnce() -> Result<NonNull<T>, E>,
+    ) -> Result<NonNull<T>, E> {
+        match NonNull::new(self.0.load(Ordering::Relaxed)) {
+            Some(val) => Ok(val),
+            None => self.try_do_init(init),
         }
     }
 }
 
-// Identical to LazyUsize except with bool instead of usize.
-pub(crate) struct LazyBool(LazyUsize);
+pub(crate) struct LazyBool(AtomicUsize);
 
 impl LazyBool {
+    const UNINIT: usize = usize::MAX;
+
     pub const fn new() -> Self {
-        Self(LazyUsize::new())
+        Self(AtomicUsize::new(Self::UNINIT))
+    }
+
+    #[cold]
+    fn do_init(&self, init: impl FnOnce() -> bool) -> bool {
+        let val = usize::from(init());
+        self.0.store(val, Ordering::Relaxed);
+        val != 0
    }
 
+    #[inline]
     pub fn unsync_init(&self, init: impl FnOnce() -> bool) -> bool {
-        self.0.unsync_init(|| usize::from(init())) != 0
+        let val = self.0.load(Ordering::Relaxed);
+        if val != Self::UNINIT {
+            val != 0
+        } else {
+            self.do_init(init)
+        }
     }
 }
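
A minimal usage sketch of the caching contract described in the new `src/utils/lazy.rs` module docs, for review only (not part of the patch; `cached_probe` and its placeholder `init` body are illustrative, and the sketch assumes it sits next to the backends so the `#[path]` include resolves):

fn cached_probe() -> core::ptr::NonNull<core::ffi::c_void> {
    #[path = "../utils/lazy.rs"]
    mod lazy;

    use core::{ffi::c_void, ptr::NonNull};

    // Starts at the null sentinel; racing callers may each run `init` once.
    static PROBE: lazy::LazyNonNull<c_void> = lazy::LazyNonNull::new();

    #[cold]
    fn init() -> NonNull<c_void> {
        // Placeholder: a real backend would dlsym()/probe here and may return
        // a sentinel such as NonNull::dangling() to mean "not available".
        NonNull::dangling()
    }

    // Only the returned pointer is published (Relaxed ordering); later calls
    // reuse the cached value without rerunning `init`.
    PROBE.unsync_init(init)
}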