18 changes: 8 additions & 10 deletions src/backends/efi_rng.rs
@@ -2,8 +2,7 @@
use crate::Error;
use core::{
mem::MaybeUninit,
ptr::{self, NonNull, null_mut},
sync::atomic::{AtomicPtr, Ordering::Relaxed},
ptr::{self, NonNull},
};
use r_efi::{
efi::{BootServices, Handle},
@@ -17,8 +16,6 @@ pub use crate::util::{inner_u32, inner_u64};
#[cfg(not(target_os = "uefi"))]
compile_error!("`efi_rng` backend can be enabled only for UEFI targets!");

static RNG_PROTOCOL: AtomicPtr<rng::Protocol> = AtomicPtr::new(null_mut());

#[cold]
#[inline(never)]
fn init() -> Result<NonNull<rng::Protocol>, Error> {
@@ -36,7 +33,7 @@ fn init() -> Result<NonNull<rng::Protocol>, Error> {
((*boot_services.as_ptr()).locate_handle)(
r_efi::efi::BY_PROTOCOL,
&mut guid,
null_mut(),
ptr::null_mut(),
&mut buf_size,
handles.as_mut_ptr(),
)
@@ -88,18 +85,19 @@ fn init() -> Result<NonNull<rng::Protocol>, Error> {
continue;
}

RNG_PROTOCOL.store(protocol.as_ptr(), Relaxed);
return Ok(protocol);
}
Err(Error::NO_RNG_HANDLE)
}

#[inline]
pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
let protocol = match NonNull::new(RNG_PROTOCOL.load(Relaxed)) {
Some(p) => p,
None => init()?,
};
#[path = "../utils/lazy.rs"]
mod lazy;

static RNG_PROTOCOL: lazy::LazyNonNull<rng::Protocol> = lazy::LazyNonNull::new();

let protocol = RNG_PROTOCOL.try_unsync_init(init)?;

let mut alg_guid = rng::ALGORITHM_RAW;
let ret = unsafe {
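The new shared `src/utils/lazy.rs` module pulled in via `#[path]` is not part of this diff, so the exact implementation of `lazy::LazyNonNull` is not visible here. Judging only from the call sites (`LazyNonNull::new()` in a `static`, plus `try_unsync_init` here and `unsync_init` in the other backends), it is a lazily initialized pointer cell. A minimal sketch of that interface, assuming relaxed atomics (the comments removed by this PR note that stronger orderings were probably unnecessary) and a null pointer as the "uninitialized" state, might look like this:

```rust
use core::{
    ptr::{self, NonNull},
    sync::atomic::{AtomicPtr, Ordering::Relaxed},
};

/// Hypothetical reconstruction of the `LazyNonNull` interface used above;
/// the real `src/utils/lazy.rs` may differ in representation and ordering.
pub struct LazyNonNull<T>(AtomicPtr<T>);

impl<T> LazyNonNull<T> {
    pub const fn new() -> Self {
        Self(AtomicPtr::new(ptr::null_mut()))
    }

    /// Return the cached pointer, or run `init` and cache its result.
    /// "Unsync" because `init` may run more than once if first calls race.
    pub fn unsync_init(&self, init: impl FnOnce() -> NonNull<T>) -> NonNull<T> {
        match NonNull::new(self.0.load(Relaxed)) {
            Some(p) => p,
            None => {
                let p = init();
                self.0.store(p.as_ptr(), Relaxed);
                p
            }
        }
    }

    /// Fallible variant: errors are not cached, so later calls retry `init`.
    pub fn try_unsync_init<E>(
        &self,
        init: impl FnOnce() -> Result<NonNull<T>, E>,
    ) -> Result<NonNull<T>, E> {
        match NonNull::new(self.0.load(Relaxed)) {
            Some(p) => Ok(p),
            None => {
                let p = init()?;
                self.0.store(p.as_ptr(), Relaxed);
                Ok(p)
            }
        }
    }
}
```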
37 changes: 11 additions & 26 deletions src/backends/linux_android_with_fallback.rs
@@ -4,8 +4,7 @@ use crate::Error;
use core::{
ffi::c_void,
mem::{MaybeUninit, transmute},
ptr::NonNull,
sync::atomic::{AtomicPtr, Ordering},
ptr::{self, NonNull},
};
use use_file::utils;

@@ -15,20 +14,14 @@ type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint)

/// Sentinel value which indicates that `libc::getrandom` is either not available,
/// or not supported by the kernel.
const NOT_AVAILABLE: NonNull<c_void> = unsafe { NonNull::new_unchecked(usize::MAX as *mut c_void) };

static GETRANDOM_FN: AtomicPtr<c_void> = AtomicPtr::new(core::ptr::null_mut());
const NOT_AVAILABLE: NonNull<c_void> = NonNull::dangling();

#[cold]
#[inline(never)]
fn init() -> NonNull<c_void> {
// Use static linking to `libc::getrandom` on MUSL targets and `dlsym` everywhere else
#[cfg(not(target_env = "musl"))]
let raw_ptr = {
static NAME: &[u8] = b"getrandom\0";
let name_ptr = NAME.as_ptr().cast::<libc::c_char>();
unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }
};
let raw_ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) };
#[cfg(target_env = "musl")]
let raw_ptr = {
let fptr: GetRandomFn = libc::getrandom;
@@ -37,10 +30,9 @@ fn init() -> NonNull<c_void> {

let res_ptr = match NonNull::new(raw_ptr) {
Some(fptr) => {
let getrandom_fn = unsafe { transmute::<NonNull<c_void>, GetRandomFn>(fptr) };
let dangling_ptr = NonNull::dangling().as_ptr();
let getrandom_fn = unsafe { transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
// Check that `getrandom` syscall is supported by kernel
let res = unsafe { getrandom_fn(dangling_ptr, 0, 0) };
let res = unsafe { getrandom_fn(ptr::dangling_mut(), 0, 0) };
if cfg!(getrandom_test_linux_fallback) {
NOT_AVAILABLE
} else if res.is_negative() {
@@ -65,7 +57,6 @@ fn init() -> NonNull<c_void> {
panic!("Fallback is triggered with enabled `getrandom_test_linux_without_fallback`")
}

GETRANDOM_FN.store(res_ptr.as_ptr(), Ordering::Release);
res_ptr
}

@@ -77,23 +68,17 @@ fn use_file_fallback(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {

#[inline]
pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
// Despite being only a single atomic variable, we still cannot always use
// Ordering::Relaxed, as we need to make sure a successful call to `init`
// is "ordered before" any data read through the returned pointer (which
// occurs when the function is called). Our implementation mirrors that of
// the one in libstd, meaning that the use of non-Relaxed operations is
// probably unnecessary.
let raw_ptr = GETRANDOM_FN.load(Ordering::Acquire);
let fptr = match NonNull::new(raw_ptr) {
Some(p) => p,
None => init(),
};
#[path = "../utils/lazy.rs"]
mod lazy;

static GETRANDOM_FN: lazy::LazyNonNull<c_void> = lazy::LazyNonNull::new();
let fptr = GETRANDOM_FN.unsync_init(init);

if fptr == NOT_AVAILABLE {
use_file_fallback(dest)
} else {
// note: `transmute` is currently the only way to convert a pointer into a function reference
let getrandom_fn = unsafe { transmute::<NonNull<c_void>, GetRandomFn>(fptr) };
let getrandom_fn = unsafe { transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
utils::sys_fill_exact(dest, |buf| unsafe {
getrandom_fn(buf.as_mut_ptr().cast(), buf.len(), 0)
})
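A design note on the sentinel, under the assumed `LazyNonNull` semantics sketched above (null means "not initialized yet"): switching `NOT_AVAILABLE` from `usize::MAX as *mut c_void` to `NonNull::dangling()` keeps it non-null, so a failed probe is cached like any other result and later calls go straight to the file fallback instead of re-running `dlsym`. The snippet below only restates that assumed three-state encoding; the names are illustrative:

```rust
use core::{ffi::c_void, ptr::NonNull};

// Assumed three-state encoding in a single pointer cell:
//   null                  -> not initialized yet, run `init`
//   NonNull::dangling()   -> probed, `getrandom` unusable: take the file fallback
//   other non-null value  -> address of `getrandom`, call it directly
const NOT_AVAILABLE: NonNull<c_void> = NonNull::dangling();

fn describe(fptr: NonNull<c_void>) -> &'static str {
    if fptr == NOT_AVAILABLE {
        "use_file fallback"
    } else {
        "call getrandom through the transmuted function pointer"
    }
}
```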
41 changes: 16 additions & 25 deletions src/backends/netbsd.rs
@@ -8,8 +8,7 @@ use core::{
cmp,
ffi::c_void,
mem::{self, MaybeUninit},
ptr,
sync::atomic::{AtomicPtr, Ordering},
ptr::{self, NonNull},
};

pub use crate::util::{inner_u32, inner_u64};
@@ -42,36 +41,28 @@ unsafe extern "C" fn polyfill_using_kern_arand(

type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;

static GETRANDOM: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());

#[cold]
#[inline(never)]
fn init() -> *mut c_void {
static NAME: &[u8] = b"getrandom\0";
let name_ptr = NAME.as_ptr().cast::<libc::c_char>();
let mut ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) };
if ptr.is_null() || cfg!(getrandom_test_netbsd_fallback) {
// Verify `polyfill_using_kern_arand` has the right signature.
const POLYFILL: GetRandomFn = polyfill_using_kern_arand;
ptr = POLYFILL as *mut c_void;
fn init() -> NonNull<c_void> {
let ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) };
if !cfg!(getrandom_test_netbsd_fallback) {
if let Some(ptr) = NonNull::new(ptr) {
return ptr;
}
}
GETRANDOM.store(ptr, Ordering::Release);
ptr
const POLYFILL: GetRandomFn = polyfill_using_kern_arand;
unsafe { NonNull::new_unchecked(POLYFILL as *mut c_void) }
}

#[inline]
pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
// Despite being only a single atomic variable, we still cannot always use
// Ordering::Relaxed, as we need to make sure a successful call to `init`
// is "ordered before" any data read through the returned pointer (which
// occurs when the function is called). Our implementation mirrors that of
// the one in libstd, meaning that the use of non-Relaxed operations is
// probably unnecessary.
let mut fptr = GETRANDOM.load(Ordering::Acquire);
if fptr.is_null() {
fptr = init();
}
let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr) };
#[path = "../utils/lazy.rs"]
mod lazy;

static GETRANDOM_FN: lazy::LazyNonNull<c_void> = lazy::LazyNonNull::new();

let fptr = GETRANDOM_FN.unsync_init(init);
let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
utils::sys_fill_exact(dest, |buf| unsafe {
fptr(buf.as_mut_ptr().cast::<c_void>(), buf.len(), 0)
})
24 changes: 15 additions & 9 deletions src/backends/rdrand.rs
@@ -2,9 +2,6 @@
use crate::{Error, util::slice_as_uninit};
use core::mem::{MaybeUninit, size_of};

#[path = "../utils/lazy.rs"]
mod lazy;

#[cfg(not(any(target_arch = "x86_64", target_arch = "x86")))]
compile_error!("`rdrand` backend can be enabled only for x86 and x86-64 targets!");

@@ -20,8 +17,6 @@ cfg_if! {
}
}

static RDRAND_GOOD: lazy::LazyBool = lazy::LazyBool::new();

// Recommendation from "Intel® Digital Random Number Generator (DRNG) Software
// Implementation Guide" - Section 5.2.1 and "Intel® 64 and IA-32 Architectures
// Software Developer’s Manual" - Volume 1 - Section 7.3.17.1.
@@ -72,7 +67,9 @@ fn self_test() -> bool {
fails <= 2
}

fn is_rdrand_good() -> bool {
#[cold]
#[inline(never)]
fn init() -> bool {
#[cfg(not(target_feature = "rdrand"))]
{
// SAFETY: All Rust x86 targets are new enough to have CPUID, and we
@@ -115,6 +112,15 @@ fn is_rdrand_good() -> bool {
unsafe { self_test() }
}

fn is_rdrand_good() -> bool {
#[path = "../utils/lazy.rs"]
mod lazy;

static RDRAND_GOOD: lazy::LazyBool = lazy::LazyBool::new();

RDRAND_GOOD.unsync_init(init)
}

#[target_feature(enable = "rdrand")]
fn rdrand_exact(dest: &mut [MaybeUninit<u8>]) -> Option<()> {
// We use chunks_exact_mut instead of chunks_mut as it allows almost all
@@ -162,7 +168,7 @@ fn rdrand_u64() -> Option<u64> {

#[inline]
pub fn inner_u32() -> Result<u32, Error> {
if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
if !is_rdrand_good() {
return Err(Error::NO_RDRAND);
}
// SAFETY: After this point, we know rdrand is supported.
@@ -171,7 +177,7 @@ pub fn inner_u32() -> Result<u32, Error> {

#[inline]
pub fn inner_u64() -> Result<u64, Error> {
if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
if !is_rdrand_good() {
return Err(Error::NO_RDRAND);
}
// SAFETY: After this point, we know rdrand is supported.
@@ -180,7 +186,7 @@ pub fn inner_u64() -> Result<u64, Error> {

#[inline]
pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
if !is_rdrand_good() {
return Err(Error::NO_RDRAND);
}
// SAFETY: After this point, we know rdrand is supported.
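`LazyBool` is likewise not shown in this diff; only its call sites are (`LazyBool::new()` in a `static` and `RDRAND_GOOD.unsync_init(init)`). A minimal sketch of the interface those call sites imply, again assuming relaxed atomics and an explicit "uninitialized" byte value, could be:

```rust
use core::sync::atomic::{AtomicU8, Ordering::Relaxed};

/// Hypothetical reconstruction of the `LazyBool` interface used above;
/// the real `src/utils/lazy.rs` may use a different representation.
pub struct LazyBool(AtomicU8);

impl LazyBool {
    const UNINIT: u8 = u8::MAX; // neither 0 (`false`) nor 1 (`true`)

    pub const fn new() -> Self {
        Self(AtomicU8::new(Self::UNINIT))
    }

    /// Return the cached value, or run `init` and cache its result.
    /// As with `LazyNonNull`, `init` may run more than once if first calls race.
    pub fn unsync_init(&self, init: impl FnOnce() -> bool) -> bool {
        let cached = self.0.load(Relaxed);
        if cached == Self::UNINIT {
            let val = init();
            self.0.store(val as u8, Relaxed);
            val
        } else {
            cached != 0
        }
    }
}
```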
1 change: 1 addition & 0 deletions src/backends/rndr.rs
@@ -71,6 +71,7 @@ fn is_rndr_available() -> bool {
fn is_rndr_available() -> bool {
#[path = "../utils/lazy.rs"]
mod lazy;

static RNDR_GOOD: lazy::LazyBool = lazy::LazyBool::new();

cfg_if::cfg_if! {