580 changes: 300 additions & 280 deletions fearless_simd/src/generated/avx2.rs

Large diffs are not rendered by default.

432 changes: 324 additions & 108 deletions fearless_simd/src/generated/neon.rs

Large diffs are not rendered by default.

720 changes: 276 additions & 444 deletions fearless_simd/src/generated/sse4_2.rs

Large diffs are not rendered by default.

720 changes: 276 additions & 444 deletions fearless_simd/src/generated/wasm.rs

Large diffs are not rendered by default.

22 changes: 0 additions & 22 deletions fearless_simd_gen/src/arch/neon.rs
@@ -98,28 +98,6 @@ pub(crate) fn simple_intrinsic(name: &str, ty: &VecType) -> Ident {
     )
 }
 
-fn memory_intrinsic(op: &str, ty: &VecType) -> Ident {
-    let (opt_q, scalar_c, size) = neon_array_type(ty);
-    let num_blocks = ty.n_bits() / 128;
-    let opt_count = if num_blocks > 1 {
-        format!("_x{num_blocks}")
-    } else {
-        String::new()
-    };
-    Ident::new(
-        &format!("{op}1{opt_q}_{scalar_c}{size}{opt_count}"),
-        Span::call_site(),
-    )
-}
-
-pub(crate) fn load_intrinsic(ty: &VecType) -> Ident {
-    memory_intrinsic("vld", ty)
-}
-
-pub(crate) fn store_intrinsic(ty: &VecType) -> Ident {
-    memory_intrinsic("vst", ty)
-}
-
 pub(crate) fn split_intrinsic(name: &str, name2: &str, ty: &VecType) -> Ident {
     let (opt_q, scalar_c, size) = neon_array_type(ty);
     Ident::new(
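For reference, the deleted `memory_intrinsic` helper assembled NEON load/store intrinsic names by string concatenation, adding an `_x{n}` suffix for vectors wider than one 128-bit register. Below is a minimal standalone sketch of that naming scheme; the free-standing `memory_intrinsic_name` function and its string parameters are hypothetical stand-ins for the crate's `VecType`/`neon_array_type` plumbing:

fn memory_intrinsic_name(op: &str, opt_q: &str, scalar_c: &str, size: u32, n_bits: u32) -> String {
    // One NEON register holds 128 bits; wider vectors use the `_x{n}` multi-register intrinsics.
    let num_blocks = n_bits / 128;
    let opt_count = if num_blocks > 1 {
        format!("_x{num_blocks}")
    } else {
        String::new()
    };
    format!("{op}1{opt_q}_{scalar_c}{size}{opt_count}")
}

fn main() {
    // A 128-bit f32x4 load resolves to the plain single-register intrinsic.
    assert_eq!(memory_intrinsic_name("vld", "q", "f", 32, 128), "vld1q_f32");
    // A 256-bit f32x8 spans two NEON registers, so the `_x2` variant is named.
    assert_eq!(memory_intrinsic_name("vld", "q", "f", 32, 256), "vld1q_f32_x2");
    // The store side used the same scheme with a `vst` prefix.
    assert_eq!(memory_intrinsic_name("vst", "q", "u", 8, 128), "vst1q_u8");
}

After this change the generator no longer names `vld1*`/`vst1*` intrinsics at all; loads and stores instead go through the generic `transmute_copy` and `copy_nonoverlapping` paths in `generic.rs` below.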
93 changes: 36 additions & 57 deletions fearless_simd_gen/src/generic.rs
@@ -266,33 +266,31 @@ pub(crate) fn generic_block_combine(
 pub(crate) fn generic_from_array(
     method_sig: TokenStream,
     vec_ty: &VecType,
-    _kind: RefKind,
-    max_block_size: usize,
-    load_unaligned_block: impl Fn(&VecType) -> Ident,
+    kind: RefKind,
 ) -> TokenStream {
-    let block_size = max_block_size.min(vec_ty.n_bits());
-    let block_count = vec_ty.n_bits() / block_size;
-    let num_scalars_per_block = vec_ty.len / block_count;
-
-    let native_block_ty = VecType::new(
-        vec_ty.scalar,
-        vec_ty.scalar_bits,
-        block_size / vec_ty.scalar_bits,
-    );
-
-    let wrapper_ty = vec_ty.aligned_wrapper();
-    let load_unaligned = load_unaligned_block(&native_block_ty);
-    let expr = if block_count == 1 {
-        quote! {
-            unsafe { #wrapper_ty(#load_unaligned(val.as_ptr() as *const _)) }
-        }
+    let inner_ref = if kind == RefKind::Value {
+        quote! { &val }
     } else {
-        let blocks = (0..block_count).map(|n| n * num_scalars_per_block);
-        quote! {
-            unsafe { #wrapper_ty([
-                #(#load_unaligned(val.as_ptr().add(#blocks) as *const _)),*
-            ]) }
-        }
+        quote! { val }
     };
 
+    // There are architecture-specific "load" intrinsics, but they can actually be *worse* for performance. If they
+    // lower to LLVM intrinsics, they will likely not be optimized until much later in the pipeline (if at all),
+    // resulting in substantially worse codegen. See https://github.com/linebender/fearless_simd/pull/185.
+    let expr = quote! {
+        // Safety: The native vector type backing any implementation will be:
+        // - A `#[repr(simd)]` type, which has the same layout as an array of scalars
+        // - An array of `#[repr(simd)]` types
+        // - For AArch64 specifically, a `#[repr(C)]` tuple of `#[repr(simd)]` types
+        //
+        // These all have the same layout as a flat array of the corresponding scalars. The native vector types probably
+        // have greater alignment requirements than the source array type we're copying from, but that's explicitly
+        // allowed by transmute_copy:
+        //
+        // > This function will unsafely assume the pointer src is valid for size_of::<Dst> bytes by transmuting &Src to
+        // > &Dst and then reading the &Dst **(except that this is done in a way that is correct even when &Dst has
+        // > stricter alignment requirements than &Src).**
+        unsafe { core::mem::transmute_copy(#inner_ref) }
+    };
     let vec_rust = vec_ty.rust();
 
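With this change, a generated `from_array` body is nothing but a `transmute_copy` from the scalar array into the backing vector type. A minimal runnable sketch of why the alignment mismatch is sound, using a hypothetical `#[repr(C, align(16))]` wrapper in place of a real `#[repr(simd)]` backing type:

use core::mem::transmute_copy;

// Hypothetical stand-in for a native 128-bit vector type: same size and
// layout as [f32; 4], but with the stricter 16-byte alignment such types have.
#[repr(C, align(16))]
#[derive(Debug, Clone, Copy, PartialEq)]
struct Vec128([f32; 4]);

fn main() {
    // The source array is only guaranteed 4-byte alignment. Per its
    // documentation, transmute_copy reads through &Src in a way that is
    // correct even when the destination type is more strictly aligned.
    let val: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
    let v: Vec128 = unsafe { transmute_copy(&val) };
    assert_eq!(v, Vec128([1.0, 2.0, 3.0, 4.0]));
}

Because the copy is an ordinary by-value read, the optimizer sees a plain 16-byte memcpy and can fold it directly into a vector load, rather than an opaque intrinsic call that may only be simplified late in the pipeline, if at all.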
@@ -333,39 +331,20 @@ pub(crate) fn generic_as_array<T: ToTokens>(
     }
 }
 
-pub(crate) fn generic_store_array(
-    method_sig: TokenStream,
-    vec_ty: &VecType,
-    max_block_size: usize,
-    store_unaligned_block: impl Fn(&VecType) -> Ident,
-) -> TokenStream {
-    let block_size = max_block_size.min(vec_ty.n_bits());
-    let block_count = vec_ty.n_bits() / block_size;
-    let num_scalars_per_block = vec_ty.len / block_count;
-
-    let native_block_ty = VecType::new(
-        vec_ty.scalar,
-        vec_ty.scalar_bits,
-        block_size / vec_ty.scalar_bits,
-    );
-
-    let store_unaligned = store_unaligned_block(&native_block_ty);
-    let store_expr = if block_count == 1 {
-        quote! {
-            unsafe { #store_unaligned(dest.as_mut_ptr() as *mut _, a.val.0) }
-        }
-    } else {
-        let blocks = (0..block_count).map(|n| {
-            let offset = n * num_scalars_per_block;
-            let block_idx = proc_macro2::Literal::usize_unsuffixed(n);
-            quote! {
-                #store_unaligned(dest.as_mut_ptr().add(#offset) as *mut _, a.val.0[#block_idx])
-            }
-        });
-        quote! {
-            unsafe {
-                #(#blocks;)*
-            }
-        }
-    };
+pub(crate) fn generic_store_array(method_sig: TokenStream, vec_ty: &VecType) -> TokenStream {
+    let scalar_ty = vec_ty.scalar.rust(vec_ty.scalar_bits);
+    let count = vec_ty.len;
+
+    let store_expr = quote! {
+        unsafe {
+            // Copies `count` scalars from the backing type, which has the same layout as the destination array (see
+            // `generic_as_array`). The backing type is aligned to its own size, and the destination array must *by
+            // definition* be aligned to at least the alignment of the scalar.
+            core::ptr::copy_nonoverlapping(
+                (&raw const a.val.0) as *const #scalar_ty,
+                dest.as_mut_ptr(),
+                #count,
+            );
+        }
+    };
 
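The store side is symmetric: the generated code reinterprets the backing value as a pointer to its first scalar and copies `len` scalars into the destination. A runnable sketch using the same hypothetical wrapper type as above:

use core::ptr::copy_nonoverlapping;

// Hypothetical stand-in for a 16-byte-aligned native vector type.
#[repr(C, align(16))]
struct Vec128([f32; 4]);

fn main() {
    let v = Vec128([1.0, 2.0, 3.0, 4.0]);
    let mut dest = [0.0f32; 4];
    // Mirrors the generated store: `&raw const` takes the address of the
    // backing array without creating a reference, and the cast reinterprets
    // it as a pointer to the first scalar.
    unsafe {
        copy_nonoverlapping((&raw const v.0) as *const f32, dest.as_mut_ptr(), 4);
    }
    assert_eq!(dest, [1.0, 2.0, 3.0, 4.0]);
}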
13 changes: 2 additions & 11 deletions fearless_simd_gen/src/mk_neon.rs
@@ -4,7 +4,6 @@
 use proc_macro2::{Ident, Literal, Span, TokenStream};
 use quote::{ToTokens as _, format_ident, quote};
 
-use crate::arch::neon::{load_intrinsic, store_intrinsic};
 use crate::generic::{
     generic_as_array, generic_from_array, generic_from_bytes, generic_op_name, generic_store_array,
     generic_to_bytes,
@@ -462,21 +461,13 @@ impl Level for Neon {
                     }
                 }
             }
-            OpSig::FromArray { kind } => generic_from_array(
-                method_sig,
-                vec_ty,
-                kind,
-                self.max_block_size(),
-                load_intrinsic,
-            ),
+            OpSig::FromArray { kind } => generic_from_array(method_sig, vec_ty, kind),
             OpSig::AsArray { kind } => {
                 generic_as_array(method_sig, vec_ty, kind, self.max_block_size(), |vec_ty| {
                     self.arch_ty(vec_ty)
                 })
            }
-            OpSig::StoreArray => {
-                generic_store_array(method_sig, vec_ty, self.max_block_size(), store_intrinsic)
-            }
+            OpSig::StoreArray => generic_store_array(method_sig, vec_ty),
             OpSig::FromBytes => generic_from_bytes(method_sig, vec_ty),
             OpSig::ToBytes => generic_to_bytes(method_sig, vec_ty),
         }
12 changes: 2 additions & 10 deletions fearless_simd_gen/src/mk_wasm.rs
@@ -613,21 +613,13 @@ impl Level for WasmSimd128 {
                     }
                 }
             }
-            OpSig::FromArray { kind } => {
-                generic_from_array(method_sig, vec_ty, kind, self.max_block_size(), |_| {
-                    v128_intrinsic("load")
-                })
-            }
+            OpSig::FromArray { kind } => generic_from_array(method_sig, vec_ty, kind),
             OpSig::AsArray { kind } => {
                 generic_as_array(method_sig, vec_ty, kind, self.max_block_size(), |_| {
                     Ident::new("v128", Span::call_site())
                 })
             }
-            OpSig::StoreArray => {
-                generic_store_array(method_sig, vec_ty, self.max_block_size(), |_| {
-                    v128_intrinsic("store")
-                })
-            }
+            OpSig::StoreArray => generic_store_array(method_sig, vec_ty),
             OpSig::FromBytes => generic_from_bytes(method_sig, vec_ty),
             OpSig::ToBytes => generic_to_bytes(method_sig, vec_ty),
         }
14 changes: 2 additions & 12 deletions fearless_simd_gen/src/mk_x86.rs
@@ -170,23 +170,13 @@ impl Level for X86 {
                 block_size,
                 block_count,
             } => self.handle_store_interleaved(method_sig, vec_ty, block_size, block_count),
-            OpSig::FromArray { kind } => generic_from_array(
-                method_sig,
-                vec_ty,
-                kind,
-                self.max_block_size(),
-                |block_ty| intrinsic_ident("loadu", coarse_type(block_ty), block_ty.n_bits()),
-            ),
+            OpSig::FromArray { kind } => generic_from_array(method_sig, vec_ty, kind),
             OpSig::AsArray { kind } => {
                 generic_as_array(method_sig, vec_ty, kind, self.max_block_size(), |vec_ty| {
                     self.arch_ty(vec_ty)
                 })
             }
-            OpSig::StoreArray => {
-                generic_store_array(method_sig, vec_ty, self.max_block_size(), |block_ty| {
-                    intrinsic_ident("storeu", coarse_type(block_ty), block_ty.n_bits())
-                })
-            }
+            OpSig::StoreArray => generic_store_array(method_sig, vec_ty),
             OpSig::FromBytes => generic_from_bytes(method_sig, vec_ty),
             OpSig::ToBytes => generic_to_bytes(method_sig, vec_ty),
         }