use crate::AllBytesValid;
use std::cmp::Ordering;
use std::fmt;
use std::mem;
use std::slice;

/// Helper type representing a 1-byte-aligned little-endian value in memory.
///
/// This type is used in slice types for Wasmtime host bindings. Guest types
/// are not guaranteed to be either aligned or in the native endianness. This
/// type wraps these types and provides explicit getters/setters to interact
/// with the underlying value in a safe host-agnostic manner.
#[repr(packed)]
pub struct Le<T>(T);

impl<T> Le<T>
where
    T: Endian,
{
    /// Creates a new `Le<T>` value where the internals are stored in a way
    /// that's safe to copy into wasm linear memory.
    pub fn new(t: T) -> Le<T> {
        Le(t.into_le())
    }

    /// Reads the value stored in this `Le<T>`.
    ///
    /// This will perform a correct read even if the underlying memory is
    /// unaligned, and it will also convert to the host's endianness for the
    /// right representation of `T`.
    pub fn get(&self) -> T {
        self.0.from_le()
    }

    /// Writes the `val` to this slot.
    ///
    /// This will work correctly even if the underlying memory is unaligned,
    /// and it will also automatically convert the `val` provided to an
    /// endianness appropriate for WebAssembly (little-endian).
    pub fn set(&mut self, val: T) {
        self.0 = val.into_le();
    }

    pub(crate) fn from_slice(bytes: &[u8]) -> &[Le<T>] {
        // SAFETY: The invariants we uphold here are:
        //
        // * the lifetime of the input is the same as the output, so we're
        //   only dealing with valid memory.
        // * the alignment of the input is the same as the output (1)
        // * the input isn't being truncated and we're consuming all of it
        //   (it must be a multiple of the size of `Le<T>`)
        // * all byte-patterns for `Le<T>` are valid. This is guaranteed by
        //   the `AllBytesValid` supertrait of `Endian`.
        unsafe {
            assert_eq!(mem::align_of::<Le<T>>(), 1);
            assert!(bytes.len() % mem::size_of::<Le<T>>() == 0);
            fn all_bytes_valid<U: AllBytesValid>() {}
            all_bytes_valid::<Le<T>>();
            slice::from_raw_parts(
                bytes.as_ptr().cast::<Le<T>>(),
                bytes.len() / mem::size_of::<Le<T>>(),
            )
        }
    }

    pub(crate) fn from_slice_mut(bytes: &mut [u8]) -> &mut [Le<T>] {
        // SAFETY: see `from_slice` above
        //
        // Note that both the input and the output are `mut`, helping to
        // maintain the guarantee of uniqueness.
        unsafe {
            assert_eq!(mem::align_of::<Le<T>>(), 1);
            assert!(bytes.len() % mem::size_of::<Le<T>>() == 0);
            slice::from_raw_parts_mut(
                bytes.as_mut_ptr().cast::<Le<T>>(),
                bytes.len() / mem::size_of::<Le<T>>(),
            )
        }
    }
}

impl<T: Copy> Clone for Le<T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T: Copy> Copy for Le<T> {}

impl<T: Endian + PartialEq> PartialEq for Le<T> {
    fn eq(&self, other: &Le<T>) -> bool {
        self.get() == other.get()
    }
}

impl<T: Endian + PartialEq> PartialEq<T> for Le<T> {
    fn eq(&self, other: &T) -> bool {
        self.get() == *other
    }
}

impl<T: Endian + Eq> Eq for Le<T> {}

impl<T: Endian + PartialOrd> PartialOrd for Le<T> {
    fn partial_cmp(&self, other: &Le<T>) -> Option<Ordering> {
        self.get().partial_cmp(&other.get())
    }
}

impl<T: Endian + Ord> Ord for Le<T> {
    fn cmp(&self, other: &Le<T>) -> Ordering {
        self.get().cmp(&other.get())
    }
}

impl<T: Endian + fmt::Debug> fmt::Debug for Le<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.get().fmt(f)
    }
}

impl<T: Endian> From<T> for Le<T> {
    fn from(t: T) -> Le<T> {
        Le::new(t)
    }
}

unsafe impl<T: AllBytesValid> AllBytesValid for Le<T> {}

/// Trait used for the implementation of the `Le` type.
pub trait Endian: AllBytesValid + Copy + Sized {
    /// Converts this value and any aggregate fields (if any) into
    /// little-endian byte order
    fn into_le(self) -> Self;

    /// Converts this value and any aggregate fields (if any) from
    /// little-endian byte order
    fn from_le(self) -> Self;
}
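// Illustrative sketch (not part of the original API surface): how `Le<T>` is
// typically used to view a raw byte buffer, such as guest linear memory, as
// typed little-endian values. This relies on `u32: Endian` (provided by the
// `primitives!` macro below) and therefore on `u32: AllBytesValid`, which the
// `Endian` supertrait requires to be implemented elsewhere in this crate.
#[cfg(test)]
mod le_usage_sketch {
    use super::Le;

    #[test]
    fn read_and_write_through_le() {
        // Four bytes encoding the little-endian u32 0x0403_0201.
        let mut bytes = [0x01, 0x02, 0x03, 0x04];

        // Reinterpret the byte buffer as `&[Le<u32>]` and read the value
        // back in host endianness, regardless of the host's byte order.
        let values = Le::<u32>::from_slice(&bytes);
        assert_eq!(values[0].get(), 0x0403_0201);

        // Writing through `set` stores the value back in little-endian
        // order, as the underlying bytes confirm.
        let values = Le::<u32>::from_slice_mut(&mut bytes);
        values[0].set(0x0807_0605);
        assert_eq!(bytes, [0x05, 0x06, 0x07, 0x08]);
    }
}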
macro_rules! primitives {
    ($($t:ident)*) => ($(
        impl Endian for $t {
            #[inline]
            fn into_le(self) -> Self {
                Self::from_ne_bytes(self.to_le_bytes())
            }
            #[inline]
            fn from_le(self) -> Self {
                Self::from_le_bytes(self.to_ne_bytes())
            }
        }
    )*)
}

primitives! {
    u8 i8
    u16 i16
    u32 i32
    u64 i64
    f32 f64
}

macro_rules! tuples {
    ($(($($t:ident)*))*) => ($(
        #[allow(non_snake_case)]
        impl<$($t: Endian,)*> Endian for ($($t,)*) {
            fn into_le(self) -> Self {
                let ($($t,)*) = self;
                ($($t.into_le(),)*)
            }
            fn from_le(self) -> Self {
                let ($($t,)*) = self;
                ($($t.from_le(),)*)
            }
        }
    )*)
}

tuples! {
    ()
    (T1)
    (T1 T2)
    (T1 T2 T3)
    (T1 T2 T3 T4)
    (T1 T2 T3 T4 T5)
    (T1 T2 T3 T4 T5 T6)
    (T1 T2 T3 T4 T5 T6 T7)
    (T1 T2 T3 T4 T5 T6 T7 T8)
    (T1 T2 T3 T4 T5 T6 T7 T8 T9)
    (T1 T2 T3 T4 T5 T6 T7 T8 T9 T10)
}
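// A minimal sketch of the property the macro expansions above provide: on a
// little-endian host both conversions are no-ops, while on a big-endian host
// they byte-swap, so `into_le` followed by `from_le` is always the identity.
// Tuples apply the conversion field by field. The values here are arbitrary
// and only illustrative.
#[cfg(test)]
mod endian_roundtrip_sketch {
    use super::Endian;

    #[test]
    fn primitives_and_tuples_roundtrip() {
        // Round-tripping through the wasm (little-endian) representation
        // must restore the original value for any `Endian` primitive.
        assert_eq!(0x1234_5678u32.into_le().from_le(), 0x1234_5678);
        assert_eq!((-1.5f64).into_le().from_le(), -1.5);

        // Tuples convert each field independently, so the round-trip also
        // holds componentwise.
        let t = (1u8, 0x1234u16, 0x8765_4321u32);
        assert_eq!(t.into_le().from_le(), t);
    }
}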