From 79db4ea804c21392bf308c6dbdc14d40e9fd8f84 Mon Sep 17 00:00:00 2001 From: Julian Schindel Date: Fri, 27 Mar 2026 20:21:45 +0100 Subject: [PATCH 1/5] zerocopy: remove endian module in favor of zerocopy The zerocopy crate provides the same functionality. Signed-off-by: Julian Schindel --- src/endian.rs | 159 -------------------------------------------------- src/lib.rs | 3 - 2 files changed, 162 deletions(-) delete mode 100644 src/endian.rs diff --git a/src/endian.rs b/src/endian.rs deleted file mode 100644 index 40e49b14..00000000 --- a/src/endian.rs +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2017 The Chromium OS Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE-BSD-3-Clause file. -// -// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause - -//! Explicit endian types useful for embedding in structs or reinterpreting data. -//! -//! Each endian type is guaarnteed to have the same size and alignment as a regular unsigned -//! primitive of the equal size. -//! -//! # Examples -//! -//! ``` -//! # use vm_memory::{Be32, Le32}; -//! # -//! let b: Be32 = From::from(3); -//! let l: Le32 = From::from(3); -//! -//! assert_eq!(b.to_native(), 3); -//! assert_eq!(l.to_native(), 3); -//! assert!(b == 3); -//! assert!(l == 3); -//! -//! let b_trans: u32 = unsafe { std::mem::transmute(b) }; -//! let l_trans: u32 = unsafe { std::mem::transmute(l) }; -//! -//! #[cfg(target_endian = "little")] -//! assert_eq!(l_trans, 3); -//! #[cfg(target_endian = "big")] -//! assert_eq!(b_trans, 3); -//! -//! assert_ne!(b_trans, l_trans); -//! ``` - -use std::mem::{align_of, size_of}; - -use crate::bytes::ByteValued; - -macro_rules! const_assert { - ($condition:expr) => { - let _ = [(); 0 - !$condition as usize]; - }; -} - -macro_rules! endian_type { - ($old_type:ident, $new_type:ident, $to_new:ident, $from_new:ident) => { - /// An unsigned integer type of with an explicit endianness. 
- /// - /// See module level documentation for examples. - #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)] - #[repr(transparent)] - pub struct $new_type($old_type); - - impl $new_type { - fn _assert() { - const_assert!(align_of::<$new_type>() == align_of::<$old_type>()); - const_assert!(size_of::<$new_type>() == size_of::<$old_type>()); - } - - /// Converts `self` to the native endianness. - pub fn to_native(self) -> $old_type { - $old_type::$from_new(self.0) - } - } - - // SAFETY: Safe because we are using this for implementing ByteValued for endian types - // which are POD. - unsafe impl ByteValued for $new_type {} - - impl PartialEq<$old_type> for $new_type { - fn eq(&self, other: &$old_type) -> bool { - self.0 == $old_type::$to_new(*other) - } - } - - impl PartialEq<$new_type> for $old_type { - fn eq(&self, other: &$new_type) -> bool { - $old_type::$to_new(other.0) == *self - } - } - - impl From<$new_type> for $old_type { - fn from(v: $new_type) -> $old_type { - v.to_native() - } - } - - impl From<$old_type> for $new_type { - fn from(v: $old_type) -> $new_type { - $new_type($old_type::$to_new(v)) - } - } - }; -} - -endian_type!(u16, Le16, to_le, from_le); -endian_type!(u32, Le32, to_le, from_le); -endian_type!(u64, Le64, to_le, from_le); -endian_type!(usize, LeSize, to_le, from_le); -endian_type!(u16, Be16, to_be, from_be); -endian_type!(u32, Be32, to_be, from_be); -endian_type!(u64, Be64, to_be, from_be); -endian_type!(usize, BeSize, to_be, from_be); - -#[cfg(test)] -mod tests { - #![allow(clippy::undocumented_unsafe_blocks)] - use super::*; - - use std::convert::From; - use std::mem::transmute; - - #[cfg(target_endian = "little")] - const NATIVE_LITTLE: bool = true; - #[cfg(target_endian = "big")] - const NATIVE_LITTLE: bool = false; - const NATIVE_BIG: bool = !NATIVE_LITTLE; - - macro_rules! 
endian_test { - ($old_type:ty, $new_type:ty, $test_name:ident, $native:expr) => { - mod $test_name { - use super::*; - - #[allow(overflowing_literals)] - #[test] - fn test_endian_type() { - <$new_type>::_assert(); - - let v = 0x0123_4567_89AB_CDEF as $old_type; - let endian_v: $new_type = From::from(v); - let endian_into: $old_type = endian_v.into(); - let endian_transmute: $old_type = unsafe { transmute(endian_v) }; - - if $native { - assert_eq!(endian_v, endian_transmute); - } else { - assert_eq!(endian_v, endian_transmute.swap_bytes()); - } - - assert_eq!(endian_into, v); - assert_eq!(endian_v.to_native(), v); - - assert!(v == endian_v); - assert!(endian_v == v); - } - } - }; - } - - endian_test!(u16, Le16, test_le16, NATIVE_LITTLE); - endian_test!(u32, Le32, test_le32, NATIVE_LITTLE); - endian_test!(u64, Le64, test_le64, NATIVE_LITTLE); - endian_test!(usize, LeSize, test_le_size, NATIVE_LITTLE); - endian_test!(u16, Be16, test_be16, NATIVE_BIG); - endian_test!(u32, Be32, test_be32, NATIVE_BIG); - endian_test!(u64, Be64, test_be64, NATIVE_BIG); - endian_test!(usize, BeSize, test_be_size, NATIVE_BIG); -} diff --git a/src/lib.rs b/src/lib.rs index d00b630c..e7456829 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -48,9 +48,6 @@ pub mod bitmap; pub mod bytes; pub use bytes::{AtomicAccess, ByteValued, Bytes}; -pub mod endian; -pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize}; - pub mod guest_memory; pub use guest_memory::{ Error as GuestMemoryError, FileOffset, GuestAddress, GuestAddressSpace, GuestMemory, From a70090ef02f3b7c7ec2dc8bbfb40572c21fbcbfa Mon Sep 17 00:00:00 2001 From: Julian Schindel Date: Fri, 27 Mar 2026 21:45:01 +0100 Subject: [PATCH 2/5] zerocopy: replace `ByteValued` with `zerocopy` traits The safety documentation for the `ByteValued` trait states that a type `T` can only implement `ByteValued` if and only if it can be initialized by reading its contents from a byte array. 
This doesn't cover the `ByteValued::as{_mut}_slice` methods, which allow treating type `T` as slice of bytes. From this, the `ByteValued` invariants are captured only by requiring both, `zerocopy::FromBytes` and `zerocopy::IntoBytes`. Additionally, `ByteValued::zeroed` allows constructing a `T` with all bytes set to zero, which means `zerocopy::FromZeroes` is also required to map `ByteValued` to `zerocopy` traits. All `ByteValued` bounds are replaced by the existing bounds `Copy + Send + Sync` and the new bounds `FromBytes + IntoBytes + FromZeros`. Signed-off-by: Julian Schindel --- Cargo.toml | 1 + benches/mmap/mod.rs | 10 +- src/bytes.rs | 282 +++-------------------------------------- src/guest_memory.rs | 30 ++--- src/lib.rs | 2 +- src/volatile_memory.rs | 39 +++--- 6 files changed, 57 insertions(+), 307 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 856e55bc..c71ef7bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,6 +27,7 @@ bitflags = { version = "2.4.0", optional = true } rangemap = { version = "1.5.1", optional = true } thiserror = "2.0.16" vmm-sys-util = { version = ">=0.12.1, <=0.15.0", optional = true } +zerocopy = "0.8.48" [target.'cfg(target_family = "windows")'.dependencies.winapi] version = "0.3" diff --git a/benches/mmap/mod.rs b/benches/mmap/mod.rs index 7da8ea01..43cae7dd 100644 --- a/benches/mmap/mod.rs +++ b/benches/mmap/mod.rs @@ -16,29 +16,27 @@ use std::path::Path; use core::hint::black_box; use criterion::Criterion; +use zerocopy::{FromBytes, IntoBytes}; -use vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemoryBackend}; +use vm_memory::{Bytes, GuestAddress, GuestMemoryBackend}; const REGION_SIZE: usize = 0x8000_0000; const REGIONS_COUNT: u64 = 8; const ACCESS_SIZE: usize = 0x200; #[repr(C)] -#[derive(Copy, Clone, Default)] +#[derive(Copy, Clone, Default, FromBytes, IntoBytes)] struct SmallDummy { a: u32, b: u32, } -unsafe impl ByteValued for SmallDummy {} #[repr(C)] -#[derive(Copy, Clone, Default)] +#[derive(Copy, Clone, 
Default, FromBytes, IntoBytes)] struct BigDummy { elements: [u64; 12], } -unsafe impl ByteValued for BigDummy {} - fn make_image(size: usize) -> Vec { let mut image: Vec = Vec::with_capacity(size); for i in 0..size { diff --git a/src/bytes.rs b/src/bytes.rs index 9ff74204..461399f6 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -8,177 +8,18 @@ // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause -//! Define the `ByteValued` trait to mark that it is safe to instantiate the struct with random -//! data. +//! Define the `AtomicAccess` and `Bytes` traits. -use std::io::{Read, Write}; -use std::mem::{size_of, MaybeUninit}; use std::result::Result; -use std::slice::{from_raw_parts, from_raw_parts_mut}; use std::sync::atomic::Ordering; +use zerocopy::{FromBytes, FromZeros, IntoBytes}; use crate::atomic_integer::AtomicInteger; -use crate::volatile_memory::VolatileSlice; use crate::{ReadVolatile, WriteVolatile}; -/// Types for which it is safe to initialize from raw data. -/// -/// # Safety -/// -/// A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a -/// byte array. This is generally true for all plain-old-data structs. It is notably not true for -/// any type that includes a reference. It is generally also not safe for non-packed structs, as -/// compiler-inserted padding is considered uninitialized memory, and thus reads/writing it will -/// cause undefined behavior. -/// -/// Implementing this trait guarantees that it is safe to instantiate the struct with random data. -pub unsafe trait ByteValued: Copy + Send + Sync { - /// Converts a slice of raw data into a reference of `Self`. - /// - /// The value of `data` is not copied. Instead a reference is made from the given slice. The - /// value of `Self` will depend on the representation of the type in memory, and may change in - /// an unstable fashion. 
- /// - /// This will return `None` if the length of data does not match the size of `Self`, or if the - /// data is not aligned for the type of `Self`. - fn from_slice(data: &[u8]) -> Option<&Self> { - // Early out to avoid an unneeded `align_to` call. - if data.len() != size_of::() { - return None; - } - - // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and - // we ensured the size of the pointer's buffer is the correct size. The `align_to` method - // ensures that we don't have any unaligned references. This aliases a pointer, but because - // the pointer is from a const slice reference, there are no mutable aliases. Finally, the - // reference returned can not outlive data because they have equal implicit lifetime - // constraints. - match unsafe { data.align_to::() } { - ([], [mid], []) => Some(mid), - _ => None, - } - } - - /// Converts a mutable slice of raw data into a mutable reference of `Self`. - /// - /// Because `Self` is made from a reference to the mutable slice, mutations to the returned - /// reference are immediately reflected in `data`. The value of the returned `Self` will depend - /// on the representation of the type in memory, and may change in an unstable fashion. - /// - /// This will return `None` if the length of data does not match the size of `Self`, or if the - /// data is not aligned for the type of `Self`. - fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> { - // Early out to avoid an unneeded `align_to_mut` call. - if data.len() != size_of::() { - return None; - } - - // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and - // we ensured the size of the pointer's buffer is the correct size. The `align_to` method - // ensures that we don't have any unaligned references. This aliases a pointer, but because - // the pointer is from a mut slice reference, we borrow the passed in mutable reference. 
- // Finally, the reference returned can not outlive data because they have equal implicit - // lifetime constraints. - match unsafe { data.align_to_mut::() } { - ([], [mid], []) => Some(mid), - _ => None, - } - } - - /// Converts a reference to `self` into a slice of bytes. - /// - /// The value of `self` is not copied. Instead, the slice is made from a reference to `self`. - /// The value of bytes in the returned slice will depend on the representation of the type in - /// memory, and may change in an unstable fashion. - fn as_slice(&self) -> &[u8] { - // SAFETY: Safe because the entire size of self is accessible as bytes because the trait - // guarantees it. The lifetime of the returned slice is the same as the passed reference, - // so that no dangling pointers will result from this pointer alias. - unsafe { from_raw_parts(self as *const Self as *const u8, size_of::()) } - } - - /// Converts a mutable reference to `self` into a mutable slice of bytes. - /// - /// Because the slice is made from a reference to `self`, mutations to the returned slice are - /// immediately reflected in `self`. The value of bytes in the returned slice will depend on - /// the representation of the type in memory, and may change in an unstable fashion. - fn as_mut_slice(&mut self) -> &mut [u8] { - // SAFETY: Safe because the entire size of self is accessible as bytes because the trait - // guarantees it. The trait also guarantees that any combination of bytes is valid for this - // type, so modifying them in the form of a byte slice is valid. The lifetime of the - // returned slice is the same as the passed reference, so that no dangling pointers will - // result from this pointer alias. Although this does alias a mutable pointer, we do so by - // exclusively borrowing the given mutable reference. - unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::()) } - } - - /// Converts a mutable reference to `self` into a `VolatileSlice`. 
This is - /// useful because `VolatileSlice` provides a `Bytes` implementation. - fn as_bytes(&mut self) -> VolatileSlice<'_> { - VolatileSlice::from(self.as_mut_slice()) - } - - /// Constructs a `Self` ewhose binary representation is set to all zeroes. - fn zeroed() -> Self { - // SAFETY: ByteValued objects must be assignable from arbitrary byte - // sequences and are mandated to be packed. - // Hence, zeroed memory is a fine initialization. - unsafe { MaybeUninit::::zeroed().assume_init() } - } - - /// Writes this [`ByteValued`]'s byte representation to the given [`Write`] impl. - fn write_all_to(&self, mut writer: W) -> Result<(), std::io::Error> { - writer.write_all(self.as_slice()) - } - - /// Constructs an instance of this [`ByteValued`] by reading from the given [`Read`] impl. - fn read_exact_from(mut reader: R) -> Result { - let mut result = Self::zeroed(); - reader.read_exact(result.as_mut_slice()).map(|_| result) - } -} - -macro_rules! byte_valued_array { - ($T:ty, $($N:expr)+) => { - $( - // SAFETY: All intrinsic types and arrays of intrinsic types are ByteValued. - // They are just numbers. - unsafe impl ByteValued for [$T; $N] {} - )+ - } -} - -macro_rules! byte_valued_type { - ($T:ty) => { - // SAFETY: Safe as long T is POD. - // We are using this macro to generated the implementation for integer types below. - unsafe impl ByteValued for $T {} - byte_valued_array! { - $T, - 0 1 2 3 4 5 6 7 8 9 - 10 11 12 13 14 15 16 17 18 19 - 20 21 22 23 24 25 26 27 28 29 - 30 31 32 - } - }; -} - -byte_valued_type!(u8); -byte_valued_type!(u16); -byte_valued_type!(u32); -byte_valued_type!(u64); -byte_valued_type!(u128); -byte_valued_type!(usize); -byte_valued_type!(i8); -byte_valued_type!(i16); -byte_valued_type!(i32); -byte_valued_type!(i64); -byte_valued_type!(i128); -byte_valued_type!(isize); - /// A trait used to identify types which can be accessed atomically by proxy. 
pub trait AtomicAccess: - ByteValued + Copy + Send + Sync + FromBytes + IntoBytes + FromZeros // Could not find a more succinct way of stating that `Self` can be converted // into `Self::A::V`, and the other way around. + From<<::A as AtomicInteger>::V> @@ -297,8 +138,12 @@ pub trait Bytes { /// # Errors /// /// Returns an error if the object doesn't fit inside the container. - fn write_obj(&self, val: T, addr: A) -> Result<(), Self::E> { - self.write_slice(val.as_slice(), addr) + fn write_obj( + &self, + mut val: T, + addr: A, + ) -> Result<(), Self::E> { + self.write_slice(val.as_mut_bytes(), addr) } /// Reads an object from the container at `addr`. @@ -310,9 +155,12 @@ pub trait Bytes { /// # Errors /// /// Returns an error if there's not enough data inside the container. - fn read_obj(&self, addr: A) -> Result { - let mut result = T::zeroed(); - self.read_slice(result.as_mut_slice(), addr).map(|_| result) + fn read_obj( + &self, + addr: A, + ) -> Result { + let mut result = T::new_zeroed(); + self.read_slice(result.as_mut_bytes(), addr).map(|_| result) } /// Reads up to `count` bytes from `src` and writes them into the container at `addr`. @@ -425,8 +273,6 @@ pub(crate) mod tests { use std::cell::RefCell; use std::fmt::Debug; - use std::io::ErrorKind; - use std::mem::align_of; // Helper method to test atomic accesses for a given `b: Bytes` that's supposed to be // zero-initialized. 
@@ -446,59 +292,6 @@ pub(crate) mod tests { b.store(val, bad_addr, Ordering::Relaxed).unwrap_err(); } - fn check_byte_valued_type() - where - T: ByteValued + PartialEq + Debug + Default, - { - let mut data = [0u8; 48]; - let pre_len = { - let (pre, _, _) = unsafe { data.align_to::() }; - pre.len() - }; - { - let aligned_data = &mut data[pre_len..pre_len + size_of::()]; - { - let mut val: T = Default::default(); - assert_eq!(T::from_slice(aligned_data), Some(&val)); - assert_eq!(T::from_mut_slice(aligned_data), Some(&mut val)); - assert_eq!(val.as_slice(), aligned_data); - assert_eq!(val.as_mut_slice(), aligned_data); - } - } - for i in 1..size_of::().min(align_of::()) { - let begin = pre_len + i; - let end = begin + size_of::(); - let unaligned_data = &mut data[begin..end]; - { - if align_of::() != 1 { - assert_eq!(T::from_slice(unaligned_data), None); - assert_eq!(T::from_mut_slice(unaligned_data), None); - } - } - } - // Check the early out condition - { - assert!(T::from_slice(&data).is_none()); - assert!(T::from_mut_slice(&mut data).is_none()); - } - } - - #[test] - fn test_byte_valued() { - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - } - pub const MOCK_BYTES_CONTAINER_SIZE: usize = 10; pub struct MockBytesContainer { @@ -625,49 +418,4 @@ pub(crate) mod tests { ); assert_eq!(bytes.read_obj::(MOCK_BYTES_CONTAINER_SIZE), Err(())); } - - #[repr(C)] - #[derive(Copy, Clone, Default, Debug)] - struct S { - a: u32, - b: u32, - } - - unsafe impl ByteValued for S {} - - #[test] - fn byte_valued_slice() { - let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1]; - let mut s: S = Default::default(); - s.as_bytes().copy_from(&a); - assert_eq!(s.a, 0); - assert_eq!(s.b, 
0x0101_0101); - } - - #[test] - fn test_byte_valued_io() { - let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1]; - - let result = S::read_exact_from(&a[1..]); - assert_eq!(result.unwrap_err().kind(), ErrorKind::UnexpectedEof); - - let s = S::read_exact_from(&a[..]).unwrap(); - assert_eq!(s.a, 0); - assert_eq!(s.b, 0x0101_0101); - - let mut b = Vec::new(); - s.write_all_to(&mut b).unwrap(); - assert_eq!(a.as_ref(), b.as_slice()); - - let mut b = [0; 7]; - let result = s.write_all_to(b.as_mut_slice()); - assert_eq!(result.unwrap_err().kind(), ErrorKind::WriteZero); - } - - #[test] - fn test_byte_valued_zeroed() { - let s = S::zeroed(); - - assert!(s.as_slice().iter().all(|&b| b == 0x0)); - } } diff --git a/src/guest_memory.rs b/src/guest_memory.rs index 1271d793..5af64bc0 100644 --- a/src/guest_memory.rs +++ b/src/guest_memory.rs @@ -906,11 +906,11 @@ mod tests { use super::*; #[cfg(feature = "backend-mmap")] - use crate::bytes::ByteValued; - #[cfg(feature = "backend-mmap")] use crate::GuestAddress; #[cfg(feature = "backend-mmap")] use std::time::{Duration, Instant}; + #[cfg(feature = "backend-mmap")] + use zerocopy::{FromBytes, FromZeros, IntoBytes}; use vmm_sys_util::tempfile::TempFile; @@ -984,7 +984,12 @@ mod tests { #[cfg(not(miri))] // This test simulates a race condition between guest and vmm fn non_atomic_access_helper() where - T: ByteValued + T: Copy + + Send + + Sync + + FromBytes + + IntoBytes + + FromZeros + std::fmt::Debug + From + Into @@ -994,23 +999,14 @@ mod tests { use std::mem; use std::thread; - // A dummy type that's always going to have the same alignment as the first member, - // and then adds some bytes at the end. - #[derive(Clone, Copy, Debug, Default, PartialEq)] + // A dummy type that has its alignment removed and then adds some bytes at the end. + #[derive(Clone, Copy, Debug, Default, PartialEq, FromBytes, IntoBytes)] + #[repr(C, packed)] struct Data { val: T, some_bytes: [u8; 8], } - // Some sanity checks. 
- assert_eq!(mem::align_of::(), mem::align_of::>()); - assert_eq!(mem::size_of::(), mem::align_of::()); - - // There must be no padding bytes, as otherwise implementing ByteValued is UB - assert_eq!(mem::size_of::>(), mem::size_of::() + 8); - - unsafe impl ByteValued for Data {} - // Start of first guest memory region. let start = GuestAddress(0); let region_len = 1 << 12; @@ -1082,14 +1078,12 @@ mod tests { #[cfg(feature = "backend-mmap")] #[test] fn test_zero_length_accesses() { - #[derive(Default, Clone, Copy)] + #[derive(Default, Clone, Copy, FromBytes, IntoBytes)] #[repr(C)] struct ZeroSizedStruct { dummy: [u32; 0], } - unsafe impl ByteValued for ZeroSizedStruct {} - let addr = GuestAddress(0x1000); let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap(); let obj = ZeroSizedStruct::default(); diff --git a/src/lib.rs b/src/lib.rs index e7456829..6763f622 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -46,7 +46,7 @@ pub use atomic_integer::AtomicInteger; pub mod bitmap; pub mod bytes; -pub use bytes::{AtomicAccess, ByteValued, Bytes}; +pub use bytes::{AtomicAccess, Bytes}; pub mod guest_memory; pub use guest_memory::{ diff --git a/src/volatile_memory.rs b/src/volatile_memory.rs index 9229d6c5..1a71d0ce 100644 --- a/src/volatile_memory.rs +++ b/src/volatile_memory.rs @@ -34,10 +34,11 @@ use std::ptr::copy; use std::ptr::{read_volatile, write_volatile}; use std::result; use std::sync::atomic::Ordering; +use zerocopy::{FromBytes, FromZeros, IntoBytes}; use crate::atomic_integer::AtomicInteger; use crate::bitmap::{Bitmap, BitmapSlice, BS}; -use crate::{AtomicAccess, ByteValued, Bytes}; +use crate::{AtomicAccess, Bytes}; #[cfg(all(feature = "backend-mmap", feature = "xen", target_family = "unix"))] use crate::mmap::xen::{MmapXen as MmapInfo, MmapXenSlice}; @@ -131,7 +132,10 @@ pub trait VolatileMemory { } /// Gets a `VolatileRef` at `offset`. 
- fn get_ref(&self, offset: usize) -> Result>> { + fn get_ref( + &self, + offset: usize, + ) -> Result>> { let slice = self.get_slice(offset, size_of::())?; assert_eq!( @@ -156,7 +160,7 @@ pub trait VolatileMemory { /// Returns a [`VolatileArrayRef`](struct.VolatileArrayRef.html) of `n` elements starting at /// `offset`. - fn get_array_ref( + fn get_array_ref( &self, offset: usize, n: usize, @@ -202,7 +206,10 @@ pub trait VolatileMemory { /// /// If the resulting pointer is not aligned, this method will return an /// [`Error`](enum.Error.html). - unsafe fn aligned_as_ref(&self, offset: usize) -> Result<&T> { + unsafe fn aligned_as_ref( + &self, + offset: usize, + ) -> Result<&T> { let slice = self.get_slice(offset, size_of::())?; slice.check_alignment(align_of::())?; @@ -238,7 +245,10 @@ pub trait VolatileMemory { // the function is unsafe, and the conversion is safe if following the safety // instrutions above #[allow(clippy::mut_from_ref)] - unsafe fn aligned_as_mut(&self, offset: usize) -> Result<&mut T> { + unsafe fn aligned_as_mut( + &self, + offset: usize, + ) -> Result<&mut T> { let slice = self.get_slice(offset, size_of::())?; slice.check_alignment(align_of::())?; @@ -578,7 +588,7 @@ impl<'a, B: BitmapSlice> VolatileSlice<'a, B> { /// ``` pub fn copy_to(&self, buf: &mut [T]) -> usize where - T: ByteValued, + T: Copy + Send + Sync + FromBytes + IntoBytes + FromZeros, { // A fast path for u8/i8 if size_of::() == 1 { @@ -657,7 +667,7 @@ impl<'a, B: BitmapSlice> VolatileSlice<'a, B> { /// ``` pub fn copy_from(&self, buf: &[T]) where - T: ByteValued, + T: Copy + Send + Sync + FromBytes + IntoBytes + FromZeros, { // A fast path for u8/i8 if size_of::() == 1 { @@ -897,7 +907,7 @@ pub struct VolatileRef<'a, T, B = ()> { impl VolatileRef<'_, T, ()> where - T: ByteValued, + T: Copy + Send + Sync + FromBytes + IntoBytes + FromZeros, { /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`. 
/// @@ -915,7 +925,7 @@ where #[allow(clippy::len_without_is_empty)] impl<'a, T, B> VolatileRef<'a, T, B> where - T: ByteValued, + T: Copy + Send + Sync + FromBytes + IntoBytes + FromZeros, B: BitmapSlice, { /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`, using the @@ -1028,7 +1038,7 @@ pub struct VolatileArrayRef<'a, T, B = ()> { impl VolatileArrayRef<'_, T> where - T: ByteValued, + T: Copy + Send + Sync + FromBytes + IntoBytes + FromZeros, { /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of /// type `T`. @@ -1046,7 +1056,7 @@ where impl<'a, T, B> VolatileArrayRef<'a, T, B> where - T: ByteValued, + T: Copy + Send + Sync + FromBytes + IntoBytes + FromZeros, B: BitmapSlice, { /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of @@ -2030,12 +2040,11 @@ mod tests { #[test] fn test_read_from_exceeds_size() { - #[derive(Debug, Default, Copy, Clone)] + #[derive(Debug, Default, Copy, Clone, FromBytes, IntoBytes)] struct BytesToRead { _val1: u128, // 16 bytes _val2: u128, // 16 bytes } - unsafe impl ByteValued for BytesToRead {} let cursor_size = 20; let image = vec![1u8; cursor_size]; @@ -2045,7 +2054,7 @@ mod tests { assert_eq!( image .as_slice() - .read_volatile(&mut bytes_to_read.as_bytes()) + .read_volatile(&mut VolatileSlice::from(bytes_to_read.as_mut_bytes())) .unwrap(), cursor_size ); @@ -2268,7 +2277,7 @@ mod tests { index: usize, page_size: NonZeroUsize, ) where - T: ByteValued + From, + T: Copy + Send + Sync + FromBytes + IntoBytes + FromZeros + From, { let bitmap = AtomicBitmap::new(size_of_val(buf), page_size); let arr = unsafe { From a10b7a95a62a52e8c9beed9b451406007bb18501 Mon Sep 17 00:00:00 2001 From: Julian Schindel Date: Fri, 27 Mar 2026 22:08:22 +0100 Subject: [PATCH 3/5] zerocopy: simplify two unsafes with zerocopy Only creates a (mutable) slice from a pointer instead of creating a reference to a type `T`. 
Moves alignement and size checks to `zerocopy`. Signed-off-by: Julian Schindel --- src/volatile_memory.rs | 85 +++++++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 34 deletions(-) diff --git a/src/volatile_memory.rs b/src/volatile_memory.rs index 1a71d0ce..0e4af418 100644 --- a/src/volatile_memory.rs +++ b/src/volatile_memory.rs @@ -34,7 +34,7 @@ use std::ptr::copy; use std::ptr::{read_volatile, write_volatile}; use std::result; use std::sync::atomic::Ordering; -use zerocopy::{FromBytes, FromZeros, IntoBytes}; +use zerocopy::{CastError, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout}; use crate::atomic_integer::AtomicInteger; use crate::bitmap::{Bitmap, BitmapSlice, BS}; @@ -206,27 +206,36 @@ pub trait VolatileMemory { /// /// If the resulting pointer is not aligned, this method will return an /// [`Error`](enum.Error.html). - unsafe fn aligned_as_ref( + unsafe fn aligned_as_ref< + T: Copy + Send + Sync + FromBytes + IntoBytes + FromZeros + Immutable + KnownLayout, + >( &self, offset: usize, ) -> Result<&T> { let slice = self.get_slice(offset, size_of::())?; - slice.check_alignment(align_of::())?; - - assert_eq!( - slice.len(), - size_of::(), - "VolatileMemory::get_slice(offset, count) returned slice of length != count." - ); - // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that - // slice.addr is valid memory of size slice.len(). The assert above ensures that - // the length of the slice is exactly enough to hold one `T`. - // Dereferencing the pointer is safe because we check the alignment above, and the invariants - // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the - // returned VolatileArrayRef match that of the VolatileSlice returned by get_slice and thus the - // lifetime one `self`. 
- unsafe { Ok(&*(slice.addr as *const T)) } + let slice_addr = slice.addr; + let size_of_t = size_of::(); + + // SAFETY: Creating a byte slice from the pointer is safe because the invariants of the + // constructors of VolatileSlice ensure that slice.addr is valid memory of size slice.len() + // and the invariants of this function ensure that no aliasing reference exists. + // Lastly, the lifetime of the returned reference matches that of the VolatileSlice returned + // by get_slice and thus the lifetime one `self`. + let rust_slice = unsafe { core::slice::from_raw_parts(slice_addr, size_of_t) }; + T::ref_from_bytes(rust_slice).map_err(|error| match error { + CastError::<_, T>::Alignment(_) => Error::Misaligned { + addr: slice.addr.addr(), + alignment: align_of::(), + }, + CastError::<_, T>::Size(size) => { + panic!("VolatileMemory::get_slice({}, count) returned slice of length != count, expected: \"{}\" actual: \"{}\".", offset, size_of::(), size.into_src().len()); + } + CastError::<_, T>::Validity(infallible) => { + // Ensures this error is actually infallible. + match infallible {} + } + }) } /// Returns a mutable reference to an instance of `T` at `offset`. Mutable accesses performed @@ -245,28 +254,36 @@ pub trait VolatileMemory { // the function is unsafe, and the conversion is safe if following the safety // instrutions above #[allow(clippy::mut_from_ref)] - unsafe fn aligned_as_mut( + unsafe fn aligned_as_mut< + T: Copy + Send + Sync + FromBytes + IntoBytes + FromZeros + KnownLayout, + >( &self, offset: usize, ) -> Result<&mut T> { let slice = self.get_slice(offset, size_of::())?; - slice.check_alignment(align_of::())?; - - assert_eq!( - slice.len(), - size_of::(), - "VolatileMemory::get_slice(offset, count) returned slice of length != count." - ); - - // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that - // slice.addr is valid memory of size slice.len(). 
The assert above ensures that - // the length of the slice is exactly enough to hold one `T`. - // Dereferencing the pointer is safe because we check the alignment above, and the invariants - // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the - // returned VolatileArrayRef match that of the VolatileSlice returned by get_slice and thus the - // lifetime one `self`. - unsafe { Ok(&mut *(slice.addr as *mut T)) } + let slice_addr = slice.addr; + let size_of_t = size_of::(); + + // SAFETY: Creating a mutable byte slice from the pointer is safe because the invariants of the + // constructors of VolatileSlice ensure that slice.addr is valid memory of size slice.len() + // and the invariants of this function ensure that no aliasing reference exists. + // Lastly, the lifetime of the returned reference matches that of the VolatileSlice returned + // by get_slice and thus the lifetime one `self`. + let rust_slice = unsafe { core::slice::from_raw_parts_mut(slice_addr, size_of_t) }; + T::mut_from_bytes(rust_slice).map_err(|error| match error { + CastError::<_, T>::Alignment(_) => Error::Misaligned { + addr: slice.addr.addr(), + alignment: align_of::(), + }, + CastError::<_, T>::Size(size) => { + panic!("VolatileMemory::get_slice({}, count) returned slice of length != count, expected: \"{}\" actual: \"{}\".", offset, size_of::(), size.into_src().len()); + } + CastError::<_, T>::Validity(infallible) => { + // Ensures this error is actually infallible. + match infallible {} + } + }) } /// Returns a reference to an instance of `T` at `offset`. Mutable accesses performed From 8266cf541f4ce048e13bd0db5ea87b56c9b16b73 Mon Sep 17 00:00:00 2001 From: Julian Schindel Date: Sun, 29 Mar 2026 19:25:22 +0200 Subject: [PATCH 4/5] zerocopy: update documentation after removing `ByteValued` Removes mentions of `ByteValued` and updates the changelog. 
Signed-off-by: Julian Schindel --- CHANGELOG.md | 8 ++++++++ DESIGN.md | 17 +---------------- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 151a36c0..93e80029 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## Upcoming Release + +## Removed + +- \[[#374](https://github.com/rust-vmm/vm-memory/pull/374)\] Removed `ByteValued` trait and `endian` module in favor of the `zerocopy` crate. + `ByteValued`'s functionality is now provided by `zerocopy::IntoBytes`, `zerocopy::FromBytes` and `zerocopy::FromZeros`. + `ByteValued::as_bytes` can be replaced with `VolatileSlice::from` for `&mut [u8]`. + ## 0.18.0 ### Changed diff --git a/DESIGN.md b/DESIGN.md index bcb0c4ec..d485ae17 100644 --- a/DESIGN.md +++ b/DESIGN.md @@ -35,12 +35,11 @@ The `vm-memory` is derived from two upstream projects: The high level abstraction of the VM memory has been heavily refactored to provide a VMM agnostic interface. -The `vm-memory` crate could be divided into four logic parts as: +The `vm-memory` crate could be divided into three logic parts as: - [Abstraction of Address Space](#abstraction-of-address-space) - [Specialization for Virtual Machine Physical Address Space](#specialization-for-virtual-machine-physical-address-space) - [Backend Implementation Based on `mmap`](#backend-implementation-based-on-mmap) -- [Utilities and helpers](#utilities-and-helpers) ### Address Space Abstraction @@ -146,20 +145,6 @@ object to translate I/O virtual addresses (IOVAs) into VMM user addresses (VUAs), which are then passed to the inner `GuestMemoryBackend` implementation (like `GuestMemoryMmap`). -### Utilities and Helpers - -The following utilities and helper traits/macros are imported from the -[crosvm project](https://chromium.googlesource.com/chromiumos/platform/crosvm/) -with minor changes: - -- `ByteValued` (originally `DataInit`): types which are safe to be initialized - from raw data. 
A type `T` is `ByteValued` if and only if it can be - initialized by reading its contents from a byte array. This is generally true - for all plain-old-data structs. It is notably not true for any type that - includes a reference. -- `{Le,Be}_{16,32,64}`: explicit endian types useful for embedding in structs - or reinterpreting data. - ## Relationships between Traits, Structs and Types **Traits**: From dc5f1849674f9b2bdcea64c76f206309eb73cf85 Mon Sep 17 00:00:00 2001 From: Julian Schindel Date: Thu, 9 Apr 2026 19:27:56 +0200 Subject: [PATCH 5/5] chore: update coverage for x86_64 Adapts expected coverage to code change from `ByteValued` removal. Signed-off-by: Julian Schindel --- coverage_config_x86_64.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coverage_config_x86_64.json b/coverage_config_x86_64.json index 9a32c287..48bb8b50 100644 --- a/coverage_config_x86_64.json +++ b/coverage_config_x86_64.json @@ -1,5 +1,5 @@ { - "coverage_score": 90.56, + "coverage_score": 89.86, "exclude_path": "mmap_windows.rs", "crate_features": "backend-mmap,backend-atomic,backend-bitmap,iommu" }