diff --git a/src/array/array_async_readable.rs b/src/array/array_async_readable.rs
index 8be0cedf..8adefdf2 100644
--- a/src/array/array_async_readable.rs
+++ b/src/array/array_async_readable.rs
@@ -10,7 +10,6 @@ use crate::{
 };
 
 use super::{
-    chunk_shape_to_array_shape,
     codec::{
         ArrayCodecTraits, ArrayToBytesCodecTraits, AsyncArrayPartialDecoderTraits,
         AsyncStoragePartialDecoder,
@@ -158,12 +157,10 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
         &self,
         chunk_indices: &[u64],
     ) -> Result<ndarray::ArrayD<T>, ArrayError> {
-        let shape = chunk_shape_to_array_shape(
-            &self
-                .chunk_grid()
-                .chunk_shape(chunk_indices, self.shape())?
-                .ok_or_else(|| ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec()))?,
-        );
+        let shape = &self
+            .chunk_grid()
+            .chunk_shape_u64(chunk_indices, self.shape())?
+            .ok_or_else(|| ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec()))?;
         array_async_retrieve_ndarray!(self, shape, async_retrieve_chunk_elements(chunk_indices))
     }
 
@@ -520,7 +517,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
         chunk_subset: &ArraySubset,
     ) -> Result<Vec<u8>, ArrayError> {
         let chunk_representation = self.chunk_array_representation(chunk_indices)?;
-        if !chunk_subset.inbounds(&chunk_shape_to_array_shape(chunk_representation.shape())) {
+        if !chunk_subset.inbounds(&chunk_representation.shape_u64()) {
             return Err(ArrayError::InvalidArraySubset(
                 chunk_subset.clone(),
                 self.shape().to_vec(),
diff --git a/src/array/array_async_readable_writable.rs b/src/array/array_async_readable_writable.rs
index de5de7b4..f9d56943 100644
--- a/src/array/array_async_readable_writable.rs
+++ b/src/array/array_async_readable_writable.rs
@@ -5,7 +5,7 @@ use crate::{
     storage::{data_key, AsyncReadableWritableStorageTraits},
 };
 
-use super::{chunk_shape_to_array_shape, Array, ArrayError};
+use super::{Array, ArrayError};
 
 impl<TStorage: ?Sized + AsyncReadableWritableStorageTraits> Array<TStorage> {
     /// Encode `subset_bytes` and store in `array_subset`.
@@ -195,8 +195,10 @@ impl<TStorage: ?Sized + AsyncReadableWritableStorageTraits> Array<TStorage> {
         chunk_subset_bytes: Vec<u8>,
     ) -> Result<(), ArrayError> {
         // Validation
-        if let Some(chunk_shape) = self.chunk_grid().chunk_shape(chunk_indices, self.shape())? {
-            let chunk_shape = chunk_shape_to_array_shape(&chunk_shape);
+        if let Some(chunk_shape) = self
+            .chunk_grid()
+            .chunk_shape_u64(chunk_indices, self.shape())?
+        {
             if std::iter::zip(chunk_subset.end_exc(), &chunk_shape)
                 .any(|(end_exc, shape)| end_exc > *shape)
             {
diff --git a/src/array/array_sync_readable.rs b/src/array/array_sync_readable.rs
index bd7af79e..18d6b77a 100644
--- a/src/array/array_sync_readable.rs
+++ b/src/array/array_sync_readable.rs
@@ -9,7 +9,6 @@ use crate::{
 };
 
 use super::{
-    chunk_shape_to_array_shape,
     codec::{
         ArrayCodecTraits, ArrayPartialDecoderTraits, ArrayToBytesCodecTraits, StoragePartialDecoder,
     },
@@ -152,12 +151,10 @@ impl<TStorage: ?Sized + ReadableStorageTraits> Array<TStorage> {
         &self,
         chunk_indices: &[u64],
     ) -> Result<ndarray::ArrayD<T>, ArrayError> {
-        let shape = crate::array::chunk_shape_to_array_shape(
-            &self
-                .chunk_grid()
-                .chunk_shape(chunk_indices, self.shape())?
-                .ok_or_else(|| ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec()))?,
-        );
+        let shape = self
+            .chunk_grid()
+            .chunk_shape_u64(chunk_indices, self.shape())?
+            .ok_or_else(|| ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec()))?;
         array_retrieve_ndarray!(self, shape, retrieve_chunk_elements(chunk_indices))
     }
 
@@ -600,7 +597,7 @@ impl<TStorage: ?Sized + ReadableStorageTraits> Array<TStorage> {
         chunk_subset: &ArraySubset,
     ) -> Result<Vec<u8>, ArrayError> {
         let chunk_representation = self.chunk_array_representation(chunk_indices)?;
-        if !chunk_subset.inbounds(&chunk_shape_to_array_shape(chunk_representation.shape())) {
+        if !chunk_subset.inbounds(&chunk_representation.shape_u64()) {
             return Err(ArrayError::InvalidArraySubset(
                 chunk_subset.clone(),
                 self.shape().to_vec(),
diff --git a/src/array/array_sync_readable_writable.rs b/src/array/array_sync_readable_writable.rs
index d8253e87..a9c33132 100644
--- a/src/array/array_sync_readable_writable.rs
+++ b/src/array/array_sync_readable_writable.rs
@@ -5,7 +5,7 @@ use crate::{
     storage::{data_key, ReadableWritableStorageTraits},
 };
 
-use super::{chunk_shape_to_array_shape, unravel_index, Array, ArrayError};
+use super::{unravel_index, Array, ArrayError};
 
 impl<TStorage: ?Sized + ReadableWritableStorageTraits> Array<TStorage> {
     /// Encode `subset_bytes` and store in `array_subset`.
@@ -259,12 +259,10 @@ impl<TStorage: ?Sized + ReadableWritableStorageTraits> Array<TStorage> {
         chunk_subset: &ArraySubset,
         chunk_subset_bytes: Vec<u8>,
     ) -> Result<(), ArrayError> {
-        let chunk_shape = chunk_shape_to_array_shape(
-            &self
-                .chunk_grid()
-                .chunk_shape(chunk_indices, self.shape())?
-                .ok_or_else(|| ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec()))?,
-        );
+        let chunk_shape = self
+            .chunk_grid()
+            .chunk_shape_u64(chunk_indices, self.shape())?
+            .ok_or_else(|| ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec()))?;
 
         // Validation
         if std::iter::zip(chunk_subset.end_exc(), &chunk_shape)
diff --git a/src/array/chunk_grid.rs b/src/array/chunk_grid.rs
index c033d481..650929f0 100644
--- a/src/array/chunk_grid.rs
+++ b/src/array/chunk_grid.rs
@@ -20,7 +20,6 @@ pub use regular::{RegularChunkGrid, RegularChunkGridConfiguration};
 use derive_more::{Deref, From};
 
 use crate::{
-    array::chunk_shape_to_array_shape,
     array_subset::{ArraySubset, IncompatibleDimensionalityError},
     metadata::Metadata,
     plugin::{Plugin, PluginCreateError},
@@ -403,9 +402,8 @@ pub trait ChunkGridTraits: dyn_clone::DynClone + core::fmt::Debug + Send + Sync
         debug_assert_eq!(self.dimensionality(), chunk_indices.len());
         if let (Some(chunk_origin), Some(chunk_shape)) = (
             self.chunk_origin_unchecked(chunk_indices, array_shape),
-            self.chunk_shape_unchecked(chunk_indices, array_shape),
+            self.chunk_shape_u64_unchecked(chunk_indices, array_shape),
         ) {
-            let chunk_shape = chunk_shape_to_array_shape(&chunk_shape);
             Some(ArraySubset::new_with_start_shape_unchecked(
                 chunk_origin,
                 chunk_shape,
diff --git a/src/array/chunk_grid/regular.rs b/src/array/chunk_grid/regular.rs
index ba3f490b..17849a69 100644
--- a/src/array/chunk_grid/regular.rs
+++ b/src/array/chunk_grid/regular.rs
@@ -112,6 +112,16 @@ impl RegularChunkGrid {
     pub fn chunk_shape(&self) -> &[NonZeroU64] {
         self.chunk_shape.as_slice()
     }
+
+    /// Return the chunk shape as an [`ArrayShape`] ([`Vec<u64>`]).
+    #[must_use]
+    pub fn chunk_shape_u64(&self) -> Vec<u64> {
+        self.chunk_shape
+            .iter()
+            .copied()
+            .map(NonZeroU64::get)
+            .collect::<Vec<u64>>()
+    }
 }
 
 impl ChunkGridTraits for RegularChunkGrid {
diff --git a/src/array/codec/array_to_array/transpose/transpose_codec.rs b/src/array/codec/array_to_array/transpose/transpose_codec.rs
index 1348eab5..d5394720 100644
--- a/src/array/codec/array_to_array/transpose/transpose_codec.rs
+++ b/src/array/codec/array_to_array/transpose/transpose_codec.rs
@@ -3,7 +3,6 @@ use thiserror::Error;
 
 use crate::{
     array::{
-        chunk_shape_to_array_shape,
         codec::{
             ArrayCodecTraits, ArrayPartialDecoderTraits, ArrayToArrayCodecTraits, CodecError,
             CodecTraits,
@@ -138,7 +137,7 @@ impl ArrayCodecTraits for TransposeCodec {
             calculate_order_encode(&self.order, decoded_representation.shape().len());
         transpose_array(
             &order_encode,
-            &chunk_shape_to_array_shape(decoded_representation.shape()),
+            &decoded_representation.shape_u64(),
             decoded_representation.element_size(),
             &decoded_value,
         )
@@ -158,10 +157,10 @@ impl ArrayCodecTraits for TransposeCodec {
     ) -> Result<Vec<u8>, CodecError> {
         let order_decode =
             calculate_order_decode(&self.order, decoded_representation.shape().len());
-        let transposed_shape = permute(decoded_representation.shape(), &self.order);
+        let transposed_shape = permute(&decoded_representation.shape_u64(), &self.order);
         transpose_array(
             &order_decode,
-            &chunk_shape_to_array_shape(&transposed_shape),
+            &transposed_shape,
             decoded_representation.element_size(),
             &encoded_value,
         )
diff --git a/src/array/codec/array_to_bytes/bytes/bytes_partial_decoder.rs b/src/array/codec/array_to_bytes/bytes/bytes_partial_decoder.rs
index fdac98dd..d7142ef9 100644
--- a/src/array/codec/array_to_bytes/bytes/bytes_partial_decoder.rs
+++ b/src/array/codec/array_to_bytes/bytes/bytes_partial_decoder.rs
@@ -1,6 +1,5 @@
 use crate::{
     array::{
-        chunk_shape_to_array_shape,
         codec::{ArrayPartialDecoderTraits, ArraySubset, BytesPartialDecoderTraits, CodecError},
         ChunkRepresentation,
     },
@@ -41,7 +40,7 @@ impl ArrayPartialDecoderTraits for BytesPartialDecoder<'_> {
         parallel: bool,
     ) -> Result<Vec<Vec<u8>>, CodecError> {
         let mut bytes = Vec::with_capacity(decoded_regions.len());
-        let chunk_shape = chunk_shape_to_array_shape(self.decoded_representation.shape());
+        let chunk_shape = self.decoded_representation.shape_u64();
         for array_subset in decoded_regions {
             // Get byte ranges
             let byte_ranges = array_subset
@@ -118,7 +117,7 @@ impl AsyncArrayPartialDecoderTraits for AsyncBytesPartialDecoder<'_> {
         parallel: bool,
     ) -> Result<Vec<Vec<u8>>, CodecError> {
         let mut bytes = Vec::with_capacity(decoded_regions.len());
-        let chunk_shape = chunk_shape_to_array_shape(self.decoded_representation.shape());
+        let chunk_shape = self.decoded_representation.shape_u64();
         for array_subset in decoded_regions {
             // Get byte ranges
             let byte_ranges = array_subset
diff --git a/src/array/codec/array_to_bytes/pcodec/pcodec_partial_decoder.rs b/src/array/codec/array_to_bytes/pcodec/pcodec_partial_decoder.rs
index 01888071..222f679e 100644
--- a/src/array/codec/array_to_bytes/pcodec/pcodec_partial_decoder.rs
+++ b/src/array/codec/array_to_bytes/pcodec/pcodec_partial_decoder.rs
@@ -1,6 +1,5 @@
 use crate::{
     array::{
-        chunk_shape_to_array_shape,
         codec::{ArrayPartialDecoderTraits, ArraySubset, BytesPartialDecoderTraits, CodecError},
         ChunkRepresentation, DataType,
     },
@@ -35,7 +34,7 @@ fn do_partial_decode(
     decoded_regions: &[ArraySubset],
     decoded_representation: &ChunkRepresentation,
 ) -> Result<Vec<Vec<u8>>, CodecError> {
     let mut decoded_bytes = Vec::with_capacity(decoded_regions.len());
-    let chunk_shape = chunk_shape_to_array_shape(decoded_representation.shape());
+    let chunk_shape = decoded_representation.shape_u64();
     match decoded {
         None => {
             for array_subset in decoded_regions {
diff --git a/src/array/codec/array_to_bytes/sharding/sharding_codec.rs b/src/array/codec/array_to_bytes/sharding/sharding_codec.rs
index 97ef99c5..896cd26e 100644
--- a/src/array/codec/array_to_bytes/sharding/sharding_codec.rs
+++ b/src/array/codec/array_to_bytes/sharding/sharding_codec.rs
@@ -384,7 +384,7 @@ impl ShardingCodec {
         let shard_slice = unsafe {
             std::slice::from_raw_parts_mut(shard.as_mut_ptr().cast::<u8>(), shard.len())
         };
-        let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+        let shard_shape = shard_representation.shape_u64();
         for (chunk_index, (_chunk_indices, chunk_subset)) in unsafe {
             ArraySubset::new_with_shape(shard_shape.clone())
                 .iter_chunks_unchecked(self.chunk_shape.as_slice())
@@ -468,7 +468,7 @@
             ShardingIndexLocation::Start => index_encoded_size,
             ShardingIndexLocation::End => 0,
         };
-        let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+        let shard_shape = shard_representation.shape_u64();
         for (chunk_index, (_chunk_indices, chunk_subset)) in unsafe {
             ArraySubset::new_with_shape(shard_shape.clone())
                 .iter_chunks_unchecked(self.chunk_shape.as_slice())
@@ -562,7 +562,7 @@ impl ShardingCodec {
         };
         let shard_slice = UnsafeCellSlice::new(shard_slice);
         let shard_index_slice = UnsafeCellSlice::new(&mut shard_index);
-        let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+        let shard_shape = shard_representation.shape_u64();
         (0..chunks_per_shard
             .as_slice()
             .iter()
@@ -660,7 +660,7 @@ impl ShardingCodec {
         let index_encoded_size = usize::try_from(index_encoded_size).unwrap();
 
         // Find chunks that are not entirely the fill value and collect their decoded bytes
-        let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+        let shard_shape = shard_representation.shape_u64();
         let encoded_chunks: Vec<(u64, Vec<u8>)> = (0..chunks_per_shard
             .as_slice()
             .iter()
@@ -780,7 +780,7 @@
             ShardingIndexLocation::Start => index_encoded_size,
             ShardingIndexLocation::End => 0,
         };
-        let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+        let shard_shape = shard_representation.shape_u64();
         for (chunk_index, (_chunk_indices, chunk_subset)) in unsafe {
             ArraySubset::new_with_shape(shard_shape.clone())
                 .iter_chunks_unchecked(self.chunk_shape.as_slice())
@@ -884,7 +884,7 @@ impl ShardingCodec {
         let shard_slice = unsafe {
             std::slice::from_raw_parts_mut(shard.as_mut_ptr().cast::<u8>(), shard.len())
         };
-        let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+        let shard_shape = shard_representation.shape_u64();
         for (chunk_index, (_chunk_indices, chunk_subset)) in unsafe {
             ArraySubset::new_with_shape(shard_shape.clone())
                 .iter_chunks_unchecked(self.chunk_shape.as_slice())
@@ -998,7 +998,7 @@ impl ShardingCodec {
         let shard_slice = UnsafeCellSlice::new(shard_slice);
         let shard_index_slice = UnsafeCellSlice::new(&mut shard_index);
         let chunks_per_shard = &chunks_per_shard;
-        let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+        let shard_shape = shard_representation.shape_u64();
         let futures = (0..chunks_per_shard
             .as_slice()
             .iter()
@@ -1119,7 +1119,7 @@ impl ShardingCodec {
         let index_encoded_size = usize::try_from(index_encoded_size).unwrap();
 
         // Encode the chunks
-        let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+        let shard_shape = shard_representation.shape_u64();
         let encoded_chunks = futures::future::join_all(
             (0..chunks_per_shard
                 .as_slice()
@@ -1332,7 +1332,7 @@ impl ShardingCodec {
             )
         };
 
-        let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+        let shard_shape = shard_representation.shape_u64();
         if parallel {
             let chunks_per_shard = calculate_chunks_per_shard(
                 shard_representation.shape(),
@@ -1387,7 +1387,7 @@ impl ShardingCodec {
             })?;
         } else {
             let element_size = chunk_representation.element_size() as u64;
-            let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+            let shard_shape = shard_representation.shape_u64();
             for (chunk_index, (_chunk_indices, chunk_subset)) in unsafe {
                 ArraySubset::new_with_shape(shard_shape.clone())
                     .iter_chunks_unchecked(self.chunk_shape.as_slice())
@@ -1453,7 +1453,7 @@ impl ShardingCodec {
         };
 
         // Decode chunks
-        let shard_shape = chunk_shape_to_array_shape(shard_representation.shape());
+        let shard_shape = shard_representation.shape_u64();
         if parallel {
             let chunks_per_shard = calculate_chunks_per_shard(
                 shard_representation.shape(),
diff --git a/src/array/codec/array_to_bytes/sharding/sharding_partial_decoder.rs b/src/array/codec/array_to_bytes/sharding/sharding_partial_decoder.rs
index c34e155c..a4878eb8 100644
--- a/src/array/codec/array_to_bytes/sharding/sharding_partial_decoder.rs
+++ b/src/array/codec/array_to_bytes/sharding/sharding_partial_decoder.rs
@@ -681,9 +681,7 @@
                 })
                 .collect::<Vec<_>>();
             if !filled_chunks.is_empty() {
-                let chunk_array_ss = ArraySubset::new_with_shape(chunk_shape_to_array_shape(
-                    self.chunk_grid.chunk_shape(),
-                ));
+                let chunk_array_ss = ArraySubset::new_with_shape(self.chunk_grid.chunk_shape_u64());
                 let filled_chunk = self
                     .decoded_representation
                     .fill_value()
diff --git a/src/array/codec/array_to_bytes/zfp/zfp_partial_decoder.rs b/src/array/codec/array_to_bytes/zfp/zfp_partial_decoder.rs
index 7d0028f2..d71ddcb8 100644
--- a/src/array/codec/array_to_bytes/zfp/zfp_partial_decoder.rs
+++ b/src/array/codec/array_to_bytes/zfp/zfp_partial_decoder.rs
@@ -2,7 +2,6 @@ use zfp_sys::zfp_type;
 
 use crate::{
     array::{
-        chunk_shape_to_array_shape,
         codec::{ArrayPartialDecoderTraits, BytesPartialDecoderTraits, CodecError},
         ChunkRepresentation,
     },
@@ -56,7 +55,7 @@ impl ArrayPartialDecoderTraits for ZfpPartialDecoder<'_> {
     ) -> Result<Vec<Vec<u8>>, CodecError> {
         let encoded_value = self.input_handle.decode_opt(parallel)?;
         let mut out = Vec::with_capacity(decoded_regions.len());
-        let chunk_shape = chunk_shape_to_array_shape(self.decoded_representation.shape());
+        let chunk_shape = self.decoded_representation.shape_u64();
         match encoded_value {
             Some(encoded_value) => {
                 let decoded_value = zfp_decode(
@@ -136,7 +135,7 @@ impl AsyncArrayPartialDecoderTraits for AsyncZfpPartialDecoder<'_> {
         parallel: bool,
     ) -> Result<Vec<Vec<u8>>, CodecError> {
         let encoded_value = self.input_handle.decode_opt(parallel).await?;
-        let chunk_shape = chunk_shape_to_array_shape(self.decoded_representation.shape());
+        let chunk_shape = self.decoded_representation.shape_u64();
         let mut out = Vec::with_capacity(decoded_regions.len());
         match encoded_value {
             Some(encoded_value) => {
diff --git a/src/array/codec/partial_decoder_cache.rs b/src/array/codec/partial_decoder_cache.rs
index fa633792..2e8d9834 100644
--- a/src/array/codec/partial_decoder_cache.rs
+++ b/src/array/codec/partial_decoder_cache.rs
@@ -3,7 +3,7 @@
 use std::marker::PhantomData;
 
 use crate::{
-    array::{chunk_shape_to_array_shape, ChunkRepresentation, MaybeBytes},
+    array::{ChunkRepresentation, MaybeBytes},
     array_subset::IncompatibleArraySubsetAndShapeError,
     byte_range::{extract_byte_ranges, ByteRange},
 };
@@ -104,9 +104,9 @@ impl<'a> ArrayPartialDecoderCache<'a> {
     ) -> Result<Self, CodecError> {
         let cache = input_handle
             .partial_decode_opt(
-                &[ArraySubset::new_with_shape(chunk_shape_to_array_shape(
-                    decoded_representation.shape(),
-                ))],
+                &[ArraySubset::new_with_shape(
+                    decoded_representation.shape_u64(),
+                )],
                 parallel,
             )?
             .remove(0);
@@ -129,9 +129,9 @@ impl<'a> ArrayPartialDecoderCache<'a> {
     ) -> Result<ArrayPartialDecoderCache<'a>, CodecError> {
         let cache = input_handle
             .partial_decode_opt(
-                &[ArraySubset::new_with_shape(chunk_shape_to_array_shape(
-                    decoded_representation.shape(),
-                ))],
+                &[ArraySubset::new_with_shape(
+                    decoded_representation.shape_u64(),
+                )],
                 parallel,
             )
             .await?
             .remove(0);
@@ -151,7 +151,7 @@ impl<'a> ArrayPartialDecoderTraits for ArrayPartialDecoderCache<'a> {
         _parallel: bool,
     ) -> Result<Vec<Vec<u8>>, CodecError> {
         let mut out: Vec<Vec<u8>> = Vec::with_capacity(decoded_regions.len());
-        let array_shape = chunk_shape_to_array_shape(self.decoded_representation.shape());
+        let array_shape = self.decoded_representation.shape_u64();
         let element_size = self.decoded_representation.element_size();
         for array_subset in decoded_regions {
             out.push(
diff --git a/src/array_subset/array_subset_iterators.rs b/src/array_subset/array_subset_iterators.rs
index 14eea017..72a998c2 100644
--- a/src/array_subset/array_subset_iterators.rs
+++ b/src/array_subset/array_subset_iterators.rs
@@ -292,12 +292,12 @@ impl FusedIterator for ContiguousLinearisedIndicesIterator<'_> {}
 /// All chunks have the same size, and may extend over the bounds of the array subset.
 ///
 /// The iterator item is a ([`ArrayIndices`], [`ArraySubset`]) tuple corresponding to the chunk indices and array subset.
-pub struct ChunksIterator<'a> {
+pub struct ChunksIterator {
     inner: IndicesIterator,
-    chunk_shape: &'a [NonZeroU64],
+    chunk_shape: Vec<u64>,
 }
 
-impl<'a> ChunksIterator<'a> {
+impl ChunksIterator {
     /// Create a new chunks iterator.
     ///
     /// # Errors
@@ -305,7 +305,7 @@
     /// Returns [`IncompatibleDimensionalityError`] if `chunk_shape` does not match the dimensionality of `subset`.
     pub fn new(
         subset: &ArraySubset,
-        chunk_shape: &'a [NonZeroU64],
+        chunk_shape: &[NonZeroU64],
     ) -> Result<Self, IncompatibleDimensionalityError> {
         if subset.dimensionality() == chunk_shape.len() {
             Ok(unsafe { Self::new_unchecked(subset, chunk_shape) })
@@ -323,13 +323,14 @@
     ///
     /// The dimensionality of `chunk_shape` must match the dimensionality of `subset`.
     #[must_use]
-    pub unsafe fn new_unchecked(subset: &ArraySubset, chunk_shape: &'a [NonZeroU64]) -> Self {
+    pub unsafe fn new_unchecked(subset: &ArraySubset, chunk_shape: &[NonZeroU64]) -> Self {
         debug_assert_eq!(subset.dimensionality(), chunk_shape.len());
-        let chunk_start: ArrayIndices = std::iter::zip(subset.start(), chunk_shape)
-            .map(|(s, c)| s / c.get())
+        let chunk_shape = chunk_shape_to_array_shape(chunk_shape);
+        let chunk_start: ArrayIndices = std::iter::zip(subset.start(), &chunk_shape)
+            .map(|(s, c)| s / c)
             .collect();
-        let chunk_end_inc: ArrayIndices = std::iter::zip(subset.end_inc(), chunk_shape)
-            .map(|(e, c)| e / c.get())
+        let chunk_end_inc: ArrayIndices = std::iter::zip(subset.end_inc(), &chunk_shape)
+            .map(|(e, c)| e / c)
             .collect();
         let subset_chunks =
             unsafe { ArraySubset::new_with_start_end_inc_unchecked(chunk_start, chunk_end_inc) };
@@ -338,16 +339,17 @@
     }
 }
 
-impl Iterator for ChunksIterator<'_> {
+impl Iterator for ChunksIterator {
     type Item = (ArrayIndices, ArraySubset);
 
     fn next(&mut self) -> Option<Self::Item> {
         self.inner.next().map(|chunk_indices| {
-            let start = std::iter::zip(&chunk_indices, self.chunk_shape)
-                .map(|(i, c)| i * c.get())
+            let start = std::iter::zip(&chunk_indices, &self.chunk_shape)
+                .map(|(i, c)| i * c)
                 .collect();
-            let shape = chunk_shape_to_array_shape(self.chunk_shape);
-            let chunk_subset = unsafe { ArraySubset::new_with_start_shape_unchecked(start, shape) };
+            let chunk_subset = unsafe {
+                ArraySubset::new_with_start_shape_unchecked(start, self.chunk_shape.clone())
+            };
             (chunk_indices, chunk_subset)
         })
     }
@@ -357,9 +359,9 @@ impl Iterator for ChunksIterator<'_> {
     }
 }
 
-impl ExactSizeIterator for ChunksIterator<'_> {}
+impl ExactSizeIterator for ChunksIterator {}
 
-impl FusedIterator for ChunksIterator<'_> {}
+impl FusedIterator for ChunksIterator {}
 
 #[cfg(test)]
 mod tests {
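The pattern running through these hunks is that call sites stop converting a `NonZeroU64` chunk shape through a free helper and instead ask the shape-holding type for a `u64` shape directly (`shape_u64()` / `chunk_shape_u64()`). The sketch below is illustrative only: `Representation` and the standalone helper are stand-ins invented for this note, not this crate's types, and nothing beyond `std` is assumed.

use std::num::NonZeroU64;

// Illustrative stand-in for a type that stores a validated, non-zero chunk shape,
// e.g. a chunk representation or a regular chunk grid.
struct Representation {
    shape: Vec<NonZeroU64>,
}

impl Representation {
    // Existing accessor: the non-zero shape.
    fn shape(&self) -> &[NonZeroU64] {
        &self.shape
    }

    // New-style accessor mirrored from the diff: the same shape as plain u64s,
    // so callers no longer need a free conversion helper.
    fn shape_u64(&self) -> Vec<u64> {
        self.shape.iter().copied().map(NonZeroU64::get).collect()
    }
}

// The free helper that the diff removes from call sites.
fn chunk_shape_to_array_shape(shape: &[NonZeroU64]) -> Vec<u64> {
    shape.iter().copied().map(NonZeroU64::get).collect()
}

fn main() {
    let rep = Representation {
        shape: vec![NonZeroU64::new(8).unwrap(), NonZeroU64::new(16).unwrap()],
    };
    // Before: convert at every call site via the helper.
    let before = chunk_shape_to_array_shape(rep.shape());
    // After: ask the shape-holding type directly.
    let after = rep.shape_u64();
    assert_eq!(before, after);
    assert_eq!(after, vec![8, 16]);
}

The same idea drives the `ChunksIterator` change in the last file: once the converted shape is produced up front, the iterator can own a `Vec<u64>` instead of borrowing `&[NonZeroU64]`, which is why its lifetime parameter disappears.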