
# Objective

- Fixes #6370
- Closes #6581

## Solution

- Added the following lints to the workspace (an illustrative sketch of the resulting crate-root changes follows this description):
  - `std_instead_of_core`
  - `std_instead_of_alloc`
  - `alloc_instead_of_core`
- Used `cargo +nightly fmt` with [item level use formatting](https://rust-lang.github.io/rustfmt/?version=v1.6.0&search=#Item%5C%3A) to split all `use` statements into single items.
- Used `cargo clippy --workspace --all-targets --all-features --fix --allow-dirty` to _attempt_ to resolve the new linting issues, and intervened where the lint was unable to resolve the issue automatically (usually due to needing an `extern crate alloc;` statement in a crate root).
- Manually removed certain uses of `std` where negative feature gating prevented `--all-features` from finding the offending uses.
- Used `cargo +nightly fmt` with [crate level use formatting](https://rust-lang.github.io/rustfmt/?version=v1.6.0&search=#Crate%5C%3A) to re-merge all `use` statements, matching Bevy's previous styling.
- Manually fixed cases where the `fmt` tool could not re-merge `use` statements due to conditional compilation attributes.

## Testing

- Ran CI locally.

## Migration Guide

The MSRV is now 1.81. Please update to this version or higher.

## Notes

- This is a _massive_ change to try and push through, which is why I've outlined the semi-automatic steps I used to create this PR, in case it fails and someone else tries again in the future.
- Making this change has no impact on user code, but it does mean Bevy contributors will be warned to use `core` and `alloc` instead of `std` where possible.
- These lints are a critical first step towards investigating `no_std` options for Bevy.

---------

Co-authored-by: François Mockers <francois.mockers@vleue.com>
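For context, here is a minimal, hypothetical sketch of what these lints and the `extern crate alloc;` fix-ups look like from a single crate's point of view. The PR wires the lints up at the workspace level; the crate-level `#![warn(...)]` attributes below are only an equivalent illustration, and the function names (`clamp_chunk_size`, `shared`) are made up for the example.

```rust
// Hypothetical crate root (src/lib.rs) illustrating the effect of the new lints.
// The PR enables these lints workspace-wide; enabling them per-crate as shown
// here is just an equivalent sketch for illustration.
#![warn(
    clippy::std_instead_of_core,
    clippy::std_instead_of_alloc,
    clippy::alloc_instead_of_core
)]

// Unlike `std`, `alloc` is not linked implicitly, so crates that switch paths
// like `std::vec::Vec` or `std::sync::Arc` over to `alloc` need this statement
// in the crate root (the manual intervention mentioned above).
extern crate alloc;

use alloc::{sync::Arc, vec::Vec};
// Items that exist in `core` should be taken from `core`, not `std`:
use core::cmp::max;

/// Hypothetical helper: `core::cmp::max` replaces `std::cmp::max` with
/// identical behavior and no `std` dependency.
pub fn clamp_chunk_size(len: usize, threads: usize) -> usize {
    max(1, len / max(1, threads))
}

/// Hypothetical helper: `Arc` and `Vec` come from `alloc` rather than `std`.
pub fn shared(values: Vec<u32>) -> Arc<Vec<u32>> {
    Arc::new(values)
}
```

With the lints active, spelling these paths through `std` produces a warning suggesting the `core`/`alloc` equivalent, which is what makes the later `no_std` investigation mentioned in the notes practical.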
use super::TaskPool;

/// Provides functions for mapping read-only slices across a provided [`TaskPool`].
pub trait ParallelSlice<T: Sync>: AsRef<[T]> {
    /// Splits the slice into chunks of size `chunk_size` or less and maps the chunks
    /// in parallel across the provided `task_pool`. One task is spawned in the task pool
    /// for every chunk.
    ///
    /// The iteration function takes the index of the chunk in the original slice as the
    /// first argument, and the chunk as the second argument.
    ///
    /// Returns a `Vec` of the mapped results in the same order as the input.
    ///
    /// # Example
    ///
    /// ```
    /// # use bevy_tasks::prelude::*;
    /// # use bevy_tasks::TaskPool;
    /// let task_pool = TaskPool::new();
    /// let counts = (0..10000).collect::<Vec<u32>>();
    /// let incremented = counts.par_chunk_map(&task_pool, 100, |_index, chunk| {
    ///     let mut results = Vec::new();
    ///     for count in chunk {
    ///         results.push(*count + 2);
    ///     }
    ///     results
    /// });
    /// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
    /// # assert_eq!(flattened, (2..10002).collect::<Vec<u32>>());
    /// ```
    ///
    /// # See Also
    ///
    /// - [`ParallelSliceMut::par_chunk_map_mut`] for mapping mutable slices.
    /// - [`ParallelSlice::par_splat_map`] for mapping when a specific chunk size is unknown.
    fn par_chunk_map<F, R>(&self, task_pool: &TaskPool, chunk_size: usize, f: F) -> Vec<R>
    where
        F: Fn(usize, &[T]) -> R + Send + Sync,
        R: Send + 'static,
    {
        let slice = self.as_ref();
        let f = &f;
        task_pool.scope(|scope| {
            for (index, chunk) in slice.chunks(chunk_size).enumerate() {
                scope.spawn(async move { f(index, chunk) });
            }
        })
    }

    /// Splits the slice into a maximum of `max_tasks` chunks, and maps the chunks in parallel
    /// across the provided `task_pool`. One task is spawned in the task pool for every chunk.
    ///
    /// If `max_tasks` is `None`, this function will attempt to use one chunk per thread in
    /// `task_pool`.
    ///
    /// The iteration function takes the index of the chunk in the original slice as the
    /// first argument, and the chunk as the second argument.
    ///
    /// Returns a `Vec` of the mapped results in the same order as the input.
    ///
    /// # Example
    ///
    /// ```
    /// # use bevy_tasks::prelude::*;
    /// # use bevy_tasks::TaskPool;
    /// let task_pool = TaskPool::new();
    /// let counts = (0..10000).collect::<Vec<u32>>();
    /// let incremented = counts.par_splat_map(&task_pool, None, |_index, chunk| {
    ///     let mut results = Vec::new();
    ///     for count in chunk {
    ///         results.push(*count + 2);
    ///     }
    ///     results
    /// });
    /// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
    /// # assert_eq!(flattened, (2..10002).collect::<Vec<u32>>());
    /// ```
    ///
    /// # See Also
    ///
    /// - [`ParallelSliceMut::par_splat_map_mut`] for mapping mutable slices.
    /// - [`ParallelSlice::par_chunk_map`] for mapping when a specific chunk size is desirable.
    fn par_splat_map<F, R>(&self, task_pool: &TaskPool, max_tasks: Option<usize>, f: F) -> Vec<R>
    where
        F: Fn(usize, &[T]) -> R + Send + Sync,
        R: Send + 'static,
    {
        let slice = self.as_ref();
        let chunk_size = core::cmp::max(
            1,
            core::cmp::max(
                slice.len() / task_pool.thread_num(),
                slice.len() / max_tasks.unwrap_or(usize::MAX),
            ),
        );

        slice.par_chunk_map(task_pool, chunk_size, f)
    }
}

impl<S, T: Sync> ParallelSlice<T> for S where S: AsRef<[T]> {}

/// Provides functions for mapping mutable slices across a provided [`TaskPool`].
pub trait ParallelSliceMut<T: Send>: AsMut<[T]> {
    /// Splits the slice into chunks of size `chunk_size` or less and maps the chunks
    /// in parallel across the provided `task_pool`. One task is spawned in the task pool
    /// for every chunk.
    ///
    /// The iteration function takes the index of the chunk in the original slice as the
    /// first argument, and the chunk as the second argument.
    ///
    /// Returns a `Vec` of the mapped results in the same order as the input.
    ///
    /// # Example
    ///
    /// ```
    /// # use bevy_tasks::prelude::*;
    /// # use bevy_tasks::TaskPool;
    /// let task_pool = TaskPool::new();
    /// let mut counts = (0..10000).collect::<Vec<u32>>();
    /// let incremented = counts.par_chunk_map_mut(&task_pool, 100, |_index, chunk| {
    ///     let mut results = Vec::new();
    ///     for count in chunk {
    ///         *count += 5;
    ///         results.push(*count - 2);
    ///     }
    ///     results
    /// });
    ///
    /// assert_eq!(counts, (5..10005).collect::<Vec<u32>>());
    /// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
    /// # assert_eq!(flattened, (3..10003).collect::<Vec<u32>>());
    /// ```
    ///
    /// # See Also
    ///
    /// - [`ParallelSlice::par_chunk_map`] for mapping immutable slices.
    /// - [`ParallelSliceMut::par_splat_map_mut`] for mapping when a specific chunk size is unknown.
    fn par_chunk_map_mut<F, R>(&mut self, task_pool: &TaskPool, chunk_size: usize, f: F) -> Vec<R>
    where
        F: Fn(usize, &mut [T]) -> R + Send + Sync,
        R: Send + 'static,
    {
        let slice = self.as_mut();
        let f = &f;
        task_pool.scope(|scope| {
            for (index, chunk) in slice.chunks_mut(chunk_size).enumerate() {
                scope.spawn(async move { f(index, chunk) });
            }
        })
    }

    /// Splits the slice into a maximum of `max_tasks` chunks, and maps the chunks in parallel
    /// across the provided `task_pool`. One task is spawned in the task pool for every chunk.
    ///
    /// If `max_tasks` is `None`, this function will attempt to use one chunk per thread in
    /// `task_pool`.
    ///
    /// The iteration function takes the index of the chunk in the original slice as the
    /// first argument, and the chunk as the second argument.
    ///
    /// Returns a `Vec` of the mapped results in the same order as the input.
    ///
    /// # Example
    ///
    /// ```
    /// # use bevy_tasks::prelude::*;
    /// # use bevy_tasks::TaskPool;
    /// let task_pool = TaskPool::new();
    /// let mut counts = (0..10000).collect::<Vec<u32>>();
    /// let incremented = counts.par_splat_map_mut(&task_pool, None, |_index, chunk| {
    ///     let mut results = Vec::new();
    ///     for count in chunk {
    ///         *count += 5;
    ///         results.push(*count - 2);
    ///     }
    ///     results
    /// });
    ///
    /// assert_eq!(counts, (5..10005).collect::<Vec<u32>>());
    /// # let flattened: Vec<_> = incremented.into_iter().flatten().collect::<Vec<u32>>();
    /// # assert_eq!(flattened, (3..10003).collect::<Vec<u32>>());
    /// ```
    ///
    /// # See Also
    ///
    /// - [`ParallelSlice::par_splat_map`] for mapping immutable slices.
    /// - [`ParallelSliceMut::par_chunk_map_mut`] for mapping when a specific chunk size is desirable.
    fn par_splat_map_mut<F, R>(
        &mut self,
        task_pool: &TaskPool,
        max_tasks: Option<usize>,
        f: F,
    ) -> Vec<R>
    where
        F: Fn(usize, &mut [T]) -> R + Send + Sync,
        R: Send + 'static,
    {
        let mut slice = self.as_mut();
        let chunk_size = core::cmp::max(
            1,
            core::cmp::max(
                slice.len() / task_pool.thread_num(),
                slice.len() / max_tasks.unwrap_or(usize::MAX),
            ),
        );

        slice.par_chunk_map_mut(task_pool, chunk_size, f)
    }
}

impl<S, T: Send> ParallelSliceMut<T> for S where S: AsMut<[T]> {}

#[cfg(test)]
mod tests {
    use crate::*;

    #[test]
    fn test_par_chunks_map() {
        let v = vec![42; 1000];
        let task_pool = TaskPool::new();
        let outputs = v.par_splat_map(&task_pool, None, |_, numbers| -> i32 {
            numbers.iter().sum()
        });

        let mut sum = 0;
        for output in outputs {
            sum += output;
        }

        assert_eq!(sum, 1000 * 42);
    }

    #[test]
    fn test_par_chunks_map_mut() {
        let mut v = vec![42; 1000];
        let task_pool = TaskPool::new();

        let outputs = v.par_splat_map_mut(&task_pool, None, |_, numbers| -> i32 {
            for number in numbers.iter_mut() {
                *number *= 2;
            }
            numbers.iter().sum()
        });

        let mut sum = 0;
        for output in outputs {
            sum += output;
        }

        assert_eq!(sum, 1000 * 42 * 2);
        assert_eq!(v[0], 84);
    }

    #[test]
    fn test_par_chunks_map_index() {
        let v = vec![1; 1000];
        let task_pool = TaskPool::new();
        let outputs = v.par_chunk_map(&task_pool, 100, |index, numbers| -> i32 {
            numbers.iter().sum::<i32>() * index as i32
        });

        assert_eq!(outputs.iter().sum::<i32>(), 100 * (9 * 10) / 2);
    }
}