
# Objective

- Fixes #6370
- Closes #6581

## Solution

- Added the following lints to the workspace (a crate-level opt-in is sketched after this description):
  - `std_instead_of_core`
  - `std_instead_of_alloc`
  - `alloc_instead_of_core`
- Used `cargo +nightly fmt` with [item-level use formatting](https://rust-lang.github.io/rustfmt/?version=v1.6.0&search=#Item%5C%3A) to split all `use` statements into single items.
- Used `cargo clippy --workspace --all-targets --all-features --fix --allow-dirty` to _attempt_ to resolve the new linting issues, and intervened where the lint was unable to resolve the issue automatically (usually due to needing an `extern crate alloc;` statement in a crate root; an example follows this description).
- Manually removed certain uses of `std` where negative feature gating prevented `--all-features` from finding the offending uses.
- Used `cargo +nightly fmt` with [crate-level use formatting](https://rust-lang.github.io/rustfmt/?version=v1.6.0&search=#Crate%5C%3A) to re-merge all `use` statements, matching Bevy's previous styling.
- Manually fixed cases where the `fmt` tool could not re-merge `use` statements due to conditional compilation attributes.

## Testing

- Ran CI locally

## Migration Guide

The MSRV is now 1.81. Please update to this version or higher.

## Notes

- This is a _massive_ change to try and push through, which is why I've outlined the semi-automatic steps I used to create this PR, in case this fails and someone else tries again in the future.
- Making this change has no impact on user code, but it does mean Bevy contributors will be warned to use `core` and `alloc` instead of `std` where possible.
- These lints are a critical first step towards investigating `no_std` options for Bevy.

---------

Co-authored-by: François Mockers <francois.mockers@vleue.com>
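
For orientation, these Clippy lints can also be enabled per crate. The snippet below is a minimal sketch of such a crate-level opt-in, shown for illustration only; the PR enables them once for the whole workspace (presumably via `[workspace.lints]` in the root `Cargo.toml`):

```rust
// Hypothetical crate root (lib.rs): warn on `std` paths that could instead come
// from `core` or `alloc`, and on `alloc` paths that could come from `core`.
#![warn(
    clippy::std_instead_of_core,
    clippy::std_instead_of_alloc,
    clippy::alloc_instead_of_core
)]
```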
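The kind of rewrite the lints ask for, and that `clippy --fix` could not always complete on its own, looks roughly like the sketch below (the crate layout and item names are invented for the example):

```rust
// Hypothetical crate root: `alloc` is not in the extern prelude, so using
// `alloc::` paths requires this declaration (the manual step noted above).
extern crate alloc;

// Before (flagged by the new lints): `use std::sync::Arc;` and `use std::fmt::Debug;`
// After: the same items, sourced from `alloc` and `core` instead of `std`.
use alloc::sync::Arc;
use core::fmt::Debug;

pub fn share<T: Debug>(value: T) -> Arc<T> {
    Arc::new(value)
}
```

The macro source reproduced below shows the same pattern in practice: `alloc::sync::Arc`, `core::ops::Deref`, `core::mem::forget`, and `core::num::NonZero` rather than their `std` equivalents.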
// structs containing wgpu types take a long time to compile. this is particularly bad for generic
// structs containing wgpu structs. we avoid that in debug builds (and for cargo check and rust analyzer)
// by type-erasing with the `render_resource_wrapper` macro. The resulting type behaves like Arc<$wgpu_type>,
// but avoids explicitly storing an Arc<$wgpu_type> member.
// analysis from https://github.com/bevyengine/bevy/pull/5950#issuecomment-1243473071 indicates this is
// due to `evaluate_obligations`. we should check if this can be removed after a fix lands for
// https://github.com/rust-lang/rust/issues/99188 (and after other `evaluate_obligations`-related changes).
#[cfg(debug_assertions)]
#[macro_export]
macro_rules! render_resource_wrapper {
    ($wrapper_type:ident, $wgpu_type:ty) => {
        #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))]
        #[derive(Debug)]
        // SAFETY: while self is live, self.0 comes from `into_raw` of an Arc<$wgpu_type> with a strong ref.
        pub struct $wrapper_type(*const ());

        #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))]
        #[derive(Debug)]
        pub struct $wrapper_type(send_wrapper::SendWrapper<*const ()>);

        impl $wrapper_type {
            pub fn new(value: $wgpu_type) -> Self {
                let arc = alloc::sync::Arc::new(value);
                let value_ptr = alloc::sync::Arc::into_raw(arc);
                let unit_ptr = value_ptr.cast::<()>();

                #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))]
                return Self(unit_ptr);
                #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))]
                return Self(send_wrapper::SendWrapper::new(unit_ptr));
            }

            pub fn try_unwrap(self) -> Option<$wgpu_type> {
                let value_ptr = self.0.cast::<$wgpu_type>();
                // SAFETY: pointer refers to a valid Arc, and was created from Arc::into_raw.
                let arc = unsafe { alloc::sync::Arc::from_raw(value_ptr) };

                // we forget ourselves here since the reconstructed arc will be dropped/decremented within this scope
                core::mem::forget(self);

                alloc::sync::Arc::try_unwrap(arc).ok()
            }
        }

        impl core::ops::Deref for $wrapper_type {
            type Target = $wgpu_type;

            fn deref(&self) -> &Self::Target {
                let value_ptr = self.0.cast::<$wgpu_type>();
                // SAFETY: the arc lives for 'self, so the ref lives for 'self
                let value_ref = unsafe { value_ptr.as_ref() };
                value_ref.unwrap()
            }
        }

        impl Drop for $wrapper_type {
            fn drop(&mut self) {
                let value_ptr = self.0.cast::<$wgpu_type>();
                // SAFETY: pointer refers to a valid Arc, and was created from Arc::into_raw.
                // this reconstructed arc is dropped/decremented within this scope.
                unsafe { alloc::sync::Arc::from_raw(value_ptr) };
            }
        }

        #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))]
        // SAFETY: We manually implement Send and Sync, which is valid for Arc<T> when T: Send + Sync.
        // We ensure correctness by checking that $wgpu_type does implement Send and Sync.
        // If in future there is a case where a wrapper is required for a non-send/sync type
        // we can implement a macro variant that omits these manual Send + Sync impls
        unsafe impl Send for $wrapper_type {}
        #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))]
        // SAFETY: As explained above, we ensure correctness by checking that $wgpu_type implements Send and Sync.
        unsafe impl Sync for $wrapper_type {}
        #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))]
        const _: () = {
            trait AssertSendSyncBound: Send + Sync {}
            impl AssertSendSyncBound for $wgpu_type {}
        };

        impl Clone for $wrapper_type {
            fn clone(&self) -> Self {
                let value_ptr = self.0.cast::<$wgpu_type>();
                // SAFETY: pointer refers to a valid Arc, and was created from Arc::into_raw.
                let arc = unsafe { alloc::sync::Arc::from_raw(value_ptr.cast::<$wgpu_type>()) };
                let cloned = alloc::sync::Arc::clone(&arc);
                // we forget the reconstructed Arc to avoid decrementing the ref counter, as self is still live.
                core::mem::forget(arc);
                let cloned_value_ptr = alloc::sync::Arc::into_raw(cloned);
                let cloned_unit_ptr = cloned_value_ptr.cast::<()>();

                #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))]
                return Self(cloned_unit_ptr);

                // Note: this implementation means that this Clone will panic
                // when called off the wgpu thread.
                #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))]
                return Self(send_wrapper::SendWrapper::new(cloned_unit_ptr));
            }
        }
    };
}
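
// Release builds skip the type erasure above: the wrapper is a plain
// `Arc<$wgpu_type>` (wrapped in `SendWrapper` on wasm with atomics), so no
// unsafe code is needed and `Clone`/`Debug` can simply be derived.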
#[cfg(not(debug_assertions))]
#[macro_export]
macro_rules! render_resource_wrapper {
    ($wrapper_type:ident, $wgpu_type:ty) => {
        #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))]
        #[derive(Clone, Debug)]
        pub struct $wrapper_type(std::sync::Arc<$wgpu_type>);
        #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))]
        #[derive(Clone, Debug)]
        pub struct $wrapper_type(std::sync::Arc<send_wrapper::SendWrapper<$wgpu_type>>);

        impl $wrapper_type {
            pub fn new(value: $wgpu_type) -> Self {
                #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))]
                return Self(std::sync::Arc::new(value));

                #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))]
                return Self(std::sync::Arc::new(send_wrapper::SendWrapper::new(value)));
            }

            pub fn try_unwrap(self) -> Option<$wgpu_type> {
                #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))]
                return std::sync::Arc::try_unwrap(self.0).ok();

                #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))]
                return std::sync::Arc::try_unwrap(self.0).ok().map(|p| p.take());
            }
        }

        impl core::ops::Deref for $wrapper_type {
            type Target = $wgpu_type;

            fn deref(&self) -> &Self::Target {
                self.0.as_ref()
            }
        }

        #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))]
        const _: () = {
            trait AssertSendSyncBound: Send + Sync {}
            impl AssertSendSyncBound for $wgpu_type {}
        };
    };
}
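
// `define_atomic_id!` generates a `NonZero<u32>`-backed ID type; `new()` draws
// from a relaxed `AtomicU32` counter starting at 1, so every ID handed out in a
// process is unique (and the code panics if the counter ever wraps back to zero).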
#[macro_export]
macro_rules! define_atomic_id {
    ($atomic_id_type:ident) => {
        #[derive(Copy, Clone, Hash, Eq, PartialEq, PartialOrd, Ord, Debug)]
        pub struct $atomic_id_type(core::num::NonZero<u32>);

        // We use new instead of default to indicate that each ID created will be unique.
        #[allow(clippy::new_without_default)]
        impl $atomic_id_type {
            pub fn new() -> Self {
                use core::sync::atomic::{AtomicU32, Ordering};

                static COUNTER: AtomicU32 = AtomicU32::new(1);

                let counter = COUNTER.fetch_add(1, Ordering::Relaxed);
                Self(core::num::NonZero::<u32>::new(counter).unwrap_or_else(|| {
                    panic!(
                        "The system ran out of unique `{}`s.",
                        stringify!($atomic_id_type)
                    );
                }))
            }
        }

        impl From<$atomic_id_type> for core::num::NonZero<u32> {
            fn from(value: $atomic_id_type) -> Self {
                value.0
            }
        }

        impl From<core::num::NonZero<u32>> for $atomic_id_type {
            fn from(value: core::num::NonZero<u32>) -> Self {
                Self(value)
            }
        }
    };
}

pub use render_resource_wrapper;
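
// --- Illustrative usage (not part of the file above) -------------------------
// The wrapper/ID names below are hypothetical; real call sites live elsewhere in
// bevy_render. Expanding `render_resource_wrapper!` assumes a `wgpu` dependency,
// `send_wrapper` on wasm with atomics, and `extern crate alloc;` in the crate
// root for the debug-build variant.

render_resource_wrapper!(ErasedTexture, wgpu::Texture);
define_atomic_id!(ExampleTextureId);

// `ErasedTexture` then derefs to `wgpu::Texture` and is cheap to clone, while
// each call to `ExampleTextureId::new()` returns a process-unique, non-zero ID.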