spec_v2: minor revisions (#19923)

- renamed `spec_v2` related modules, that commit slipped through the
other pr #17373
- revised struct and trait docs for clarity, and gave a short intro to
specialization
- turns out the derive macro was broken, fixed that too
This commit is contained in:
Emerson Coskey 2025-07-03 10:49:04 -07:00 committed by GitHub
parent 0f1eebed38
commit bbf91a6964
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 158 additions and 101 deletions

View File

@ -4,7 +4,7 @@
mod as_bind_group;
mod extract_component;
mod extract_resource;
mod specialize;
mod specializer;
use bevy_macro_utils::{derive_label, BevyManifest};
use proc_macro::TokenStream;
@ -107,18 +107,18 @@ pub fn derive_render_sub_graph(input: TokenStream) -> TokenStream {
derive_label(input, "RenderSubGraph", &trait_path)
}
/// Derive macro generating an impl of the trait `Specialize`
/// Derive macro generating an impl of the trait `Specializer`
///
/// This only works for structs whose members all implement `Specialize`
#[proc_macro_derive(Specialize, attributes(specialize, key, base_descriptor))]
/// This only works for structs whose members all implement `Specializer`
#[proc_macro_derive(Specializer, attributes(specialize, key, base_descriptor))]
pub fn derive_specialize(input: TokenStream) -> TokenStream {
specialize::impl_specialize(input)
specializer::impl_specializer(input)
}
/// Derive macro generating the most common impl of the trait `SpecializerKey`
#[proc_macro_derive(SpecializerKey)]
pub fn derive_specializer_key(input: TokenStream) -> TokenStream {
specialize::impl_specializer_key(input)
specializer::impl_specializer_key(input)
}
#[proc_macro_derive(ShaderLabel)]

View File

@ -1,14 +1,17 @@
use bevy_macro_utils::fq_std::{FQDefault, FQResult};
use bevy_macro_utils::{
fq_std::{FQDefault, FQResult},
get_struct_fields,
};
use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::{format_ident, quote};
use syn::{
parse,
parse::{Parse, ParseStream},
parse_macro_input, parse_quote,
punctuated::Punctuated,
spanned::Spanned,
Data, DataStruct, DeriveInput, Expr, Fields, Ident, Index, Member, Meta, MetaList, Pat, Path,
Token, Type, WherePredicate,
DeriveInput, Expr, Field, Ident, Index, Member, Meta, MetaList, Pat, Path, Token, Type,
WherePredicate,
};
const SPECIALIZE_ATTR_IDENT: &str = "specialize";
@ -91,7 +94,7 @@ impl FieldInfo {
fn key_ty(&self, specialize_path: &Path, target_path: &Path) -> Option<Type> {
let ty = &self.ty;
matches!(self.key, Key::Whole | Key::Index(_))
.then_some(parse_quote!(<#ty as #specialize_path::Specialize<#target_path>>::Key))
.then_some(parse_quote!(<#ty as #specialize_path::Specializer<#target_path>>::Key))
}
fn key_ident(&self, ident: Ident) -> Option<Ident> {
@ -103,15 +106,15 @@ impl FieldInfo {
ty, member, key, ..
} = &self;
let key_expr = key.expr();
parse_quote!(<#ty as #specialize_path::Specialize<#target_path>>::specialize(&self.#member, #key_expr, descriptor))
parse_quote!(<#ty as #specialize_path::Specializer<#target_path>>::specialize(&self.#member, #key_expr, descriptor))
}
fn specialize_predicate(&self, specialize_path: &Path, target_path: &Path) -> WherePredicate {
let ty = &self.ty;
if matches!(&self.key, Key::Default) {
parse_quote!(#ty: #specialize_path::Specialize<#target_path, Key: #FQDefault>)
parse_quote!(#ty: #specialize_path::Specializer<#target_path, Key: #FQDefault>)
} else {
parse_quote!(#ty: #specialize_path::Specialize<#target_path>)
parse_quote!(#ty: #specialize_path::Specializer<#target_path>)
}
}
@ -125,7 +128,10 @@ impl FieldInfo {
}
}
fn get_field_info(fields: &Fields, targets: &SpecializeImplTargets) -> syn::Result<Vec<FieldInfo>> {
fn get_field_info(
fields: &Punctuated<Field, Token![,]>,
targets: &SpecializeImplTargets,
) -> syn::Result<Vec<FieldInfo>> {
let mut field_info: Vec<FieldInfo> = Vec::new();
let mut used_count = 0;
let mut single_index = 0;
@ -153,7 +159,7 @@ fn get_field_info(fields: &Fields, targets: &SpecializeImplTargets) -> syn::Resu
}
Meta::List(MetaList { path, tokens, .. }) if path.is_ident(&KEY_ATTR_IDENT) => {
let owned_tokens = tokens.clone().into();
let Ok(parsed_key) = parse::<Key>(owned_tokens) else {
let Ok(parsed_key) = syn::parse::<Key>(owned_tokens) else {
return Err(syn::Error::new(
attr.span(),
"Invalid key override attribute",
@ -195,20 +201,6 @@ fn get_field_info(fields: &Fields, targets: &SpecializeImplTargets) -> syn::Resu
Ok(field_info)
}
fn get_struct_fields<'a>(ast: &'a DeriveInput, derive_name: &str) -> syn::Result<&'a Fields> {
match &ast.data {
Data::Struct(DataStruct { fields, .. }) => Ok(fields),
Data::Enum(data_enum) => Err(syn::Error::new(
data_enum.enum_token.span(),
format!("#[derive({derive_name})] only supports structs."),
)),
Data::Union(data_union) => Err(syn::Error::new(
data_union.union_token.span(),
format!("#[derive({derive_name})] only supports structs."),
)),
}
}
fn get_specialize_targets(
ast: &DeriveInput,
derive_name: &str,
@ -227,7 +219,7 @@ fn get_specialize_targets(
format!("#[derive({derive_name})] must be accompanied by #[specialize(..targets)].\n Example usages: #[specialize(RenderPipeline)], #[specialize(all)]")
));
};
parse::<SpecializeImplTargets>(specialize_meta_list.tokens.clone().into())
syn::parse::<SpecializeImplTargets>(specialize_meta_list.tokens.clone().into())
}
macro_rules! guard {
@ -239,7 +231,7 @@ macro_rules! guard {
};
}
pub fn impl_specialize(input: TokenStream) -> TokenStream {
pub fn impl_specializer(input: TokenStream) -> TokenStream {
let bevy_render_path: Path = crate::bevy_render_path();
let specialize_path = {
let mut path = bevy_render_path.clone();
@ -250,8 +242,8 @@ pub fn impl_specialize(input: TokenStream) -> TokenStream {
let ecs_path = crate::bevy_ecs_path();
let ast = parse_macro_input!(input as DeriveInput);
let targets = guard!(get_specialize_targets(&ast, "Specialize"));
let fields = guard!(get_struct_fields(&ast, "Specialize"));
let targets = guard!(get_specialize_targets(&ast, "Specializer"));
let fields = guard!(get_struct_fields(&ast.data, "Specializer"));
let field_info = guard!(get_field_info(fields, &targets));
let key_idents: Vec<Option<Ident>> = field_info
@ -362,7 +354,7 @@ fn impl_specialize_all(
let (impl_generics, _, where_clause) = &generics.split_for_impl();
TokenStream::from(quote! {
impl #impl_generics #specialize_path::Specialize<#target_path> for #struct_name #type_generics #where_clause {
impl #impl_generics #specialize_path::Specializer<#target_path> for #struct_name #type_generics #where_clause {
type Key = (#(#key_elems),*);
fn specialize(
@ -399,7 +391,7 @@ fn impl_specialize_specific(
let (impl_generics, type_generics, where_clause) = &ast.generics.split_for_impl();
TokenStream::from(quote! {
impl #impl_generics #specialize_path::Specialize<#target_path> for #struct_name #type_generics #where_clause {
impl #impl_generics #specialize_path::Specializer<#target_path> for #struct_name #type_generics #where_clause {
type Key = (#(#key_elems),*);
fn specialize(
@ -427,7 +419,7 @@ fn impl_get_base_descriptor_specific(
TokenStream::from(quote!(
impl #impl_generics #specialize_path::GetBaseDescriptor<#target_path> for #struct_name #type_generics #where_clause {
fn get_base_descriptor(&self) -> <#target_path as #specialize_path::Specializable>::Descriptor {
<#field_ty as #specialize_path::GetBaseDescriptor<#target_path>>::base_descriptor(&self.#field_member)
<#field_ty as #specialize_path::GetBaseDescriptor<#target_path>>::get_base_descriptor(&self.#field_member)
}
}
))
@ -458,7 +450,7 @@ fn impl_get_base_descriptor_all(
TokenStream::from(quote! {
impl #impl_generics #specialize_path::GetBaseDescriptor<#target_path> for #struct_name #type_generics #where_clause {
fn get_base_descriptor(&self) -> <#target_path as #specialize_path::Specializable>::Descriptor {
<#field_ty as #specialize_path::GetBaseDescriptor<#target_path>>::base_descriptor(&self.#field_member)
<#field_ty as #specialize_path::GetBaseDescriptor<#target_path>>::get_base_descriptor(&self.#field_member)
}
}
})

View File

@ -12,7 +12,7 @@ mod pipeline_cache;
mod pipeline_specializer;
pub mod resource_macros;
mod shader;
mod specialize;
mod specializer;
mod storage_buffer;
mod texture;
mod uniform_buffer;
@ -29,7 +29,7 @@ pub use pipeline::*;
pub use pipeline_cache::*;
pub use pipeline_specializer::*;
pub use shader::*;
pub use specialize::*;
pub use specializer::*;
pub use storage_buffer::*;
pub use texture::*;
pub use uniform_buffer::*;

View File

@ -18,11 +18,13 @@ use core::{hash::Hash, marker::PhantomData};
use tracing::error;
use variadics_please::all_tuples;
pub use bevy_render_macros::{Specialize, SpecializerKey};
pub use bevy_render_macros::{Specializer, SpecializerKey};
/// Defines a type that is able to be "specialized" and cached by creating and transforming
/// its descriptor type. This is implemented for [`RenderPipeline`] and [`ComputePipeline`], and
/// likely will not have much utility for other types.
///
/// See docs on [`Specializer`] for more info.
pub trait Specializable {
type Descriptor: PartialEq + Clone + Send + Sync;
type CachedId: Clone + Send + Sync;
@ -63,57 +65,72 @@ impl Specializable for ComputePipeline {
}
}
/// Defines a type that is able to transform descriptors for a specializable
/// type T, based on a hashable key type.
/// Defines a type capable of "specializing" values of a type T.
///
/// This is mainly used when "specializing" render
/// pipelines, i.e. specifying shader defs and binding layout based on the key,
/// the result of which can then be cached and accessed quickly later.
/// Specialization is the process of generating variants of a type T
/// from small hashable keys, and specializers themselves can be
/// thought of as [pure functions] from the key type to `T`, that
/// [memoize] their results based on the key.
///
/// <div class="warning">
/// Because specialization is designed for use with render and compute
/// pipelines, specializers act on <i>descriptors</i> of <code>T</code> rather
/// than produce <code>T</code> itself, but the above comparison is still valid.
/// </div>
///
/// Since compiling render and compute pipelines can be so slow,
/// specialization allows a Bevy app to detect when it would compile
/// a duplicate pipeline and reuse what's already in the cache. While
/// pipelines could all be memoized by hashing each whole descriptor, this
/// would be much slower and could still create duplicates. In contrast,
/// memoizing groups of *related* pipelines based on a small hashable
/// key is much faster. See the docs on [`SpecializerKey`] for more info.
///
/// ## Composing Specializers
///
/// This trait can be derived with `#[derive(Specializer)]` for structs whose
/// fields all implement [`Specializer`]. The key type will be tuple of the keys
/// of each field, and their specialization logic will be applied in field
/// order. Since derive macros can't have generic parameters, the derive macro
/// requires an additional `#[specialize(..targets)]` attribute to specify a
/// list of types to target for the implementation. `#[specialize(all)]` is
/// also allowed, and will generate a fully generic implementation at the cost
/// of slightly worse error messages.
/// fields all implement [`Specializer`]. This allows for composing multiple
/// specializers together, and makes encapsulation and separating concerns
/// between specializers much nicer. One could make individual specializers
/// for common operations and place them in entirely separate modules, then
/// compose them together with a single `#[derive]`.
///
/// Additionally, each field can optionally take a `#[key]` attribute to
/// specify a "key override". This will "hide" that field's key from being
/// exposed by the wrapper, and always use the value given by the attribute.
/// Values for this attribute may either be `default` which will use the key's
/// [`Default`] implementation, or a valid rust
/// expression of the key type.
///
/// Example:
/// ```rs
/// # use super::RenderPipeline;
/// # use super::RenderPipelineDescriptor;
/// ```rust
/// # use bevy_ecs::error::BevyError;
///
/// # use bevy_render::render_resource::Specializer;
/// # use bevy_render::render_resource::SpecializerKey;
/// # use bevy_render::render_resource::RenderPipeline;
/// # use bevy_render::render_resource::RenderPipelineDescriptor;
/// struct A;
/// struct B;
/// #[derive(Copy, Clone, PartialEq, Eq, Hash, SpecializerKey)]
/// struct BKey;
/// struct BKey { contrived_number: u32 }
///
/// impl Specializer<RenderPipeline> for A {
/// type Key = ();
///
/// fn specializer(&self, key: (), descriptor: &mut RenderPipelineDescriptor) -> Result<(), BevyError> {
/// # let _ = (key, descriptor);
/// //...
/// Ok(())
/// fn specialize(
/// &self,
/// key: (),
/// descriptor: &mut RenderPipelineDescriptor
/// ) -> Result<(), BevyError> {
/// # let _ = descriptor;
/// // mutate the descriptor here
/// Ok(key)
/// }
/// }
///
/// impl Specializer<RenderPipeline> for B {
/// type Key = BKey;
///
/// fn specialize(&self, _key: Bkey, _descriptor: &mut RenderPipelineDescriptor) -> Result<BKey, BevyError> {
/// # let _ = (key, descriptor);
/// //...
/// Ok(BKey)
/// fn specialize(
/// &self,
/// key: BKey,
/// descriptor: &mut RenderPipelineDescriptor
/// ) -> Result<BKey, BevyError> {
/// # let _ = descriptor;
/// // mutate the descriptor here
/// Ok(key)
/// }
/// }
///
@ -141,6 +158,23 @@ impl Specializable for ComputePipeline {
/// }
/// */
/// ```
///
/// The key type for a composed specializer will be a tuple of the keys
/// of each field, and their specialization logic will be applied in field
/// order. Since derive macros can't have generic parameters, the derive macro
/// requires an additional `#[specialize(..targets)]` attribute to specify a
/// list of types to target for the implementation. `#[specialize(all)]` is
/// also allowed, and will generate a fully generic implementation at the cost
/// of slightly worse error messages.
///
/// Additionally, each field can optionally take a `#[key]` attribute to
/// specify a "key override". This will hide that field's key from being
/// exposed by the wrapper, and always use the value given by the attribute.
/// Values for this attribute may either be `default` which will use the key's
/// [`Default`] implementation, or a valid rust expression of the key type.
///
/// [pure functions]: https://en.wikipedia.org/wiki/Pure_function
/// [memoize]: https://en.wikipedia.org/wiki/Memoization
pub trait Specializer<T: Specializable>: Send + Sync + 'static {
type Key: SpecializerKey;
fn specialize(
@ -150,20 +184,35 @@ pub trait Specializer<T: Specializable>: Send + Sync + 'static {
) -> Result<Canonical<Self::Key>, BevyError>;
}
/// Defines a type that is able to be used as a key for types that `impl Specialize`
// TODO: update docs for `SpecializerKey` with a more concrete example
// once we've migrated mesh layout specialization
/// Defines a type that is able to be used as a key for [`Specializer`]s
///
/// **Most types should implement this trait with `IS_CANONICAL = true` and `Canonical = Self`**.
/// This is the implementation generated by `#[derive(SpecializerKey)]`
/// <div class = "warning">
/// <strong>Most types should implement this trait with the included derive macro.</strong> <br/>
/// This generates a "canonical" key type, with <code>IS_CANONICAL = true</code>, and <code>Canonical = Self</code>
/// </div>
///
/// In this case, "canonical" means that each unique value of this type will produce
/// a unique specialized result, which isn't true in general. `MeshVertexBufferLayout`
/// is a good example of a type that's `Eq + Hash`, but that isn't canonical: vertex
/// attributes could be specified in any order, or there could be more attributes
/// provided than the specialized pipeline requires. Its `Canonical` key type would
/// be `VertexBufferLayout`, the final layout required by the pipeline.
/// ## What's a "canonical" key?
///
/// Processing keys into canonical keys this way allows the `SpecializedCache` to reuse
/// resources more eagerly where possible.
/// The specialization API memoizes pipelines based on the hash of each key, but this
/// can still produce duplicates. For example, if one used a list of vertex attributes
/// as a key, even if all the same attributes were present they could be in any order.
/// In each case, though the keys would be "different" they would produce the same
/// pipeline.
///
/// To address this, during specialization keys are processed into a [canonical]
/// (or "standard") form that represents the actual descriptor that was produced.
/// In the previous example, that would be the final `VertexBufferLayout` contained
/// by the pipeline descriptor. This new key is used by [`SpecializedCache`] to
/// perform additional checks for duplicates, but only if required. If a key is
/// canonical from the start, then there's no need.
///
/// For implementors: the main property of a canonical key is that if two keys hash
/// differently, they should nearly always produce different descriptors.
///
/// [canonical]: https://en.wikipedia.org/wiki/Canonicalization
pub trait SpecializerKey: Clone + Hash + Eq {
/// Denotes whether this key is canonical or not. This should only be `true`
/// if and only if `Canonical = Self`.
@ -208,6 +257,7 @@ macro_rules! impl_specialization_key_tuple {
};
}
// TODO: How do we fake_variadics this?
all_tuples!(impl_specialization_key_tuple, 0, 12, T);
/// Defines a specializer that can also provide a "base descriptor".
@ -215,38 +265,54 @@ all_tuples!(impl_specialization_key_tuple, 0, 12, T);
/// In order to be composable, [`Specializer`] implementers don't create full
/// descriptors, only transform them. However, [`SpecializedCache`]s need a
/// "base descriptor" at creation time in order to have something for the
/// [`Specializer`] implementation to work off of. This trait allows
/// [`SpecializedCache`] to impl [`FromWorld`] for [`Specializer`]
/// implementations that also satisfy [`FromWorld`] and [`GetBaseDescriptor`].
/// [`Specializer`] to work off of. This trait allows [`SpecializedCache`]
/// to impl [`FromWorld`] for [`Specializer`]s that also satisfy [`FromWorld`]
/// and [`GetBaseDescriptor`].
///
/// This trait can be also derived with `#[derive(Specializer)]`, by marking
/// a field with `#[base_descriptor]` to use its [`GetBaseDescriptor`] implementation.
///
/// Example:
/// ```rs
/// ```rust
/// # use bevy_ecs::error::BevyError;
/// # use bevy_render::render_resource::Specializer;
/// # use bevy_render::render_resource::GetBaseDescriptor;
/// # use bevy_render::render_resource::SpecializerKey;
/// # use bevy_render::render_resource::RenderPipeline;
/// # use bevy_render::render_resource::RenderPipelineDescriptor;
/// struct A;
/// struct B;
///
/// impl Specializer<RenderPipeline> for A {
/// type Key = ();
///
/// fn specialize(&self, _key: (), _descriptor: &mut RenderPipelineDescriptor) {
/// //...
/// }
/// # type Key = ();
/// #
/// # fn specialize(
/// # &self,
/// # key: (),
/// # _descriptor: &mut RenderPipelineDescriptor
/// # ) -> Result<(), BevyError> {
/// # Ok(key)
/// # }
/// // ...
/// }
///
/// impl Specializer<RenderPipeline> for B {
/// type Key = u32;
///
/// fn specialize(&self, _key: u32, _descriptor: &mut RenderPipelineDescriptor) {
/// //...
/// }
/// # type Key = ();
/// #
/// # fn specialize(
/// # &self,
/// # key: (),
/// # _descriptor: &mut RenderPipelineDescriptor
/// # ) -> Result<(), BevyError> {
/// # Ok(key)
/// # }
/// // ...
/// }
///
/// impl GetBaseDescriptor<RenderPipeline> for B {
/// fn get_base_descriptor(&self) -> RenderPipelineDescriptor {
/// # todo!()
/// //...
/// // ...
/// }
/// }
///
@ -254,7 +320,6 @@ all_tuples!(impl_specialization_key_tuple, 0, 12, T);
/// #[derive(Specializer)]
/// #[specialize(RenderPipeline)]
/// struct C {
/// #[key(default)]
/// a: A,
/// #[base_descriptor]
/// b: B,