
# Objective Currently, the observer API looks like this: ```rust app.add_observer(|trigger: Trigger<Explode>| { info!("Entity {} exploded!", trigger.target()); }); ``` Future plans for observers also include "multi-event observers" with a trigger that looks like this (see [Cart's example](https://github.com/bevyengine/bevy/issues/14649#issuecomment-2960402508)): ```rust trigger: Trigger<( OnAdd<Pressed>, OnRemove<Pressed>, OnAdd<InteractionDisabled>, OnRemove<InteractionDisabled>, OnInsert<Hovered>, )>, ``` In scenarios like this, there is a lot of repetition of `On`. These are expected to be very high-traffic APIs especially in UI contexts, so ergonomics and readability are critical. By renaming `Trigger` to `On`, we can make these APIs read more cleanly and get rid of the repetition: ```rust app.add_observer(|trigger: On<Explode>| { info!("Entity {} exploded!", trigger.target()); }); ``` ```rust trigger: On<( Add<Pressed>, Remove<Pressed>, Add<InteractionDisabled>, Remove<InteractionDisabled>, Insert<Hovered>, )>, ``` Names like `On<Add<Pressed>>` emphasize the actual event listener nature more than `Trigger<OnAdd<Pressed>>`, and look cleaner. This *also* frees up the `Trigger` name if we want to use it for the observer event type, splitting them out from buffered events (bikeshedding this is out of scope for this PR though). For prior art: [`bevy_eventlistener`](https://github.com/aevyrie/bevy_eventlistener) used [`On`](https://docs.rs/bevy_eventlistener/latest/bevy_eventlistener/event_listener/struct.On.html) for its event listener type. Though in our case, the observer is the event listener, and `On` is just a type containing information about the triggered event. ## Solution Steal from `bevy_eventlistener` by @aevyrie and use `On`. - Rename `Trigger` to `On` - Rename `OnAdd` to `Add` - Rename `OnInsert` to `Insert` - Rename `OnReplace` to `Replace` - Rename `OnRemove` to `Remove` - Rename `OnDespawn` to `Despawn` ## Discussion ### Naming Conflicts?? 
Using a name like `Add` might initially feel like a very bad idea, since it risks conflict with `core::ops::Add`. However, I don't expect this to be a big problem in practice. - You rarely need to actually implement the `Add` trait, especially in modules that would use the Bevy ECS. - In the rare cases where you *do* get a conflict, it is very easy to fix by just disambiguating, for example using `ops::Add`. - The `Add` event is a struct while the `Add` trait is a trait (duh), so the compiler error should be very obvious. For the record, renaming `OnAdd` to `Add`, I got exactly *zero* errors or conflicts within Bevy itself. But this is of course not entirely representative of actual projects *using* Bevy. You might then wonder, why not use `Added`? This would conflict with the `Added` query filter, so it wouldn't work. Additionally, the current naming convention for observer events does not use past tense. ### Documentation This does make documentation slightly more awkward when referring to `On` or its methods. Previous docs often referred to `Trigger::target` or "sends a `Trigger`" (which is... a bit strange anyway), which would now be `On::target` and "sends an observer `Event`". You can see the diff in this PR to see some of the effects. I think it should be fine though, we may just need to reword more documentation to read better.
223 lines
8.1 KiB
Rust
223 lines
8.1 KiB
Rust
//! Simple example demonstrating the use of the [`Readback`] component to read back data from the GPU
|
|
//! using both a storage buffer and texture.
|
|
|
|
use bevy::{
|
|
prelude::*,
|
|
render::{
|
|
extract_resource::{ExtractResource, ExtractResourcePlugin},
|
|
gpu_readback::{Readback, ReadbackComplete},
|
|
render_asset::{RenderAssetUsages, RenderAssets},
|
|
render_graph::{self, RenderGraph, RenderLabel},
|
|
render_resource::{
|
|
binding_types::{storage_buffer, texture_storage_2d},
|
|
*,
|
|
},
|
|
renderer::{RenderContext, RenderDevice},
|
|
storage::{GpuShaderStorageBuffer, ShaderStorageBuffer},
|
|
texture::GpuImage,
|
|
Render, RenderApp, RenderSystems,
|
|
},
|
|
};
|
|
|
|
/// This example uses a shader source file from the assets subdirectory
const SHADER_ASSET_PATH: &str = "shaders/gpu_readback.wgsl";

// The length of the buffer sent to the gpu.
// Also reused as the storage texture's width and as the number of compute
// workgroups dispatched, so buffer elements and texels map one-to-one.
const BUFFER_LEN: usize = 16;
|
|
|
|
fn main() {
|
|
App::new()
|
|
.add_plugins((
|
|
DefaultPlugins,
|
|
GpuReadbackPlugin,
|
|
ExtractResourcePlugin::<ReadbackBuffer>::default(),
|
|
ExtractResourcePlugin::<ReadbackImage>::default(),
|
|
))
|
|
.insert_resource(ClearColor(Color::BLACK))
|
|
.add_systems(Startup, setup)
|
|
.run();
|
|
}
|
|
|
|
// We need a plugin to organize all the systems and render node required for this example
|
|
struct GpuReadbackPlugin;
|
|
impl Plugin for GpuReadbackPlugin {
|
|
fn build(&self, _app: &mut App) {}
|
|
|
|
fn finish(&self, app: &mut App) {
|
|
let render_app = app.sub_app_mut(RenderApp);
|
|
render_app.init_resource::<ComputePipeline>().add_systems(
|
|
Render,
|
|
prepare_bind_group
|
|
.in_set(RenderSystems::PrepareBindGroups)
|
|
// We don't need to recreate the bind group every frame
|
|
.run_if(not(resource_exists::<GpuBufferBindGroup>)),
|
|
);
|
|
|
|
// Add the compute node as a top level node to the render graph
|
|
// This means it will only execute once per frame
|
|
render_app
|
|
.world_mut()
|
|
.resource_mut::<RenderGraph>()
|
|
.add_node(ComputeNodeLabel, ComputeNode::default());
|
|
}
|
|
}
|
|
|
|
// Handle to the storage buffer being read back. Extracted into the render app
// via `ExtractResourcePlugin` so `prepare_bind_group` can resolve the
// GPU-side buffer.
#[derive(Resource, ExtractResource, Clone)]
struct ReadbackBuffer(Handle<ShaderStorageBuffer>);

// Handle to the storage texture being read back; extracted into the render
// app the same way as `ReadbackBuffer`.
#[derive(Resource, ExtractResource, Clone)]
struct ReadbackImage(Handle<Image>);
|
|
|
|
fn setup(
|
|
mut commands: Commands,
|
|
mut images: ResMut<Assets<Image>>,
|
|
mut buffers: ResMut<Assets<ShaderStorageBuffer>>,
|
|
) {
|
|
// Create a storage buffer with some data
|
|
let buffer = vec![0u32; BUFFER_LEN];
|
|
let mut buffer = ShaderStorageBuffer::from(buffer);
|
|
// We need to enable the COPY_SRC usage so we can copy the buffer to the cpu
|
|
buffer.buffer_description.usage |= BufferUsages::COPY_SRC;
|
|
let buffer = buffers.add(buffer);
|
|
|
|
// Create a storage texture with some data
|
|
let size = Extent3d {
|
|
width: BUFFER_LEN as u32,
|
|
height: 1,
|
|
..default()
|
|
};
|
|
// We create an uninitialized image since this texture will only be used for getting data out
|
|
// of the compute shader, not getting data in, so there's no reason for it to exist on the CPU
|
|
let mut image = Image::new_uninit(
|
|
size,
|
|
TextureDimension::D2,
|
|
TextureFormat::R32Uint,
|
|
RenderAssetUsages::RENDER_WORLD,
|
|
);
|
|
// We also need to enable the COPY_SRC, as well as STORAGE_BINDING so we can use it in the
|
|
// compute shader
|
|
image.texture_descriptor.usage |= TextureUsages::COPY_SRC | TextureUsages::STORAGE_BINDING;
|
|
let image = images.add(image);
|
|
|
|
// Spawn the readback components. For each frame, the data will be read back from the GPU
|
|
// asynchronously and trigger the `ReadbackComplete` event on this entity. Despawn the entity
|
|
// to stop reading back the data.
|
|
commands
|
|
.spawn(Readback::buffer(buffer.clone()))
|
|
.observe(|trigger: On<ReadbackComplete>| {
|
|
// This matches the type which was used to create the `ShaderStorageBuffer` above,
|
|
// and is a convenient way to interpret the data.
|
|
let data: Vec<u32> = trigger.event().to_shader_type();
|
|
info!("Buffer {:?}", data);
|
|
});
|
|
// This is just a simple way to pass the buffer handle to the render app for our compute node
|
|
commands.insert_resource(ReadbackBuffer(buffer));
|
|
|
|
// Textures can also be read back from the GPU. Pay careful attention to the format of the
|
|
// texture, as it will affect how the data is interpreted.
|
|
commands
|
|
.spawn(Readback::texture(image.clone()))
|
|
.observe(|trigger: On<ReadbackComplete>| {
|
|
// You probably want to interpret the data as a color rather than a `ShaderType`,
|
|
// but in this case we know the data is a single channel storage texture, so we can
|
|
// interpret it as a `Vec<u32>`
|
|
let data: Vec<u32> = trigger.event().to_shader_type();
|
|
info!("Image {:?}", data);
|
|
});
|
|
commands.insert_resource(ReadbackImage(image));
|
|
}
|
|
|
|
// Render-app resource caching the bind group for the compute pass. Its
// existence doubles as the run condition that stops `prepare_bind_group`
// from rebuilding it every frame (see `GpuReadbackPlugin::finish`).
#[derive(Resource)]
struct GpuBufferBindGroup(BindGroup);
|
|
|
|
fn prepare_bind_group(
|
|
mut commands: Commands,
|
|
pipeline: Res<ComputePipeline>,
|
|
render_device: Res<RenderDevice>,
|
|
buffer: Res<ReadbackBuffer>,
|
|
image: Res<ReadbackImage>,
|
|
buffers: Res<RenderAssets<GpuShaderStorageBuffer>>,
|
|
images: Res<RenderAssets<GpuImage>>,
|
|
) {
|
|
let buffer = buffers.get(&buffer.0).unwrap();
|
|
let image = images.get(&image.0).unwrap();
|
|
let bind_group = render_device.create_bind_group(
|
|
None,
|
|
&pipeline.layout,
|
|
&BindGroupEntries::sequential((
|
|
buffer.buffer.as_entire_buffer_binding(),
|
|
image.texture_view.into_binding(),
|
|
)),
|
|
);
|
|
commands.insert_resource(GpuBufferBindGroup(bind_group));
|
|
}
|
|
|
|
#[derive(Resource)]
struct ComputePipeline {
    /// Bind group layout: a storage buffer and a write-only `R32Uint` storage
    /// texture, both visible to the compute stage.
    layout: BindGroupLayout,
    /// Id of the compute pipeline queued in the `PipelineCache`; it compiles
    /// asynchronously, so it may not be ready on the first frames.
    pipeline: CachedComputePipelineId,
}
|
|
|
|
impl FromWorld for ComputePipeline {
|
|
fn from_world(world: &mut World) -> Self {
|
|
let render_device = world.resource::<RenderDevice>();
|
|
let layout = render_device.create_bind_group_layout(
|
|
None,
|
|
&BindGroupLayoutEntries::sequential(
|
|
ShaderStages::COMPUTE,
|
|
(
|
|
storage_buffer::<Vec<u32>>(false),
|
|
texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::WriteOnly),
|
|
),
|
|
),
|
|
);
|
|
let shader = world.load_asset(SHADER_ASSET_PATH);
|
|
let pipeline_cache = world.resource::<PipelineCache>();
|
|
let pipeline = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
|
|
label: Some("GPU readback compute shader".into()),
|
|
layout: vec![layout.clone()],
|
|
push_constant_ranges: Vec::new(),
|
|
shader: shader.clone(),
|
|
shader_defs: Vec::new(),
|
|
entry_point: "main".into(),
|
|
zero_initialize_workgroup_memory: false,
|
|
});
|
|
ComputePipeline { layout, pipeline }
|
|
}
|
|
}
|
|
|
|
/// Label to identify the node in the render graph.
/// Used in `GpuReadbackPlugin::finish` when adding the compute node.
#[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)]
struct ComputeNodeLabel;
|
|
|
|
/// The node that will execute the compute shader.
///
/// A unit struct — the original empty braced struct (`struct ComputeNode {}`)
/// carried no fields, so the idiomatic unit form is used; `Default` still
/// derives, and it is only constructed via `ComputeNode::default()` when
/// added to the render graph.
#[derive(Default)]
struct ComputeNode;
|
|
impl render_graph::Node for ComputeNode {
|
|
fn run(
|
|
&self,
|
|
_graph: &mut render_graph::RenderGraphContext,
|
|
render_context: &mut RenderContext,
|
|
world: &World,
|
|
) -> Result<(), render_graph::NodeRunError> {
|
|
let pipeline_cache = world.resource::<PipelineCache>();
|
|
let pipeline = world.resource::<ComputePipeline>();
|
|
let bind_group = world.resource::<GpuBufferBindGroup>();
|
|
|
|
if let Some(init_pipeline) = pipeline_cache.get_compute_pipeline(pipeline.pipeline) {
|
|
let mut pass =
|
|
render_context
|
|
.command_encoder()
|
|
.begin_compute_pass(&ComputePassDescriptor {
|
|
label: Some("GPU readback compute pass"),
|
|
..default()
|
|
});
|
|
|
|
pass.set_bind_group(0, &bind_group.0, &[]);
|
|
pass.set_pipeline(init_pipeline);
|
|
pass.dispatch_workgroups(BUFFER_LEN as u32, 1, 1);
|
|
}
|
|
Ok(())
|
|
}
|
|
}
|