Temporal Antialiasing (TAA) (#7291)

![image](https://user-images.githubusercontent.com/47158642/214374911-412f0986-3927-4f7a-9a6c-413bdee6b389.png)

# Objective

- Implement an alternative antialias technique
- TAA cost scales with view resolution, not geometry complexity
- TAA filters textures, firefly pixels, and other aliasing not covered
by MSAA
- TAA additionally will reduce noise / increase quality in future
stochastic rendering techniques
- Closes https://github.com/bevyengine/bevy/issues/3663

## Solution

- Add a temporal jitter component
- Add a motion vector prepass
- Add a TemporalAntiAliasSettings component and TemporalAntiAliasPlugin
- Combine existing MSAA and FXAA examples and add TAA

## Followup Work
- Prepass motion vector support for skinned meshes
- Move uniforms needed for motion vectors into a separate bind group,
instead of using different bind group layouts
- Reuse previous frame's GPU view buffer for motion vectors, instead of
recomputing
- Mip biasing for sharper textures, and or unjitter texture UVs
https://github.com/bevyengine/bevy/issues/7323
- Compute shader for better performance
- Investigate FSR techniques
  - Historical depth based disocclusion tests, for geometry disocclusion
  - Historical luminance/hue based tests, for shading disocclusion
- Pixel "locks" to reduce blending rate / revamp history confidence
mechanism
- Orthographic camera support for TemporalJitter
- Figure out COD's 1-tap bicubic filter

---

## Changelog

- Added MotionVectorPrepass and TemporalJitter
- Added TemporalAntiAliasPlugin, TemporalAntiAliasBundle, and
TemporalAntiAliasSettings

---------

Co-authored-by: IceSentry <c.giguere42@gmail.com>
Co-authored-by: IceSentry <IceSentry@users.noreply.github.com>
Co-authored-by: Robert Swain <robert.swain@gmail.com>
Co-authored-by: Daniel Chia <danstryder@gmail.com>
Co-authored-by: robtfm <50659922+robtfm@users.noreply.github.com>
Co-authored-by: Brandon Dyer <brandondyer64@gmail.com>
Co-authored-by: Edgar Geier <geieredgar@gmail.com>
This commit is contained in:
JMS55 2023-03-27 18:22:40 -04:00 committed by GitHub
parent 3d8c7681a7
commit 53667dea56
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 1830 additions and 500 deletions

View File

@ -414,6 +414,16 @@ description = "A scene showcasing the built-in 3D shapes"
category = "3D Rendering"
wasm = true
[[example]]
name = "anti_aliasing"
path = "examples/3d/anti_aliasing.rs"
[package.metadata.example.anti_aliasing]
name = "Anti-aliasing"
description = "Compares different anti-aliasing methods"
category = "3D Rendering"
wasm = false
[[example]]
name = "3d_gizmos"
path = "examples/3d/3d_gizmos.rs"
@ -515,26 +525,6 @@ description = "Compares tonemapping options"
category = "3D Rendering"
wasm = true
[[example]]
name = "fxaa"
path = "examples/3d/fxaa.rs"
[package.metadata.example.fxaa]
name = "FXAA"
description = "Compares MSAA (Multi-Sample Anti-Aliasing) and FXAA (Fast Approximate Anti-Aliasing)"
category = "3D Rendering"
wasm = true
[[example]]
name = "msaa"
path = "examples/3d/msaa.rs"
[package.metadata.example.msaa]
name = "MSAA"
description = "Configures MSAA (Multi-Sample Anti-Aliasing) for smoother edges"
category = "3D Rendering"
wasm = true
[[example]]
name = "orthographic"
path = "examples/3d/orthographic.rs"

View File

@ -5,6 +5,7 @@
struct ShowPrepassSettings {
show_depth: u32,
show_normals: u32,
show_motion_vectors: u32,
padding_1: u32,
padding_2: u32,
}
@ -23,6 +24,9 @@ fn fragment(
} else if settings.show_normals == 1u {
let normal = prepass_normal(frag_coord, sample_index);
return vec4(normal, 1.0);
} else if settings.show_motion_vectors == 1u {
let motion_vector = prepass_motion_vector(frag_coord, sample_index);
return vec4(motion_vector / globals.delta_time, 0.0, 1.0);
}
return vec4(0.0);

View File

@ -21,6 +21,7 @@ tonemapping_luts = []
# bevy
bevy_app = { path = "../bevy_app", version = "0.11.0-dev" }
bevy_asset = { path = "../bevy_asset", version = "0.11.0-dev" }
bevy_core = { path = "../bevy_core", version = "0.11.0-dev" }
bevy_derive = { path = "../bevy_derive", version = "0.11.0-dev" }
bevy_ecs = { path = "../bevy_ecs", version = "0.11.0-dev" }
bevy_reflect = { path = "../bevy_reflect", version = "0.11.0-dev" }

View File

@ -23,8 +23,6 @@ use bevy_render::{
view::ViewTarget,
Render, RenderApp, RenderSet,
};
#[cfg(feature = "trace")]
use bevy_utils::tracing::info_span;
use downsampling_pipeline::{
prepare_downsampling_pipeline, BloomDownsamplingPipeline, BloomDownsamplingPipelineIds,
BloomUniforms,
@ -150,9 +148,6 @@ impl Node for BloomNode {
render_context: &mut RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
#[cfg(feature = "trace")]
let _bloom_span = info_span!("bloom").entered();
let downsampling_pipeline_res = world.resource::<BloomDownsamplingPipeline>();
let pipeline_cache = world.resource::<PipelineCache>();
let uniforms = world.resource::<ComponentUniforms<BloomUniforms>>();

View File

@ -1,7 +1,7 @@
use crate::{
clear_color::{ClearColor, ClearColorConfig},
core_3d::{AlphaMask3d, Camera3d, Opaque3d, Transparent3d},
prepass::{DepthPrepass, NormalPrepass},
prepass::{DepthPrepass, MotionVectorPrepass, NormalPrepass},
};
use bevy_ecs::prelude::*;
use bevy_render::{
@ -29,6 +29,7 @@ pub struct MainPass3dNode {
&'static ViewDepthTexture,
Option<&'static DepthPrepass>,
Option<&'static NormalPrepass>,
Option<&'static MotionVectorPrepass>,
),
With<ExtractedView>,
>,
@ -64,6 +65,7 @@ impl Node for MainPass3dNode {
depth,
depth_prepass,
normal_prepass,
motion_vector_prepass,
)) = self.query.get_manual(world, view_entity) else {
// No window
return Ok(());
@ -94,7 +96,10 @@ impl Node for MainPass3dNode {
view: &depth.view,
// NOTE: The opaque main pass loads the depth buffer and possibly overwrites it
depth_ops: Some(Operations {
load: if depth_prepass.is_some() || normal_prepass.is_some() {
load: if depth_prepass.is_some()
|| normal_prepass.is_some()
|| motion_vector_prepass.is_some()
{
// if any prepass runs, it will generate a depth buffer so we should use it,
// even if only the normal_prepass is used.
Camera3dDepthLoadOp::Load

View File

@ -7,9 +7,17 @@ pub mod fullscreen_vertex_shader;
pub mod fxaa;
pub mod msaa_writeback;
pub mod prepass;
mod taa;
pub mod tonemapping;
pub mod upscaling;
/// Experimental features that are not yet finished. Please report any issues you encounter!
pub mod experimental {
pub mod taa {
pub use crate::taa::*;
}
}
pub mod prelude {
#[doc(hidden)]
pub use crate::{

View File

@ -1,4 +1,4 @@
//! Run a prepass before the main pass to generate depth and/or normals texture, sometimes called a thin g-buffer.
//! Run a prepass before the main pass to generate depth, normals, and/or motion vectors textures, sometimes called a thin g-buffer.
//! These textures are useful for various screen-space effects and reducing overdraw in the main pass.
//!
//! The prepass only runs for opaque meshes or meshes with an alpha mask. Transparent meshes are ignored.
@ -7,6 +7,7 @@
//!
//! [`DepthPrepass`]
//! [`NormalPrepass`]
//! [`MotionVectorPrepass`]
//!
//! The textures are automatically added to the default mesh view bindings. You can also get the raw textures
//! by querying the [`ViewPrepassTextures`] component on any camera with a prepass component.
@ -15,9 +16,9 @@
//! to a separate texture unless the [`DepthPrepass`] is activated. This means that if any prepass component is present
//! it will always create a depth buffer that will be used by the main pass.
//!
//! When using the default mesh view bindings you should be able to use `prepass_depth()`
//! and `prepass_normal()` to load the related textures. These functions are defined in `bevy_pbr::prepass_utils`.
//! See the `shader_prepass` example that shows how to use it.
//! When using the default mesh view bindings you should be able to use `prepass_depth()`,
//! `prepass_normal()`, and `prepass_motion_vector()` to load the related textures.
//! These functions are defined in `bevy_pbr::prepass_utils`. See the `shader_prepass` example that shows how to use them.
//!
//! The prepass runs for each `Material`. You can control if the prepass should run per-material by setting the `prepass_enabled`
//! flag on the `MaterialPlugin`.
@ -39,6 +40,7 @@ use bevy_utils::FloatOrd;
pub const DEPTH_PREPASS_FORMAT: TextureFormat = TextureFormat::Depth32Float;
pub const NORMAL_PREPASS_FORMAT: TextureFormat = TextureFormat::Rgb10a2Unorm;
pub const MOTION_VECTOR_PREPASS_FORMAT: TextureFormat = TextureFormat::Rg16Float;
/// If added to a [`crate::prelude::Camera3d`] then depth values will be copied to a separate texture available to the main pass.
#[derive(Component, Default, Reflect)]
@ -49,6 +51,10 @@ pub struct DepthPrepass;
#[derive(Component, Default, Reflect)]
pub struct NormalPrepass;
/// If added to a [`crate::prelude::Camera3d`] then screen space motion vectors will be copied to a separate texture available to the main pass.
#[derive(Component, Default, Reflect)]
pub struct MotionVectorPrepass;
/// Textures that are written to by the prepass.
///
/// This component will only be present if any of the relevant prepass components are also present.
@ -60,6 +66,9 @@ pub struct ViewPrepassTextures {
/// The normals texture generated by the prepass.
/// Exists only if [`NormalPrepass`] is added to the `ViewTarget`
pub normal: Option<CachedTexture>,
/// The motion vectors texture generated by the prepass.
/// Exists only if [`MotionVectorPrepass`] is added to the `ViewTarget`
pub motion_vectors: Option<CachedTexture>,
/// The size of the textures.
pub size: Extent3d,
}

View File

@ -64,15 +64,34 @@ impl Node for PrepassNode {
};
let mut color_attachments = vec![];
if let Some(view_normals_texture) = &view_prepass_textures.normal {
color_attachments.push(Some(RenderPassColorAttachment {
view: &view_normals_texture.default_view,
color_attachments.push(
view_prepass_textures
.normal
.as_ref()
.map(|view_normals_texture| RenderPassColorAttachment {
view: &view_normals_texture.default_view,
resolve_target: None,
ops: Operations {
load: LoadOp::Clear(Color::BLACK.into()),
store: true,
},
}),
);
color_attachments.push(view_prepass_textures.motion_vectors.as_ref().map(
|view_motion_vectors_texture| RenderPassColorAttachment {
view: &view_motion_vectors_texture.default_view,
resolve_target: None,
ops: Operations {
load: LoadOp::Clear(Color::BLACK.into()),
// Blue channel doesn't matter, but set to 1.0 for possible faster clear
// https://gpuopen.com/performance/#clears
load: LoadOp::Clear(Color::rgb_linear(1.0, 1.0, 1.0).into()),
store: true,
},
}));
},
));
if color_attachments.iter().all(Option::is_none) {
// all attachments are none: clear the attachment list so that no fragment shader is required
color_attachments.clear();
}
{

View File

@ -0,0 +1,557 @@
use crate::{
fullscreen_vertex_shader::fullscreen_shader_vertex_state,
prelude::Camera3d,
prepass::{DepthPrepass, MotionVectorPrepass, ViewPrepassTextures},
};
use bevy_app::{App, Plugin};
use bevy_asset::{load_internal_asset, HandleUntyped};
use bevy_core::FrameCount;
use bevy_ecs::{
prelude::{Bundle, Component, Entity},
query::{QueryState, With},
schedule::IntoSystemConfigs,
system::{Commands, Query, Res, ResMut, Resource},
world::{FromWorld, World},
};
use bevy_math::vec2;
use bevy_reflect::{Reflect, TypeUuid};
use bevy_render::{
camera::{ExtractedCamera, TemporalJitter},
prelude::{Camera, Projection},
render_graph::{Node, NodeRunError, RenderGraph, RenderGraphContext},
render_resource::{
BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor,
BindGroupLayoutEntry, BindingResource, BindingType, CachedRenderPipelineId,
ColorTargetState, ColorWrites, Extent3d, FilterMode, FragmentState, MultisampleState,
Operations, PipelineCache, PrimitiveState, RenderPassColorAttachment, RenderPassDescriptor,
RenderPipelineDescriptor, Sampler, SamplerBindingType, SamplerDescriptor, Shader,
ShaderStages, SpecializedRenderPipeline, SpecializedRenderPipelines, TextureDescriptor,
TextureDimension, TextureFormat, TextureSampleType, TextureUsages, TextureViewDimension,
},
renderer::{RenderContext, RenderDevice},
texture::{BevyDefault, CachedTexture, TextureCache},
view::{prepare_view_uniforms, ExtractedView, Msaa, ViewTarget},
ExtractSchedule, MainWorld, Render, RenderApp, RenderSet,
};
// Render-graph label namespace for the node added by this plugin.
mod draw_3d_graph {
    pub mod node {
        /// Label for the TAA render node.
        pub const TAA: &str = "taa";
    }
}
// Weak handle for the internal `taa.wgsl` shader, registered by
// `TemporalAntiAliasPlugin::build` via `load_internal_asset!`.
const TAA_SHADER_HANDLE: HandleUntyped =
    HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 656865235226276);
/// Plugin for temporal anti-aliasing. Disables multisample anti-aliasing (MSAA)
/// by setting the global [`Msaa`] resource to `Msaa::Off` when built.
///
/// See [`TemporalAntiAliasSettings`] for more details.
pub struct TemporalAntiAliasPlugin;
impl Plugin for TemporalAntiAliasPlugin {
    fn build(&self, app: &mut App) {
        load_internal_asset!(app, TAA_SHADER_HANDLE, "taa.wgsl", Shader::from_wgsl);
        // TAA and MSAA are incompatible; force MSAA off globally.
        app.insert_resource(Msaa::Off)
            .register_type::<TemporalAntiAliasSettings>();
        // The render sub-app may be absent (e.g. headless); nothing more to set up then.
        let Ok(render_app) = app.get_sub_app_mut(RenderApp) else { return };
        render_app
            .init_resource::<TAAPipeline>()
            .init_resource::<SpecializedRenderPipelines<TAAPipeline>>()
            .add_systems(ExtractSchedule, extract_taa_settings)
            .add_systems(
                Render,
                (
                    // The jitter offset must be written before the view uniforms
                    // are built from it.
                    prepare_taa_jitter
                        .before(prepare_view_uniforms)
                        .in_set(RenderSet::Prepare),
                    prepare_taa_history_textures.in_set(RenderSet::Prepare),
                    prepare_taa_pipelines.in_set(RenderSet::Prepare),
                ),
            );
        let taa_node = TAANode::new(&mut render_app.world);
        let mut graph = render_app.world.resource_mut::<RenderGraph>();
        let draw_3d_graph = graph
            .get_sub_graph_mut(crate::core_3d::graph::NAME)
            .unwrap();
        draw_3d_graph.add_node(draw_3d_graph::node::TAA, taa_node);
        // Node ordering: MAIN_PASS -> TAA -> BLOOM -> TONEMAPPING
        draw_3d_graph.add_node_edge(
            crate::core_3d::graph::node::MAIN_PASS,
            draw_3d_graph::node::TAA,
        );
        draw_3d_graph.add_node_edge(draw_3d_graph::node::TAA, crate::core_3d::graph::node::BLOOM);
        draw_3d_graph.add_node_edge(
            draw_3d_graph::node::TAA,
            crate::core_3d::graph::node::TONEMAPPING,
        );
    }
}
/// Bundle to apply temporal anti-aliasing.
#[derive(Bundle, Default)]
pub struct TemporalAntiAliasBundle {
    /// TAA configuration for this camera.
    pub settings: TemporalAntiAliasSettings,
    /// Sub-pixel camera jitter, required so successive frames sample different positions.
    pub jitter: TemporalJitter,
    /// Depth prepass; the TAA node reads the depth texture it produces.
    pub depth_prepass: DepthPrepass,
    /// Motion vector prepass; the TAA node reads the motion vectors texture it produces.
    pub motion_vector_prepass: MotionVectorPrepass,
}
/// Component to apply temporal anti-aliasing to a 3D perspective camera.
///
/// Temporal anti-aliasing (TAA) is a form of image smoothing/filtering, like
/// multisample anti-aliasing (MSAA), or fast approximate anti-aliasing (FXAA).
/// TAA works by blending (averaging) each frame with the past few frames.
///
/// # Tradeoffs
///
/// Pros:
/// * Cost scales with screen/view resolution, unlike MSAA which scales with number of triangles
/// * Filters more types of aliasing than MSAA, such as textures and singular bright pixels
/// * Greatly increases the quality of stochastic rendering techniques such as SSAO, shadow mapping, etc
///
/// Cons:
/// * Chance of "ghosting" - ghostly trails left behind moving objects
/// * Thin geometry, lighting detail, or texture lines may flicker or disappear
/// * Slightly blurs the image, leading to a softer look (using an additional sharpening pass can reduce this)
///
/// Because TAA blends past frames with the current frame, when the frames differ too much
/// (such as with fast moving objects or camera cuts), ghosting artifacts may occur.
///
/// Artifacts tend to be reduced at higher framerates and rendering resolution.
///
/// # Usage Notes
///
/// Requires that you add [`TemporalAntiAliasPlugin`] to your app,
/// and add the [`DepthPrepass`], [`MotionVectorPrepass`], and [`TemporalJitter`]
/// components to your camera.
///
/// Cannot be used with [`bevy_render::camera::OrthographicProjection`].
///
/// Currently does not support skinned meshes. There will probably be ghosting artifacts if used with them.
/// Does not work well with alpha-blended meshes as it requires depth writing to determine motion.
///
/// It is very important that correct motion vectors are written for everything on screen.
/// Failure to do so will lead to ghosting artifacts. For instance, if particle effects
/// are added using a third party library, the library must either:
/// 1. Write particle motion vectors to the motion vectors prepass texture
/// 2. Render particles after TAA
#[derive(Component, Reflect, Clone)]
pub struct TemporalAntiAliasSettings {
    /// Set to true to delete the saved temporal history (past frames).
    ///
    /// Useful for preventing ghosting when the history is no longer
    /// representative of the current frame, such as in sudden camera cuts.
    ///
    /// After setting this to true, it will automatically be toggled
    /// back to false after one frame.
    pub reset: bool,
}
impl Default for TemporalAntiAliasSettings {
fn default() -> Self {
Self { reset: true }
}
}
/// Render-graph node that records the fullscreen TAA resolve pass for each view.
struct TAANode {
    // Cached query for the per-view data read in `run`.
    view_query: QueryState<(
        &'static ExtractedCamera,
        &'static ViewTarget,
        &'static TAAHistoryTextures,
        &'static ViewPrepassTextures,
        &'static TAAPipelineId,
    )>,
}
impl TAANode {
    /// Creates the node, initializing its cached view query against `world`.
    fn new(world: &mut World) -> Self {
        let view_query = QueryState::new(world);
        Self { view_query }
    }
}
impl Node for TAANode {
    fn update(&mut self, world: &mut World) {
        // Keep the cached query in sync with newly created archetypes.
        self.view_query.update_archetypes(world);
    }

    /// Records the fullscreen TAA resolve pass for the current view, writing both
    /// the anti-aliased output and the updated history texture.
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        world: &World,
    ) -> Result<(), NodeRunError> {
        // Bail out quietly if the view or the required resources are missing.
        let (
            Ok((camera, view_target, taa_history_textures, prepass_textures, taa_pipeline_id)),
            Some(pipelines),
            Some(pipeline_cache),
        ) = (
            self.view_query.get_manual(world, graph.view_entity()),
            world.get_resource::<TAAPipeline>(),
            world.get_resource::<PipelineCache>(),
        ) else {
            return Ok(());
        };
        // The pipeline may still be compiling, and the prepass textures only exist
        // when the corresponding prepasses actually ran for this view.
        let (
            Some(taa_pipeline),
            Some(prepass_motion_vectors_texture),
            Some(prepass_depth_texture),
        ) = (
            pipeline_cache.get_render_pipeline(taa_pipeline_id.0),
            &prepass_textures.motion_vectors,
            &prepass_textures.depth,
        ) else {
            return Ok(());
        };
        let view_target = view_target.post_process_write();
        // Bindings must match `TAAPipeline::from_world` and taa.wgsl:
        // 0: current view target, 1: history (read), 2: motion vectors, 3: depth,
        // 4: nearest sampler, 5: linear sampler.
        let taa_bind_group =
            render_context
                .render_device()
                .create_bind_group(&BindGroupDescriptor {
                    label: Some("taa_bind_group"),
                    layout: &pipelines.taa_bind_group_layout,
                    entries: &[
                        BindGroupEntry {
                            binding: 0,
                            resource: BindingResource::TextureView(view_target.source),
                        },
                        BindGroupEntry {
                            binding: 1,
                            resource: BindingResource::TextureView(
                                &taa_history_textures.read.default_view,
                            ),
                        },
                        BindGroupEntry {
                            binding: 2,
                            resource: BindingResource::TextureView(
                                &prepass_motion_vectors_texture.default_view,
                            ),
                        },
                        BindGroupEntry {
                            binding: 3,
                            resource: BindingResource::TextureView(
                                &prepass_depth_texture.default_view,
                            ),
                        },
                        BindGroupEntry {
                            binding: 4,
                            resource: BindingResource::Sampler(&pipelines.nearest_sampler),
                        },
                        BindGroupEntry {
                            binding: 5,
                            resource: BindingResource::Sampler(&pipelines.linear_sampler),
                        },
                    ],
                });
        {
            // Single fullscreen triangle pass with two color attachments:
            // the post-process destination and this frame's history write texture.
            let mut taa_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
                label: Some("taa_pass"),
                color_attachments: &[
                    Some(RenderPassColorAttachment {
                        view: view_target.destination,
                        resolve_target: None,
                        ops: Operations::default(),
                    }),
                    Some(RenderPassColorAttachment {
                        view: &taa_history_textures.write.default_view,
                        resolve_target: None,
                        ops: Operations::default(),
                    }),
                ],
                depth_stencil_attachment: None,
            });
            taa_pass.set_render_pipeline(taa_pipeline);
            taa_pass.set_bind_group(0, &taa_bind_group, &[]);
            if let Some(viewport) = camera.viewport.as_ref() {
                taa_pass.set_camera_viewport(viewport);
            }
            // Fullscreen triangle (3 vertices, 1 instance).
            taa_pass.draw(0..3, 0..1);
        }
        Ok(())
    }
}
/// Render-world resource holding the TAA bind group layout and shared samplers.
#[derive(Resource)]
struct TAAPipeline {
    taa_bind_group_layout: BindGroupLayout,
    // Bound at binding 4; used where exact texel values are needed.
    nearest_sampler: Sampler,
    // Bound at binding 5; used where filtered sampling is needed.
    linear_sampler: Sampler,
}
impl FromWorld for TAAPipeline {
    // Builds the samplers and bind group layout once at startup.
    // The layout must stay in sync with the bind group created in `TAANode::run`
    // and the bindings declared in taa.wgsl.
    fn from_world(world: &mut World) -> Self {
        let render_device = world.resource::<RenderDevice>();
        let nearest_sampler = render_device.create_sampler(&SamplerDescriptor {
            label: Some("taa_nearest_sampler"),
            mag_filter: FilterMode::Nearest,
            min_filter: FilterMode::Nearest,
            ..SamplerDescriptor::default()
        });
        let linear_sampler = render_device.create_sampler(&SamplerDescriptor {
            label: Some("taa_linear_sampler"),
            mag_filter: FilterMode::Linear,
            min_filter: FilterMode::Linear,
            ..SamplerDescriptor::default()
        });
        let taa_bind_group_layout =
            render_device.create_bind_group_layout(&BindGroupLayoutDescriptor {
                label: Some("taa_bind_group_layout"),
                entries: &[
                    // View target (read)
                    BindGroupLayoutEntry {
                        binding: 0,
                        visibility: ShaderStages::FRAGMENT,
                        ty: BindingType::Texture {
                            sample_type: TextureSampleType::Float { filterable: true },
                            view_dimension: TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    // TAA History (read)
                    BindGroupLayoutEntry {
                        binding: 1,
                        visibility: ShaderStages::FRAGMENT,
                        ty: BindingType::Texture {
                            sample_type: TextureSampleType::Float { filterable: true },
                            view_dimension: TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    // Motion Vectors
                    BindGroupLayoutEntry {
                        binding: 2,
                        visibility: ShaderStages::FRAGMENT,
                        ty: BindingType::Texture {
                            sample_type: TextureSampleType::Float { filterable: true },
                            view_dimension: TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    // Depth (depth-sample type, not filterable float)
                    BindGroupLayoutEntry {
                        binding: 3,
                        visibility: ShaderStages::FRAGMENT,
                        ty: BindingType::Texture {
                            sample_type: TextureSampleType::Depth,
                            view_dimension: TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    // Nearest sampler (NonFiltering to match nearest filter modes)
                    BindGroupLayoutEntry {
                        binding: 4,
                        visibility: ShaderStages::FRAGMENT,
                        ty: BindingType::Sampler(SamplerBindingType::NonFiltering),
                        count: None,
                    },
                    // Linear sampler
                    BindGroupLayoutEntry {
                        binding: 5,
                        visibility: ShaderStages::FRAGMENT,
                        ty: BindingType::Sampler(SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });
        TAAPipeline {
            taa_bind_group_layout,
            nearest_sampler,
            linear_sampler,
        }
    }
}
/// Specialization key for the TAA render pipeline.
#[derive(PartialEq, Eq, Hash, Clone)]
struct TAAPipelineKey {
    // Whether the view renders in HDR (selects the HDR texture format and the TONEMAP shader def).
    hdr: bool,
    // Whether the history is being discarded this frame (enables the RESET shader def).
    reset: bool,
}
impl SpecializedRenderPipeline for TAAPipeline {
    type Key = TAAPipelineKey;
    // Builds the fullscreen TAA pipeline descriptor for a given key.
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        let mut shader_defs = vec![];
        // HDR views enable the TONEMAP shader def (taa.wgsl tonemaps before
        // blending and inverse-tonemaps after) and use the HDR target format.
        let format = if key.hdr {
            shader_defs.push("TONEMAP".into());
            ViewTarget::TEXTURE_FORMAT_HDR
        } else {
            TextureFormat::bevy_default()
        };
        if key.reset {
            shader_defs.push("RESET".into());
        }
        RenderPipelineDescriptor {
            label: Some("taa_pipeline".into()),
            layout: vec![self.taa_bind_group_layout.clone()],
            vertex: fullscreen_shader_vertex_state(),
            fragment: Some(FragmentState {
                shader: TAA_SHADER_HANDLE.typed::<Shader>(),
                shader_defs,
                entry_point: "taa".into(),
                // Two targets: the anti-aliased view output and the new history texture.
                targets: vec![
                    Some(ColorTargetState {
                        format,
                        blend: None,
                        write_mask: ColorWrites::ALL,
                    }),
                    Some(ColorTargetState {
                        format,
                        blend: None,
                        write_mask: ColorWrites::ALL,
                    }),
                ],
            }),
            primitive: PrimitiveState::default(),
            depth_stencil: None,
            multisample: MultisampleState::default(),
            push_constant_ranges: Vec::new(),
        }
    }
}
/// Extracts [`TemporalAntiAliasSettings`] from the main world into the render world
/// for every active, perspective 3D camera that has all required TAA components,
/// clearing the one-shot `reset` flag afterwards.
fn extract_taa_settings(mut commands: Commands, mut main_world: ResMut<MainWorld>) {
    let mut cameras_3d = main_world
        .query_filtered::<(Entity, &Camera, &Projection, &mut TemporalAntiAliasSettings), (
            With<Camera3d>,
            With<TemporalJitter>,
            With<DepthPrepass>,
            With<MotionVectorPrepass>,
        )>();
    for (entity, camera, projection, mut taa_settings) in cameras_3d.iter_mut(&mut main_world) {
        // Skip inactive cameras and non-perspective projections.
        if !camera.is_active || !matches!(projection, Projection::Perspective(_)) {
            continue;
        }
        commands.get_or_spawn(entity).insert(taa_settings.clone());
        // `reset` is a one-frame trigger: clear it in the main world after extraction.
        taa_settings.reset = false;
    }
}
/// Writes this frame's sub-pixel jitter offset into every [`TemporalJitter`]
/// belonging to a TAA camera, cycling through a fixed Halton sequence.
fn prepare_taa_jitter(
    frame_count: Res<FrameCount>,
    mut query: Query<&mut TemporalJitter, With<TemporalAntiAliasSettings>>,
) {
    // Halton sequence (2, 3) - 0.5, skipping i = 0
    const SEQUENCE_LEN: usize = 8;
    let halton_sequence: [_; SEQUENCE_LEN] = [
        vec2(0.0, -0.16666666),
        vec2(-0.25, 0.16666669),
        vec2(0.25, -0.3888889),
        vec2(-0.375, -0.055555552),
        vec2(0.125, 0.2777778),
        vec2(-0.125, -0.2777778),
        vec2(0.375, 0.055555582),
        vec2(-0.4375, 0.3888889),
    ];
    // Advance one sequence entry per frame, wrapping around.
    let frame_offset = halton_sequence[frame_count.0 as usize % SEQUENCE_LEN];
    for mut jitter in query.iter_mut() {
        jitter.offset = frame_offset;
    }
}
/// Per-view ping-pong history textures; the read/write roles swap each frame.
#[derive(Component)]
struct TAAHistoryTextures {
    // History output written this frame.
    write: CachedTexture,
    // History accumulated in previous frames, read this frame.
    read: CachedTexture,
}
/// Allocates (via the texture cache) the two ping-pong history textures for each
/// TAA view and inserts a [`TAAHistoryTextures`] component with the read/write
/// roles alternating on even/odd frames.
fn prepare_taa_history_textures(
    mut commands: Commands,
    mut texture_cache: ResMut<TextureCache>,
    render_device: Res<RenderDevice>,
    frame_count: Res<FrameCount>,
    views: Query<(Entity, &ExtractedCamera, &ExtractedView), With<TemporalAntiAliasSettings>>,
) {
    for (entity, camera, view) in &views {
        // Views without a physical viewport (e.g. not yet sized) get no history.
        let Some(viewport_size) = camera.physical_viewport_size else {
            continue;
        };
        // The history format must match the view target so the TAA pass can
        // render into it directly.
        let format = if view.hdr {
            ViewTarget::TEXTURE_FORMAT_HDR
        } else {
            TextureFormat::bevy_default()
        };
        let base_descriptor = TextureDescriptor {
            label: None,
            size: Extent3d {
                depth_or_array_layers: 1,
                width: viewport_size.x,
                height: viewport_size.y,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: TextureDimension::D2,
            format,
            usage: TextureUsages::TEXTURE_BINDING | TextureUsages::RENDER_ATTACHMENT,
            view_formats: &[],
        };
        let history_1 = texture_cache.get(
            &render_device,
            TextureDescriptor {
                label: Some("taa_history_1_texture"),
                ..base_descriptor.clone()
            },
        );
        let history_2 = texture_cache.get(
            &render_device,
            TextureDescriptor {
                label: Some("taa_history_2_texture"),
                ..base_descriptor
            },
        );
        // Swap which texture is read vs written every frame.
        let (write, read) = if frame_count.0 % 2 == 0 {
            (history_1, history_2)
        } else {
            (history_2, history_1)
        };
        commands.entity(entity).insert(TAAHistoryTextures { write, read });
    }
}
/// Per-view id of the specialized TAA render pipeline, looked up by the TAA node.
#[derive(Component)]
struct TAAPipelineId(CachedRenderPipelineId);
/// Specializes the TAA pipeline for each view and stores the resulting id in a
/// [`TAAPipelineId`] component.
fn prepare_taa_pipelines(
    mut commands: Commands,
    pipeline_cache: Res<PipelineCache>,
    mut pipelines: ResMut<SpecializedRenderPipelines<TAAPipeline>>,
    pipeline: Res<TAAPipeline>,
    views: Query<(Entity, &ExtractedView, &TemporalAntiAliasSettings)>,
) {
    for (entity, view, taa_settings) in &views {
        let key = TAAPipelineKey {
            hdr: view.hdr,
            reset: taa_settings.reset,
        };
        let pipeline_id = pipelines.specialize(&pipeline_cache, &pipeline, key.clone());
        // A resetting view will need the non-reset variant next frame; warm it now.
        if key.reset {
            pipelines.specialize(&pipeline_cache, &pipeline, TAAPipelineKey { reset: false, ..key });
        }
        commands.entity(entity).insert(TAAPipelineId(pipeline_id));
    }
}

View File

@ -0,0 +1,196 @@
// References:
// https://www.elopezr.com/temporal-aa-and-the-quest-for-the-holy-trail
// http://behindthepixels.io/assets/files/TemporalAA.pdf
// http://leiy.cc/publications/TAA/TAA_EG2020_Talk.pdf
// https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING
// Controls how much to blend between the current and past samples
// Lower numbers = less of the current sample and more of the past sample = more smoothing
// Values chosen empirically
const DEFAULT_HISTORY_BLEND_RATE: f32 = 0.1; // Default blend rate to use when no confidence in history
const MIN_HISTORY_BLEND_RATE: f32 = 0.015; // Minimum blend rate allowed, to ensure at least some of the current sample is used
#import bevy_core_pipeline::fullscreen_vertex_shader
@group(0) @binding(0) var view_target: texture_2d<f32>;
@group(0) @binding(1) var history: texture_2d<f32>;
@group(0) @binding(2) var motion_vectors: texture_2d<f32>;
@group(0) @binding(3) var depth: texture_depth_2d;
@group(0) @binding(4) var nearest_sampler: sampler;
@group(0) @binding(5) var linear_sampler: sampler;
struct Output {
@location(0) view_target: vec4<f32>,
@location(1) history: vec4<f32>,
};
// TAA is ideally applied after tonemapping, but before post processing
// Post processing wants to go before tonemapping, which conflicts
// Solution: Put TAA before tonemapping, tonemap TAA input, apply TAA, invert-tonemap TAA output
// https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 20
// https://gpuopen.com/learn/optimized-reversible-tonemapper-for-resolve
// Reciprocal: 1 / x.
fn rcp(x: f32) -> f32 { return 1.0 / x; }
// Maximum of a vector's three components.
fn max3(x: vec3<f32>) -> f32 { return max(x.r, max(x.g, x.b)); }
// Reversible tonemap: maps [0, inf) into [0, 1) so blending happens in a bounded range.
fn tonemap(color: vec3<f32>) -> vec3<f32> { return color * rcp(max3(color) + 1.0); }
// Exact inverse of `tonemap`.
fn reverse_tonemap(color: vec3<f32>) -> vec3<f32> { return color * rcp(1.0 - max3(color)); }
// The following 3 functions are from Playdead (MIT-licensed)
// https://github.com/playdeadgames/temporal/blob/master/Assets/Shaders/TemporalReprojection.shader

// Converts RGB to the YCoCg color space (luma + two chroma axes).
fn RGB_to_YCoCg(rgb: vec3<f32>) -> vec3<f32> {
    let y = (rgb.r / 4.0) + (rgb.g / 2.0) + (rgb.b / 4.0);
    let co = (rgb.r / 2.0) - (rgb.b / 2.0);
    let cg = (-rgb.r / 4.0) + (rgb.g / 2.0) - (rgb.b / 4.0);
    return vec3(y, co, cg);
}

// Inverse of `RGB_to_YCoCg`, with the result clamped to [0, 1].
fn YCoCg_to_RGB(ycocg: vec3<f32>) -> vec3<f32> {
    let r = ycocg.x + ycocg.y - ycocg.z;
    let g = ycocg.x + ycocg.z;
    let b = ycocg.x - ycocg.y - ycocg.z;
    return saturate(vec3(r, g, b));
}

// Clips `history_color` towards the center of the AABB [aabb_min, aabb_max];
// colors already inside the box are returned unchanged.
// The epsilon added to `e_clip` guards against division by zero on a degenerate box.
fn clip_towards_aabb_center(history_color: vec3<f32>, current_color: vec3<f32>, aabb_min: vec3<f32>, aabb_max: vec3<f32>) -> vec3<f32> {
    let p_clip = 0.5 * (aabb_max + aabb_min);
    let e_clip = 0.5 * (aabb_max - aabb_min) + 0.00000001;
    let v_clip = history_color - p_clip;
    let v_unit = v_clip / e_clip;
    let a_unit = abs(v_unit);
    let ma_unit = max3(a_unit);
    if ma_unit > 1.0 {
        return p_clip + (v_clip / ma_unit);
    } else {
        return history_color;
    }
}
// Samples the history texture with the linear (filtering) sampler.
fn sample_history(u: f32, v: f32) -> vec3<f32> {
    return textureSample(history, linear_sampler, vec2(u, v)).rgb;
}

// Samples the current frame with the nearest sampler, tonemaps when TONEMAP is
// defined, and returns the result in YCoCg for neighborhood clipping.
fn sample_view_target(uv: vec2<f32>) -> vec3<f32> {
    var sample = textureSample(view_target, nearest_sampler, uv).rgb;
#ifdef TONEMAP
    sample = tonemap(sample);
#endif
    return RGB_to_YCoCg(sample);
}
@fragment
fn taa(@location(0) uv: vec2<f32>) -> Output {
let texture_size = vec2<f32>(textureDimensions(view_target));
let texel_size = 1.0 / texture_size;
// Fetch the current sample
let original_color = textureSample(view_target, nearest_sampler, uv);
var current_color = original_color.rgb;
#ifdef TONEMAP
current_color = tonemap(current_color);
#endif
#ifndef RESET
// Pick the closest motion_vector from 5 samples (reduces aliasing on the edges of moving entities)
// https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 27
let offset = texel_size * 2.0;
let d_uv_tl = uv + vec2(-offset.x, offset.y);
let d_uv_tr = uv + vec2(offset.x, offset.y);
let d_uv_bl = uv + vec2(-offset.x, -offset.y);
let d_uv_br = uv + vec2(offset.x, -offset.y);
var closest_uv = uv;
let d_tl = textureSample(depth, nearest_sampler, d_uv_tl);
let d_tr = textureSample(depth, nearest_sampler, d_uv_tr);
var closest_depth = textureSample(depth, nearest_sampler, uv);
let d_bl = textureSample(depth, nearest_sampler, d_uv_bl);
let d_br = textureSample(depth, nearest_sampler, d_uv_br);
if d_tl > closest_depth {
closest_uv = d_uv_tl;
closest_depth = d_tl;
}
if d_tr > closest_depth {
closest_uv = d_uv_tr;
closest_depth = d_tr;
}
if d_bl > closest_depth {
closest_uv = d_uv_bl;
closest_depth = d_bl;
}
if d_br > closest_depth {
closest_uv = d_uv_br;
}
let closest_motion_vector = textureSample(motion_vectors, nearest_sampler, closest_uv).rg;
// Reproject to find the equivalent sample from the past
// Uses 5-sample Catmull-Rom filtering (reduces blurriness)
// Catmull-Rom filtering: https://gist.github.com/TheRealMJP/c83b8c0f46b63f3a88a5986f4fa982b1
// Ignoring corners: https://www.activision.com/cdn/research/Dynamic_Temporal_Antialiasing_and_Upsampling_in_Call_of_Duty_v4.pdf#page=68
// Technically we should renormalize the weights since we're skipping the corners, but it's basically the same result
let history_uv = uv - closest_motion_vector;
let sample_position = history_uv * texture_size;
let texel_center = floor(sample_position - 0.5) + 0.5;
let f = sample_position - texel_center;
let w0 = f * (-0.5 + f * (1.0 - 0.5 * f));
let w1 = 1.0 + f * f * (-2.5 + 1.5 * f);
let w2 = f * (0.5 + f * (2.0 - 1.5 * f));
let w3 = f * f * (-0.5 + 0.5 * f);
let w12 = w1 + w2;
let texel_position_0 = (texel_center - 1.0) * texel_size;
let texel_position_3 = (texel_center + 2.0) * texel_size;
let texel_position_12 = (texel_center + (w2 / w12)) * texel_size;
var history_color = sample_history(texel_position_12.x, texel_position_0.y) * w12.x * w0.y;
history_color += sample_history(texel_position_0.x, texel_position_12.y) * w0.x * w12.y;
history_color += sample_history(texel_position_12.x, texel_position_12.y) * w12.x * w12.y;
history_color += sample_history(texel_position_3.x, texel_position_12.y) * w3.x * w12.y;
history_color += sample_history(texel_position_12.x, texel_position_3.y) * w12.x * w3.y;
// Constrain past sample with 3x3 YCoCg variance clipping (reduces ghosting)
// YCoCg: https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 33
// Variance clipping: https://developer.download.nvidia.com/gameworks/events/GDC2016/msalvi_temporal_supersampling.pdf
let s_tl = sample_view_target(uv + vec2(-texel_size.x, texel_size.y));
let s_tm = sample_view_target(uv + vec2( 0.0, texel_size.y));
let s_tr = sample_view_target(uv + vec2( texel_size.x, texel_size.y));
let s_ml = sample_view_target(uv + vec2(-texel_size.x, 0.0));
let s_mm = RGB_to_YCoCg(current_color);
let s_mr = sample_view_target(uv + vec2( texel_size.x, 0.0));
let s_bl = sample_view_target(uv + vec2(-texel_size.x, -texel_size.y));
let s_bm = sample_view_target(uv + vec2( 0.0, -texel_size.y));
let s_br = sample_view_target(uv + vec2( texel_size.x, -texel_size.y));
let moment_1 = s_tl + s_tm + s_tr + s_ml + s_mm + s_mr + s_bl + s_bm + s_br;
let moment_2 = (s_tl * s_tl) + (s_tm * s_tm) + (s_tr * s_tr) + (s_ml * s_ml) + (s_mm * s_mm) + (s_mr * s_mr) + (s_bl * s_bl) + (s_bm * s_bm) + (s_br * s_br);
let mean = moment_1 / 9.0;
let variance = (moment_2 / 9.0) - (mean * mean);
let std_deviation = sqrt(max(variance, vec3(0.0)));
history_color = RGB_to_YCoCg(history_color);
history_color = clip_towards_aabb_center(history_color, s_mm, mean - std_deviation, mean + std_deviation);
history_color = YCoCg_to_RGB(history_color);
// How confident we are that the history is representative of the current frame
var history_confidence = textureSample(history, nearest_sampler, uv).a;
let pixel_motion_vector = abs(closest_motion_vector) * texture_size;
if pixel_motion_vector.x < 0.01 && pixel_motion_vector.y < 0.01 {
// Increment when pixels are not moving
history_confidence += 10.0;
} else {
// Else reset
history_confidence = 1.0;
}
// Blend current and past sample
// Use more of the history if we're confident in it (reduces noise when there is no motion)
// https://hhoppe.com/supersample.pdf, section 4.1
let current_color_factor = clamp(1.0 / history_confidence, MIN_HISTORY_BLEND_RATE, DEFAULT_HISTORY_BLEND_RATE);
current_color = mix(history_color, current_color, current_color_factor);
#endif // #ifndef RESET
// Write output to history and view target
var out: Output;
#ifdef RESET
let history_confidence = 1.0 / MIN_HISTORY_BLEND_RATE;
#endif
out.history = vec4(current_color, history_confidence);
#ifdef TONEMAP
current_color = reverse_tonemap(current_color);
#endif
out.view_target = vec4(current_color, original_color.a);
return out;
}

View File

@ -170,6 +170,7 @@ fn extract_gizmo_data(
MeshUniform {
flags: 0,
transform,
previous_transform: transform,
inverse_transpose_model,
},
),

View File

@ -168,7 +168,7 @@ pub struct StandardMaterial {
/// This is usually generated and stored automatically ("baked") by 3D-modelling software.
///
/// Typically, steep concave parts of a model (such as the armpit of a shirt) are darker,
/// because they have little exposed to light.
/// because they have little exposure to light.
/// An occlusion map specifies those parts of the model that light doesn't reach well.
///
/// The material will be less lit in places where this texture is dark.

View File

@ -1,10 +1,11 @@
use bevy_app::Plugin;
use bevy_app::{Plugin, PreUpdate, Update};
use bevy_asset::{load_internal_asset, AssetServer, Handle, HandleUntyped};
use bevy_core_pipeline::{
prelude::Camera3d,
prepass::{
AlphaMask3dPrepass, DepthPrepass, NormalPrepass, Opaque3dPrepass, ViewPrepassTextures,
DEPTH_PREPASS_FORMAT, NORMAL_PREPASS_FORMAT,
AlphaMask3dPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass, Opaque3dPrepass,
ViewPrepassTextures, DEPTH_PREPASS_FORMAT, MOTION_VECTOR_PREPASS_FORMAT,
NORMAL_PREPASS_FORMAT,
},
};
use bevy_ecs::{
@ -14,6 +15,7 @@ use bevy_ecs::{
SystemParamItem,
},
};
use bevy_math::Mat4;
use bevy_reflect::TypeUuid;
use bevy_render::{
camera::ExtractedCamera,
@ -29,24 +31,24 @@ use bevy_render::{
BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor,
BindGroupLayoutEntry, BindingResource, BindingType, BlendState, BufferBindingType,
ColorTargetState, ColorWrites, CompareFunction, DepthBiasState, DepthStencilState,
Extent3d, FragmentState, FrontFace, MultisampleState, PipelineCache, PolygonMode,
PrimitiveState, RenderPipelineDescriptor, Shader, ShaderDefVal, ShaderRef, ShaderStages,
ShaderType, SpecializedMeshPipeline, SpecializedMeshPipelineError,
DynamicUniformBuffer, Extent3d, FragmentState, FrontFace, MultisampleState, PipelineCache,
PolygonMode, PrimitiveState, RenderPipelineDescriptor, Shader, ShaderDefVal, ShaderRef,
ShaderStages, ShaderType, SpecializedMeshPipeline, SpecializedMeshPipelineError,
SpecializedMeshPipelines, StencilFaceState, StencilState, TextureDescriptor,
TextureDimension, TextureFormat, TextureSampleType, TextureUsages, TextureViewDimension,
VertexState,
TextureDimension, TextureSampleType, TextureUsages, TextureViewDimension, VertexState,
},
renderer::RenderDevice,
renderer::{RenderDevice, RenderQueue},
texture::{FallbackImagesDepth, FallbackImagesMsaa, TextureCache},
view::{ExtractedView, Msaa, ViewUniform, ViewUniformOffset, ViewUniforms, VisibleEntities},
Extract, ExtractSchedule, Render, RenderApp, RenderSet,
};
use bevy_transform::prelude::GlobalTransform;
use bevy_utils::{tracing::error, HashMap};
use crate::{
AlphaMode, DrawMesh, Material, MaterialPipeline, MaterialPipelineKey, MeshPipeline,
MeshPipelineKey, MeshUniform, RenderMaterials, SetMaterialBindGroup, SetMeshBindGroup,
MAX_CASCADES_PER_LIGHT, MAX_DIRECTIONAL_LIGHTS,
prepare_lights, AlphaMode, DrawMesh, Material, MaterialPipeline, MaterialPipelineKey,
MeshPipeline, MeshPipelineKey, MeshUniform, RenderMaterials, SetMaterialBindGroup,
SetMeshBindGroup, MAX_CASCADES_PER_LIGHT, MAX_DIRECTIONAL_LIGHTS,
};
use std::{hash::Hash, marker::PhantomData};
@ -98,8 +100,8 @@ where
);
let Ok(render_app) = app.get_sub_app_mut(RenderApp) else {
return;
};
return;
};
render_app
.add_systems(
@ -108,7 +110,8 @@ where
)
.init_resource::<PrepassPipeline<M>>()
.init_resource::<PrepassViewBindGroup>()
.init_resource::<SpecializedMeshPipelines<PrepassPipeline<M>>>();
.init_resource::<SpecializedMeshPipelines<PrepassPipeline<M>>>()
.init_resource::<PreviousViewProjectionUniforms>();
}
}
@ -128,33 +131,95 @@ where
M::Data: PartialEq + Eq + Hash + Clone,
{
fn build(&self, app: &mut bevy_app::App) {
let no_prepass_plugin_loaded = app.world.get_resource::<AnyPrepassPluginLoaded>().is_none();
if no_prepass_plugin_loaded {
app.insert_resource(AnyPrepassPluginLoaded)
.add_systems(Update, update_previous_view_projections)
// At the start of each frame, last frame's GlobalTransforms become this frame's PreviousGlobalTransforms
.add_systems(PreUpdate, update_mesh_previous_global_transforms);
}
let Ok(render_app) = app.get_sub_app_mut(RenderApp) else {
return;
};
if no_prepass_plugin_loaded {
render_app
.init_resource::<DrawFunctions<Opaque3dPrepass>>()
.init_resource::<DrawFunctions<AlphaMask3dPrepass>>()
.add_systems(ExtractSchedule, extract_camera_prepass_phase)
.add_systems(
Render,
(
prepare_prepass_textures
.in_set(RenderSet::Prepare)
.after(bevy_render::view::prepare_windows),
prepare_previous_view_projection_uniforms
.in_set(RenderSet::Prepare)
.after(PrepassLightsViewFlush),
apply_system_buffers
.in_set(RenderSet::Prepare)
.in_set(PrepassLightsViewFlush)
.after(prepare_lights),
sort_phase_system::<Opaque3dPrepass>.in_set(RenderSet::PhaseSort),
sort_phase_system::<AlphaMask3dPrepass>.in_set(RenderSet::PhaseSort),
),
);
}
render_app
.add_systems(ExtractSchedule, extract_camera_prepass_phase)
.add_render_command::<Opaque3dPrepass, DrawPrepass<M>>()
.add_render_command::<AlphaMask3dPrepass, DrawPrepass<M>>()
.add_systems(
Render,
(
prepare_prepass_textures
.in_set(RenderSet::Prepare)
.after(bevy_render::view::prepare_windows),
queue_prepass_material_meshes::<M>.in_set(RenderSet::Queue),
sort_phase_system::<Opaque3dPrepass>.in_set(RenderSet::PhaseSort),
sort_phase_system::<AlphaMask3dPrepass>.in_set(RenderSet::PhaseSort),
),
)
.init_resource::<DrawFunctions<Opaque3dPrepass>>()
.init_resource::<DrawFunctions<AlphaMask3dPrepass>>()
.add_render_command::<Opaque3dPrepass, DrawPrepass<M>>()
.add_render_command::<AlphaMask3dPrepass, DrawPrepass<M>>();
queue_prepass_material_meshes::<M>.in_set(RenderSet::Queue),
);
}
}
/// Marker resource inserted by the first prepass plugin that runs, so that
/// the shared prepass systems/resources are only registered once even when
/// multiple material prepass plugins are added.
#[derive(Resource)]
struct AnyPrepassPluginLoaded;
/// The camera's view-projection matrix captured from the previous frame.
/// Uploaded as a uniform so the motion vector prepass can reproject this
/// frame's world positions into last frame's clip space.
#[derive(Component, ShaderType, Clone)]
pub struct PreviousViewProjection {
// projection * inverse(camera world transform), i.e. world -> clip for last frame.
pub view_proj: Mat4,
}
/// Records each motion-vector camera's current view-projection matrix as a
/// [`PreviousViewProjection`] component, so that next frame it is available
/// as "last frame's" matrix for motion vector computation.
pub fn update_previous_view_projections(
    mut commands: Commands,
    query: Query<(Entity, &Camera, &GlobalTransform), (With<Camera3d>, With<MotionVectorPrepass>)>,
) {
    for (entity, camera, camera_transform) in &query {
        // world -> view is the inverse of the camera's world-space transform;
        // composing with the projection gives the full world -> clip matrix.
        let world_from_view = camera_transform.compute_matrix();
        let view_proj = camera.projection_matrix() * world_from_view.inverse();
        commands
            .entity(entity)
            .insert(PreviousViewProjection { view_proj });
    }
}
/// A mesh's global transform matrix captured from the previous frame,
/// used to compute per-vertex motion vectors in the prepass.
#[derive(Component)]
pub struct PreviousGlobalTransform(pub Mat4);
/// Stores each mesh's current `GlobalTransform` as a [`PreviousGlobalTransform`]
/// component. Runs in `PreUpdate`, so "current" here is last frame's final
/// transform — history is only recorded while at least one active camera has
/// a `MotionVectorPrepass`.
pub fn update_mesh_previous_global_transforms(
    mut commands: Commands,
    views: Query<&Camera, (With<Camera3d>, With<MotionVectorPrepass>)>,
    meshes: Query<(Entity, &GlobalTransform), With<Handle<Mesh>>>,
) {
    // Skip the per-mesh work entirely when no motion-vector camera is active.
    if !views.iter().any(|camera| camera.is_active) {
        return;
    }

    for (entity, transform) in &meshes {
        let matrix = transform.compute_matrix();
        commands
            .entity(entity)
            .insert(PreviousGlobalTransform(matrix));
    }
}
#[derive(Resource)]
pub struct PrepassPipeline<M: Material> {
pub view_layout: BindGroupLayout,
pub view_layout_motion_vectors: BindGroupLayout,
pub view_layout_no_motion_vectors: BindGroupLayout,
pub mesh_layout: BindGroupLayout,
pub skinned_mesh_layout: BindGroupLayout,
pub material_layout: BindGroupLayout,
@ -169,38 +234,80 @@ impl<M: Material> FromWorld for PrepassPipeline<M> {
let render_device = world.resource::<RenderDevice>();
let asset_server = world.resource::<AssetServer>();
let view_layout = render_device.create_bind_group_layout(&BindGroupLayoutDescriptor {
entries: &[
// View
BindGroupLayoutEntry {
binding: 0,
visibility: ShaderStages::VERTEX_FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: true,
min_binding_size: Some(ViewUniform::min_size()),
let view_layout_motion_vectors =
render_device.create_bind_group_layout(&BindGroupLayoutDescriptor {
entries: &[
// View
BindGroupLayoutEntry {
binding: 0,
visibility: ShaderStages::VERTEX | ShaderStages::FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: true,
min_binding_size: Some(ViewUniform::min_size()),
},
count: None,
},
count: None,
},
// Globals
BindGroupLayoutEntry {
binding: 1,
visibility: ShaderStages::VERTEX_FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: Some(GlobalsUniform::min_size()),
// Globals
BindGroupLayoutEntry {
binding: 1,
visibility: ShaderStages::VERTEX_FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: Some(GlobalsUniform::min_size()),
},
count: None,
},
count: None,
},
],
label: Some("prepass_view_layout"),
});
// PreviousViewProjection
BindGroupLayoutEntry {
binding: 2,
visibility: ShaderStages::VERTEX | ShaderStages::FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: true,
min_binding_size: Some(PreviousViewProjection::min_size()),
},
count: None,
},
],
label: Some("prepass_view_layout_motion_vectors"),
});
let view_layout_no_motion_vectors =
render_device.create_bind_group_layout(&BindGroupLayoutDescriptor {
entries: &[
// View
BindGroupLayoutEntry {
binding: 0,
visibility: ShaderStages::VERTEX | ShaderStages::FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: true,
min_binding_size: Some(ViewUniform::min_size()),
},
count: None,
},
// Globals
BindGroupLayoutEntry {
binding: 1,
visibility: ShaderStages::VERTEX_FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: Some(GlobalsUniform::min_size()),
},
count: None,
},
],
label: Some("prepass_view_layout_no_motion_vectors"),
});
let mesh_pipeline = world.resource::<MeshPipeline>();
PrepassPipeline {
view_layout,
view_layout_motion_vectors,
view_layout_no_motion_vectors,
mesh_layout: mesh_pipeline.mesh_layout.clone(),
skinned_mesh_layout: mesh_pipeline.skinned_mesh_layout.clone(),
material_vertex_shader: match M::prepass_vertex_shader() {
@ -231,13 +338,20 @@ where
key: Self::Key,
layout: &MeshVertexBufferLayout,
) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError> {
let mut bind_group_layout = vec![self.view_layout.clone()];
let mut bind_group_layouts = vec![if key
.mesh_key
.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS)
{
self.view_layout_motion_vectors.clone()
} else {
self.view_layout_no_motion_vectors.clone()
}];
let mut shader_defs = Vec::new();
let mut vertex_attributes = Vec::new();
// NOTE: Eventually, it would be nice to only add this when the shaders are overloaded by the Material.
// The main limitation right now is that bind group order is hardcoded in shaders.
bind_group_layout.insert(1, self.material_layout.clone());
bind_group_layouts.insert(1, self.material_layout.clone());
if key.mesh_key.contains(MeshPipelineKey::DEPTH_PREPASS) {
shader_defs.push("DEPTH_PREPASS".into());
@ -262,13 +376,13 @@ where
vertex_attributes.push(Mesh::ATTRIBUTE_POSITION.at_shader_location(0));
}
shader_defs.push(ShaderDefVal::Int(
shader_defs.push(ShaderDefVal::UInt(
"MAX_DIRECTIONAL_LIGHTS".to_string(),
MAX_DIRECTIONAL_LIGHTS as i32,
MAX_DIRECTIONAL_LIGHTS as u32,
));
shader_defs.push(ShaderDefVal::Int(
shader_defs.push(ShaderDefVal::UInt(
"MAX_CASCADES_PER_LIGHT".to_string(),
MAX_CASCADES_PER_LIGHT as i32,
MAX_CASCADES_PER_LIGHT as u32,
));
if key.mesh_key.contains(MeshPipelineKey::DEPTH_CLAMP_ORTHO) {
shader_defs.push("DEPTH_CLAMP_ORTHO".into());
@ -289,53 +403,82 @@ where
}
}
if key
.mesh_key
.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS)
{
shader_defs.push("MOTION_VECTOR_PREPASS".into());
}
if key
.mesh_key
.intersects(MeshPipelineKey::NORMAL_PREPASS | MeshPipelineKey::MOTION_VECTOR_PREPASS)
{
shader_defs.push("PREPASS_FRAGMENT".into());
}
if layout.contains(Mesh::ATTRIBUTE_JOINT_INDEX)
&& layout.contains(Mesh::ATTRIBUTE_JOINT_WEIGHT)
{
shader_defs.push("SKINNED".into());
vertex_attributes.push(Mesh::ATTRIBUTE_JOINT_INDEX.at_shader_location(4));
vertex_attributes.push(Mesh::ATTRIBUTE_JOINT_WEIGHT.at_shader_location(5));
bind_group_layout.insert(2, self.skinned_mesh_layout.clone());
bind_group_layouts.insert(2, self.skinned_mesh_layout.clone());
} else {
bind_group_layout.insert(2, self.mesh_layout.clone());
bind_group_layouts.insert(2, self.mesh_layout.clone());
}
let vertex_buffer_layout = layout.get_layout(&vertex_attributes)?;
// The fragment shader is only used when the normal prepass is enabled
// or the material uses alpha cutoff values and doesn't rely on the standard prepass shader
let fragment = if key.mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS)
// Setup prepass fragment targets - normals in slot 0 (or None if not needed), motion vectors in slot 1
let mut targets = vec![];
targets.push(
key.mesh_key
.contains(MeshPipelineKey::NORMAL_PREPASS)
.then_some(ColorTargetState {
format: NORMAL_PREPASS_FORMAT,
blend: Some(BlendState::REPLACE),
write_mask: ColorWrites::ALL,
}),
);
targets.push(
key.mesh_key
.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS)
.then_some(ColorTargetState {
format: MOTION_VECTOR_PREPASS_FORMAT,
blend: Some(BlendState::REPLACE),
write_mask: ColorWrites::ALL,
}),
);
if targets.iter().all(Option::is_none) {
// if no targets are required then clear the list, so that no fragment shader is required
// (though one may still be used for discarding depth buffer writes)
targets.clear();
}
// The fragment shader is only used when the normal prepass or motion vectors prepass
// is enabled or the material uses alpha cutoff values and doesn't rely on the standard
// prepass shader
let fragment_required = !targets.is_empty()
|| ((key.mesh_key.contains(MeshPipelineKey::ALPHA_MASK)
|| blend_key == MeshPipelineKey::BLEND_PREMULTIPLIED_ALPHA
|| blend_key == MeshPipelineKey::BLEND_ALPHA)
&& self.material_fragment_shader.is_some())
{
// Use the fragment shader from the material if present
let frag_shader_handle = if let Some(handle) = &self.material_fragment_shader {
handle.clone()
} else {
PREPASS_SHADER_HANDLE.typed::<Shader>()
&& self.material_fragment_shader.is_some());
let fragment = fragment_required.then(|| {
// Use the fragment shader from the material
let frag_shader_handle = match self.material_fragment_shader.clone() {
Some(frag_shader_handle) => frag_shader_handle,
_ => PREPASS_SHADER_HANDLE.typed::<Shader>(),
};
let mut targets = vec![];
// When the normal prepass is enabled we need a target to be able to write to it.
if key.mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS) {
targets.push(Some(ColorTargetState {
format: TextureFormat::Rgb10a2Unorm,
blend: Some(BlendState::REPLACE),
write_mask: ColorWrites::ALL,
}));
}
Some(FragmentState {
FragmentState {
shader: frag_shader_handle,
entry_point: "fragment".into(),
shader_defs: shader_defs.clone(),
targets,
})
} else {
None
};
}
});
// Use the vertex shader from the material if present
let vert_shader_handle = if let Some(handle) = &self.material_vertex_shader {
@ -352,7 +495,7 @@ where
buffers: vec![vertex_buffer_layout],
},
fragment,
layout: bind_group_layout,
layout: bind_group_layouts,
primitive: PrimitiveState {
topology: key.mesh_key.primitive_topology(),
strip_index_format: None,
@ -397,9 +540,9 @@ where
}
pub fn get_bind_group_layout_entries(
bindings: [u32; 2],
bindings: [u32; 3],
multisampled: bool,
) -> [BindGroupLayoutEntry; 2] {
) -> [BindGroupLayoutEntry; 3] {
[
// Depth texture
BindGroupLayoutEntry {
@ -423,6 +566,17 @@ pub fn get_bind_group_layout_entries(
},
count: None,
},
// Motion Vectors texture
BindGroupLayoutEntry {
binding: bindings[2],
visibility: ShaderStages::FRAGMENT,
ty: BindingType::Texture {
multisampled,
sample_type: TextureSampleType::Float { filterable: true },
view_dimension: TextureViewDimension::D2,
},
count: None,
},
]
}
@ -431,8 +585,8 @@ pub fn get_bindings<'a>(
fallback_images: &'a mut FallbackImagesMsaa,
fallback_depths: &'a mut FallbackImagesDepth,
msaa: &'a Msaa,
bindings: [u32; 2],
) -> [BindGroupEntry<'a>; 2] {
bindings: [u32; 3],
) -> [BindGroupEntry<'a>; 3] {
let depth_view = match prepass_textures.and_then(|x| x.depth.as_ref()) {
Some(texture) => &texture.default_view,
None => {
@ -442,13 +596,18 @@ pub fn get_bindings<'a>(
}
};
let normal_motion_vectors_fallback = &fallback_images
.image_for_samplecount(msaa.samples())
.texture_view;
let normal_view = match prepass_textures.and_then(|x| x.normal.as_ref()) {
Some(texture) => &texture.default_view,
None => {
&fallback_images
.image_for_samplecount(msaa.samples())
.texture_view
}
None => normal_motion_vectors_fallback,
};
let motion_vectors_view = match prepass_textures.and_then(|x| x.motion_vectors.as_ref()) {
Some(texture) => &texture.default_view,
None => normal_motion_vectors_fallback,
};
[
@ -460,6 +619,10 @@ pub fn get_bindings<'a>(
binding: bindings[1],
resource: BindingResource::TextureView(normal_view),
},
BindGroupEntry {
binding: bindings[2],
resource: BindingResource::TextureView(motion_vectors_view),
},
]
}
@ -473,32 +636,93 @@ pub fn extract_camera_prepass_phase(
&Camera,
Option<&DepthPrepass>,
Option<&NormalPrepass>,
Option<&MotionVectorPrepass>,
Option<&PreviousViewProjection>,
),
With<Camera3d>,
>,
>,
) {
for (entity, camera, depth_prepass, normal_prepass) in cameras_3d.iter() {
if !camera.is_active {
continue;
}
for (
entity,
camera,
depth_prepass,
normal_prepass,
motion_vector_prepass,
maybe_previous_view_proj,
) in cameras_3d.iter()
{
if camera.is_active {
let mut entity = commands.get_or_spawn(entity);
let mut entity = commands.get_or_spawn(entity);
if depth_prepass.is_some() || normal_prepass.is_some() {
entity.insert((
RenderPhase::<Opaque3dPrepass>::default(),
RenderPhase::<AlphaMask3dPrepass>::default(),
));
}
if depth_prepass.is_some() {
entity.insert(DepthPrepass);
}
if normal_prepass.is_some() {
entity.insert(NormalPrepass);
if depth_prepass.is_some()
|| normal_prepass.is_some()
|| motion_vector_prepass.is_some()
{
entity.insert((
RenderPhase::<Opaque3dPrepass>::default(),
RenderPhase::<AlphaMask3dPrepass>::default(),
));
}
if depth_prepass.is_some() {
entity.insert(DepthPrepass);
}
if normal_prepass.is_some() {
entity.insert(NormalPrepass);
}
if motion_vector_prepass.is_some() {
entity.insert(MotionVectorPrepass);
}
if let Some(previous_view) = maybe_previous_view_proj {
entity.insert(previous_view.clone());
}
}
}
}
/// GPU-side storage for every motion-vector view's [`PreviousViewProjection`],
/// packed into one dynamic uniform buffer (one entry per view, addressed by
/// [`PreviousViewProjectionUniformOffset`]).
#[derive(Resource, Default)]
pub struct PreviousViewProjectionUniforms {
pub uniforms: DynamicUniformBuffer<PreviousViewProjection>,
}
/// Per-view dynamic offset into [`PreviousViewProjectionUniforms::uniforms`],
/// passed to `set_bind_group` when rendering that view's prepass.
#[derive(Component)]
pub struct PreviousViewProjectionUniformOffset {
pub offset: u32,
}
/// Uploads every motion-vector view's previous view-projection matrix into a
/// single dynamic uniform buffer, and tags each view entity with its dynamic
/// offset ([`PreviousViewProjectionUniformOffset`]).
pub fn prepare_previous_view_projection_uniforms(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mut view_uniforms: ResMut<PreviousViewProjectionUniforms>,
    views: Query<
        (Entity, &ExtractedView, Option<&PreviousViewProjection>),
        With<MotionVectorPrepass>,
    >,
) {
    // The buffer is rebuilt from scratch every frame.
    view_uniforms.uniforms.clear();

    for (entity, camera, maybe_previous_view_proj) in &views {
        // If no history was extracted for this view (e.g. its first frame),
        // fall back to the current frame's view-projection matrix.
        let view_projection = maybe_previous_view_proj
            .cloned()
            .unwrap_or_else(|| PreviousViewProjection {
                view_proj: camera.projection * camera.transform.compute_matrix().inverse(),
            });

        let offset = view_uniforms.uniforms.push(view_projection);
        commands
            .entity(entity)
            .insert(PreviousViewProjectionUniformOffset { offset });
    }

    view_uniforms
        .uniforms
        .write_buffer(&render_device, &render_queue);
}
// Prepares the textures used by the prepass
pub fn prepare_prepass_textures(
mut commands: Commands,
@ -511,6 +735,7 @@ pub fn prepare_prepass_textures(
&ExtractedCamera,
Option<&DepthPrepass>,
Option<&NormalPrepass>,
Option<&MotionVectorPrepass>,
),
(
With<RenderPhase<Opaque3dPrepass>>,
@ -520,7 +745,8 @@ pub fn prepare_prepass_textures(
) {
let mut depth_textures = HashMap::default();
let mut normal_textures = HashMap::default();
for (entity, camera, depth_prepass, normal_prepass) in &views_3d {
let mut motion_vectors_textures = HashMap::default();
for (entity, camera, depth_prepass, normal_prepass, motion_vector_prepass) in &views_3d {
let Some(physical_target_size) = camera.physical_target_size else {
continue;
};
@ -574,9 +800,32 @@ pub fn prepare_prepass_textures(
.clone()
});
let cached_motion_vectors_texture = motion_vector_prepass.is_some().then(|| {
motion_vectors_textures
.entry(camera.target.clone())
.or_insert_with(|| {
texture_cache.get(
&render_device,
TextureDescriptor {
label: Some("prepass_motion_vectors_textures"),
size,
mip_level_count: 1,
sample_count: msaa.samples(),
dimension: TextureDimension::D2,
format: MOTION_VECTOR_PREPASS_FORMAT,
usage: TextureUsages::RENDER_ATTACHMENT
| TextureUsages::TEXTURE_BINDING,
view_formats: &[],
},
)
})
.clone()
});
commands.entity(entity).insert(ViewPrepassTextures {
depth: cached_depth_texture,
normal: cached_normals_texture,
motion_vectors: cached_motion_vectors_texture,
size,
});
}
@ -584,7 +833,8 @@ pub fn prepare_prepass_textures(
#[derive(Default, Resource)]
pub struct PrepassViewBindGroup {
bind_group: Option<BindGroup>,
motion_vectors: Option<BindGroup>,
no_motion_vectors: Option<BindGroup>,
}
pub fn queue_prepass_view_bind_group<M: Material>(
@ -592,25 +842,51 @@ pub fn queue_prepass_view_bind_group<M: Material>(
prepass_pipeline: Res<PrepassPipeline<M>>,
view_uniforms: Res<ViewUniforms>,
globals_buffer: Res<GlobalsBuffer>,
previous_view_proj_uniforms: Res<PreviousViewProjectionUniforms>,
mut prepass_view_bind_group: ResMut<PrepassViewBindGroup>,
) {
let Some(view_binding) = view_uniforms.uniforms.binding() else { return };
let Some(globals_binding) = globals_buffer.buffer.binding() else { return };
prepass_view_bind_group.bind_group =
Some(render_device.create_bind_group(&BindGroupDescriptor {
entries: &[
BindGroupEntry {
binding: 0,
resource: view_binding,
},
BindGroupEntry {
binding: 1,
resource: globals_binding,
},
],
label: Some("prepass_view_bind_group"),
layout: &prepass_pipeline.view_layout,
}));
if let (Some(view_binding), Some(globals_binding)) = (
view_uniforms.uniforms.binding(),
globals_buffer.buffer.binding(),
) {
prepass_view_bind_group.no_motion_vectors =
Some(render_device.create_bind_group(&BindGroupDescriptor {
entries: &[
BindGroupEntry {
binding: 0,
resource: view_binding.clone(),
},
BindGroupEntry {
binding: 1,
resource: globals_binding.clone(),
},
],
label: Some("prepass_view_no_motion_vectors_bind_group"),
layout: &prepass_pipeline.view_layout_no_motion_vectors,
}));
if let Some(previous_view_proj_binding) = previous_view_proj_uniforms.uniforms.binding() {
prepass_view_bind_group.motion_vectors =
Some(render_device.create_bind_group(&BindGroupDescriptor {
entries: &[
BindGroupEntry {
binding: 0,
resource: view_binding,
},
BindGroupEntry {
binding: 1,
resource: globals_binding,
},
BindGroupEntry {
binding: 2,
resource: previous_view_proj_binding,
},
],
label: Some("prepass_view_motion_vectors_bind_group"),
layout: &prepass_pipeline.view_layout_motion_vectors,
}));
}
}
}
#[allow(clippy::too_many_arguments)]
@ -631,6 +907,7 @@ pub fn queue_prepass_material_meshes<M: Material>(
&mut RenderPhase<AlphaMask3dPrepass>,
Option<&DepthPrepass>,
Option<&NormalPrepass>,
Option<&MotionVectorPrepass>,
)>,
) where
M::Data: PartialEq + Eq + Hash + Clone,
@ -650,6 +927,7 @@ pub fn queue_prepass_material_meshes<M: Material>(
mut alpha_mask_phase,
depth_prepass,
normal_prepass,
motion_vector_prepass,
) in &mut views
{
let mut view_key = MeshPipelineKey::from_msaa_samples(msaa.samples());
@ -659,6 +937,9 @@ pub fn queue_prepass_material_meshes<M: Material>(
if normal_prepass.is_some() {
view_key |= MeshPipelineKey::NORMAL_PREPASS;
}
if motion_vector_prepass.is_some() {
view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS;
}
let rangefinder = view.rangefinder3d();
@ -734,23 +1015,44 @@ pub fn queue_prepass_material_meshes<M: Material>(
pub struct SetPrepassViewBindGroup<const I: usize>;
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetPrepassViewBindGroup<I> {
type Param = SRes<PrepassViewBindGroup>;
type ViewWorldQuery = Read<ViewUniformOffset>;
type ViewWorldQuery = (
Read<ViewUniformOffset>,
Option<Read<PreviousViewProjectionUniformOffset>>,
);
type ItemWorldQuery = ();
#[inline]
fn render<'w>(
_item: &P,
view_uniform_offset: &'_ ViewUniformOffset,
(view_uniform_offset, previous_view_projection_uniform_offset): (
&'_ ViewUniformOffset,
Option<&'_ PreviousViewProjectionUniformOffset>,
),
_entity: (),
prepass_view_bind_group: SystemParamItem<'w, '_, Self::Param>,
pass: &mut TrackedRenderPass<'w>,
) -> RenderCommandResult {
let prepass_view_bind_group = prepass_view_bind_group.into_inner();
pass.set_bind_group(
I,
prepass_view_bind_group.bind_group.as_ref().unwrap(),
&[view_uniform_offset.offset],
);
if let Some(previous_view_projection_uniform_offset) =
previous_view_projection_uniform_offset
{
pass.set_bind_group(
I,
prepass_view_bind_group.motion_vectors.as_ref().unwrap(),
&[
view_uniform_offset.offset,
previous_view_projection_uniform_offset.offset,
],
);
} else {
pass.set_bind_group(
I,
prepass_view_bind_group.no_motion_vectors.as_ref().unwrap(),
&[view_uniform_offset.offset],
);
}
RenderCommandResult::Success
}
}
@ -762,3 +1064,6 @@ pub type DrawPrepass<M> = (
SetMeshBindGroup<2>,
DrawMesh,
);
/// System set used as an ordering anchor: `apply_system_buffers` runs in this
/// set after `prepare_lights`, and `prepare_previous_view_projection_uniforms`
/// is scheduled after it (see the plugin's `Render` schedule setup).
#[derive(Debug, Hash, PartialEq, Eq, Clone, SystemSet)]
struct PrepassLightsViewFlush;

View File

@ -36,6 +36,11 @@ struct VertexOutput {
@location(2) world_tangent: vec4<f32>,
#endif // VERTEX_TANGENTS
#endif // NORMAL_PREPASS
#ifdef MOTION_VECTOR_PREPASS
@location(3) world_position: vec4<f32>,
@location(4) previous_world_position: vec4<f32>,
#endif // MOTION_VECTOR_PREPASS
}
@vertex
@ -69,16 +74,58 @@ fn vertex(vertex: Vertex) -> VertexOutput {
#endif // VERTEX_TANGENTS
#endif // NORMAL_PREPASS
#ifdef MOTION_VECTOR_PREPASS
out.world_position = mesh_position_local_to_world(model, vec4<f32>(vertex.position, 1.0));
out.previous_world_position = mesh_position_local_to_world(mesh.previous_model, vec4<f32>(vertex.position, 1.0));
#endif // MOTION_VECTOR_PREPASS
return out;
}
#ifdef NORMAL_PREPASS
#ifdef PREPASS_FRAGMENT
struct FragmentInput {
#ifdef NORMAL_PREPASS
@location(1) world_normal: vec3<f32>,
#endif // NORMAL_PREPASS
#ifdef MOTION_VECTOR_PREPASS
@location(3) world_position: vec4<f32>,
@location(4) previous_world_position: vec4<f32>,
#endif // MOTION_VECTOR_PREPASS
}
struct FragmentOutput {
#ifdef NORMAL_PREPASS
@location(0) normal: vec4<f32>,
#endif // NORMAL_PREPASS
#ifdef MOTION_VECTOR_PREPASS
@location(1) motion_vector: vec2<f32>,
#endif // MOTION_VECTOR_PREPASS
}
@fragment
fn fragment(in: FragmentInput) -> @location(0) vec4<f32> {
return vec4(in.world_normal * 0.5 + vec3(0.5), 1.0);
fn fragment(in: FragmentInput) -> FragmentOutput {
var out: FragmentOutput;
#ifdef NORMAL_PREPASS
out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0);
#endif
#ifdef MOTION_VECTOR_PREPASS
let clip_position_t = view.unjittered_view_proj * in.world_position;
let clip_position = clip_position_t.xy / clip_position_t.w;
let previous_clip_position_t = previous_view_proj * in.previous_world_position;
let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w;
// These motion vectors are used as offsets to UV positions and are stored
// in the range -1,1 to allow offsetting from the one corner to the
// diagonally-opposite corner in UV coordinates, in either direction.
// A difference between diagonally-opposite corners of clip space is in the
// range -2,2, so this needs to be scaled by 0.5. And the V direction goes
// down where clip space y goes up, so y needs to be flipped.
out.motion_vector = (clip_position - previous_clip_position) * vec2(0.5, -0.5);
#endif // MOTION_VECTOR_PREPASS
return out;
}
#endif // NORMAL_PREPASS
#endif // PREPASS_FRAGMENT

View File

@ -5,10 +5,14 @@
@group(0) @binding(0)
var<uniform> view: View;
@group(0) @binding(1)
var<uniform> globals: Globals;
#ifdef MOTION_VECTOR_PREPASS
@group(0) @binding(2)
var<uniform> previous_view_proj: mat4x4<f32>;
#endif // MOTION_VECTOR_PREPASS
// Material bindings will be in @group(1)
@group(2) @binding(0)
@ -19,4 +23,3 @@ var<uniform> mesh: Mesh;
var<uniform> joint_matrices: SkinnedMesh;
#import bevy_pbr::skinning
#endif

View File

@ -1,5 +1,16 @@
#define_import_path bevy_pbr::prepass_utils
// Loads the depth value written by the depth prepass at this fragment's
// screen position. Only compiled when DEPTH_PREPASS is NOT defined —
// presumably so the helper exists for shaders that *consume* the prepass
// output rather than the prepass shader itself; confirm against the importer.
// NOTE(review): assumes `depth_prepass_texture` is bound by the including
// shader, and that MULTISAMPLED matches that texture's sample count.
#ifndef DEPTH_PREPASS
fn prepass_depth(frag_coord: vec4<f32>, sample_index: u32) -> f32 {
#ifdef MULTISAMPLED
// Multisampled textures require an explicit sample index.
let depth_sample = textureLoad(depth_prepass_texture, vec2<i32>(frag_coord.xy), i32(sample_index));
#else
// Single-sample texture: sample index is always 0.
let depth_sample = textureLoad(depth_prepass_texture, vec2<i32>(frag_coord.xy), 0);
#endif
return depth_sample;
}
#endif // DEPTH_PREPASS
#ifndef NORMAL_PREPASS
fn prepass_normal(frag_coord: vec4<f32>, sample_index: u32) -> vec3<f32> {
#ifdef MULTISAMPLED
@ -11,13 +22,13 @@ fn prepass_normal(frag_coord: vec4<f32>, sample_index: u32) -> vec3<f32> {
}
#endif // NORMAL_PREPASS
#ifndef DEPTH_PREPASS
fn prepass_depth(frag_coord: vec4<f32>, sample_index: u32) -> f32 {
#ifndef MOTION_VECTOR_PREPASS
fn prepass_motion_vector(frag_coord: vec4<f32>, sample_index: u32) -> vec2<f32> {
#ifdef MULTISAMPLED
let depth_sample = textureLoad(depth_prepass_texture, vec2<i32>(frag_coord.xy), i32(sample_index));
let motion_vector_sample = textureLoad(motion_vector_prepass_texture, vec2<i32>(frag_coord.xy), i32(sample_index));
#else
let depth_sample = textureLoad(depth_prepass_texture, vec2<i32>(frag_coord.xy), 0);
#endif // MULTISAMPLED
return depth_sample;
let motion_vector_sample = textureLoad(motion_vector_prepass_texture, vec2<i32>(frag_coord.xy), 0);
#endif
return motion_vector_sample.rg;
}
#endif // DEPTH_PREPASS
#endif // MOTION_VECTOR_PREPASS

View File

@ -1,8 +1,9 @@
use crate::{
environment_map, prepass, EnvironmentMapLight, FogMeta, GlobalLightMeta, GpuFog, GpuLights,
GpuPointLights, LightMeta, NotShadowCaster, NotShadowReceiver, ShadowSamplers,
ViewClusterBindings, ViewFogUniformOffset, ViewLightsUniformOffset, ViewShadowBindings,
CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT, MAX_CASCADES_PER_LIGHT, MAX_DIRECTIONAL_LIGHTS,
GpuPointLights, LightMeta, NotShadowCaster, NotShadowReceiver, PreviousGlobalTransform,
ShadowSamplers, ViewClusterBindings, ViewFogUniformOffset, ViewLightsUniformOffset,
ViewShadowBindings, CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT, MAX_CASCADES_PER_LIGHT,
MAX_DIRECTIONAL_LIGHTS,
};
use bevy_app::Plugin;
use bevy_asset::{load_internal_asset, Assets, Handle, HandleUntyped};
@ -123,6 +124,7 @@ impl Plugin for MeshRenderPlugin {
#[derive(Component, ShaderType, Clone)]
pub struct MeshUniform {
pub transform: Mat4,
pub previous_transform: Mat4,
pub inverse_transpose_model: Mat4,
pub flags: u32,
}
@ -149,6 +151,7 @@ pub fn extract_meshes(
Entity,
&ComputedVisibility,
&GlobalTransform,
Option<&PreviousGlobalTransform>,
&Handle<Mesh>,
Option<With<NotShadowReceiver>>,
Option<With<NotShadowCaster>>,
@ -159,8 +162,11 @@ pub fn extract_meshes(
let mut not_caster_commands = Vec::with_capacity(*prev_not_caster_commands_len);
let visible_meshes = meshes_query.iter().filter(|(_, vis, ..)| vis.is_visible());
for (entity, _, transform, handle, not_receiver, not_caster) in visible_meshes {
for (entity, _, transform, previous_transform, handle, not_receiver, not_caster) in
visible_meshes
{
let transform = transform.compute_matrix();
let previous_transform = previous_transform.map(|t| t.0).unwrap_or(transform);
let mut flags = if not_receiver.is_some() {
MeshFlags::empty()
} else {
@ -172,6 +178,7 @@ pub fn extract_meshes(
let uniform = MeshUniform {
flags: flags.bits,
transform,
previous_transform,
inverse_transpose_model: transform.inverse().transpose(),
};
if not_caster.is_some() {
@ -434,7 +441,7 @@ impl FromWorld for MeshPipeline {
if cfg!(not(feature = "webgl")) || (cfg!(feature = "webgl") && !multisampled) {
entries.extend_from_slice(&prepass::get_bind_group_layout_entries(
[16, 17],
[16, 17, 18],
multisampled,
));
}
@ -572,9 +579,10 @@ bitflags::bitflags! {
const DEBAND_DITHER = (1 << 2);
const DEPTH_PREPASS = (1 << 3);
const NORMAL_PREPASS = (1 << 4);
const ALPHA_MASK = (1 << 5);
const ENVIRONMENT_MAP = (1 << 6);
const DEPTH_CLAMP_ORTHO = (1 << 7);
const MOTION_VECTOR_PREPASS = (1 << 5);
const ALPHA_MASK = (1 << 6);
const ENVIRONMENT_MAP = (1 << 7);
const DEPTH_CLAMP_ORTHO = (1 << 8);
const BLEND_RESERVED_BITS = Self::BLEND_MASK_BITS << Self::BLEND_SHIFT_BITS; // ← Bitmask reserving bits for the blend state
const BLEND_OPAQUE = (0 << Self::BLEND_SHIFT_BITS); // ← Values are just sequential within the mask, and can range from 0 to 3
const BLEND_PREMULTIPLIED_ALPHA = (1 << Self::BLEND_SHIFT_BITS); //
@ -1059,7 +1067,7 @@ pub fn queue_mesh_view_bind_groups(
&mut fallback_images,
&mut fallback_depths,
&msaa,
[16, 17],
[16, 17, 18],
));
}

View File

@ -2,6 +2,7 @@
struct Mesh {
model: mat4x4<f32>,
previous_model: mat4x4<f32>,
inverse_transpose_model: mat4x4<f32>,
// 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
flags: u32,

View File

@ -63,9 +63,13 @@ var dt_lut_sampler: sampler;
var depth_prepass_texture: texture_depth_multisampled_2d;
@group(0) @binding(17)
var normal_prepass_texture: texture_multisampled_2d<f32>;
@group(0) @binding(18)
var motion_vector_prepass_texture: texture_multisampled_2d<f32>;
#else
@group(0) @binding(16)
var depth_prepass_texture: texture_depth_2d;
@group(0) @binding(17)
var normal_prepass_texture: texture_2d<f32>;
@group(0) @binding(18)
var motion_vector_prepass_texture: texture_2d<f32>;
#endif

View File

@ -158,7 +158,7 @@ fn pbr_input_new() -> PbrInput {
return pbr_input;
}
#ifndef NORMAL_PREPASS
#ifndef PREPASS_FRAGMENT
fn pbr(
in: PbrInput,
) -> vec4<f32> {
@ -268,9 +268,9 @@ fn pbr(
return output_color;
}
#endif // NORMAL_PREPASS
#endif // PREPASS_FRAGMENT
#ifndef NORMAL_PREPASS
#ifndef PREPASS_FRAGMENT
fn apply_fog(input_color: vec4<f32>, fragment_world_position: vec3<f32>, view_world_position: vec3<f32>) -> vec4<f32> {
let view_to_world = fragment_world_position.xyz - view_world_position.xyz;
@ -308,7 +308,7 @@ fn apply_fog(input_color: vec4<f32>, fragment_world_position: vec3<f32>, view_wo
return input_color;
}
}
#endif
#endif // PREPASS_FRAGMENT
#ifdef PREMULTIPLY_ALPHA
fn premultiply_alpha(standard_material_flags: u32, color: vec4<f32>) -> vec4<f32> {

View File

@ -10,12 +10,18 @@ struct FragmentInput {
#ifdef VERTEX_UVS
@location(0) uv: vec2<f32>,
#endif // VERTEX_UVS
#ifdef NORMAL_PREPASS
@location(1) world_normal: vec3<f32>,
#ifdef VERTEX_TANGENTS
@location(2) world_tangent: vec4<f32>,
#endif // VERTEX_TANGENTS
#endif // NORMAL_PREPASS
#ifdef MOTION_VECTOR_PREPASS
@location(3) world_position: vec4<f32>,
@location(4) previous_world_position: vec4<f32>,
#endif // MOTION_VECTOR_PREPASS
};
// Cutoff used for the premultiplied alpha modes BLEND and ADD.
@ -51,10 +57,10 @@ fn prepass_alpha_discard(in: FragmentInput) {
}
#else // BLEND_PREMULTIPLIED_ALPHA || BLEND_ALPHA
let alpha_mode = material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS;
if (alpha_mode == STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND || alpha_mode == STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD)
if (alpha_mode == STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND || alpha_mode == STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD)
&& output_color.a < PREMULTIPLIED_ALPHA_CUTOFF {
discard;
} else if alpha_mode == STANDARD_MATERIAL_FLAGS_ALPHA_MODE_PREMULTIPLIED
} else if alpha_mode == STANDARD_MATERIAL_FLAGS_ALPHA_MODE_PREMULTIPLIED
&& all(output_color < vec4(PREMULTIPLIED_ALPHA_CUTOFF)) {
discard;
}
@ -63,12 +69,24 @@ fn prepass_alpha_discard(in: FragmentInput) {
#endif // EMPTY_PREPASS_ALPHA_DISCARD not defined
}
#ifdef PREPASS_FRAGMENT
struct FragmentOutput {
#ifdef NORMAL_PREPASS
@location(0) normal: vec4<f32>,
#endif // NORMAL_PREPASS
#ifdef MOTION_VECTOR_PREPASS
@location(1) motion_vector: vec2<f32>,
#endif // MOTION_VECTOR_PREPASS
}
@fragment
fn fragment(in: FragmentInput) -> @location(0) vec4<f32> {
fn fragment(in: FragmentInput) -> FragmentOutput {
prepass_alpha_discard(in);
var out: FragmentOutput;
#ifdef NORMAL_PREPASS
// NOTE: Unlit bit not set means == 0 is true, so the true case is if lit
if (material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u {
let world_normal = prepare_world_normal(
@ -90,18 +108,31 @@ fn fragment(in: FragmentInput) -> @location(0) vec4<f32> {
#endif // VERTEX_UVS
);
return vec4(normal * 0.5 + vec3(0.5), 1.0);
out.normal = vec4(normal * 0.5 + vec3(0.5), 1.0);
} else {
return vec4(in.world_normal * 0.5 + vec3(0.5), 1.0);
out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0);
}
#endif // NORMAL_PREPASS
#ifdef MOTION_VECTOR_PREPASS
let clip_position_t = view.unjittered_view_proj * in.world_position;
let clip_position = clip_position_t.xy / clip_position_t.w;
let previous_clip_position_t = previous_view_proj * in.previous_world_position;
let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w;
// These motion vectors are used as offsets to UV positions and are stored
// in the range -1,1 to allow offsetting from the one corner to the
// diagonally-opposite corner in UV coordinates, in either direction.
// A difference between diagonally-opposite corners of clip space is in the
// range -2,2, so this needs to be scaled by 0.5. And the V direction goes
// down where clip space y goes up, so y needs to be flipped.
out.motion_vector = (clip_position - previous_clip_position) * vec2(0.5, -0.5);
#endif // MOTION_VECTOR_PREPASS
return out;
}
#else // NORMAL_PREPASS
#else
@fragment
fn fragment(in: FragmentInput) {
prepass_alpha_discard(in);
}
#endif // NORMAL_PREPASS
#endif // PREPASS_FRAGMENT

View File

@ -115,7 +115,7 @@ pub struct Camera {
/// If this is enabled, a previous camera exists that shares this camera's render target, and this camera has MSAA enabled, then the previous camera's
/// outputs will be written to the intermediate multi-sampled render target textures for this camera. This enables cameras with MSAA enabled to
/// "write their results on top" of previous camera results, and include them as a part of their render results. This is enabled by default to ensure
/// cameras with MSAA enabled layer their results in the same way as cameras without MSAA enabled by default.
/// cameras with MSAA enabled layer their results in the same way as cameras without MSAA enabled by default.
pub msaa_writeback: bool,
}
@ -361,8 +361,8 @@ impl CameraRenderGraph {
Self(name.into())
}
#[inline]
/// Sets the graph name.
#[inline]
pub fn set<T: Into<Cow<'static, str>>>(&mut self, name: T) {
self.0 = name.into();
}
@ -574,19 +574,28 @@ pub fn extract_cameras(
&GlobalTransform,
&VisibleEntities,
Option<&ColorGrading>,
Option<&TemporalJitter>,
)>,
>,
primary_window: Extract<Query<Entity, With<PrimaryWindow>>>,
) {
let primary_window = primary_window.iter().next();
for (entity, camera, camera_render_graph, transform, visible_entities, color_grading) in
query.iter()
for (
entity,
camera,
camera_render_graph,
transform,
visible_entities,
color_grading,
temporal_jitter,
) in query.iter()
{
let color_grading = *color_grading.unwrap_or(&ColorGrading::default());
if !camera.is_active {
continue;
}
if let (Some((viewport_origin, _)), Some(viewport_size), Some(target_size)) = (
camera.physical_viewport_rect(),
camera.physical_viewport_size(),
@ -595,7 +604,10 @@ pub fn extract_cameras(
if target_size.x == 0 || target_size.y == 0 {
continue;
}
commands.get_or_spawn(entity).insert((
let mut commands = commands.get_or_spawn(entity);
commands.insert((
ExtractedCamera {
target: camera.target.normalize(primary_window),
viewport: camera.viewport.clone(),
@ -623,6 +635,10 @@ pub fn extract_cameras(
},
visible_entities.clone(),
));
if let Some(temporal_jitter) = temporal_jitter {
commands.insert(temporal_jitter.clone());
}
}
}
}
@ -686,3 +702,32 @@ pub fn sort_cameras(
);
}
}
/// A subpixel offset to jitter a perspective camera's frustum by.
///
/// Useful for temporal rendering techniques.
///
/// Do not use with [`OrthographicProjection`].
///
/// [`OrthographicProjection`]: crate::camera::OrthographicProjection
#[derive(Component, Clone, Default)]
pub struct TemporalJitter {
    /// Offset is in range [-0.5, 0.5].
    pub offset: Vec2,
}
impl TemporalJitter {
    /// Applies this jitter to `projection` in place.
    ///
    /// `view_size` is the viewport size in pixels; the stored [-0.5, 0.5] pixel
    /// offset is divided by it to convert to a subpixel offset in clip-space units.
    pub fn jitter_projection(&self, projection: &mut Mat4, view_size: Vec2) {
        // An orthographic projection has w_axis.w == 1.0 (no perspective divide),
        // which this jitter math does not support — warn and leave the matrix alone.
        if projection.w_axis.w == 1.0 {
            warn!(
                "TemporalJitter not supported with OrthographicProjection. Use PerspectiveProjection instead."
            );
            return;
        }
        let jitter = self.offset / view_size;
        // Offsetting the z column's x/y components shifts clip-space x/y in
        // proportion to depth, which after the perspective divide becomes a
        // constant subpixel translation in screen space.
        projection.z_axis.x += jitter.x;
        projection.z_axis.y += jitter.y;
    }
}

View File

@ -6,7 +6,7 @@ pub use visibility::*;
pub use window::*;
use crate::{
camera::ExtractedCamera,
camera::{ExtractedCamera, TemporalJitter},
extract_resource::{ExtractResource, ExtractResourcePlugin},
prelude::{Image, Shader},
render_asset::RenderAssets,
@ -18,7 +18,7 @@ use crate::{
};
use bevy_app::{App, Plugin};
use bevy_ecs::prelude::*;
use bevy_math::{Mat4, UVec4, Vec3, Vec4};
use bevy_math::{Mat4, UVec4, Vec3, Vec4, Vec4Swizzles};
use bevy_reflect::{Reflect, TypeUuid};
use bevy_transform::components::GlobalTransform;
use bevy_utils::HashMap;
@ -161,6 +161,7 @@ impl Default for ColorGrading {
#[derive(Clone, ShaderType)]
pub struct ViewUniform {
view_proj: Mat4,
unjittered_view_proj: Mat4,
inverse_view_proj: Mat4,
view: Mat4,
inverse_view: Mat4,
@ -307,31 +308,41 @@ pub struct ViewDepthTexture {
pub view: TextureView,
}
fn prepare_view_uniforms(
pub fn prepare_view_uniforms(
mut commands: Commands,
render_device: Res<RenderDevice>,
render_queue: Res<RenderQueue>,
mut view_uniforms: ResMut<ViewUniforms>,
views: Query<(Entity, &ExtractedView)>,
views: Query<(Entity, &ExtractedView, Option<&TemporalJitter>)>,
) {
view_uniforms.uniforms.clear();
for (entity, camera) in &views {
let projection = camera.projection;
for (entity, camera, temporal_jitter) in &views {
let viewport = camera.viewport.as_vec4();
let unjittered_projection = camera.projection;
let mut projection = unjittered_projection;
if let Some(temporal_jitter) = temporal_jitter {
temporal_jitter.jitter_projection(&mut projection, viewport.zw());
}
let inverse_projection = projection.inverse();
let view = camera.transform.compute_matrix();
let inverse_view = view.inverse();
let view_uniforms = ViewUniformOffset {
offset: view_uniforms.uniforms.push(ViewUniform {
view_proj: camera
.view_projection
.unwrap_or_else(|| projection * inverse_view),
unjittered_view_proj: unjittered_projection * inverse_view,
inverse_view_proj: view * inverse_projection,
view,
inverse_view,
projection,
inverse_projection,
world_position: camera.transform.translation(),
viewport: camera.viewport.as_vec4(),
viewport,
color_grading: camera.color_grading,
}),
};

View File

@ -9,6 +9,7 @@ struct ColorGrading {
struct View {
view_proj: mat4x4<f32>,
unjittered_view_proj: mat4x4<f32>,
inverse_view_proj: mat4x4<f32>,
view: mat4x4<f32>,
inverse_view: mat4x4<f32>,

View File

@ -0,0 +1,320 @@
//! This example compares MSAA (Multi-Sample Anti-aliasing), FXAA (Fast Approximate Anti-aliasing), and TAA (Temporal Anti-aliasing).
use std::f32::consts::PI;
use bevy::{
core_pipeline::{
experimental::taa::{
TemporalAntiAliasBundle, TemporalAntiAliasPlugin, TemporalAntiAliasSettings,
},
fxaa::{Fxaa, Sensitivity},
},
pbr::CascadeShadowConfigBuilder,
prelude::*,
render::{
render_resource::{Extent3d, SamplerDescriptor, TextureDimension, TextureFormat},
texture::ImageSampler,
},
};
/// Entry point: builds the app with MSAA disabled by default, the default
/// plugin set plus the TAA plugin, and this example's startup/update systems,
/// then runs it.
fn main() {
    let mut app = App::new();
    app.insert_resource(Msaa::Off);
    app.add_plugins(DefaultPlugins);
    app.add_plugin(TemporalAntiAliasPlugin);
    app.add_systems(Startup, setup);
    app.add_systems(Update, (modify_aa, update_ui));
    app.run();
}
/// Switches the active anti-aliasing method in response to keyboard input.
///
/// Keys 1-4 select No AA / MSAA / FXAA / TAA; Q/W/E (and R/T for FXAA) tune
/// the active method. MSAA is a global resource, while FXAA and TAA are
/// components added to / removed from the single camera entity.
///
/// NOTE(review): Q/W/E are shared between the MSAA sample-count branch and the
/// FXAA sensitivity branch; both branches can run in the same frame if MSAA is
/// on while an Fxaa component is still present — confirm that state is
/// transient-only.
fn modify_aa(
    keys: Res<Input<KeyCode>>,
    mut camera: Query<
        (
            Entity,
            Option<&mut Fxaa>,
            Option<&TemporalAntiAliasSettings>,
        ),
        With<Camera>,
    >,
    mut msaa: ResMut<Msaa>,
    mut commands: Commands,
) {
    // Single camera in this example; grab its entity plus current AA components.
    let (camera_entity, fxaa, taa) = camera.single_mut();
    let mut camera = commands.entity(camera_entity);

    // No AA
    if keys.just_pressed(KeyCode::Key1) {
        *msaa = Msaa::Off;
        camera.remove::<Fxaa>();
        camera.remove::<TemporalAntiAliasBundle>();
    }

    // MSAA
    // Guarded on Msaa::Off so re-pressing 2 does not reset the sample count.
    if keys.just_pressed(KeyCode::Key2) && *msaa == Msaa::Off {
        camera.remove::<Fxaa>();
        camera.remove::<TemporalAntiAliasBundle>();

        *msaa = Msaa::Sample4;
    }

    // MSAA Sample Count
    if *msaa != Msaa::Off {
        if keys.just_pressed(KeyCode::Q) {
            *msaa = Msaa::Sample2;
        }
        if keys.just_pressed(KeyCode::W) {
            *msaa = Msaa::Sample4;
        }
        if keys.just_pressed(KeyCode::E) {
            *msaa = Msaa::Sample8;
        }
    }

    // FXAA
    // Guarded on fxaa.is_none() so re-pressing 3 does not reset the settings.
    if keys.just_pressed(KeyCode::Key3) && fxaa.is_none() {
        *msaa = Msaa::Off;
        camera.remove::<TemporalAntiAliasBundle>();

        camera.insert(Fxaa::default());
    }

    // FXAA Settings
    if let Some(mut fxaa) = fxaa {
        if keys.just_pressed(KeyCode::Q) {
            fxaa.edge_threshold = Sensitivity::Low;
            fxaa.edge_threshold_min = Sensitivity::Low;
        }
        if keys.just_pressed(KeyCode::W) {
            fxaa.edge_threshold = Sensitivity::Medium;
            fxaa.edge_threshold_min = Sensitivity::Medium;
        }
        if keys.just_pressed(KeyCode::E) {
            fxaa.edge_threshold = Sensitivity::High;
            fxaa.edge_threshold_min = Sensitivity::High;
        }
        if keys.just_pressed(KeyCode::R) {
            fxaa.edge_threshold = Sensitivity::Ultra;
            fxaa.edge_threshold_min = Sensitivity::Ultra;
        }
        if keys.just_pressed(KeyCode::T) {
            fxaa.edge_threshold = Sensitivity::Extreme;
            fxaa.edge_threshold_min = Sensitivity::Extreme;
        }
    }

    // TAA
    if keys.just_pressed(KeyCode::Key4) && taa.is_none() {
        *msaa = Msaa::Off;
        camera.remove::<Fxaa>();

        camera.insert(TemporalAntiAliasBundle::default());
    }
}
/// Rebuilds the on-screen help text every frame.
///
/// The currently-active AA method (and its active setting, when MSAA or FXAA
/// is selected) is marked with surrounding asterisks. The whole string is
/// regenerated from scratch into the first text section each frame.
fn update_ui(
    camera: Query<(Option<&Fxaa>, Option<&TemporalAntiAliasSettings>), With<Camera>>,
    msaa: Res<Msaa>,
    mut ui: Query<&mut Text>,
) {
    let (fxaa, taa) = camera.single();

    let mut ui = ui.single_mut();
    let ui = &mut ui.sections[0].value;

    // Method list: exactly one of the four lines is starred.
    *ui = "Antialias Method\n".to_string();

    if *msaa == Msaa::Off && fxaa.is_none() && taa.is_none() {
        ui.push_str("(1) *No AA*\n");
    } else {
        ui.push_str("(1) No AA\n");
    }

    if *msaa != Msaa::Off {
        ui.push_str("(2) *MSAA*\n");
    } else {
        ui.push_str("(2) MSAA\n");
    }

    if fxaa.is_some() {
        ui.push_str("(3) *FXAA*\n");
    } else {
        ui.push_str("(3) FXAA\n");
    }

    if taa.is_some() {
        ui.push_str("(4) *TAA*");
    } else {
        ui.push_str("(4) TAA");
    }

    // MSAA-specific sub-menu: sample count.
    if *msaa != Msaa::Off {
        ui.push_str("\n\n----------\n\nSample Count\n");

        if *msaa == Msaa::Sample2 {
            ui.push_str("(Q) *2*\n");
        } else {
            ui.push_str("(Q) 2\n");
        }
        if *msaa == Msaa::Sample4 {
            ui.push_str("(W) *4*\n");
        } else {
            ui.push_str("(W) 4\n");
        }
        if *msaa == Msaa::Sample8 {
            ui.push_str("(E) *8*");
        } else {
            ui.push_str("(E) 8");
        }
    }

    // FXAA-specific sub-menu: edge-threshold sensitivity.
    if let Some(fxaa) = fxaa {
        ui.push_str("\n\n----------\n\nSensitivity\n");

        if fxaa.edge_threshold == Sensitivity::Low {
            ui.push_str("(Q) *Low*\n");
        } else {
            ui.push_str("(Q) Low\n");
        }

        if fxaa.edge_threshold == Sensitivity::Medium {
            ui.push_str("(W) *Medium*\n");
        } else {
            ui.push_str("(W) Medium\n");
        }

        if fxaa.edge_threshold == Sensitivity::High {
            ui.push_str("(E) *High*\n");
        } else {
            ui.push_str("(E) High\n");
        }

        if fxaa.edge_threshold == Sensitivity::Ultra {
            ui.push_str("(R) *Ultra*\n");
        } else {
            ui.push_str("(R) Ultra\n");
        }

        if fxaa.edge_threshold == Sensitivity::Extreme {
            ui.push_str("(T) *Extreme*");
        } else {
            ui.push_str("(T) Extreme");
        }
    }
}
/// Set up a simple 3D scene
///
/// Spawns: a ground plane, a row of five textured cubes, the Flight Helmet
/// glTF scene, a shadow-casting directional light, an HDR camera, and an
/// (initially empty) UI text node that `update_ui` fills each frame.
fn setup(
    mut commands: Commands,
    mut meshes: ResMut<Assets<Mesh>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
    mut images: ResMut<Assets<Image>>,
    asset_server: Res<AssetServer>,
) {
    // Plane
    commands.spawn(PbrBundle {
        mesh: meshes.add(shape::Plane::from_size(5.0).into()),
        material: materials.add(Color::rgb(0.3, 0.5, 0.3).into()),
        ..default()
    });

    // All cubes share one material textured with the generated UV test pattern.
    let cube_material = materials.add(StandardMaterial {
        base_color_texture: Some(images.add(uv_debug_texture())),
        ..default()
    });

    // Cubes
    for i in 0..5 {
        commands.spawn(PbrBundle {
            mesh: meshes.add(Mesh::from(shape::Cube { size: 0.25 })),
            material: cube_material.clone(),
            transform: Transform::from_xyz(i as f32 * 0.25 - 1.0, 0.125, -i as f32 * 0.5),
            ..default()
        });
    }

    // Flight Helmet
    commands.spawn(SceneBundle {
        scene: asset_server.load("models/FlightHelmet/FlightHelmet.gltf#Scene0"),
        ..default()
    });

    // Light
    commands.spawn(DirectionalLightBundle {
        directional_light: DirectionalLight {
            shadows_enabled: true,
            ..default()
        },
        transform: Transform::from_rotation(Quat::from_euler(
            EulerRot::ZYX,
            0.0,
            PI * -0.15,
            PI * -0.15,
        )),
        // Tight shadow cascades for this small scene.
        cascade_shadow_config: CascadeShadowConfigBuilder {
            maximum_distance: 3.0,
            first_cascade_far_bound: 0.9,
            ..default()
        }
        .into(),
        ..default()
    });

    // Camera
    commands.spawn(Camera3dBundle {
        camera: Camera {
            hdr: true,
            ..default()
        },
        transform: Transform::from_xyz(0.7, 0.7, 1.0).looking_at(Vec3::new(0.0, 0.3, 0.0), Vec3::Y),
        ..default()
    });

    // UI
    // Text starts empty; `update_ui` writes into sections[0] every frame.
    commands.spawn(
        TextBundle::from_section(
            "",
            TextStyle {
                font: asset_server.load("fonts/FiraMono-Medium.ttf"),
                font_size: 20.0,
                color: Color::BLACK,
            },
        )
        .with_style(Style {
            position_type: PositionType::Absolute,
            top: Val::Px(12.0),
            left: Val::Px(12.0),
            ..default()
        }),
    );
}
/// Creates a colorful test pattern
///
/// Builds an 8x8 RGBA8 (sRGB) image where each row is the 8-texel palette
/// rotated one texel further than the row above, producing a diagonal
/// rainbow useful for inspecting UV mapping and texture filtering.
fn uv_debug_texture() -> Image {
    const TEXTURE_SIZE: usize = 8;

    // One row of RGBA texels; rotated in place between rows.
    let mut palette: [u8; 32] = [
        255, 102, 159, 255, 255, 159, 102, 255, 236, 255, 102, 255, 121, 255, 102, 255, 102, 255,
        198, 255, 102, 198, 255, 255, 121, 102, 255, 255, 236, 102, 255, 255,
    ];

    let mut texture_data = [0; TEXTURE_SIZE * TEXTURE_SIZE * 4];
    // Fill row by row; each chunk is exactly one row (TEXTURE_SIZE texels * 4 bytes).
    for row in texture_data.chunks_exact_mut(TEXTURE_SIZE * 4) {
        row.copy_from_slice(&palette);
        palette.rotate_right(4);
    }

    let mut img = Image::new_fill(
        Extent3d {
            width: TEXTURE_SIZE as u32,
            height: TEXTURE_SIZE as u32,
            depth_or_array_layers: 1,
        },
        TextureDimension::D2,
        &texture_data,
        TextureFormat::Rgba8UnormSrgb,
    );
    img.sampler_descriptor = ImageSampler::Descriptor(SamplerDescriptor::default());
    img
}

View File

@ -1,185 +0,0 @@
//! This examples compares MSAA (Multi-Sample Anti-Aliasing) and FXAA (Fast Approximate Anti-Aliasing).
use std::f32::consts::PI;
use bevy::{
core_pipeline::fxaa::{Fxaa, Sensitivity},
pbr::CascadeShadowConfigBuilder,
prelude::*,
render::{
render_resource::{Extent3d, SamplerDescriptor, TextureDimension, TextureFormat},
texture::ImageSampler,
},
};
/// Entry point for the FXAA example: MSAA off by default, default plugins,
/// scene setup at startup, and FXAA toggling every frame.
fn main() {
    App::new()
        // Disable MSAA by default
        .insert_resource(Msaa::Off)
        .add_plugins(DefaultPlugins)
        .add_systems(Startup, setup)
        .add_systems(Update, toggle_fxaa)
        .run();
}
/// set up a simple 3D scene
///
/// Prints the key bindings, then spawns a ground plane, five textured cubes,
/// the Flight Helmet glTF scene, a shadow-casting directional light, and a
/// non-HDR camera that starts with FXAA enabled (default settings).
fn setup(
    mut commands: Commands,
    mut meshes: ResMut<Assets<Mesh>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
    mut images: ResMut<Assets<Image>>,
    asset_server: Res<AssetServer>,
) {
    // Key-binding help, printed once at startup.
    println!("Toggle with:");
    println!("1 - NO AA");
    println!("2 - MSAA 4");
    println!("3 - FXAA (default)");

    println!("Threshold:");
    println!("6 - LOW");
    println!("7 - MEDIUM");
    println!("8 - HIGH (default)");
    println!("9 - ULTRA");
    println!("0 - EXTREME");

    // plane
    commands.spawn(PbrBundle {
        mesh: meshes.add(shape::Plane::from_size(5.0).into()),
        material: materials.add(Color::rgb(0.3, 0.5, 0.3).into()),
        ..default()
    });

    // All cubes share one material textured with the generated UV test pattern.
    let cube_material = materials.add(StandardMaterial {
        base_color_texture: Some(images.add(uv_debug_texture())),
        ..default()
    });

    // cubes
    for i in 0..5 {
        commands.spawn(PbrBundle {
            mesh: meshes.add(Mesh::from(shape::Cube { size: 0.25 })),
            material: cube_material.clone(),
            transform: Transform::from_xyz(i as f32 * 0.25 - 1.0, 0.125, -i as f32 * 0.5),
            ..default()
        });
    }

    // Flight Helmet
    commands.spawn(SceneBundle {
        scene: asset_server.load("models/FlightHelmet/FlightHelmet.gltf#Scene0"),
        ..default()
    });

    // light
    commands.spawn(DirectionalLightBundle {
        directional_light: DirectionalLight {
            shadows_enabled: true,
            ..default()
        },
        transform: Transform::from_rotation(Quat::from_euler(
            EulerRot::ZYX,
            0.0,
            PI * -0.15,
            PI * -0.15,
        )),
        // Tight shadow cascades for this small scene.
        cascade_shadow_config: CascadeShadowConfigBuilder {
            maximum_distance: 3.0,
            first_cascade_far_bound: 0.9,
            ..default()
        }
        .into(),
        ..default()
    });

    // camera
    commands
        .spawn(Camera3dBundle {
            camera: Camera {
                hdr: false, // Works with and without hdr
                ..default()
            },
            transform: Transform::from_xyz(0.7, 0.7, 1.0)
                .looking_at(Vec3::new(0.0, 0.3, 0.0), Vec3::Y),
            ..default()
        })
        .insert(Fxaa::default());
}
/// Toggles between No AA / MSAA 4x / FXAA and adjusts FXAA sensitivity
/// based on number-key input. Applies to every camera carrying an `Fxaa`
/// component.
fn toggle_fxaa(keys: Res<Input<KeyCode>>, mut query: Query<&mut Fxaa>, mut msaa: ResMut<Msaa>) {
    // Sample all key edges once, outside the per-camera loop.
    let set_no_aa = keys.just_pressed(KeyCode::Key1);
    let set_msaa = keys.just_pressed(KeyCode::Key2);
    let set_fxaa = keys.just_pressed(KeyCode::Key3);
    let fxaa_low = keys.just_pressed(KeyCode::Key6);
    let fxaa_med = keys.just_pressed(KeyCode::Key7);
    let fxaa_high = keys.just_pressed(KeyCode::Key8);
    let fxaa_ultra = keys.just_pressed(KeyCode::Key9);
    let fxaa_extreme = keys.just_pressed(KeyCode::Key0);
    // Any sensitivity key also (re-)enables FXAA.
    let set_fxaa = set_fxaa | fxaa_low | fxaa_med | fxaa_high | fxaa_ultra | fxaa_extreme;
    for mut fxaa in &mut query {
        if set_msaa {
            fxaa.enabled = false;
            *msaa = Msaa::Sample4;
            info!("MSAA 4x");
        }
        if set_no_aa {
            fxaa.enabled = false;
            *msaa = Msaa::Off;
            info!("NO AA");
        }
        // NOTE(review): redundant for the set_no_aa case (Msaa::Off was just
        // assigned above); it matters only to force MSAA off when FXAA is chosen.
        if set_no_aa | set_fxaa {
            *msaa = Msaa::Off;
        }
        if fxaa_low {
            fxaa.edge_threshold = Sensitivity::Low;
            fxaa.edge_threshold_min = Sensitivity::Low;
        } else if fxaa_med {
            fxaa.edge_threshold = Sensitivity::Medium;
            fxaa.edge_threshold_min = Sensitivity::Medium;
        } else if fxaa_high {
            fxaa.edge_threshold = Sensitivity::High;
            fxaa.edge_threshold_min = Sensitivity::High;
        } else if fxaa_ultra {
            fxaa.edge_threshold = Sensitivity::Ultra;
            fxaa.edge_threshold_min = Sensitivity::Ultra;
        } else if fxaa_extreme {
            fxaa.edge_threshold = Sensitivity::Extreme;
            fxaa.edge_threshold_min = Sensitivity::Extreme;
        }
        if set_fxaa {
            fxaa.enabled = true;
            *msaa = Msaa::Off;
            info!("FXAA {}", fxaa.edge_threshold.get_str());
        }
    }
}
/// Creates a colorful test pattern
///
/// Builds an 8x8 RGBA8 (sRGB) image where each row is the 8-texel palette
/// rotated one texel further than the row above, producing a diagonal
/// rainbow useful for inspecting UV mapping and texture filtering.
fn uv_debug_texture() -> Image {
    const TEXTURE_SIZE: usize = 8;

    // One row of RGBA texels; rotated in place between rows.
    let mut palette: [u8; 32] = [
        255, 102, 159, 255, 255, 159, 102, 255, 236, 255, 102, 255, 121, 255, 102, 255, 102, 255,
        198, 255, 102, 198, 255, 255, 121, 102, 255, 255, 236, 102, 255, 255,
    ];

    let mut texture_data = [0; TEXTURE_SIZE * TEXTURE_SIZE * 4];
    // Fill row by row; each chunk is exactly one row (TEXTURE_SIZE texels * 4 bytes).
    for row in texture_data.chunks_exact_mut(TEXTURE_SIZE * 4) {
        row.copy_from_slice(&palette);
        palette.rotate_right(4);
    }

    let mut img = Image::new_fill(
        Extent3d {
            width: TEXTURE_SIZE as u32,
            height: TEXTURE_SIZE as u32,
            depth_or_array_layers: 1,
        },
        TextureDimension::D2,
        &texture_data,
        TextureFormat::Rgba8UnormSrgb,
    );
    img.sampler_descriptor = ImageSampler::Descriptor(SamplerDescriptor::default());
    img
}

View File

@ -1,65 +0,0 @@
//! This example shows how to configure Multi-Sample Anti-Aliasing. Setting the sample count higher
//! will result in smoother edges, but it will also increase the cost to render those edges. The
//! range should generally be somewhere between 1 (no multi sampling, but cheap) to 8 (crisp but
//! expensive).
//! Note that web currently only supports 1 or 4 samples.
use bevy::prelude::*;
/// Entry point for the MSAA example: default MSAA setting, default plugins,
/// scene setup at startup, and sample-count cycling every frame.
fn main() {
    App::new()
        // Msaa::default() — setup() logs "Using 4x MSAA", so presumably Sample4; confirm.
        .insert_resource(Msaa::default())
        .add_plugins(DefaultPlugins)
        .add_systems(Startup, setup)
        .add_systems(Update, cycle_msaa)
        .run();
}
/// set up a simple 3D scene
///
/// Logs the key bindings, then spawns a single cube, a point light, and a
/// camera looking at the origin.
fn setup(
    mut commands: Commands,
    mut meshes: ResMut<Assets<Mesh>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
) {
    info!("Press '1/2/4/8' respectively to set MSAA sample count");
    info!("Using 4x MSAA");

    // cube
    commands.spawn(PbrBundle {
        mesh: meshes.add(Mesh::from(shape::Cube { size: 2.0 })),
        material: materials.add(Color::rgb(0.8, 0.7, 0.6).into()),
        ..default()
    });
    // light
    commands.spawn(PointLightBundle {
        transform: Transform::from_xyz(4.0, 8.0, 4.0),
        ..default()
    });
    // camera
    commands.spawn(Camera3dBundle {
        transform: Transform::from_xyz(-3.0, 3.0, 5.0).looking_at(Vec3::ZERO, Vec3::Y),
        ..default()
    });
}
/// Sets the MSAA sample count from keyboard input: keys 1/2/4/8 select
/// Off / 2x / 4x / 8x respectively, logging the choice each time.
fn cycle_msaa(input: Res<Input<KeyCode>>, mut msaa: ResMut<Msaa>) {
    // (key, setting, log message) — checked in ascending key order, matching
    // the original if-chain so simultaneous presses resolve identically.
    let bindings = [
        (KeyCode::Key1, Msaa::Off, "Not using MSAA"),
        (KeyCode::Key2, Msaa::Sample2, "Using 2x MSAA"),
        (KeyCode::Key4, Msaa::Sample4, "Using 4x MSAA"),
        (KeyCode::Key8, Msaa::Sample8, "Using 8x MSAA"),
    ];
    for (key, setting, message) in bindings {
        if input.just_pressed(key) {
            info!("{message}");
            *msaa = setting;
        }
    }
}

View File

@ -111,14 +111,13 @@ Example | Description
[3D Gizmos](../examples/3d/3d_gizmos.rs) | A scene showcasing 3D gizmos
[3D Scene](../examples/3d/3d_scene.rs) | Simple 3D scene with basic shapes and lighting
[3D Shapes](../examples/3d/3d_shapes.rs) | A scene showcasing the built-in 3D shapes
[Anti-aliasing](../examples/3d/anti_aliasing.rs) | Compares different anti-aliasing methods
[Atmospheric Fog](../examples/3d/atmospheric_fog.rs) | A scene showcasing the atmospheric fog effect
[Blend Modes](../examples/3d/blend_modes.rs) | Showcases different blend modes
[FXAA](../examples/3d/fxaa.rs) | Compares MSAA (Multi-Sample Anti-Aliasing) and FXAA (Fast Approximate Anti-Aliasing)
[Fog](../examples/3d/fog.rs) | A scene showcasing the distance fog effect
[Lighting](../examples/3d/lighting.rs) | Illustrates various lighting options in a simple scene
[Lines](../examples/3d/lines.rs) | Create a custom material to draw 3d lines
[Load glTF](../examples/3d/load_gltf.rs) | Loads and renders a glTF file as a scene
[MSAA](../examples/3d/msaa.rs) | Configures MSAA (Multi-Sample Anti-Aliasing) for smoother edges
[Orthographic View](../examples/3d/orthographic.rs) | Shows how to create a 3D orthographic view (for isometric-look in games or CAD applications)
[Parenting](../examples/3d/parenting.rs) | Demonstrates parent->child relationships and relative transformations
[Physically Based Rendering](../examples/3d/pbr.rs) | Demonstrates use of Physically Based Rendering (PBR) properties

View File

@ -1,12 +1,9 @@
//! Bevy has an optional prepass that is controlled per-material. A prepass is a rendering pass that runs before the main pass.
//! It will optionally generate various view textures. Currently it supports depth and normal textures.
//! It will optionally generate various view textures. Currently it supports depth, normal, and motion vector textures.
//! The textures are not generated for any material using alpha blending.
//!
//! # WARNING
//! The prepass currently doesn't work on `WebGL`.
use bevy::{
core_pipeline::prepass::{DepthPrepass, NormalPrepass},
core_pipeline::prepass::{DepthPrepass, MotionVectorPrepass, NormalPrepass},
pbr::{NotShadowCaster, PbrPlugin},
prelude::*,
reflect::TypeUuid,
@ -18,6 +15,7 @@ fn main() {
.add_plugins(DefaultPlugins.set(PbrPlugin {
// The prepass is enabled by default on the StandardMaterial,
// but you can disable it if you need to.
//
// prepass_enabled: false,
..default()
}))
@ -53,6 +51,8 @@ fn setup(
DepthPrepass,
// This will generate a texture containing world normals (with normal maps applied)
NormalPrepass,
// This will generate a texture containing screen space pixel motion vectors
MotionVectorPrepass,
));
// plane
@ -192,6 +192,7 @@ fn rotate(mut q: Query<&mut Transform, With<Rotates>>, time: Res<Time>) {
struct ShowPrepassSettings {
show_depth: u32,
show_normals: u32,
show_motion_vectors: u32,
padding_1: u32,
padding_2: u32,
}
@ -217,30 +218,38 @@ impl Material for PrepassOutputMaterial {
/// Every time you press space, it will cycle between transparent, depth and normals view
fn toggle_prepass_view(
mut prepass_view: Local<u32>,
keycode: Res<Input<KeyCode>>,
material_handle: Query<&Handle<PrepassOutputMaterial>>,
mut materials: ResMut<Assets<PrepassOutputMaterial>>,
mut text: Query<&mut Text>,
) {
if keycode.just_pressed(KeyCode::Space) {
let handle = material_handle.single();
let mat = materials.get_mut(handle).unwrap();
let out_text;
if mat.settings.show_depth == 1 {
out_text = "normal";
mat.settings.show_depth = 0;
mat.settings.show_normals = 1;
} else if mat.settings.show_normals == 1 {
out_text = "transparent";
mat.settings.show_depth = 0;
mat.settings.show_normals = 0;
*prepass_view = (*prepass_view + 1) % 4;
let label = match *prepass_view {
0 => "transparent",
1 => "depth",
2 => "normals",
3 => "motion vectors",
_ => unreachable!(),
};
let text_color = if *prepass_view == 3 {
Color::BLACK
} else {
out_text = "depth";
mat.settings.show_depth = 1;
mat.settings.show_normals = 0;
}
Color::WHITE
};
let mut text = text.single_mut();
text.sections[0].value = format!("Prepass Output: {out_text}\n");
text.sections[0].value = format!("Prepass Output: {label}\n");
for section in &mut text.sections {
section.style.color = text_color;
}
let handle = material_handle.single();
let mat = materials.get_mut(handle).unwrap();
mat.settings.show_depth = (*prepass_view == 1) as u32;
mat.settings.show_normals = (*prepass_view == 2) as u32;
mat.settings.show_motion_vectors = (*prepass_view == 3) as u32;
}
}