Merge branch 'main' into proper-json-schema
Commit 4b17c8463d
@@ -15,6 +15,7 @@ use bevy_utils::{default, prelude::DebugName, TypeIdMap};
 use core::{
     any::{Any, TypeId},
     fmt::{Debug, Write},
+    ops::Range,
 };
 use fixedbitset::FixedBitSet;
 use log::{error, info, warn};
@@ -752,11 +753,31 @@ new_key_type! {
     pub struct SystemSetKey;
 }

+/// A node in a [`ScheduleGraph`] with a system or conditions that have not been
+/// initialized yet.
+///
+/// We have to defer initialization of nodes in the graph until we have
+/// `&mut World` access, so we store these in a list ([`ScheduleGraph::uninit`])
+/// until then. In most cases, initialization occurs upon the first run of the
+/// schedule.
 enum UninitializedId {
+    /// A system and its conditions that have not been initialized yet.
     System(SystemKey),
+    /// A system set's conditions that have not been initialized yet.
     Set {
         key: SystemSetKey,
-        first_uninit_condition: usize,
+        /// The range of indices in [`SystemSets::conditions`] that correspond
+        /// to conditions that have not been initialized yet.
+        ///
+        /// [`SystemSets::conditions`] for a given set may be appended to
+        /// multiple times (e.g. when `configure_sets` is called multiple times with
+        /// the same set), so we need to track which conditions in that list
+        /// are newly added and not yet initialized.
+        ///
+        /// Systems don't need this tracking because each `add_systems` call
+        /// creates separate nodes in the graph with their own conditions,
+        /// so all conditions are initialized together.
+        uninitialized_conditions: Range<usize>,
     },
 }

@@ -793,8 +814,8 @@ pub struct ScheduleGraph {
     pub system_conditions: SecondaryMap<SystemKey, Vec<ConditionWithAccess>>,
     /// Data about system sets in the schedule
     system_sets: SystemSets,
-    /// Systems that have not been initialized yet; for system sets, we store the index of the first uninitialized condition
-    /// (all the conditions after that index still need to be initialized)
+    /// Systems, their conditions, and system set conditions that need to be
+    /// initialized before the schedule can be run.
    uninit: Vec<UninitializedId>,
     /// Directed acyclic graph of the hierarchy (which systems/sets are children of which sets)
     hierarchy: Dag,
@@ -807,7 +828,6 @@ pub struct ScheduleGraph {
     anonymous_sets: usize,
     changed: bool,
     settings: ScheduleBuildSettings,
-
     passes: BTreeMap<TypeId, Box<dyn ScheduleBuildPassObj>>,
 }

@@ -1101,9 +1121,10 @@ impl ScheduleGraph {

         // system init has to be deferred (need `&mut World`)
         let system_set_conditions = self.system_sets.conditions.entry(key).unwrap().or_default();
+        let start = system_set_conditions.len();
         self.uninit.push(UninitializedId::Set {
             key,
-            first_uninit_condition: system_set_conditions.len(),
+            uninitialized_conditions: start..(start + conditions.len()),
         });
         system_set_conditions.extend(conditions.into_iter().map(ConditionWithAccess::new));

@@ -1189,11 +1210,9 @@ impl ScheduleGraph {
                 }
                 UninitializedId::Set {
                     key,
-                    first_uninit_condition,
+                    uninitialized_conditions,
                 } => {
-                    for condition in self.system_sets.conditions[key]
-                        .iter_mut()
-                        .skip(first_uninit_condition)
+                    for condition in &mut self.system_sets.conditions[key][uninitialized_conditions]
                     {
                         condition.access = condition.condition.initialize(world);
                     }
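The motivation for the `Range<usize>` bookkeeping above can be shown with a small, self-contained Rust sketch. This is not the Bevy code itself; `Conditions`, its fields, and `initialize_pending` are hypothetical stand-ins for `SystemSets::conditions` and the `uninit` list:

```rust
use core::ops::Range;

/// Hypothetical stand-in for one system set's condition list.
struct Conditions {
    items: Vec<String>,
    /// Ranges of items appended since the last initialization pass.
    uninitialized: Vec<Range<usize>>,
}

impl Conditions {
    fn new() -> Self {
        Self { items: Vec::new(), uninitialized: Vec::new() }
    }

    /// Conditions may be appended several times for the same set (like repeated
    /// `configure_sets` calls), so each append records exactly the range it added.
    fn append(&mut self, new_items: impl IntoIterator<Item = String>) {
        let start = self.items.len();
        self.items.extend(new_items);
        self.uninitialized.push(start..self.items.len());
    }

    /// Only the recorded ranges are visited; items initialized by an earlier
    /// pass are never touched again.
    fn initialize_pending(&mut self) {
        for range in self.uninitialized.drain(..) {
            for item in &mut self.items[range] {
                item.push_str(" (initialized)");
            }
        }
    }
}

fn main() {
    let mut conditions = Conditions::new();
    conditions.append(["a".to_string(), "b".to_string()]);
    conditions.initialize_pending();
    conditions.append(["c".to_string()]);
    conditions.initialize_pending();
    assert_eq!(conditions.items[2], "c (initialized)");
    println!("{:?}", conditions.items);
}
```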
@@ -386,6 +386,23 @@ pub struct StandardMaterial {
     ///
     /// [`Mesh::generate_tangents`]: bevy_render::mesh::Mesh::generate_tangents
     /// [`Mesh::with_generated_tangents`]: bevy_render::mesh::Mesh::with_generated_tangents
+    ///
+    /// # Usage
+    ///
+    /// ```
+    /// # use bevy_asset::{AssetServer, Handle};
+    /// # use bevy_ecs::change_detection::Res;
+    /// # use bevy_image::{Image, ImageLoaderSettings};
+    /// #
+    /// fn load_normal_map(asset_server: Res<AssetServer>) {
+    ///     let normal_handle: Handle<Image> = asset_server.load_with_settings(
+    ///         "textures/parallax_example/cube_normal.png",
+    ///         // The normal map texture is in linear color space. Lighting won't look correct
+    ///         // if `is_srgb` is `true`, which is the default.
+    ///         |settings: &mut ImageLoaderSettings| settings.is_srgb = false,
+    ///     );
+    /// }
+    /// ```
     #[texture(9)]
     #[sampler(10)]
     #[dependency]
@@ -3016,7 +3016,7 @@ impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetMeshBindGroup<I> {
             );
         };

-        let mut dynamic_offsets: [u32; 3] = Default::default();
+        let mut dynamic_offsets: [u32; 5] = Default::default();
         let mut offset_count = 0;
         if let PhaseItemExtraIndex::DynamicOffset(dynamic_offset) = item.extra_index() {
             dynamic_offsets[offset_count] = dynamic_offset;
@@ -377,7 +377,6 @@ fn pbr_input_from_standard_material(
     var perceptual_roughness: f32 = pbr_bindings::material.perceptual_roughness;
 #endif // BINDLESS

-    let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness);
 #ifdef VERTEX_UVS
     if ((flags & pbr_types::STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) {
         let metallic_roughness =
@@ -627,7 +626,7 @@ fn pbr_input_from_standard_material(
     var specular_occlusion: f32 = 1.0;
 #ifdef VERTEX_UVS
     if ((flags & pbr_types::STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) {
-        diffuse_occlusion *=
+        diffuse_occlusion *=
 #ifdef MESHLET_MESH_MATERIAL_PASS
             textureSampleGrad(
 #else   // MESHLET_MESH_MATERIAL_PASS
@@ -660,7 +659,8 @@ fn pbr_input_from_standard_material(
         diffuse_occlusion = min(diffuse_occlusion, ssao_multibounce);
         // Use SSAO to estimate the specular occlusion.
         // Lagarde and Rousiers 2014, "Moving Frostbite to Physically Based Rendering"
-        specular_occlusion = saturate(pow(NdotV + ssao, exp2(-16.0 * roughness - 1.0)) - 1.0 + ssao);
+        let roughness = lighting::perceptualRoughnessToRoughness(pbr_input.material.perceptual_roughness);
+        specular_occlusion = saturate(pow(NdotV + ssao, exp2(-16.0 * roughness - 1.0)) - 1.0 + ssao);
 #endif
     pbr_input.diffuse_occlusion = diffuse_occlusion;
     pbr_input.specular_occlusion = specular_occlusion;
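For reviewers skimming the shader math: with AO the screen-space ambient-occlusion value and ρ the roughness computed on the newly added line, the assignment above is the Lagarde–Rousiers estimate

$$\text{specular\_occlusion} = \operatorname{saturate}\!\left((N \cdot V + \text{AO})^{\,2^{-16\rho - 1}} - 1 + \text{AO}\right)$$

so this hunk changes which roughness feeds the exponent (it is now recomputed from `pbr_input.material.perceptual_roughness` right before use), not the shape of the estimate.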
@@ -235,16 +235,13 @@ fn fresnel(f0: vec3<f32>, LdotH: f32) -> vec3<f32> {
 // Multiscattering approximation:
 // <https://google.github.io/filament/Filament.html#listing_energycompensationimpl>
 fn specular_multiscatter(
-    input: ptr<function, LightingInput>,
     D: f32,
     V: f32,
     F: vec3<f32>,
+    F0: vec3<f32>,
+    F_ab: vec2<f32>,
     specular_intensity: f32,
 ) -> vec3<f32> {
-    // Unpack.
-    let F0 = (*input).F0_;
-    let F_ab = (*input).F_ab;
-
     var Fr = (specular_intensity * D * V) * F;
     Fr *= 1.0 + F0 * (1.0 / F_ab.x - 1.0);
     return Fr;
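The body above is Filament's multiscattering energy compensation; the refactor only changes how `F0` and `F_ab` reach the function (explicit parameters instead of reads through the `LightingInput` pointer), not the math itself, which stays

$$F_r = (\text{specular\_intensity} \cdot D \cdot V)\,F \cdot \left(1 + F_0\left(\tfrac{1}{F_{ab}.x} - 1\right)\right).$$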
@@ -329,7 +326,7 @@ fn specular(
     let F = fresnel(F0, LdotH);

     // Calculate the specular light.
-    let Fr = specular_multiscatter(input, D, V, F, specular_intensity);
+    let Fr = specular_multiscatter(D, V, F, F0, (*input).F_ab, specular_intensity);
     return Fr;
 }

@@ -397,7 +394,7 @@ fn specular_anisotropy(
     let Fa = fresnel(F0, LdotH);

     // Calculate the specular light.
-    let Fr = specular_multiscatter(input, Da, Va, Fa, specular_intensity);
+    let Fr = specular_multiscatter(Da, Va, Fa, F0, (*input).F_ab, specular_intensity);
     return Fr;
 }

@@ -482,7 +479,7 @@ fn cubemap_uv(direction: vec3<f32>, cubemap_type: u32) -> vec2<f32> {
         ),
         max_axis != abs_direction.x
     );
-
+
     var face_uv: vec2<f32>;
     var divisor: f32;
     var corner_uv: vec2<u32> = vec2(0, 0);
@@ -500,12 +497,12 @@ fn cubemap_uv(direction: vec3<f32>, cubemap_type: u32) -> vec2<f32> {
     face_uv = (face_uv / divisor) * 0.5 + 0.5;

     switch cubemap_type {
-        case CUBEMAP_TYPE_CROSS_VERTICAL: {
-            face_size = vec2(1.0/3.0, 1.0/4.0);
+        case CUBEMAP_TYPE_CROSS_VERTICAL: {
+            face_size = vec2(1.0/3.0, 1.0/4.0);
             corner_uv = vec2<u32>((0x111102u >> (4 * face_index)) & 0xFu, (0x132011u >> (4 * face_index)) & 0xFu);
         }
-        case CUBEMAP_TYPE_CROSS_HORIZONTAL: {
-            face_size = vec2(1.0/4.0, 1.0/3.0);
+        case CUBEMAP_TYPE_CROSS_HORIZONTAL: {
+            face_size = vec2(1.0/4.0, 1.0/3.0);
             corner_uv = vec2<u32>((0x131102u >> (4 * face_index)) & 0xFu, (0x112011u >> (4 * face_index)) & 0xFu);
         }
         case CUBEMAP_TYPE_SEQUENCE_HORIZONTAL: {
@@ -765,7 +762,7 @@ fn directional_light(
                 view_bindings::clustered_decal_sampler,
                 decal_uv - floor(decal_uv),
                 0.0
-            ).r;
+            ).r;
         } else {
             texture_sample = 0f;
         }
@@ -422,11 +422,7 @@ fn sample_shadow_cubemap_gaussian(
 ) -> f32 {
     // Create an orthonormal basis so we can apply a 2D sampling pattern to a
     // cubemap.
-    var up = vec3(0.0, 1.0, 0.0);
-    if (dot(up, normalize(light_local)) > 0.99) {
-        up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis.
-    }
-    let basis = orthonormalize(light_local, up) * scale * distance_to_light;
+    let basis = orthonormalize(normalize(light_local)) * scale * distance_to_light;

     var sum: f32 = 0.0;
     sum += sample_shadow_cubemap_at_offset(
@@ -469,11 +465,7 @@ fn sample_shadow_cubemap_jittered(
 ) -> f32 {
     // Create an orthonormal basis so we can apply a 2D sampling pattern to a
     // cubemap.
-    var up = vec3(0.0, 1.0, 0.0);
-    if (dot(up, normalize(light_local)) > 0.99) {
-        up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis.
-    }
-    let basis = orthonormalize(light_local, up) * scale * distance_to_light;
+    let basis = orthonormalize(normalize(light_local)) * scale * distance_to_light;

     let rotation_matrix = random_rotation_matrix(vec2(1.0), temporal);

@@ -553,11 +545,7 @@ fn search_for_blockers_in_shadow_cubemap(
 ) -> f32 {
     // Create an orthonormal basis so we can apply a 2D sampling pattern to a
     // cubemap.
-    var up = vec3(0.0, 1.0, 0.0);
-    if (dot(up, normalize(light_local)) > 0.99) {
-        up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis.
-    }
-    let basis = orthonormalize(light_local, up) * scale * distance_to_light;
+    let basis = orthonormalize(normalize(light_local)) * scale * distance_to_light;

     var sum: vec2<f32> = vec2(0.0);
     sum += search_for_blockers_in_shadow_cubemap_at_offset(
@@ -63,17 +63,19 @@ fn mat4x4_to_mat3x3(m: mat4x4<f32>) -> mat3x3<f32> {
     return mat3x3<f32>(m[0].xyz, m[1].xyz, m[2].xyz);
 }

-// Creates an orthonormal basis given a Z vector and an up vector (which becomes
-// Y after orthonormalization).
+// Creates an orthonormal basis given a normalized Z vector.
 //
 // The results are equivalent to the Gram-Schmidt process [1].
 //
 // [1]: https://math.stackexchange.com/a/1849294
-fn orthonormalize(z_unnormalized: vec3<f32>, up: vec3<f32>) -> mat3x3<f32> {
-    let z_basis = normalize(z_unnormalized);
-    let x_basis = normalize(cross(z_basis, up));
-    let y_basis = cross(z_basis, x_basis);
-    return mat3x3(x_basis, y_basis, z_basis);
+fn orthonormalize(z_normalized: vec3<f32>) -> mat3x3<f32> {
+    var up = vec3(0.0, 1.0, 0.0);
+    if (abs(dot(up, z_normalized)) > 0.99) {
+        up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis.
+    }
+    let x_basis = normalize(cross(z_normalized, up));
+    let y_basis = cross(z_normalized, x_basis);
+    return mat3x3(x_basis, y_basis, z_normalized);
 }

 // Returns true if any part of a sphere is on the positive side of a plane.
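For reference when reading the shadow-sampling callers above, here is a rough Rust translation of the new single-argument `orthonormalize`, written against the `glam` crate purely for illustration (it is not part of this diff):

```rust
use glam::{Mat3, Vec3};

/// Builds an orthonormal basis whose third column is `z_normalized`.
/// Like the WGSL version, `z_normalized` is assumed to already be unit length.
fn orthonormalize(z_normalized: Vec3) -> Mat3 {
    // Pick an up vector that is not (nearly) parallel to Z to avoid a degenerate cross product.
    let up = if z_normalized.dot(Vec3::Y).abs() > 0.99 {
        Vec3::X
    } else {
        Vec3::Y
    };
    let x_basis = z_normalized.cross(up).normalize();
    let y_basis = z_normalized.cross(x_basis);
    Mat3::from_cols(x_basis, y_basis, z_normalized)
}

fn main() {
    // A direction that would be degenerate against the default up vector.
    let basis = orthonormalize(Vec3::new(0.0, 1.0, 0.0));
    // The columns should be mutually orthogonal unit vectors.
    assert!(basis.x_axis.dot(basis.y_axis).abs() < 1e-6);
    assert!((basis.x_axis.length() - 1.0).abs() < 1e-6);
    println!("{basis:?}");
}
```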
@@ -7,7 +7,7 @@ use bevy_tasks::ComputeTaskPool;
 use bevy_utils::WgpuWrapper;
 pub use graph_runner::*;
 pub use render_device::*;
-use tracing::{debug, error, info, info_span, trace, warn};
+use tracing::{debug, error, info, info_span, warn};

 use crate::{
     diagnostic::{internal::DiagnosticsRecorder, RecordDiagnostics},
@@ -145,6 +145,33 @@ const GPU_NOT_FOUND_ERROR_MESSAGE: &str = if cfg!(target_os = "linux") {
     "Unable to find a GPU! Make sure you have installed required drivers!"
 };

+#[cfg(not(target_family = "wasm"))]
+fn find_adapter_by_name(
+    instance: &Instance,
+    options: &WgpuSettings,
+    compatible_surface: Option<&wgpu::Surface<'_>>,
+    adapter_name: &str,
+) -> Option<Adapter> {
+    for adapter in
+        instance.enumerate_adapters(options.backends.expect(
+            "The `backends` field of `WgpuSettings` must be set to use a specific adapter.",
+        ))
+    {
+        tracing::trace!("Checking adapter: {:?}", adapter.get_info());
+        let info = adapter.get_info();
+        if let Some(surface) = compatible_surface {
+            if !adapter.is_surface_supported(surface) {
+                continue;
+            }
+        }
+
+        if info.name.eq_ignore_ascii_case(adapter_name) {
+            return Some(adapter);
+        }
+    }
+    None
+}
+
 /// Initializes the renderer by retrieving and preparing the GPU instance, device and queue
 /// for the specified backend.
 pub async fn initialize_renderer(
@@ -153,36 +180,30 @@ pub async fn initialize_renderer(
     request_adapter_options: &RequestAdapterOptions<'_, '_>,
     desired_adapter_name: Option<String>,
 ) -> (RenderDevice, RenderQueue, RenderAdapterInfo, RenderAdapter) {
+    #[cfg(not(target_family = "wasm"))]
+    let mut selected_adapter = desired_adapter_name.and_then(|adapter_name| {
+        find_adapter_by_name(
+            instance,
+            options,
+            request_adapter_options.compatible_surface,
+            &adapter_name,
+        )
+    });
+    #[cfg(target_family = "wasm")]
     let mut selected_adapter = None;
-    if let Some(adapter_name) = &desired_adapter_name {
-        debug!("Searching for adapter with name: {}", adapter_name);
-        for adapter in instance.enumerate_adapters(options.backends.expect(
-            "The `backends` field of `WgpuSettings` must be set to use a specific adapter.",
-        )) {
-            trace!("Checking adapter: {:?}", adapter.get_info());
-            let info = adapter.get_info();
-            if let Some(surface) = request_adapter_options.compatible_surface {
-                if !adapter.is_surface_supported(surface) {
-                    continue;
-                }
-            }
-
-            if info
-                .name
-                .to_lowercase()
-                .contains(&adapter_name.to_lowercase())
-            {
-                selected_adapter = Some(adapter);
-                break;
-            }
-        }
-    } else {
+
+    #[cfg(target_family = "wasm")]
+    if desired_adapter_name.is_some() {
+        warn!("Choosing an adapter is not supported on wasm.");
+    }
+
+    if selected_adapter.is_none() {
         debug!(
             "Searching for adapter with options: {:?}",
             request_adapter_options
         );
         selected_adapter = instance.request_adapter(request_adapter_options).await.ok();
-    };
+    }

     let adapter = selected_adapter.expect(GPU_NOT_FOUND_ERROR_MESSAGE);
     let adapter_info = adapter.get_info();
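One behavioral detail worth calling out: the removed loop matched the requested adapter name by case-insensitive substring, while the new `find_adapter_by_name` requires a case-insensitive exact match. A std-only sketch (adapter names invented for illustration):

```rust
fn main() {
    let adapter_names = ["NVIDIA GeForce RTX 3080", "llvmpipe (LLVM 15.0.7, 256 bits)"];
    let requested = "llvmpipe";

    // Old behavior: case-insensitive substring match.
    let old_match = adapter_names
        .iter()
        .find(|name| name.to_lowercase().contains(&requested.to_lowercase()));

    // New behavior: case-insensitive exact match, as in `find_adapter_by_name`.
    let new_match = adapter_names
        .iter()
        .find(|name| name.eq_ignore_ascii_case(requested));

    // The substring match finds the llvmpipe adapter; the exact match does not.
    println!("old: {old_match:?}, new: {new_match:?}");
}
```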
@@ -1,10 +1,12 @@
-use crate::{ui_transform::UiGlobalTransform, ComputedNode, ComputedNodeTarget, Node, UiStack};
+use crate::{
+    ui_transform::UiGlobalTransform, ComputedNode, ComputedNodeTarget, Node, OverrideClip, UiStack,
+};
 use bevy_ecs::{
     change_detection::DetectChangesMut,
     entity::{ContainsEntity, Entity},
     hierarchy::ChildOf,
     prelude::{Component, With},
-    query::QueryData,
+    query::{QueryData, Without},
     reflect::ReflectComponent,
     system::{Local, Query, Res},
 };
@@ -157,7 +159,7 @@ pub fn ui_focus_system(
     ui_stack: Res<UiStack>,
     mut node_query: Query<NodeQuery>,
     clipping_query: Query<(&ComputedNode, &UiGlobalTransform, &Node)>,
-    child_of_query: Query<&ChildOf>,
+    child_of_query: Query<&ChildOf, Without<OverrideClip>>,
 ) {
     let primary_window = primary_window.iter().next();

@@ -325,11 +327,12 @@
 }

 /// Walk up the tree child-to-parent checking that `point` is not clipped by any ancestor node.
+/// If `entity` has an [`OverrideClip`] component it ignores any inherited clipping and returns true.
 pub fn clip_check_recursive(
     point: Vec2,
     entity: Entity,
     clipping_query: &Query<'_, '_, (&ComputedNode, &UiGlobalTransform, &Node)>,
-    child_of_query: &Query<&ChildOf>,
+    child_of_query: &Query<&ChildOf, Without<OverrideClip>>,
 ) -> bool {
     if let Ok(child_of) = child_of_query.get(entity) {
         let parent = child_of.0;
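A minimal `bevy_ecs` sketch of why the `Without<OverrideClip>` filter on `child_of_query` is enough to stop the upward clip walk. `ParentLink` and the locally defined `OverrideClip` are hypothetical stand-ins for the engine's `ChildOf` and the UI `OverrideClip` components; this illustrates the filter semantics, not the actual UI code:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct OverrideClip;

/// Hypothetical stand-in for the engine's `ChildOf` relationship component.
#[derive(Component)]
struct ParentLink(Entity);

fn main() {
    let mut world = World::new();
    let root = world.spawn_empty().id();
    let clipped_child = world.spawn(ParentLink(root)).id();
    let override_child = world.spawn((ParentLink(root), OverrideClip)).id();

    // A query with a `Without<OverrideClip>` filter simply fails to match entities
    // that carry `OverrideClip`, so a walk driven by `child_of_query.get(entity)`
    // terminates immediately for them and inherited clipping is ignored.
    let mut child_of_query = world.query_filtered::<&ParentLink, Without<OverrideClip>>();
    assert!(child_of_query.get(&world, clipped_child).is_ok());
    assert!(child_of_query.get(&world, override_child).is_err());
}
```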
@@ -44,6 +44,24 @@ impl ColorStop {
         }
     }

+    /// A color stop with its position in logical pixels.
+    pub fn px(color: impl Into<Color>, px: f32) -> Self {
+        Self {
+            color: color.into(),
+            point: Val::Px(px),
+            hint: 0.5,
+        }
+    }
+
+    /// A color stop with a percentage position.
+    pub fn percent(color: impl Into<Color>, percent: f32) -> Self {
+        Self {
+            color: color.into(),
+            point: Val::Percent(percent),
+            hint: 0.5,
+        }
+    }
+
     // Set the interpolation midpoint between this and the following stop
     pub fn with_hint(mut self, hint: f32) -> Self {
         self.hint = hint;
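A hedged usage sketch of the new constructors; the import paths below are assumptions (they depend on the bevy_ui/bevy_color versions in use), while the `px`, `percent`, and `with_hint` calls mirror the code above:

```rust
// Assumed imports; adjust paths to match the crate versions in your workspace.
use bevy_color::Color;
use bevy_ui::gradients::ColorStop;

fn gradient_stops() -> Vec<ColorStop> {
    vec![
        // A stop 8 logical pixels along the gradient line, with the blend
        // midpoint pulled toward it (the hint defaults to 0.5).
        ColorStop::px(Color::WHITE, 8.0).with_hint(0.25),
        // A stop at 100% of the gradient line.
        ColorStop::percent(Color::BLACK, 100.0),
    ]
}
```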
@@ -109,7 +109,7 @@ pub fn ui_picking(
     node_query: Query<NodeQuery>,
     mut output: EventWriter<PointerHits>,
     clipping_query: Query<(&ComputedNode, &UiGlobalTransform, &Node)>,
-    child_of_query: Query<&ChildOf>,
+    child_of_query: Query<&ChildOf, Without<OverrideClip>>,
 ) {
     // For each camera, the pointer and its position
     let mut pointer_pos_by_camera = HashMap::<Entity, HashMap<PointerId, Vec2>>::default();
release-content/release-notes/scene-type-crates.md (new file)
@@ -0,0 +1,7 @@
+---
+title: Define scenes without depending on bevy_render
+authors: ["@atlv24"]
+pull_requests: [19997, 19991, 20000, 19949, 19943, 19953]
+---
+
+It is now possible to use cameras, lights, and meshes without depending on the Bevy renderer. This makes it possible for third-party custom renderers to be drop-in replacements for rendering existing scenes.