Merge branch 'main' into typed_rpc_commands
commit a550b92daf
@@ -15,6 +15,7 @@ use bevy_utils::{default, prelude::DebugName, TypeIdMap};
 use core::{
     any::{Any, TypeId},
     fmt::{Debug, Write},
+    ops::Range,
 };
 use fixedbitset::FixedBitSet;
 use log::{error, info, warn};
@@ -752,11 +753,31 @@ new_key_type! {
     pub struct SystemSetKey;
 }
 
+/// A node in a [`ScheduleGraph`] with a system or conditions that have not been
+/// initialized yet.
+///
+/// We have to defer initialization of nodes in the graph until we have
+/// `&mut World` access, so we store these in a list ([`ScheduleGraph::uninit`])
+/// until then. In most cases, initialization occurs upon the first run of the
+/// schedule.
 enum UninitializedId {
+    /// A system and its conditions that have not been initialized yet.
     System(SystemKey),
+    /// A system set's conditions that have not been initialized yet.
     Set {
         key: SystemSetKey,
-        first_uninit_condition: usize,
+        /// The range of indices in [`SystemSets::conditions`] that correspond
+        /// to conditions that have not been initialized yet.
+        ///
+        /// [`SystemSets::conditions`] for a given set may be appended to
+        /// multiple times (e.g. when `configure_sets` is called multiple times
+        /// with the same set), so we need to track which conditions in that
+        /// list are newly added and not yet initialized.
+        ///
+        /// Systems don't need this tracking because each `add_systems` call
+        /// creates separate nodes in the graph with their own conditions,
+        /// so all conditions are initialized together.
+        uninitialized_conditions: Range<usize>,
     },
 }
 
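The `uninitialized_conditions: Range<usize>` field replaces the old single `first_uninit_condition` index. A minimal standalone sketch (hypothetical `SetRecord`/`Condition` types, not the real schedule structures) shows why a range is the right shape: each `configure_sets`-style append records only its own slice, so a later deferred-initialization pass never revisits conditions that an earlier run already initialized.

    use core::ops::Range;

    // Simplified stand-ins for the real schedule types (hypothetical names).
    struct Condition { initialized: bool }

    struct SetRecord {
        conditions: Vec<Condition>, // appended to on every `configure_sets`-style call
        uninit: Vec<Range<usize>>,  // ranges of not-yet-initialized conditions
    }

    impl SetRecord {
        // Queue newly added conditions; only the appended slice is marked uninitialized.
        fn add_conditions(&mut self, new: Vec<Condition>) {
            let start = self.conditions.len();
            self.uninit.push(start..(start + new.len()));
            self.conditions.extend(new);
        }

        // Deferred initialization: touch only the ranges recorded above.
        fn initialize(&mut self) {
            for range in self.uninit.drain(..) {
                for condition in &mut self.conditions[range] {
                    condition.initialized = true;
                }
            }
        }
    }

    fn main() {
        let mut set = SetRecord { conditions: Vec::new(), uninit: Vec::new() };
        set.add_conditions(vec![Condition { initialized: false }]);
        set.initialize(); // first batch initialized
        set.add_conditions(vec![Condition { initialized: false }]);
        set.initialize(); // only the newly recorded slice is visited
        assert!(set.conditions.iter().all(|c| c.initialized));
    }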
@@ -793,8 +814,8 @@ pub struct ScheduleGraph {
     pub system_conditions: SecondaryMap<SystemKey, Vec<ConditionWithAccess>>,
     /// Data about system sets in the schedule
     system_sets: SystemSets,
-    /// Systems that have not been initialized yet; for system sets, we store the index of the first uninitialized condition
-    /// (all the conditions after that index still need to be initialized)
+    /// Systems, their conditions, and system set conditions that need to be
+    /// initialized before the schedule can be run.
     uninit: Vec<UninitializedId>,
     /// Directed acyclic graph of the hierarchy (which systems/sets are children of which sets)
     hierarchy: Dag,
@@ -807,7 +828,6 @@ pub struct ScheduleGraph {
     anonymous_sets: usize,
     changed: bool,
     settings: ScheduleBuildSettings,
-
     passes: BTreeMap<TypeId, Box<dyn ScheduleBuildPassObj>>,
 }
 
@@ -1101,9 +1121,10 @@ impl ScheduleGraph {
 
         // system init has to be deferred (need `&mut World`)
         let system_set_conditions = self.system_sets.conditions.entry(key).unwrap().or_default();
+        let start = system_set_conditions.len();
         self.uninit.push(UninitializedId::Set {
             key,
-            first_uninit_condition: system_set_conditions.len(),
+            uninitialized_conditions: start..(start + conditions.len()),
         });
         system_set_conditions.extend(conditions.into_iter().map(ConditionWithAccess::new));
 
@@ -1189,11 +1210,9 @@ impl ScheduleGraph {
                 }
                 UninitializedId::Set {
                     key,
-                    first_uninit_condition,
+                    uninitialized_conditions,
                 } => {
-                    for condition in self.system_sets.conditions[key]
-                        .iter_mut()
-                        .skip(first_uninit_condition)
+                    for condition in &mut self.system_sets.conditions[key][uninitialized_conditions]
                     {
                         condition.access = condition.condition.initialize(world);
                     }
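The loop above now indexes the stored conditions with the recorded range instead of `.iter_mut().skip(first_uninit_condition)`. A toy comparison (plain integers standing in for conditions) illustrates the idiom; the bounded slice is what lets the schedule touch only the entries recorded for this batch:

    fn main() {
        // Illustrative only: why a bounded Range replaces `.iter_mut().skip(start)`.
        let mut conditions = vec![10, 20, 30, 40];

        // Old approach: everything from `start` to the current end of the list.
        let start = 2;
        for c in conditions.iter_mut().skip(start) {
            *c += 1;
        }

        // New approach: only the recorded slice, here assumed to be 2..4.
        let uninitialized_conditions = 2..4;
        for c in &mut conditions[uninitialized_conditions] {
            *c += 1;
        }

        assert_eq!(conditions, vec![10, 20, 32, 42]);
    }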
@@ -422,11 +422,7 @@ fn sample_shadow_cubemap_gaussian(
 ) -> f32 {
     // Create an orthonormal basis so we can apply a 2D sampling pattern to a
     // cubemap.
-    var up = vec3(0.0, 1.0, 0.0);
-    if (dot(up, normalize(light_local)) > 0.99) {
-        up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis.
-    }
-    let basis = orthonormalize(light_local, up) * scale * distance_to_light;
+    let basis = orthonormalize(normalize(light_local)) * scale * distance_to_light;
 
     var sum: f32 = 0.0;
     sum += sample_shadow_cubemap_at_offset(
@@ -469,11 +465,7 @@ fn sample_shadow_cubemap_jittered(
 ) -> f32 {
     // Create an orthonormal basis so we can apply a 2D sampling pattern to a
     // cubemap.
-    var up = vec3(0.0, 1.0, 0.0);
-    if (dot(up, normalize(light_local)) > 0.99) {
-        up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis.
-    }
-    let basis = orthonormalize(light_local, up) * scale * distance_to_light;
+    let basis = orthonormalize(normalize(light_local)) * scale * distance_to_light;
 
     let rotation_matrix = random_rotation_matrix(vec2(1.0), temporal);
 
@@ -553,11 +545,7 @@ fn search_for_blockers_in_shadow_cubemap(
 ) -> f32 {
     // Create an orthonormal basis so we can apply a 2D sampling pattern to a
     // cubemap.
-    var up = vec3(0.0, 1.0, 0.0);
-    if (dot(up, normalize(light_local)) > 0.99) {
-        up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis.
-    }
-    let basis = orthonormalize(light_local, up) * scale * distance_to_light;
+    let basis = orthonormalize(normalize(light_local)) * scale * distance_to_light;
 
     var sum: vec2<f32> = vec2(0.0);
     sum += search_for_blockers_in_shadow_cubemap_at_offset(
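All three cubemap sampling functions above keep the same idea: build an orthonormal basis around the lookup direction so a 2D offset pattern can be applied to a cubemap. A rough Rust sketch of that step (hypothetical names, array math instead of WGSL `vec3`, not the exact shader arithmetic) is:

    // An orthonormal basis around the light-to-fragment direction lets a 2D offset
    // pattern be applied to a cubemap lookup by nudging the sample direction inside
    // the plane spanned by the basis' first two vectors.
    fn offset_sample_direction(light_local: [f32; 3], basis: [[f32; 3]; 3], offset: [f32; 2]) -> [f32; 3] {
        // basis[0]/basis[1] span the plane perpendicular to the lookup direction;
        // basis[2] is the lookup direction itself and is left untouched here.
        let mut dir = [0.0f32; 3];
        for i in 0..3 {
            dir[i] = light_local[i] + basis[0][i] * offset[0] + basis[1][i] * offset[1];
        }
        dir
    }

    fn main() {
        // A zero offset leaves the lookup direction unchanged.
        let basis = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]];
        assert_eq!(offset_sample_direction([0.0, 0.0, 1.0], basis, [0.0, 0.0]), [0.0, 0.0, 1.0]);
    }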
@@ -63,17 +63,19 @@ fn mat4x4_to_mat3x3(m: mat4x4<f32>) -> mat3x3<f32> {
     return mat3x3<f32>(m[0].xyz, m[1].xyz, m[2].xyz);
 }
 
-// Creates an orthonormal basis given a Z vector and an up vector (which becomes
-// Y after orthonormalization).
+// Creates an orthonormal basis given a normalized Z vector.
 //
 // The results are equivalent to the Gram-Schmidt process [1].
 //
 // [1]: https://math.stackexchange.com/a/1849294
-fn orthonormalize(z_unnormalized: vec3<f32>, up: vec3<f32>) -> mat3x3<f32> {
-    let z_basis = normalize(z_unnormalized);
-    let x_basis = normalize(cross(z_basis, up));
-    let y_basis = cross(z_basis, x_basis);
-    return mat3x3(x_basis, y_basis, z_basis);
+fn orthonormalize(z_normalized: vec3<f32>) -> mat3x3<f32> {
+    var up = vec3(0.0, 1.0, 0.0);
+    if (abs(dot(up, z_normalized)) > 0.99) {
+        up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis.
+    }
+    let x_basis = normalize(cross(z_normalized, up));
+    let y_basis = cross(z_normalized, x_basis);
+    return mat3x3(x_basis, y_basis, z_normalized);
 }
 
 // Returns true if any part of a sphere is on the positive side of a plane.
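For reference, the new single-argument `orthonormalize` amounts to the following construction, written here as a small Rust sketch (local `cross`/`normalize` helpers, assuming the input is already normalized as the WGSL signature now requires):

    fn orthonormalize(z: [f32; 3]) -> [[f32; 3]; 3] {
        // Fall back to +X as the helper "up" vector when z is nearly parallel to +Y,
        // which would make the cross product degenerate.
        let up = if z[1].abs() > 0.99 { [1.0, 0.0, 0.0] } else { [0.0, 1.0, 0.0] };
        let x = normalize(cross(z, up));
        let y = cross(z, x); // already unit length: z and x are orthonormal
        [x, y, z]
    }

    fn cross(a: [f32; 3], b: [f32; 3]) -> [f32; 3] {
        [
            a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0],
        ]
    }

    fn normalize(v: [f32; 3]) -> [f32; 3] {
        let len = (v[0] * v[0] + v[1] * v[1] + v[2] * v[2]).sqrt();
        [v[0] / len, v[1] / len, v[2] / len]
    }

    fn main() {
        let m = orthonormalize([0.0, 0.0, 1.0]);
        // The third basis vector is the input direction itself.
        assert_eq!(m[2], [0.0, 0.0, 1.0]);
    }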
@@ -7,7 +7,7 @@ use bevy_tasks::ComputeTaskPool;
 use bevy_utils::WgpuWrapper;
 pub use graph_runner::*;
 pub use render_device::*;
-use tracing::{debug, error, info, info_span, trace, warn};
+use tracing::{debug, error, info, info_span, warn};
 
 use crate::{
     diagnostic::{internal::DiagnosticsRecorder, RecordDiagnostics},
@@ -145,6 +145,33 @@ const GPU_NOT_FOUND_ERROR_MESSAGE: &str = if cfg!(target_os = "linux") {
     "Unable to find a GPU! Make sure you have installed required drivers!"
 };
 
+#[cfg(not(target_family = "wasm"))]
+fn find_adapter_by_name(
+    instance: &Instance,
+    options: &WgpuSettings,
+    compatible_surface: Option<&wgpu::Surface<'_>>,
+    adapter_name: &str,
+) -> Option<Adapter> {
+    for adapter in
+        instance.enumerate_adapters(options.backends.expect(
+            "The `backends` field of `WgpuSettings` must be set to use a specific adapter.",
+        ))
+    {
+        tracing::trace!("Checking adapter: {:?}", adapter.get_info());
+        let info = adapter.get_info();
+        if let Some(surface) = compatible_surface {
+            if !adapter.is_surface_supported(surface) {
+                continue;
+            }
+        }
+
+        if info.name.eq_ignore_ascii_case(adapter_name) {
+            return Some(adapter);
+        }
+    }
+    None
+}
+
 /// Initializes the renderer by retrieving and preparing the GPU instance, device and queue
 /// for the specified backend.
 pub async fn initialize_renderer(
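One behavioral detail of the new helper: it matches the adapter name exactly (ASCII case-insensitive) via `eq_ignore_ascii_case`, whereas the inline loop removed in the next hunk matched a lowercase substring of the reported name. A hypothetical pair of adapter names shows the difference:

    fn main() {
        // Illustrative only: "requested" is what the user asks for, "reported" is
        // what a driver might return.
        let requested = "NVIDIA GeForce RTX 3080";
        let reported = "NVIDIA GeForce RTX 3080 Ti";

        let old_match = reported.to_lowercase().contains(&requested.to_lowercase());
        let new_match = reported.eq_ignore_ascii_case(requested);

        assert!(old_match);  // substring match: the 3080 Ti also satisfies a "3080" request
        assert!(!new_match); // exact match: only the precisely named adapter is selected
    }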
@@ -153,36 +180,30 @@ pub async fn initialize_renderer(
     request_adapter_options: &RequestAdapterOptions<'_, '_>,
     desired_adapter_name: Option<String>,
 ) -> (RenderDevice, RenderQueue, RenderAdapterInfo, RenderAdapter) {
+    #[cfg(not(target_family = "wasm"))]
+    let mut selected_adapter = desired_adapter_name.and_then(|adapter_name| {
+        find_adapter_by_name(
+            instance,
+            options,
+            request_adapter_options.compatible_surface,
+            &adapter_name,
+        )
+    });
+    #[cfg(target_family = "wasm")]
     let mut selected_adapter = None;
-    if let Some(adapter_name) = &desired_adapter_name {
-        debug!("Searching for adapter with name: {}", adapter_name);
-        for adapter in instance.enumerate_adapters(options.backends.expect(
-            "The `backends` field of `WgpuSettings` must be set to use a specific adapter.",
-        )) {
-            trace!("Checking adapter: {:?}", adapter.get_info());
-            let info = adapter.get_info();
-            if let Some(surface) = request_adapter_options.compatible_surface {
-                if !adapter.is_surface_supported(surface) {
-                    continue;
-                }
-            }
-
-            if info
-                .name
-                .to_lowercase()
-                .contains(&adapter_name.to_lowercase())
-            {
-                selected_adapter = Some(adapter);
-                break;
-            }
-        }
-    } else {
+
+    #[cfg(target_family = "wasm")]
+    if desired_adapter_name.is_some() {
+        warn!("Choosing an adapter is not supported on wasm.");
+    }
+
+    if selected_adapter.is_none() {
         debug!(
             "Searching for adapter with options: {:?}",
             request_adapter_options
         );
         selected_adapter = instance.request_adapter(request_adapter_options).await.ok();
-    };
+    }
 
     let adapter = selected_adapter.expect(GPU_NOT_FOUND_ERROR_MESSAGE);
     let adapter_info = adapter.get_info();
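Putting the two hunks together, adapter selection now reduces to a cfg-gated fallback: on native targets a by-name lookup runs first, on wasm it is skipped with a warning, and any remaining `None` falls through to the ordinary adapter request. A control-flow sketch with stub functions (not the wgpu API) captures the shape:

    // Control-flow sketch only: try the named adapter first on native targets,
    // then fall back to the default request.
    fn select_adapter(desired_name: Option<&str>) -> Option<&'static str> {
        #[cfg(not(target_family = "wasm"))]
        let mut selected = desired_name.and_then(find_by_name);
        #[cfg(target_family = "wasm")]
        let mut selected: Option<&'static str> = None;

        #[cfg(target_family = "wasm")]
        if desired_name.is_some() {
            eprintln!("Choosing an adapter is not supported on wasm.");
        }

        if selected.is_none() {
            selected = request_default();
        }
        selected
    }

    fn find_by_name(_name: &str) -> Option<&'static str> { None }     // stub
    fn request_default() -> Option<&'static str> { Some("default") }  // stub

    fn main() {
        // The named lookup fails in this stub, so the default request wins.
        assert_eq!(select_adapter(Some("MyGPU")), Some("default"));
    }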