Support operations for render layers and fix equality comparisons (#13310)

# Objective

Allow combining render layers with a more-ergonomic syntax than
`RenderLayers::from_iter(a.iter().chain(b.iter()))`.

## Solution

Add the `union` operation (with a corresponding `BitOr` impl) to allow
computing the union of a set of render layers. While we're here, also
added `intersection` (`BitAnd`) and `symmetric_difference` (`BitXor`)
operations; someone might find them useful.

## Testing

Added a simple unit test.
This commit is contained in:
Joseph 2024-07-01 06:55:25 -07:00 committed by François
parent fda2e4b59c
commit d7a0cc6bce
No known key found for this signature in database

View File

@ -22,7 +22,10 @@ pub type Layer = usize;
/// Entities without this component belong to layer `0`.
#[derive(Component, Clone, Reflect, PartialEq, Eq, PartialOrd, Ord)]
#[reflect(Component, Default, PartialEq)]
pub struct RenderLayers(SmallVec<[u64; 1]>);
pub struct RenderLayers(SmallVec<[u64; INLINE_BLOCKS]>);
/// The number of memory blocks stored inline
const INLINE_BLOCKS: usize = 1;
impl Default for &RenderLayers {
fn default() -> Self {
@ -47,8 +50,7 @@ impl FromIterator<Layer> for RenderLayers {
impl Default for RenderLayers {
/// By default, this structure includes layer `0`, which represents the first layer.
fn default() -> Self {
let (_, bit) = Self::layer_info(0);
RenderLayers(SmallVec::from_const([bit]))
Self::layer(0)
}
}
@ -57,15 +59,17 @@ impl RenderLayers {
/// Create a new `RenderLayers` belonging to the single layer `n`.
///
/// # Panics
///
/// Panics (at compile time, in const contexts) if `n` does not fit in the
/// inline storage — only the first `INLINE_BLOCKS * 64` layers can be
/// constructed in a `const` context.
pub const fn layer(n: Layer) -> Self {
    let (buffer_index, bit) = Self::layer_info(n);
    assert!(
        buffer_index < INLINE_BLOCKS,
        "layer is out of bounds for const construction"
    );
    let mut buffer = [0; INLINE_BLOCKS];
    buffer[buffer_index] = bit;
    RenderLayers(SmallVec::from_const(buffer))
}
/// Create a new `RenderLayers` that belongs to no layers.
pub const fn none() -> Self {
    // All-zero inline storage: no bits set, no heap allocation.
    RenderLayers(SmallVec::from_const([0; INLINE_BLOCKS]))
}
/// Create a `RenderLayers` from a list of layers.
@ -91,6 +95,11 @@ impl RenderLayers {
let (buffer_index, bit) = Self::layer_info(layer);
if buffer_index < self.0.len() {
self.0[buffer_index] &= !bit;
// Drop trailing zero memory blocks.
// NOTE: This is not just an optimization, it is necessary for the derived PartialEq impl to be correct.
if buffer_index == self.0.len() - 1 {
self = self.shrink();
}
}
self
}
@ -154,6 +163,81 @@ impl RenderLayers {
Some(layer - 1)
})
}
/// Returns the set of [layers](Layer) shared by two instances of [`RenderLayers`].
///
/// This corresponds to the `self & other` operation.
pub fn intersection(&self, other: &Self) -> Self {
    let combined = self.combine_blocks(other, |lhs, rhs| lhs & rhs);
    // AND can zero out trailing blocks, so normalize before returning.
    combined.shrink()
}
/// Returns all [layers](Layer) included in either instance of [`RenderLayers`].
///
/// This corresponds to the `self | other` operation.
pub fn union(&self, other: &Self) -> Self {
    // No shrink needed: OR-ing a nonzero block with anything stays nonzero,
    // so no trailing zero blocks can appear if the inputs had none.
    self.combine_blocks(other, |lhs, rhs| lhs | rhs)
}
/// Returns all [layers](Layer) included in exactly one of the instances of [`RenderLayers`].
///
/// This corresponds to the "exclusive or" (XOR) operation: `self ^ other`.
pub fn symmetric_difference(&self, other: &Self) -> Self {
    let xored = self.combine_blocks(other, |lhs, rhs| lhs ^ rhs);
    // XOR of equal blocks is zero, so trailing zero blocks must be dropped.
    xored.shrink()
}
/// Deallocates any trailing-zero memory blocks from this instance.
///
/// Keeping the representation free of trailing zero blocks is what makes the
/// derived `PartialEq` impl correct.
fn shrink(mut self) -> Self {
    let len_before = self.0.len();
    // Pop zero blocks off the tail, but never below the inline capacity.
    while self.0.len() > INLINE_BLOCKS && self.0.last() == Some(&0) {
        self.0.pop();
    }
    // If everything now fits inline again, release the heap allocation.
    if self.0.len() != len_before && self.0.len() <= INLINE_BLOCKS {
        self.0.shrink_to_fit();
    }
    self
}
/// Creates a new instance of [`RenderLayers`] by applying a function to the memory blocks
/// of self and another instance.
///
/// The shorter operand is implicitly extended with zero blocks so both sides
/// have the same length.
///
/// If the function `f` might return `0` for non-zero inputs, you should call [`Self::shrink`]
/// on the output to ensure that there are no trailing zero memory blocks that would break
/// this type's equality comparison.
fn combine_blocks(&self, other: &Self, mut f: impl FnMut(u64, u64) -> u64) -> Self {
    let block_count = self.0.len().max(other.0.len());
    let combined = (0..block_count).map(|index| {
        let lhs = self.0.get(index).copied().unwrap_or(0);
        let rhs = other.0.get(index).copied().unwrap_or(0);
        f(lhs, rhs)
    });
    Self(combined.collect())
}
}
impl std::ops::BitAnd for RenderLayers {
    type Output = Self;

    /// Computes the intersection of two layer sets; see [`RenderLayers::intersection`].
    fn bitand(self, other: Self) -> Self::Output {
        Self::intersection(&self, &other)
    }
}
impl std::ops::BitOr for RenderLayers {
    type Output = Self;

    /// Computes the union of two layer sets; see [`RenderLayers::union`].
    fn bitor(self, other: Self) -> Self::Output {
        Self::union(&self, &other)
    }
}
impl std::ops::BitXor for RenderLayers {
    type Output = Self;

    /// Computes the symmetric difference of two layer sets; see
    /// [`RenderLayers::symmetric_difference`].
    fn bitxor(self, other: Self) -> Self::Output {
        Self::symmetric_difference(&self, &other)
    }
}
#[cfg(test)]
@ -242,4 +326,33 @@ mod rendering_mask_tests {
let out = layers.iter().collect::<Vec<_>>();
assert_eq!(tricky_layers, out, "tricky layers roundtrip");
}
const MANY: RenderLayers = RenderLayers(SmallVec::from_const([u64::MAX]));
#[test]
fn render_layer_ops() {
    // Two overlapping layer sets to exercise each binary operator.
    let evens = RenderLayers::from_layers(&[2, 4, 6]);
    let first_five = RenderLayers::from_layers(&[1, 2, 3, 4, 5]);

    let union = evens.clone() | first_five.clone();
    assert_eq!(union, RenderLayers::from_layers(&[1, 2, 3, 4, 5, 6]));

    let intersection = evens.clone() & first_five.clone();
    assert_eq!(intersection, RenderLayers::from_layers(&[2, 4]));

    let sym_diff = evens ^ first_five;
    assert_eq!(sym_diff, RenderLayers::from_layers(&[1, 3, 5, 6]));

    // `none` absorbs under AND and is the identity for OR and XOR.
    assert_eq!(RenderLayers::none() & MANY, RenderLayers::none());
    assert_eq!(RenderLayers::none() | MANY, MANY);
    assert_eq!(RenderLayers::none() ^ MANY, MANY);
}
#[test]
fn render_layer_shrink() {
    // Layer 77 lives beyond the first 64-bit block, so storage grows to two blocks.
    let layers = RenderLayers::from_layers(&[1, 77]);
    assert!(layers.0.len() == 2);

    // Removing that layer leaves the second block all-zero; it must be dropped.
    let shrunk = layers.without(77);
    assert!(shrunk.0.len() == 1);
}
}