small ecs cleanup and remove_bundle drop bugfix (#2172)

- simplified code around archetype generations, as the special-case sentinel value (`usize::MAX`) is not actually needed
- removed unnecessary UnsafeCell around pointer value that is never updated through shared references
- fixed drop behaviour when removing sparse-set components through the remove_bundle command, and added a test covering it
This commit is contained in:
Paweł Grabarz 2021-05-18 19:25:57 +00:00
parent 4563e69e06
commit 93cc7219bc
8 changed files with 85 additions and 76 deletions

View File

@ -317,8 +317,8 @@ pub struct ArchetypeGeneration(usize);
impl ArchetypeGeneration { impl ArchetypeGeneration {
#[inline] #[inline]
pub fn new(generation: usize) -> Self { pub const fn initial() -> Self {
ArchetypeGeneration(generation) ArchetypeGeneration(0)
} }
#[inline] #[inline]

View File

@ -54,7 +54,7 @@ where
let mut state = Self { let mut state = Self {
world_id: world.id(), world_id: world.id(),
archetype_generation: ArchetypeGeneration::new(usize::MAX), archetype_generation: ArchetypeGeneration::initial(),
matched_table_ids: Vec::new(), matched_table_ids: Vec::new(),
matched_archetype_ids: Vec::new(), matched_archetype_ids: Vec::new(),
fetch_state, fetch_state,
@ -74,17 +74,10 @@ where
std::any::type_name::<Self>()); std::any::type_name::<Self>());
} }
let archetypes = world.archetypes(); let archetypes = world.archetypes();
let old_generation = self.archetype_generation; let new_generation = archetypes.generation();
let archetype_index_range = if old_generation == archetypes.generation() { let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
0..0 let archetype_index_range = old_generation.value()..new_generation.value();
} else {
self.archetype_generation = archetypes.generation();
if old_generation.value() == usize::MAX {
0..archetypes.len()
} else {
old_generation.value()..archetypes.len()
}
};
for archetype_index in archetype_index_range { for archetype_index in archetype_index_range {
self.new_archetype(&archetypes[ArchetypeId::new(archetype_index)]); self.new_archetype(&archetypes[ArchetypeId::new(archetype_index)]);
} }

View File

@ -18,8 +18,7 @@ pub struct SingleThreadedExecutor {
impl Default for SingleThreadedExecutor { impl Default for SingleThreadedExecutor {
fn default() -> Self { fn default() -> Self {
Self { Self {
// MAX ensures access information will be initialized on first run. archetype_generation: ArchetypeGeneration::initial(),
archetype_generation: ArchetypeGeneration::new(usize::MAX),
} }
} }
} }
@ -46,24 +45,15 @@ impl SingleThreadedExecutor {
/// [update_archetypes] and updates cached archetype_component_access. /// [update_archetypes] and updates cached archetype_component_access.
fn update_archetypes(&mut self, systems: &mut [ParallelSystemContainer], world: &World) { fn update_archetypes(&mut self, systems: &mut [ParallelSystemContainer], world: &World) {
let archetypes = world.archetypes(); let archetypes = world.archetypes();
let old_generation = self.archetype_generation;
let new_generation = archetypes.generation(); let new_generation = archetypes.generation();
if old_generation == new_generation { let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
return; let archetype_index_range = old_generation.value()..new_generation.value();
}
let archetype_index_range = if old_generation.value() == usize::MAX {
0..archetypes.len()
} else {
old_generation.value()..archetypes.len()
};
for archetype in archetypes.archetypes[archetype_index_range].iter() { for archetype in archetypes.archetypes[archetype_index_range].iter() {
for container in systems.iter_mut() { for container in systems.iter_mut() {
let system = container.system_mut(); let system = container.system_mut();
system.new_archetype(archetype); system.new_archetype(archetype);
} }
} }
self.archetype_generation = new_generation;
} }
} }

View File

@ -58,8 +58,7 @@ impl Default for ParallelExecutor {
fn default() -> Self { fn default() -> Self {
let (finish_sender, finish_receiver) = async_channel::unbounded(); let (finish_sender, finish_receiver) = async_channel::unbounded();
Self { Self {
// MAX ensures access information will be initialized on first run. archetype_generation: ArchetypeGeneration::initial(),
archetype_generation: ArchetypeGeneration::new(usize::MAX),
system_metadata: Default::default(), system_metadata: Default::default(),
finish_sender, finish_sender,
finish_receiver, finish_receiver,
@ -152,17 +151,10 @@ impl ParallelExecutor {
/// [update_archetypes] and updates cached archetype_component_access. /// [update_archetypes] and updates cached archetype_component_access.
fn update_archetypes(&mut self, systems: &mut [ParallelSystemContainer], world: &World) { fn update_archetypes(&mut self, systems: &mut [ParallelSystemContainer], world: &World) {
let archetypes = world.archetypes(); let archetypes = world.archetypes();
let old_generation = self.archetype_generation;
let new_generation = archetypes.generation(); let new_generation = archetypes.generation();
if old_generation == new_generation { let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
return; let archetype_index_range = old_generation.value()..new_generation.value();
}
let archetype_index_range = if old_generation.value() == usize::MAX {
0..archetypes.len()
} else {
old_generation.value()..archetypes.len()
};
for archetype in archetypes.archetypes[archetype_index_range].iter() { for archetype in archetypes.archetypes[archetype_index_range].iter() {
for (index, container) in systems.iter_mut().enumerate() { for (index, container) in systems.iter_mut().enumerate() {
let meta = &mut self.system_metadata[index]; let meta = &mut self.system_metadata[index];
@ -172,8 +164,6 @@ impl ParallelExecutor {
.extend(system.archetype_component_access()); .extend(system.archetype_component_access());
} }
} }
self.archetype_generation = new_generation;
} }
/// Populates `should_run` bitset, spawns tasks for systems that should run this iteration, /// Populates `should_run` bitset, spawns tasks for systems that should run this iteration,

View File

@ -1,6 +1,5 @@
use std::{ use std::{
alloc::{handle_alloc_error, Layout}, alloc::{handle_alloc_error, Layout},
cell::UnsafeCell,
ptr::NonNull, ptr::NonNull,
}; };
@ -9,8 +8,8 @@ pub struct BlobVec {
item_layout: Layout, item_layout: Layout,
capacity: usize, capacity: usize,
len: usize, len: usize,
data: UnsafeCell<NonNull<u8>>, data: NonNull<u8>,
swap_scratch: UnsafeCell<NonNull<u8>>, swap_scratch: NonNull<u8>,
drop: unsafe fn(*mut u8), drop: unsafe fn(*mut u8),
} }
@ -18,8 +17,8 @@ impl BlobVec {
pub fn new(item_layout: Layout, drop: unsafe fn(*mut u8), capacity: usize) -> BlobVec { pub fn new(item_layout: Layout, drop: unsafe fn(*mut u8), capacity: usize) -> BlobVec {
if item_layout.size() == 0 { if item_layout.size() == 0 {
BlobVec { BlobVec {
swap_scratch: UnsafeCell::new(NonNull::dangling()), swap_scratch: NonNull::dangling(),
data: UnsafeCell::new(NonNull::dangling()), data: NonNull::dangling(),
capacity: usize::MAX, capacity: usize::MAX,
len: 0, len: 0,
item_layout, item_layout,
@ -29,8 +28,8 @@ impl BlobVec {
let swap_scratch = NonNull::new(unsafe { std::alloc::alloc(item_layout) }) let swap_scratch = NonNull::new(unsafe { std::alloc::alloc(item_layout) })
.unwrap_or_else(|| std::alloc::handle_alloc_error(item_layout)); .unwrap_or_else(|| std::alloc::handle_alloc_error(item_layout));
let mut blob_vec = BlobVec { let mut blob_vec = BlobVec {
swap_scratch: UnsafeCell::new(swap_scratch), swap_scratch,
data: UnsafeCell::new(NonNull::dangling()), data: NonNull::dangling(),
capacity: 0, capacity: 0,
len: 0, len: 0,
item_layout, item_layout,
@ -81,9 +80,7 @@ impl BlobVec {
) )
}; };
self.data = UnsafeCell::new( self.data = NonNull::new(new_data).unwrap_or_else(|| handle_alloc_error(new_layout));
NonNull::new(new_data).unwrap_or_else(|| handle_alloc_error(new_layout)),
);
} }
self.capacity = new_capacity; self.capacity = new_capacity;
} }
@ -132,7 +129,7 @@ impl BlobVec {
pub unsafe fn swap_remove_and_forget_unchecked(&mut self, index: usize) -> *mut u8 { pub unsafe fn swap_remove_and_forget_unchecked(&mut self, index: usize) -> *mut u8 {
debug_assert!(index < self.len()); debug_assert!(index < self.len());
let last = self.len - 1; let last = self.len - 1;
let swap_scratch = (*self.swap_scratch.get()).as_ptr(); let swap_scratch = self.swap_scratch.as_ptr();
std::ptr::copy_nonoverlapping( std::ptr::copy_nonoverlapping(
self.get_unchecked(index), self.get_unchecked(index),
swap_scratch, swap_scratch,
@ -170,7 +167,7 @@ impl BlobVec {
/// must ensure rust mutability rules are not violated /// must ensure rust mutability rules are not violated
#[inline] #[inline]
pub unsafe fn get_ptr(&self) -> NonNull<u8> { pub unsafe fn get_ptr(&self) -> NonNull<u8> {
*self.data.get() self.data
} }
pub fn clear(&mut self) { pub fn clear(&mut self) {
@ -199,7 +196,7 @@ impl Drop for BlobVec {
array_layout(&self.item_layout, self.capacity) array_layout(&self.item_layout, self.capacity)
.expect("array layout should be valid"), .expect("array layout should be valid"),
); );
std::alloc::dealloc((*self.swap_scratch.get()).as_ptr(), self.item_layout); std::alloc::dealloc(self.swap_scratch.as_ptr(), self.item_layout);
} }
} }
} }

View File

@ -180,10 +180,7 @@ impl ComponentSparseSet {
/// returned). /// returned).
pub fn remove_and_forget(&mut self, entity: Entity) -> Option<*mut u8> { pub fn remove_and_forget(&mut self, entity: Entity) -> Option<*mut u8> {
self.sparse.remove(entity).map(|dense_index| { self.sparse.remove(entity).map(|dense_index| {
// SAFE: unique access to ticks self.ticks.get_mut().swap_remove(dense_index);
unsafe {
(*self.ticks.get()).swap_remove(dense_index);
}
self.entities.swap_remove(dense_index); self.entities.swap_remove(dense_index);
let is_last = dense_index == self.dense.len() - 1; let is_last = dense_index == self.dense.len() - 1;
// SAFE: dense_index was just removed from `sparse`, which ensures that it is valid // SAFE: dense_index was just removed from `sparse`, which ensures that it is valid

View File

@ -417,9 +417,29 @@ impl<T: Component> Command for RemoveResource<T> {
#[allow(clippy::float_cmp, clippy::approx_constant)] #[allow(clippy::float_cmp, clippy::approx_constant)]
mod tests { mod tests {
use crate::{ use crate::{
component::{ComponentDescriptor, StorageType},
system::{CommandQueue, Commands}, system::{CommandQueue, Commands},
world::World, world::World,
}; };
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
#[derive(Clone, Debug)]
struct DropCk(Arc<AtomicUsize>);
impl DropCk {
fn new_pair() -> (Self, Arc<AtomicUsize>) {
let atomic = Arc::new(AtomicUsize::new(0));
(DropCk(atomic.clone()), atomic)
}
}
impl Drop for DropCk {
fn drop(&mut self) {
self.0.as_ref().fetch_add(1, Ordering::Relaxed);
}
}
#[test] #[test]
fn commands() { fn commands() {
@ -454,10 +474,20 @@ mod tests {
#[test] #[test]
fn remove_components() { fn remove_components() {
let mut world = World::default(); let mut world = World::default();
struct DenseDropCk(DropCk);
world
.register_component(ComponentDescriptor::new::<DropCk>(StorageType::SparseSet))
.unwrap();
let mut command_queue = CommandQueue::default(); let mut command_queue = CommandQueue::default();
let (dense_dropck, dense_is_dropped) = DropCk::new_pair();
let dense_dropck = DenseDropCk(dense_dropck);
let (sparse_dropck, sparse_is_dropped) = DropCk::new_pair();
let entity = Commands::new(&mut command_queue, &world) let entity = Commands::new(&mut command_queue, &world)
.spawn() .spawn()
.insert_bundle((1u32, 2u64)) .insert_bundle((1u32, 2u64, dense_dropck, sparse_dropck))
.id(); .id();
command_queue.apply(&mut world); command_queue.apply(&mut world);
let results_before = world let results_before = world
@ -471,8 +501,14 @@ mod tests {
Commands::new(&mut command_queue, &world) Commands::new(&mut command_queue, &world)
.entity(entity) .entity(entity)
.remove::<u32>() .remove::<u32>()
.remove_bundle::<(u32, u64)>(); .remove_bundle::<(u32, u64, DenseDropCk, DropCk)>();
assert_eq!(dense_is_dropped.load(Ordering::Relaxed), 0);
assert_eq!(sparse_is_dropped.load(Ordering::Relaxed), 0);
command_queue.apply(&mut world); command_queue.apply(&mut world);
assert_eq!(dense_is_dropped.load(Ordering::Relaxed), 1);
assert_eq!(sparse_is_dropped.load(Ordering::Relaxed), 1);
let results_after = world let results_after = world
.query::<(&u32, &u64)>() .query::<(&u32, &u64)>()
.iter(&world) .iter(&world)

View File

@ -325,7 +325,7 @@ impl<'w> EntityMut<'w> {
T::from_components(|| { T::from_components(|| {
let component_id = bundle_components.next().unwrap(); let component_id = bundle_components.next().unwrap();
// SAFE: entity location is valid and table row is removed below // SAFE: entity location is valid and table row is removed below
remove_component( take_component(
components, components,
storages, storages,
old_archetype, old_archetype,
@ -406,17 +406,18 @@ impl<'w> EntityMut<'w> {
let entity = self.entity; let entity = self.entity;
for component_id in bundle_info.component_ids.iter().cloned() { for component_id in bundle_info.component_ids.iter().cloned() {
if old_archetype.contains(component_id) { if old_archetype.contains(component_id) {
// SAFE: entity location is valid and table row is removed below removed_components
unsafe { .get_or_insert_with(component_id, Vec::new)
remove_component( .push(entity);
components,
storages, // Make sure to drop components stored in sparse sets.
old_archetype, // Dense components are dropped later in `move_to_and_drop_missing_unchecked`.
removed_components, if let Some(StorageType::SparseSet) = old_archetype.get_storage_type(component_id) {
component_id, storages
entity, .sparse_sets
old_location, .get_mut(component_id)
); .unwrap()
.remove(entity);
} }
} }
} }
@ -586,13 +587,18 @@ unsafe fn get_component_and_ticks(
} }
} }
/// Moves component data out of storage.
///
/// This function leaves the underlying memory unchanged, but the component behind
/// returned pointer is semantically owned by the caller and will not be dropped in its original location.
/// Caller is responsible to drop component data behind returned pointer.
///
/// # Safety /// # Safety
// `entity_location` must be within bounds of the given archetype and `entity` must exist inside the /// - `entity_location` must be within bounds of the given archetype and `entity` must exist inside the archetype
// archetype /// - `component_id` must be valid
/// The relevant table row must be removed separately /// - The relevant table row **must be removed** by the caller once all components are taken
/// `component_id` must be valid
#[inline] #[inline]
unsafe fn remove_component( unsafe fn take_component(
components: &Components, components: &Components,
storages: &mut Storages, storages: &mut Storages,
archetype: &Archetype, archetype: &Archetype,