diff --git a/src/entity/ealloc/recycling.rs b/src/entity/ealloc/recycling.rs
index 2fafbb7729..8e30defe59 100644
--- a/src/entity/ealloc/recycling.rs
+++ b/src/entity/ealloc/recycling.rs
@@ -15,29 +15,29 @@ type MutableShards<T> = Vec<Arc<Mutex<T>>>;
 
 /// The default allocator supporting atomically-allocated new IDs and arbitrary recycler.
 #[derive(Debug)]
-pub struct Recycling<E: Raw, T: Recycler<E>, S: ShardAssigner> {
+pub struct Recycling<RawT: Raw, T: Recycler<RawT>, S: ShardAssigner> {
     /// Whether `mark_need_flush` was called.
     flush_mark: bool,
     /// The next ID to allocate into shards.
-    global_gauge: Arc<E::Atomic>,
+    global_gauge: Arc<RawT::Atomic>,
     /// A sorted list of recycled IDs during the last join.
-    recyclable: Arc<BTreeSet<E>>,
+    recyclable: Arc<BTreeSet<RawT>>,
     /// The actual IDs assigned to different shards.
     recycler_shards: MutableShards<T>,
     /// The assigned shard.
     shard_assigner: S,
     /// The queue of deallocated IDs to distribute.
-    dealloc_queue: Vec<E>,
+    dealloc_queue: Vec<RawT>,
     /// The queue of allocated IDs during online, to be synced to recyclable after join.
-    reuse_queue_shards: MutableShards<Vec<E>>,
+    reuse_queue_shards: MutableShards<Vec<RawT>>,
 }
 
-impl<E: Raw, T: Recycler<E>, S: ShardAssigner> Recycling<E, T, S> {
+impl<RawT: Raw, T: Recycler<RawT>, S: ShardAssigner> Recycling<RawT, T, S> {
     /// Creates a new recycling allocator with a custom shard assigner.
     /// This can only be used for unit testing since the Archetype API does not support dynamic
     /// shard assigners.
     pub(crate) fn new_with_shard_assigner(num_shards: usize, shard_assigner: S) -> Self {
-        let global_gauge = E::new();
+        let global_gauge = RawT::new();
         Self {
             flush_mark: false,
             global_gauge: Arc::new(global_gauge),
@@ -55,24 +55,24 @@ impl<E: Raw, T: Recycler<E>, S: ShardAssigner> Recycling<E, T, S> {
     }
 
     fn get_reuse_queue_offline(
-        reuse_queues: &mut MutableShards<Vec<E>>,
+        reuse_queues: &mut MutableShards<Vec<RawT>>,
         index: usize,
-    ) -> &mut Vec<E> {
+    ) -> &mut Vec<RawT> {
         let arc = reuse_queues.get_mut(index).expect("index out of bounds");
         Arc::get_mut(arc).expect("shards are dropped in offline mode").get_mut()
     }
 
     fn iter_allocated_chunks_offline(
         &mut self,
-    ) -> impl iter::FusedIterator<Item = ops::Range<E>> + '_ {
+    ) -> impl iter::FusedIterator<Item = ops::Range<RawT>> + '_ {
         iter_gaps(self.global_gauge.load(), self.recyclable.iter().copied())
     }
 }
 
-impl<E: Raw, T: Recycler<E>, S: ShardAssigner> Ealloc for Recycling<E, T, S> {
-    type Raw = E;
+impl<RawT: Raw, T: Recycler<RawT>, S: ShardAssigner> Ealloc for Recycling<RawT, T, S> {
+    type Raw = RawT;
     type AllocHint = T::Hint;
-    type Shard = impl Shard<Raw = E, Hint = T::Hint>;
+    type Shard = impl Shard<Raw = RawT, Hint = T::Hint>;
 
     fn new(num_shards: usize) -> Self { Self::new_with_shard_assigner(num_shards, S::default()) }
@@ -109,7 +109,7 @@ impl<E: Raw, T: Recycler<E>, S: ShardAssigner> Ealloc for Recycling<E, T, S> {
         shard.allocate(hint)
     }
 
-    fn queue_deallocate(&mut self, id: E) { self.dealloc_queue.push(id); }
+    fn queue_deallocate(&mut self, id: RawT) { self.dealloc_queue.push(id); }
 
     fn flush(&mut self) {
         self.flush_mark = false;
@@ -205,18 +205,18 @@ pub struct RecyclingShard<GaugeRef, RecyclerRef, ReuseQueueRef> {
     reuse_queue: ReuseQueueRef,
 }
 
-impl<E: Raw, GaugeRef, RecyclerRef, ReuseQueueRef> Shard
+impl<RawT: Raw, GaugeRef, RecyclerRef, ReuseQueueRef> Shard
     for RecyclingShard<GaugeRef, RecyclerRef, ReuseQueueRef>
 where
-    GaugeRef: ops::Deref<Target = E::Atomic> + Send + 'static,
+    GaugeRef: ops::Deref<Target = RawT::Atomic> + Send + 'static,
     RecyclerRef: ops::DerefMut + Send + 'static,
-    <RecyclerRef as ops::Deref>::Target: Recycler<E>,
-    ReuseQueueRef: ops::DerefMut<Target = Vec<E>> + Send + 'static,
+    <RecyclerRef as ops::Deref>::Target: Recycler<RawT>,
+    ReuseQueueRef: ops::DerefMut<Target = Vec<RawT>> + Send + 'static,
 {
-    type Raw = E;
-    type Hint = <RecyclerRef::Target as Recycler<E>>::Hint;
+    type Raw = RawT;
+    type Hint = <RecyclerRef::Target as Recycler<RawT>>::Hint;
 
-    fn allocate(&mut self, hint: Self::Hint) -> E {
+    fn allocate(&mut self, hint: Self::Hint) -> RawT {
         if let Some(id) = self.recycler.poll(hint) {
             id
         } else {
@@ -225,14 +225,14 @@ where
         }
     }
 }
 
-impl<E: Raw, T: Recycler<E>, GaugeRef, RecyclerRef, ReuseQueueRef>
+impl<RawT: Raw, T: Recycler<RawT>, GaugeRef, RecyclerRef, ReuseQueueRef>
     RecyclingShard<GaugeRef, RecyclerRef, ReuseQueueRef>
 where
-    GaugeRef: ops::Deref<Target = E::Atomic>,
+    GaugeRef: ops::Deref<Target = RawT::Atomic>,
     RecyclerRef: ops::DerefMut<Target = T>,
-    ReuseQueueRef: ops::DerefMut<Target = Vec<E>>,
+    ReuseQueueRef: ops::DerefMut<Target = Vec<RawT>>,
 {
-    fn allocate(&mut self, hint: T::Hint) -> E {
+    fn allocate(&mut self, hint: T::Hint) -> RawT {
         if let Some(id) = self.recycler.poll(hint) {
             self.reuse_queue.push(id);
             id
diff --git a/src/storage/tree.rs b/src/storage/tree.rs
index 5f2e035529..c7c677059e 100644
--- a/src/storage/tree.rs
+++ b/src/storage/tree.rs
@@ -6,20 +6,20 @@ use super::{Access, ChunkMut, ChunkRef, Partition, Storage};
 use crate::entity;
 
 /// A storage based on [`BTreeMap`].
-pub struct Tree<E: entity::Raw, C> {
+pub struct Tree<RawT: entity::Raw, C> {
     // `SyncUnsafeCell` here must be treated as a normal `C`
     // unless the whole storage is mutably locked,
     // which means the current function exclusively manages this map.
     // `&Tree` must not be used to access the cells mutably.
-    data: BTreeMap<E, SyncUnsafeCell<C>>,
+    data: BTreeMap<RawT, SyncUnsafeCell<C>>,
 }
 
-impl<E: entity::Raw, C> Default for Tree<E, C> {
+impl<RawT: entity::Raw, C> Default for Tree<RawT, C> {
     fn default() -> Self { Self { data: BTreeMap::new() } }
 }
 
-impl<E: entity::Raw, C: Send + Sync + 'static> Access for Tree<E, C> {
-    type RawEntity = E;
+impl<RawT: entity::Raw, C: Send + Sync + 'static> Access for Tree<RawT, C> {
+    type RawEntity = RawT;
     type Comp = C;
 
     fn get_mut(&mut self, id: Self::RawEntity) -> Option<&mut C> {
@@ -32,7 +32,7 @@ impl<E: entity::Raw, C: Send + Sync + 'static> Access for Tree<E, C> {
     }
 }
 
-impl<E: entity::Raw, C: Send + Sync + 'static> Storage for Tree<E, C> {
+impl<RawT: entity::Raw, C: Send + Sync + 'static> Storage for Tree<RawT, C> {
     fn get(&self, id: Self::RawEntity) -> Option<&C> {
         self.data.get(&id).map(|cell| unsafe {
             // Safety: `&self` implies that nobody else can mutate the values.
@@ -71,21 +71,21 @@ impl<E: entity::Raw, C: Send + Sync + 'static> Storage for Tree<E, C> {
             .map(|(entity, item)| ChunkMut { slice: slice::from_mut(item), start: entity })
     }
 
-    type Partition<'t> = StoragePartition<'t, E, C>;
+    type Partition<'t> = StoragePartition<'t, RawT, C>;
     fn as_partition(&mut self) -> Self::Partition<'_> {
         StoragePartition { data: &self.data, lower_bound: None, upper_bound: None }
     }
 }
 
 /// Return value of [`Tree::split_at`].
-pub struct StoragePartition<'t, E: entity::Raw, C> {
-    data: &'t BTreeMap<E, SyncUnsafeCell<C>>,
-    lower_bound: Option<E>,
-    upper_bound: Option<E>,
+pub struct StoragePartition<'t, RawT: entity::Raw, C> {
+    data: &'t BTreeMap<RawT, SyncUnsafeCell<C>>,
+    lower_bound: Option<RawT>,
+    upper_bound: Option<RawT>,
 }
 
-impl<'t, E: entity::Raw, C> StoragePartition<'t, E, C> {
-    fn assert_bounds(&self, entity: E) {
+impl<'t, RawT: entity::Raw, C> StoragePartition<'t, RawT, C> {
+    fn assert_bounds(&self, entity: RawT) {
         if let Some(bound) = self.lower_bound {
             assert!(entity >= bound, "Entity {entity:?} is not in the partition {bound:?}..");
         }
@@ -95,11 +95,11 @@ impl<'t, E: entity::Raw, C> StoragePartition<'t, E, C> {
     }
 }
 
-impl<'t, E: entity::Raw, C: Send + Sync + 'static> Access for StoragePartition<'t, E, C> {
-    type RawEntity = E;
+impl<'t, RawT: entity::Raw, C: Send + Sync + 'static> Access for StoragePartition<'t, RawT, C> {
+    type RawEntity = RawT;
     type Comp = C;
 
-    fn get_mut(&mut self, entity: E) -> Option<&mut C> {
+    fn get_mut(&mut self, entity: RawT) -> Option<&mut C> {
         self.assert_bounds(entity);
 
         let cell = self.data.get(&entity)?;
@@ -115,8 +115,10 @@ impl<'t, E: entity::Raw, C: Send + Sync + 'static> Access for StoragePartition<'
     fn iter_mut(&mut self) -> Self::IterMut<'_> { self.by_ref().into_iter_mut() }
 }
 
-impl<'t, E: entity::Raw, C: Send + Sync + 'static> Partition<'t> for StoragePartition<'t, E, C> {
-    type ByRef<'u> = StoragePartition<'u, E, C> where Self: 'u;
+impl<'t, RawT: entity::Raw, C: Send + Sync + 'static> Partition<'t>
+    for StoragePartition<'t, RawT, C>
+{
+    type ByRef<'u> = StoragePartition<'u, RawT, C> where Self: 'u;
     fn by_ref(&mut self) -> Self::ByRef<'_> {
         StoragePartition {
             data: self.data,
@@ -125,11 +127,11 @@ impl<'t, E: entity::Raw, C: Send + Sync + 'static> Partition<'t> for StoragePart
         }
     }
 
-    type IntoIterMut = impl Iterator<Item = (E, &'t mut C)>;
+    type IntoIterMut = impl Iterator<Item = (RawT, &'t mut C)>;
     fn into_iter_mut(self) -> Self::IntoIterMut {
         let iter = match (self.lower_bound, self.upper_bound) {
             (Some(lower), Some(upper)) => Box::new(self.data.range(lower..upper))
-                as Box<dyn Iterator<Item = (&E, &SyncUnsafeCell<C>)>>,
+                as Box<dyn Iterator<Item = (&RawT, &SyncUnsafeCell<C>)>>,
             (Some(lower), None) => Box::new(self.data.range(lower..)),
             (None, Some(upper)) => Box::new(self.data.range(..upper)),
             (None, None) => Box::new(self.data.iter()),
@@ -156,7 +158,7 @@ impl<'t, E: entity::Raw, C: Send + Sync + 'static> Partition<'t> for StoragePart
         }
     }
 
-    fn split_out(&mut self, entity: E) -> Self {
+    fn split_out(&mut self, entity: RawT) -> Self {
         self.assert_bounds(entity);
 
         let right = Self {
diff --git a/src/storage/vec.rs b/src/storage/vec.rs
index 249174ae3c..2f25c07c69 100644
--- a/src/storage/vec.rs
+++ b/src/storage/vec.rs
@@ -11,14 +11,14 @@ use super::{
 use crate::{entity, util};
 
 /// The basic storage indexed by entity IDs directly.
-pub struct VecStorage<E: entity::Raw, C> {
+pub struct VecStorage<RawT: entity::Raw, C> {
     cardinality: usize,
     bits: BitVec,
     data: Vec<MaybeUninit<C>>,
-    _ph: PhantomData<E>,
+    _ph: PhantomData<RawT>,
 }
 
-impl<E: entity::Raw, C> VecStorage<E, C> {
+impl<RawT: entity::Raw, C> VecStorage<RawT, C> {
     fn bit(&self, index: usize) -> bool {
         match self.bits.get(index) {
             Some(bit) => *bit,
@@ -47,7 +47,7 @@ impl<E: entity::Raw, C> VecStorage<E, C> {
     }
 }
 
-impl<E: entity::Raw, C> Default for VecStorage<E, C> {
+impl<RawT: entity::Raw, C> Default for VecStorage<RawT, C> {
     fn default() -> Self {
         Self {
             cardinality: 0,
@@ -58,11 +58,11 @@ impl<E: entity::Raw, C> Default for VecStorage<E, C> {
     }
 }
 
-impl<E: entity::Raw, C: Send + Sync + 'static> Access for VecStorage<E, C> {
-    type RawEntity = E;
+impl<RawT: entity::Raw, C: Send + Sync + 'static> Access for VecStorage<RawT, C> {
+    type RawEntity = RawT;
     type Comp = C;
 
-    fn get_mut(&mut self, id: E) -> Option<&mut C> {
+    fn get_mut(&mut self, id: RawT) -> Option<&mut C> {
         let index = id.to_primitive();
 
         if self.bit(index) {
@@ -74,12 +74,12 @@ impl<E: entity::Raw, C: Send + Sync + 'static> Access for VecStorage<E, C> {
         }
     }
 
-    type IterMut<'t> = impl Iterator<Item = (E, &'t mut C)> + 't;
+    type IterMut<'t> = impl Iterator<Item = (RawT, &'t mut C)> + 't;
     fn iter_mut(&mut self) -> Self::IterMut<'_> { iter_mut(0, &self.bits, &mut self.data) }
 }
 
-impl<E: entity::Raw, C: Send + Sync + 'static> Storage for VecStorage<E, C> {
-    fn get(&self, id: E) -> Option<&C> {
+impl<RawT: entity::Raw, C: Send + Sync + 'static> Storage for VecStorage<RawT, C> {
+    fn get(&self, id: RawT) -> Option<&C> {
         let index = id.to_primitive();
 
         if self.bit(index) {
@@ -91,7 +91,7 @@ impl<E: entity::Raw, C: Send + Sync + 'static> Storage for VecStorage<E, C> {
         }
     }
 
-    fn set(&mut self, id: E, new: Option<C>) -> Option<C> {
+    fn set(&mut self, id: RawT, new: Option<C>) -> Option<C> {
         let index = id.to_primitive();
 
         let old = if self.bit(index) {
@@ -123,13 +123,13 @@ impl<E: entity::Raw, C: Send + Sync + 'static> Storage for VecStorage<E, C> {
     fn cardinality(&self) -> usize { self.cardinality }
 
-    type Iter<'t> = impl Iterator<Item = (E, &'t C)> + 't;
+    type Iter<'t> = impl Iterator<Item = (RawT, &'t C)> + 't;
     fn iter(&self) -> Self::Iter<'_> {
         let indices = self.bits.iter_ones();
         let data = &self.data;
         indices.map(move |index| {
-            let entity = E::from_primitive(index);
+            let entity = RawT::from_primitive(index);
             let value = data.get(index).expect("bits mismatch");
             let value = unsafe { value.assume_init_ref() };
             (entity, value)
@@ -140,7 +140,7 @@ impl<E: entity::Raw, C: Send + Sync + 'static> Storage for VecStorage<E, C> {
     fn iter_chunks(&self) -> Self::IterChunks<'_> {
         new_iter_chunks_ref(&self.bits, &self.data[..]).map(|(start_index, chunk)| ChunkRef {
             slice: unsafe { slice_assume_init_ref(chunk) },
-            start: E::from_primitive(start_index),
+            start: RawT::from_primitive(start_index),
         })
     }
@@ -148,23 +148,23 @@ impl<E: entity::Raw, C: Send + Sync + 'static> Storage for VecStorage<E, C> {
     fn iter_chunks_mut(&mut self) -> Self::IterChunksMut<'_> {
         new_iter_chunks_mut(&self.bits, &mut self.data[..]).map(|(start_index, chunk)| ChunkMut {
             slice: unsafe { slice_assume_init_mut(chunk) },
-            start: E::from_primitive(start_index),
+            start: RawT::from_primitive(start_index),
         })
     }
 
-    type Partition<'t> = StoragePartition<'t, E, C>;
+    type Partition<'t> = StoragePartition<'t, RawT, C>;
     fn as_partition(&mut self) -> Self::Partition<'_> { self.as_partition_chunk() }
 }
 
-fn iter_mut<'storage, E: entity::Raw, C: 'static>(
+fn iter_mut<'storage, RawT: entity::Raw, C: 'static>(
     start_offset: usize,
     bits: &'storage bitvec::slice::BitSlice,
     data: &'storage mut [MaybeUninit<C>],
-) -> impl Iterator<Item = (E, &'storage mut C)> + 'storage {
+) -> impl Iterator<Item = (RawT, &'storage mut C)> + 'storage {
     let indices = bits.iter_ones();
     indices.map(move |index| {
-        let entity = E::from_primitive(start_offset + index);
+        let entity = RawT::from_primitive(start_offset + index);
         let value = data.get_mut(index).expect("bits mismatch");
         let value = unsafe { value.assume_init_mut() };
         let value = unsafe { mem::transmute::<&mut C, &mut C>(value) };
@@ -173,25 +173,27 @@ fn iter_mut<'storage, E: entity::Raw, C: 'static>(
 }
 
 /// Return value of [`VecStorage::split_at`].
-pub struct StoragePartition<'t, E: entity::Raw, C> {
+pub struct StoragePartition<'t, RawT: entity::Raw, C> {
     bits: &'t BitSlice,
     data: &'t mut [MaybeUninit<C>],
     offset: usize,
-    _ph: PhantomData<E>,
+    _ph: PhantomData<RawT>,
 }
 
-impl<'t, E: entity::Raw, C: Send + Sync + 'static> Access for StoragePartition<'t, E, C> {
-    type RawEntity = E;
+impl<'t, RawT: entity::Raw, C: Send + Sync + 'static> Access for StoragePartition<'t, RawT, C> {
+    type RawEntity = RawT;
     type Comp = C;
 
-    fn get_mut(&mut self, entity: E) -> Option<&mut C> { self.by_ref().into_mut(entity) }
+    fn get_mut(&mut self, entity: RawT) -> Option<&mut C> { self.by_ref().into_mut(entity) }
 
-    type IterMut<'u> = impl Iterator<Item = (E, &'u mut C)> + 'u where Self: 'u;
+    type IterMut<'u> = impl Iterator<Item = (RawT, &'u mut C)> + 'u where Self: 'u;
     fn iter_mut(&mut self) -> Self::IterMut<'_> { self.by_ref().into_iter_mut() }
 }
 
-impl<'t, E: entity::Raw, C: Send + Sync + 'static> Partition<'t> for StoragePartition<'t, E, C> {
-    type ByRef<'u> = StoragePartition<'u, E, C> where Self: 'u;
+impl<'t, RawT: entity::Raw, C: Send + Sync + 'static> Partition<'t>
+    for StoragePartition<'t, RawT, C>
+{
+    type ByRef<'u> = StoragePartition<'u, RawT, C> where Self: 'u;
     fn by_ref(&mut self) -> Self::ByRef<'_> {
         StoragePartition {
             bits: self.bits,
@@ -201,10 +203,10 @@ impl<'t, E: entity::Raw, C: Send + Sync + 'static> Partition<'t> for StoragePart
         }
     }
 
-    type IntoIterMut = impl Iterator<Item = (E, &'t mut C)>;
+    type IntoIterMut = impl Iterator<Item = (RawT, &'t mut C)>;
     fn into_iter_mut(self) -> Self::IntoIterMut { iter_mut(self.offset, self.bits, self.data) }
 
-    fn into_mut(self, entity: E) -> Option<&'t mut C> {
+    fn into_mut(self, entity: RawT) -> Option<&'t mut C> {
         let index = match entity.to_primitive().checked_sub(self.offset) {
             Some(index) => index,
             None => panic!("Entity {entity:?} is not in the partition {:?}..", self.offset),
@@ -222,7 +224,7 @@ impl<'t, E: entity::Raw, C: Send + Sync + 'static> Partition<'t> for StoragePart
         }
     }
 
-    fn split_out(&mut self, entity: E) -> Self {
+    fn split_out(&mut self, entity: RawT) -> Self {
         let index =
             entity.to_primitive().checked_sub(self.offset).expect("parameter out of bounds");
         assert!(
@@ -246,8 +248,8 @@ impl<'t, E: entity::Raw, C: Send + Sync + 'static> Partition<'t> for StoragePart
     }
 }
 
-impl<E: entity::Raw, C: Send + Sync + 'static> AccessChunked for VecStorage<E, C> {
-    fn get_chunk_mut(&mut self, start: E, end: E) -> Option<&mut [C]> {
+impl<RawT: entity::Raw, C: Send + Sync + 'static> AccessChunked for VecStorage<RawT, C> {
+    fn get_chunk_mut(&mut self, start: RawT, end: RawT) -> Option<&mut [C]> {
         let range = start.to_primitive()..end.to_primitive();
         let bits = match self.bits.get(range.clone()) {
             Some(bits) => bits,
@@ -265,8 +267,8 @@ impl<E: entity::Raw, C: Send + Sync + 'static> AccessChunked for VecStorage<E, C
     }
 }
 
-impl<E: entity::Raw, C: Send + Sync + 'static> Chunked for VecStorage<E, C> {
-    fn get_chunk(&self, start: E, end: E) -> Option<&[C]> {
+impl<RawT: entity::Raw, C: Send + Sync + 'static> Chunked for VecStorage<RawT, C> {
+    fn get_chunk(&self, start: RawT, end: RawT) -> Option<&[C]> {
         let range = start.to_primitive()..end.to_primitive();
         let bits = match self.bits.get(range.clone()) {
             Some(bits) => bits,
@@ -292,16 +294,18 @@ impl<E: entity::Raw, C: Send + Sync + 'static> Chunked for VecStorage<E, C> {
     }
 }
 
-impl<'t, E: entity::Raw, C: Send + Sync + 'static> AccessChunked for StoragePartition<'t, E, C> {
-    fn get_chunk_mut(&mut self, start: E, end: E) -> Option<&mut [C]> {
+impl<'t, RawT: entity::Raw, C: Send + Sync + 'static> AccessChunked
+    for StoragePartition<'t, RawT, C>
+{
+    fn get_chunk_mut(&mut self, start: RawT, end: RawT) -> Option<&mut [C]> {
         self.by_ref().into_chunk_mut(start, end)
     }
 }
 
-impl<'t, E: entity::Raw, C: Send + Sync + 'static> PartitionChunked<'t>
-    for StoragePartition<'t, E, C>
+impl<'t, RawT: entity::Raw, C: Send + Sync + 'static> PartitionChunked<'t>
+    for StoragePartition<'t, RawT, C>
 {
-    fn into_chunk_mut(self, start: E, end: E) -> Option<&'t mut [C]> {
+    fn into_chunk_mut(self, start: RawT, end: RawT) -> Option<&'t mut [C]> {
         let (start, end) = (start.to_primitive() - self.offset, end.to_primitive() - self.offset);
 
         let range = start..end;
@@ -320,14 +324,14 @@ impl<'t, E: entity::Raw, C: Send + Sync + 'static> PartitionChunked<'t>
         Some(unsafe { slice_assume_init_mut(data) })
     }
 
-    type IntoIterChunksMut = impl Iterator<Item = (E, &'t mut [C])>;
+    type IntoIterChunksMut = impl Iterator<Item = (RawT, &'t mut [C])>;
     fn into_iter_chunks_mut(self) -> Self::IntoIterChunksMut {
         // check correctness:
         // `bits[i]` corresponds to `self.data[i]`, of which the index `i` matches `(last_zero ?? -1) + 1 + i`
         let iter = new_iter_chunks_mut(self.bits, self.data);
         let offset = self.offset;
         iter.map(move |(start_index, chunk)| {
-            (E::from_primitive(start_index + offset), unsafe { slice_assume_init_mut(chunk) })
+            (RawT::from_primitive(start_index + offset), unsafe { slice_assume_init_mut(chunk) })
         })
     }
 }
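
Note on the convention this diff adopts: the single-letter type parameter `E` is renamed to `RawT`, so that type parameters are visually distinct from concrete types and traits, and associated-type definitions stay readable (`type Raw = RawT;` rather than `type Raw = E;`). The sketch below is not dynec code; it is a minimal, self-contained Rust illustration of the same naming pattern, with a hypothetical `Raw` trait reduced to the two conversions the storages above actually call:

    use std::collections::BTreeMap;
    use std::fmt::Debug;

    // Hypothetical stand-in for dynec's `entity::Raw` trait, cut down to the
    // `from_primitive`/`to_primitive` conversions used by the storages in the diff.
    trait Raw: Copy + Ord + Debug {
        fn from_primitive(i: usize) -> Self;
        fn to_primitive(self) -> usize;
    }

    impl Raw for u32 {
        fn from_primitive(i: usize) -> Self { i as u32 }
        fn to_primitive(self) -> usize { self as usize }
    }

    // Mirrors the renamed `Tree<RawT: entity::Raw, C>` signature: `RawT` is the
    // raw entity ID type, `C` the component type. Interior mutability omitted.
    struct Tree<RawT: Raw, C> {
        data: BTreeMap<RawT, C>,
    }

    impl<RawT: Raw, C> Default for Tree<RawT, C> {
        fn default() -> Self { Self { data: BTreeMap::new() } }
    }

    impl<RawT: Raw, C> Tree<RawT, C> {
        fn set(&mut self, id: RawT, new: C) -> Option<C> { self.data.insert(id, new) }
        fn get(&self, id: RawT) -> Option<&C> { self.data.get(&id) }
    }

    fn main() {
        // `u32` plays the role of the raw entity ID here.
        let mut positions = Tree::<u32, (f32, f32)>::default();
        positions.set(Raw::from_primitive(3), (1.0, 2.0));
        assert_eq!(positions.get(3), Some(&(1.0, 2.0)));
    }

The payoff of the `RawT` suffix shows at use sites like `impl<RawT: Raw, C> Tree<RawT, C>`: every `*T` name is immediately recognizable as a parameter, which matters in this codebase where parameters (`RawT`) and same-named associated types (`type Raw`) would otherwise collide visually.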