Commit 76de889

chore: E: entity:Raw is a confusing name

SOF3 committed Oct 14, 2023
1 parent c9a83c8 commit 76de889
Showing 3 changed files with 93 additions and 87 deletions.
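The change is a mechanical rename: the type parameter `E`, bounded by `entity::Raw`, reads like an entity type when it actually names the raw ID type backing entities, so it becomes `RawT` throughout. The first hunk below shows the pattern; every other hunk applies the same substitution:

    pub struct Recycling<E: Raw, T: Recycler<E>, S: ShardAssigner> { /* before */ }
    pub struct Recycling<RawT: Raw, T: Recycler<RawT>, S: ShardAssigner> { /* after */ }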
50 changes: 25 additions & 25 deletions src/entity/ealloc/recycling.rs
@@ -15,29 +15,29 @@ type MutableShards<T> = Vec<Arc<Mutex<T>>>;
 
 /// The default allocator supporting atomically-allocated new IDs and arbitrary recycler.
 #[derive(Debug)]
-pub struct Recycling<E: Raw, T: Recycler<E>, S: ShardAssigner> {
+pub struct Recycling<RawT: Raw, T: Recycler<RawT>, S: ShardAssigner> {
     /// Whether `mark_need_flush` was called.
     flush_mark: bool,
     /// The next ID to allocate into shards.
-    global_gauge: Arc<E::Atomic>,
+    global_gauge: Arc<RawT::Atomic>,
     /// A sorted list of recycled IDs during the last join.
-    recyclable: Arc<BTreeSet<E>>,
+    recyclable: Arc<BTreeSet<RawT>>,
     /// The actual IDs assigned to different shards.
     recycler_shards: MutableShards<T>,
     /// The assigned shard.
     shard_assigner: S,
     /// The queue of deallocated IDs to distribute.
-    dealloc_queue: Vec<E>,
+    dealloc_queue: Vec<RawT>,
     /// The queue of allocated IDs during online, to be synced to recyclable after join.
-    reuse_queue_shards: MutableShards<Vec<E>>,
+    reuse_queue_shards: MutableShards<Vec<RawT>>,
 }
 
-impl<E: Raw, T: Recycler<E>, S: ShardAssigner> Recycling<E, T, S> {
+impl<RawT: Raw, T: Recycler<RawT>, S: ShardAssigner> Recycling<RawT, T, S> {
     /// Creates a new recycling allocator with a custom shard assigner.
     /// This can only be used for unit testing since the Archetype API does not support dynamic
     /// shard assigners.
     pub(crate) fn new_with_shard_assigner(num_shards: usize, shard_assigner: S) -> Self {
-        let global_gauge = E::new();
+        let global_gauge = RawT::new();
         Self {
             flush_mark: false,
             global_gauge: Arc::new(global_gauge),
@@ -55,24 +55,24 @@ impl<E: Raw, T: Recycler<E>, S: ShardAssigner> Recycling<E, T, S> {
     }
 
     fn get_reuse_queue_offline(
-        reuse_queues: &mut MutableShards<Vec<E>>,
+        reuse_queues: &mut MutableShards<Vec<RawT>>,
         index: usize,
-    ) -> &mut Vec<E> {
+    ) -> &mut Vec<RawT> {
         let arc = reuse_queues.get_mut(index).expect("index out of bounds");
         Arc::get_mut(arc).expect("shards are dropped in offline mode").get_mut()
     }
 
     fn iter_allocated_chunks_offline(
         &mut self,
-    ) -> impl iter::FusedIterator<Item = ops::Range<E>> + '_ {
+    ) -> impl iter::FusedIterator<Item = ops::Range<RawT>> + '_ {
         iter_gaps(self.global_gauge.load(), self.recyclable.iter().copied())
     }
 }
 
-impl<E: Raw, T: Recycler<E>, S: ShardAssigner> Ealloc for Recycling<E, T, S> {
-    type Raw = E;
+impl<RawT: Raw, T: Recycler<RawT>, S: ShardAssigner> Ealloc for Recycling<RawT, T, S> {
+    type Raw = RawT;
     type AllocHint = T::Hint;
-    type Shard = impl Shard<Raw = E, Hint = T::Hint>;
+    type Shard = impl Shard<Raw = RawT, Hint = T::Hint>;
 
     fn new(num_shards: usize) -> Self { Self::new_with_shard_assigner(num_shards, S::default()) }
 
@@ -109,7 +109,7 @@ impl<E: Raw, T: Recycler<E>, S: ShardAssigner> Ealloc for Recycling<E, T, S> {
         shard.allocate(hint)
     }
 
-    fn queue_deallocate(&mut self, id: E) { self.dealloc_queue.push(id); }
+    fn queue_deallocate(&mut self, id: RawT) { self.dealloc_queue.push(id); }
 
     fn flush(&mut self) {
         self.flush_mark = false;
@@ -205,18 +205,18 @@ pub struct RecyclingShard<GaugeRef, RecyclerRef, ReuseQueueRef> {
     reuse_queue: ReuseQueueRef,
 }
 
-impl<E: Raw, GaugeRef, RecyclerRef, ReuseQueueRef> Shard
+impl<RawT: Raw, GaugeRef, RecyclerRef, ReuseQueueRef> Shard
     for RecyclingShard<GaugeRef, RecyclerRef, ReuseQueueRef>
 where
-    GaugeRef: ops::Deref<Target = E::Atomic> + Send + 'static,
+    GaugeRef: ops::Deref<Target = RawT::Atomic> + Send + 'static,
     RecyclerRef: ops::DerefMut + Send + 'static,
-    <RecyclerRef as ops::Deref>::Target: Recycler<E>,
-    ReuseQueueRef: ops::DerefMut<Target = Vec<E>> + Send + 'static,
+    <RecyclerRef as ops::Deref>::Target: Recycler<RawT>,
+    ReuseQueueRef: ops::DerefMut<Target = Vec<RawT>> + Send + 'static,
 {
-    type Raw = E;
-    type Hint = <RecyclerRef::Target as Recycler<E>>::Hint;
+    type Raw = RawT;
+    type Hint = <RecyclerRef::Target as Recycler<RawT>>::Hint;
 
-    fn allocate(&mut self, hint: Self::Hint) -> E {
+    fn allocate(&mut self, hint: Self::Hint) -> RawT {
         if let Some(id) = self.recycler.poll(hint) {
             id
         } else {
@@ -225,14 +225,14 @@ where
     }
 }
 
-impl<E: Raw, T: Recycler<E>, GaugeRef, RecyclerRef, ReuseQueueRef>
+impl<RawT: Raw, T: Recycler<RawT>, GaugeRef, RecyclerRef, ReuseQueueRef>
     RecyclingShard<GaugeRef, RecyclerRef, ReuseQueueRef>
 where
-    GaugeRef: ops::Deref<Target = E::Atomic>,
+    GaugeRef: ops::Deref<Target = RawT::Atomic>,
     RecyclerRef: ops::DerefMut<Target = T>,
-    ReuseQueueRef: ops::DerefMut<Target = Vec<E>>,
+    ReuseQueueRef: ops::DerefMut<Target = Vec<RawT>>,
 {
-    fn allocate(&mut self, hint: T::Hint) -> E {
+    fn allocate(&mut self, hint: T::Hint) -> RawT {
         if let Some(id) = self.recycler.poll(hint) {
             self.reuse_queue.push(id);
             id
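For readers skimming the rename, it may help to see the shape of the `Raw` bound this file leans on. The following is a hedged reconstruction inferred only from the call sites visible above (`RawT::new()` producing the atomic gauge stored in `Arc<RawT::Atomic>`, and a load reading it back); it is not dynec's actual trait definition, and the `AtomicU32`-based impl is purely illustrative:

    use std::sync::atomic::{AtomicU32, Ordering};

    // Hypothetical subset of the `Raw` trait, reconstructed from this diff alone.
    pub trait Raw: Copy + Ord {
        /// Shared counter type used as the allocation gauge.
        type Atomic: Send + Sync + 'static;
        /// Creates a fresh gauge; the diff shows `RawT::new()` feeding `Arc::new`.
        fn new() -> Self::Atomic;
        /// Reads the current gauge value, i.e. the next ID never yet allocated.
        fn load(gauge: &Self::Atomic) -> Self;
    }

    impl Raw for u32 {
        type Atomic = AtomicU32;
        fn new() -> AtomicU32 { AtomicU32::new(0) }
        fn load(gauge: &AtomicU32) -> u32 { gauge.load(Ordering::Relaxed) }
    }

Under this reading, `E` was always a plain integer-like ID type rather than an entity, which is exactly the confusion the commit title calls out.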
44 changes: 23 additions & 21 deletions src/storage/tree.rs
@@ -6,20 +6,20 @@ use super::{Access, ChunkMut, ChunkRef, Partition, Storage};
 use crate::entity;
 
 /// A storage based on [`BTreeMap`].
-pub struct Tree<E: entity::Raw, C> {
+pub struct Tree<RawT: entity::Raw, C> {
     // `SyncUnsafeCell<C>` here must be treated as a normal `C`
     // unless the whole storage is mutably locked,
     // which means the current function exclusively manages this map.
     // `&Tree` must not be used to access the cells mutably.
-    data: BTreeMap<E, SyncUnsafeCell<C>>,
+    data: BTreeMap<RawT, SyncUnsafeCell<C>>,
 }
 
-impl<E: entity::Raw, C> Default for Tree<E, C> {
+impl<RawT: entity::Raw, C> Default for Tree<RawT, C> {
     fn default() -> Self { Self { data: BTreeMap::new() } }
 }
 
-impl<E: entity::Raw, C: Send + Sync + 'static> Access for Tree<E, C> {
-    type RawEntity = E;
+impl<RawT: entity::Raw, C: Send + Sync + 'static> Access for Tree<RawT, C> {
+    type RawEntity = RawT;
     type Comp = C;
 
     fn get_mut(&mut self, id: Self::RawEntity) -> Option<&mut C> {
@@ -32,7 +32,7 @@ impl<E: entity::Raw, C: Send + Sync + 'static> Access for Tree<E, C> {
     }
 }
 
-impl<E: entity::Raw, C: Send + Sync + 'static> Storage for Tree<E, C> {
+impl<RawT: entity::Raw, C: Send + Sync + 'static> Storage for Tree<RawT, C> {
     fn get(&self, id: Self::RawEntity) -> Option<&C> {
         self.data.get(&id).map(|cell| unsafe {
             // Safety: `&self` implies that nobody else can mutate the values.
@@ -71,21 +71,21 @@ impl<E: entity::Raw, C: Send + Sync + 'static> Storage for Tree<E, C> {
             .map(|(entity, item)| ChunkMut { slice: slice::from_mut(item), start: entity })
     }
 
-    type Partition<'t> = StoragePartition<'t, E, C>;
+    type Partition<'t> = StoragePartition<'t, RawT, C>;
     fn as_partition(&mut self) -> Self::Partition<'_> {
         StoragePartition { data: &self.data, lower_bound: None, upper_bound: None }
     }
 }
 
 /// Return value of [`Tree::split_at`].
-pub struct StoragePartition<'t, E: entity::Raw, C> {
-    data: &'t BTreeMap<E, SyncUnsafeCell<C>>,
-    lower_bound: Option<E>,
-    upper_bound: Option<E>,
+pub struct StoragePartition<'t, RawT: entity::Raw, C> {
+    data: &'t BTreeMap<RawT, SyncUnsafeCell<C>>,
+    lower_bound: Option<RawT>,
+    upper_bound: Option<RawT>,
 }
 
-impl<'t, E: entity::Raw, C> StoragePartition<'t, E, C> {
-    fn assert_bounds(&self, entity: E) {
+impl<'t, RawT: entity::Raw, C> StoragePartition<'t, RawT, C> {
+    fn assert_bounds(&self, entity: RawT) {
         if let Some(bound) = self.lower_bound {
             assert!(entity >= bound, "Entity {entity:?} is not in the partition {bound:?}..");
         }
@@ -95,11 +95,11 @@ impl<'t, E: entity::Raw, C> StoragePartition<'t, E, C> {
     }
 }
 
-impl<'t, E: entity::Raw, C: Send + Sync + 'static> Access for StoragePartition<'t, E, C> {
-    type RawEntity = E;
+impl<'t, RawT: entity::Raw, C: Send + Sync + 'static> Access for StoragePartition<'t, RawT, C> {
+    type RawEntity = RawT;
     type Comp = C;
 
-    fn get_mut(&mut self, entity: E) -> Option<&mut C> {
+    fn get_mut(&mut self, entity: RawT) -> Option<&mut C> {
         self.assert_bounds(entity);
 
         let cell = self.data.get(&entity)?;
@@ -115,8 +115,10 @@ impl<'t, E: entity::Raw, C: Send + Sync + 'static> Access for StoragePartition<'t, E, C> {
     fn iter_mut(&mut self) -> Self::IterMut<'_> { self.by_ref().into_iter_mut() }
 }
 
-impl<'t, E: entity::Raw, C: Send + Sync + 'static> Partition<'t> for StoragePartition<'t, E, C> {
-    type ByRef<'u> = StoragePartition<'u, E, C> where Self: 'u;
+impl<'t, RawT: entity::Raw, C: Send + Sync + 'static> Partition<'t>
+    for StoragePartition<'t, RawT, C>
+{
+    type ByRef<'u> = StoragePartition<'u, RawT, C> where Self: 'u;
     fn by_ref(&mut self) -> Self::ByRef<'_> {
         StoragePartition {
             data: self.data,
@@ -125,11 +127,11 @@ impl<'t, E: entity::Raw, C: Send + Sync + 'static> Partition<'t> for StoragePartition<'t, E, C> {
         }
     }
 
-    type IntoIterMut = impl Iterator<Item = (E, &'t mut C)>;
+    type IntoIterMut = impl Iterator<Item = (RawT, &'t mut C)>;
     fn into_iter_mut(self) -> Self::IntoIterMut {
         let iter = match (self.lower_bound, self.upper_bound) {
             (Some(lower), Some(upper)) => Box::new(self.data.range(lower..upper))
-                as Box<dyn Iterator<Item = (&E, &SyncUnsafeCell<C>)>>,
+                as Box<dyn Iterator<Item = (&RawT, &SyncUnsafeCell<C>)>>,
             (Some(lower), None) => Box::new(self.data.range(lower..)),
             (None, Some(upper)) => Box::new(self.data.range(..upper)),
             (None, None) => Box::new(self.data.iter()),
@@ -156,7 +158,7 @@ impl<'t, E: entity::Raw, C: Send + Sync + 'static> Partition<'t> for StoragePartition<'t, E, C> {
         }
     }
 
-    fn split_out(&mut self, entity: E) -> Self {
+    fn split_out(&mut self, entity: RawT) -> Self {
         self.assert_bounds(entity);
 
         let right = Self {
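To make the partition signatures above concrete, here is a hedged usage sketch. It assumes `u32` implements `entity::Raw`, invents a `Health` component, and infers the split semantics of `split_out` (IDs at or above the pivot move to the returned partition) from the bound assertions in this diff rather than from anything the diff states outright:

    // Hypothetical driver over dynec's Tree storage; `Health` is made up.
    // Assumes the `Storage` and `Partition` traits are in scope.
    struct Health(i32);

    fn bump_upper_half(tree: &mut Tree<u32, Health>) {
        // Take an unbounded partition over the whole BTreeMap.
        let mut left = tree.as_partition();
        // Split: `left` keeps IDs below 100, `right` gets the rest
        // (assumed semantics; the diff only shows the bounds being asserted).
        let right = left.split_out(100);
        for (_id, hp) in right.into_iter_mut() {
            hp.0 += 1;
        }
    }

The two halves borrow disjoint key ranges of the same map, which is what lets systems iterate them independently without locking.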