use std::{ops::Deref, ptr};

use parking_lot::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard};

use super::NodeHeadInner;

#[repr(transparent)]
pub struct Node<'ll, T>(RwLock<NodeInner<'ll, T>>);

type NodeHead<'ll, T> = RwLock<NodeHeadInner<'ll, T>>;

pub enum NodeBackPtr<'ll, T> {
    Head(&'ll NodeHead<'ll, T>),
    Node(&'ll Node<'ll, T>),
}

// Yes, the whole purpose is docs; we might add Isolated Guards around nodes here.
pub mod topology_safety;

// TODO: RwLock the ptrs only instead of the whole node:
//   Box<(RwLock<(&prev, &next)>, T)>
// instead of
//   Box<RwLock<(&prev, &next, T)>>
// This lets the user opt out of the RwLock, allows changes to adjacent nodes while T is
// externally in use, and enables T: ?Sized.
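//
// A rough sketch of that alternative layout (hypothetical; `NodeLinks` is a placeholder name,
// nothing below is implemented yet):
//
//     pub struct NodeLinks<'ll, T: ?Sized> {
//         prev: NodeBackPtr<'ll, T>,
//         next: Option<&'ll Node<'ll, T>>,
//     }
//
//     pub struct NodeInner<'ll, T: ?Sized> {
//         links: RwLock<NodeLinks<'ll, T>>, // only the pointers sit behind the lock
//         pub data: T,                      // usable while neighbours are being relinked
//     }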
pub struct NodeInner<'ll, T> {
    pub(crate) prev: NodeBackPtr<'ll, T>,
    pub(crate) next: Option<&'ll Node<'ll, T>>,
    pub data: T,
}

impl<'ll, T> Deref for Node<'ll, T> {
    type Target = RwLock<NodeInner<'ll, T>>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> Deref for NodeInner<'_, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.data
    }
}

impl<T> Copy for NodeBackPtr<'_, T> {}
impl<T> Clone for NodeBackPtr<'_, T> {
    fn clone(&self) -> Self {
        *self
    }
}

type WriteAndBackDoublet<'ll, T> = (
    BackNodeWriteLock<'ll, T>,
    RwLockUpgradableReadGuard<'ll, NodeInner<'ll, T>>,
);
type WriteSurroundTriplet<'ll, T> = (
    BackNodeWriteLock<'ll, T>,
    RwLockWriteGuard<'ll, NodeInner<'ll, T>>,
    Option<RwLockWriteGuard<'ll, NodeInner<'ll, T>>>,
);
type WriteOnlyAroundTriplet<'ll, T> = (
    BackNodeWriteLock<'ll, T>,
    Option<RwLockWriteGuard<'ll, NodeInner<'ll, T>>>,
);

impl<'ll, T> Node<'ll, T> {
    // TODO: think about the isolated state of the following 3 fns

    /// Creates a new node on the heap. It will link to `prev` and `next`, but it starts out
    /// isolated: think of it as just its data plus the two pointers; being isolated does not
    /// guarantee any integration into the linked list.
    ///
    /// As long as this node exists and is not properly integrated into a linked list, the
    /// `prev` and `next` refs are considered to be held.
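    ///
    /// A minimal usage sketch (not compiled here; `head` is a hypothetical, already existing
    /// [`NodeHead`]):
    ///
    /// ```ignore
    /// let first = Node::new(42, NodeBackPtr::new_head(&head), None);
    /// let first: &Node<'_, i32> = first.boxed().leak();
    /// ```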
    #[must_use]
    pub fn new(data: T, prev: NodeBackPtr<'ll, T>, next: Option<&'ll Node<'ll, T>>) -> Self {
        Self(RwLock::new(NodeInner { prev, next, data }))
    }

    /// Boxes [`self`].
    #[must_use]
    pub fn boxed(self) -> Box<Self> {
        Box::new(self)
    }

    /// Leaks [`self`] out of its [`Box`].
    #[must_use]
    pub fn leak(self: Box<Self>) -> &'static mut Self {
        Box::leak(self)
    }

    /// Combines [`Self::new`], [`Self::boxed`] and [`Self::leak`].
    pub fn new_leaked(
        data: T,
        prev: NodeBackPtr<'ll, T>,
        next: Option<&'ll Node<'ll, T>>,
    ) -> &'ll mut Self {
        Box::leak(Box::new(Self::new(data, prev, next)))
    }

    /// # Safety
    ///
    /// The [`self`] pointer must come from a [`Box`] allocation, such as the ones made by
    /// [`Self::boxed`] or [`Self::leak`].
    pub unsafe fn free(self: *mut Self) {
        drop(unsafe { Box::from_raw(self) });
    }

    /// Frees the current node, but waits until the inner [`RwLock`] has no waiters.
    ///
    /// # Safety
    ///
    /// There must be no references left to [`self`] other than the one used for this call.
    pub unsafe fn wait_free(&self) {
        // Keep acquiring and releasing the write lock until nobody is waiting on it anymore.
        while self.is_locked() {
            drop(self.write());
        }

        let myself = ptr::from_ref(self).cast_mut();
        unsafe { myself.free() }
    }

    /// Grabs an upgradable read lock on [`self`] and a write lock on whatever `self.prev`
    /// currently points to, retrying if `prev` changes while we wait for it.
    pub fn lock_and_back(&'ll self) -> WriteAndBackDoublet<'ll, T> {
        let mut self_read = self.upgradable_read();
        // "soft" back lock
        match self_read.prev.try_write() {
            Some(prev_write) => (prev_write, self_read),
            None => {
                // `prev` is already locked. That is fine, but we have to release our own lock
                // before waiting on it: the task holding `prev` may need to modify us (its
                // possible way forward), and if it doesn't we still need to lock that same node
                // afterwards. Either way we wait until `prev` is accessible.
                loop {
                    let old_prev = self_read.prev;
                    let old_prev_write =
                        RwLockUpgradableReadGuard::unlocked_fair(&mut self_read, move || {
                            old_prev.write()
                        });
                    // We reacquire ourselves after `unlocked_fair`, so `self_read` could have
                    // changed.
                    if NodeBackPtr::ptr_eq(&self_read.prev, &old_prev) {
                        break (old_prev_write, self_read);
                    }
                }
            }
        }
    }

    /// Gets write locks on [`self`] and its surrounding nodes.
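    ///
    /// ```ignore
    /// // Sketch (assuming `node` is a `&Node` already integrated in a list):
    /// let (prev_write, self_write, next_write) = node.write_surround();
    /// // `prev` is write-locked, `node` itself is write-locked, and `next` (if any) is too.
    /// ```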
    pub fn write_surround(&'ll self) -> WriteSurroundTriplet<'ll, T> {
        // Locking backwards has to go through the try/retry logic in `lock_and_back`.
        let (prev_write, self_read) = self.lock_and_back();
        // Now `prev` is write-locked and we can safely block going forwards.
        let self_write = RwLockUpgradableReadGuard::upgrade(self_read);
        let next_write = self_write.next.map(|n| n.write());

        (prev_write, self_write, next_write)
    }

    /// # Safety
    ///
    /// The passed locks must be consecutive (the locks of this node's `prev` and `next`) for
    /// this to respect topology.
    ///
    /// This leaves the node isolated. See [`topology_safety`].
    pub unsafe fn isolate(self_read: &NodeInner<'ll, T>, locks: WriteOnlyAroundTriplet<'ll, T>) {
        let (mut back_write, next_write) = locks;

        back_write.set_next(self_read.next);
        if let Some(mut next_write) = next_write {
            next_write.prev = self_read.prev;
        }
    }

    /// # Safety
    ///
    /// The passed locks must belong to the nodes that will surround [`self`] for this to
    /// respect topology.
    ///
    /// The taken node ([`self`]) must be an isolated node. See [`topology_safety`].
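    ///
    /// ```ignore
    /// // Sketch with hypothetical names: splice a fresh node between the two nodes whose
    /// // write locks (`prev_write`, `next_write`) are already held, consecutively.
    /// let fresh = Node::new_leaked(data, prev_ptr, next_ref);
    /// unsafe { fresh.integrate((prev_write, Some(next_write))) };
    /// ```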
    pub unsafe fn integrate(&'ll self, locks: WriteOnlyAroundTriplet<'ll, T>) {
        let (mut back_write, next_write) = locks;

        back_write.set_next(Some(self));
        if let Some(mut next_write) = next_write {
            next_write.prev = NodeBackPtr::new_node(self);
        }
    }

    /// # Safety
    ///
    /// [`self`] must be integrated into the linked list. See [`topology_safety`].
    ///
    /// Assumes there are no other external references into this node when called, as it will be
    /// deallocated. This also waits for all waiters on the node's lock to finish before actually
    /// freeing it; that includes concurrent calls to this same function on this node.
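    ///
    /// ```ignore
    /// // Sketch: `node` is a hypothetical reference to a previously integrated node.
    /// unsafe { node.remove() }; // unlinks it, waits out lock waiters, then frees the allocation
    /// ```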
    pub unsafe fn remove(&'ll self) {
        let surround_locks = self.write_surround();
        let (prev, myself, next) = surround_locks;
        let around_locks = (prev, next);
        // SAFETY: `self` should be integrated, and the surrounding locks are consecutive and
        // have been held all along.
        unsafe { Self::isolate(&myself, around_locks) }

        // lazy-wait for no readers remaining
        drop(myself);
        // SAFETY: The node is isolated, so it is good to be freed.
        unsafe { self.wait_free() }
    }
}

/// Generic Write Lock of a [`NodeBackPtr`]
pub enum BackNodeWriteLock<'ll, T> {
    Head(RwLockWriteGuard<'ll, NodeHeadInner<'ll, T>>),
    Node(RwLockWriteGuard<'ll, NodeInner<'ll, T>>),
}

/// Generic Read Lock of a [`NodeBackPtr`]
pub enum BackNodeReadLock<'ll, T> {
    Head(RwLockReadGuard<'ll, NodeHeadInner<'ll, T>>),
    Node(RwLockReadGuard<'ll, NodeInner<'ll, T>>),
}

#[allow(clippy::enum_glob_use)]
impl<'ll, T> NodeBackPtr<'ll, T> {
    #[must_use]
    pub fn ptr_eq(&self, other: &Self) -> bool {
        use NodeBackPtr::*;

        // Compare the referenced head/node addresses, not the addresses of the enum payloads.
        match (self, other) {
            (Head(h1), Head(h2)) => ptr::eq(*h1, *h2),
            (Node(n1), Node(n2)) => ptr::eq(*n1, *n2),
            _ => false,
        }
    }

    #[must_use]
    pub fn new_node(node: &'ll Node<'ll, T>) -> Self {
        Self::Node(node)
    }

    #[must_use]
    pub fn new_head(head: &'ll NodeHead<'ll, T>) -> Self {
        Self::Head(head)
    }

    /// Analogous to [`RwLock::write`]
    #[must_use]
    pub fn write(&self) -> BackNodeWriteLock<'ll, T> {
        use BackNodeWriteLock as WL;
        use NodeBackPtr::*;

        match self {
            Head(h) => WL::Head(h.write()),
            Node(n) => WL::Node(n.write()),
        }
    }

    /// Analogous to [`RwLock::read`]
    #[must_use]
    pub fn read(&self) -> BackNodeReadLock<'ll, T> {
        use BackNodeReadLock as RL;
        use NodeBackPtr::*;

        match self {
            Head(h) => RL::Head(h.read()),
            Node(n) => RL::Node(n.read()),
        }
    }

    /// Analogous to [`RwLock::try_write`]
    #[must_use]
    pub fn try_write(&self) -> Option<BackNodeWriteLock<'ll, T>> {
        use BackNodeWriteLock as WL;
        use NodeBackPtr::*;

        Some(match self {
            Head(h) => WL::Head(h.try_write()?),
            Node(n) => WL::Node(n.try_write()?),
        })
    }

    /// Analogous to [`RwLock::try_read`]
    #[must_use]
    pub fn try_read(&self) -> Option<BackNodeReadLock<'ll, T>> {
        use BackNodeReadLock as RL;
        use NodeBackPtr::*;

        Some(match self {
            Head(h) => RL::Head(h.try_read()?),
            Node(n) => RL::Node(n.try_read()?),
        })
    }
}

impl<'ll, T> BackNodeWriteLock<'ll, T> {
    /// Sets the forward pointer behind this lock: the head's `start` or the node's `next`.
    #[allow(clippy::enum_glob_use)]
    fn set_next(&mut self, next: Option<&'ll Node<'ll, T>>) {
        use BackNodeWriteLock::*;

        match self {
            Head(h) => h.start = next,
            Node(n) => n.next = next,
        }
    }
}

impl<T> NodeInner<'_, T> {}