clean docs and way chiller lifetimes and api
This commit is contained in:
112
src/double/mod.rs
Normal file
112
src/double/mod.rs
Normal file
@@ -0,0 +1,112 @@
|
||||
//! Doubly linked list that does not rely on `Arc`.
//!
//! "Doubly" because each node points to both the next and the previous node.
|
||||
|
||||
use std::ops::Deref;
|
||||
|
||||
use parking_lot::RwLock;
|
||||
|
||||
use crate::double::node::{BackNodeWriteLock, Node, NodeBackPtr};
|
||||
|
||||
pub mod node;
|
||||
|
||||
// # Rules to prevent deadlocks
//
// Leftwards locking must use the `try_` variants, and if it fails at any point, the way
// rightwards must be cleared in case the task holding the left lock is moving rightwards.
// Rightwards locking may be blocking.
|
||||
|
||||
/// Interior state of a [`LinkedList`]: the (optional) reference to the first node.
///
/// Kept as a separate type so it can sit behind the list's [`RwLock`].
pub struct NodeHeadInner<'ll, T> {
    // First node of the list, or `None` when the list is empty.
    start: Option<&'ll node::Node<'ll, T>>,
}
|
||||
|
||||
impl<T> Default for NodeHeadInner<'_, T> {
|
||||
fn default() -> Self {
|
||||
Self { start: None }
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: implement `Drop` so remaining nodes are freed when the list itself goes away:
// impl<'ll, T> Drop for LinkedList<'ll, T> {
//     fn drop(&mut self) {
//         // SAFETY: this is the very last ref of &self, so we can assume there are no
//         // external refs into the inner data, as they would be invalid to outlive this
//         while unsafe { self.pop().is_some() } {}
//     }
// }
|
||||
|
||||
/// A doubly linked list: a newtype over the head state guarded by a [`RwLock`].
pub struct LinkedList<'ll, T>(RwLock<NodeHeadInner<'ll, T>>);
|
||||
|
||||
impl<T> Default for LinkedList<'_, T> {
|
||||
#[must_use]
|
||||
fn default() -> Self {
|
||||
Self(RwLock::new(NodeHeadInner::default()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ll, T> Deref for LinkedList<'ll, T> {
|
||||
type Target = RwLock<NodeHeadInner<'ll, T>>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ll, T> LinkedList<'ll, T> {
    /// Creates an empty list; equivalent to [`Default::default`].
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Inserts `data` at the front of the list.
    ///
    /// Locks the head first, then (blocking) the current first node — the
    /// "rightwards" locking direction allowed by the module's deadlock rules —
    /// before splicing the newly leaked node in between them.
    pub fn prepend(&'ll self, data: T) {
        let self_lock = self.write();
        let next = self_lock.start;
        // Lock the current first node (if any) before integrating the new one.
        let next_lock = next.map(|n| n.write());
        let new_node = Node::new_leaked(data, NodeBackPtr::new_head(self), next);
        // SAFETY: ptrs are surrounding and they've been locked all along
        unsafe { new_node.integrate((BackNodeWriteLock::Head(self_lock), next_lock)) };
    }

    /// Returns [`None`] if there's no next node.
    ///
    /// # Safety
    ///
    /// There must be no outer references to the first node.
    pub unsafe fn pop(&'ll self) -> Option<()> {
        // Head -> first node -> second node: strictly rightwards locking,
        // which the module rules allow to be blocking.
        let self_lock = self.write();
        let pop_node = self_lock.start?;
        let pop_node_lock = pop_node.write();
        let next_node_lock = pop_node_lock.next.map(|n| n.write());

        // SAFETY: locked all along and consecutive nodes
        unsafe {
            Node::isolate(
                &pop_node_lock,
                (BackNodeWriteLock::Head(self_lock), next_node_lock),
            );
        }

        // Release the popped node's own lock before freeing it.
        drop(pop_node_lock);
        // SAFETY: node has been isolated so no references out
        unsafe { pop_node.wait_free() }

        Some(())
    }

    /// Clones every element into a `Vec`, front to back.
    ///
    /// NOTE(review): the head read-guard from `self.read()` is a temporary
    /// dropped at the end of its statement, so traversal then holds only one
    /// per-node read lock at a time — confirm nodes cannot be freed
    /// concurrently while the head is unlocked.
    pub fn clone_into_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        let mut total = Vec::new();
        let mut next_node = self.read().start;
        while let Some(node) = next_node {
            let read = node.read();
            total.push(read.data.clone());
            next_node = read.next;
        }
        total
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
Reference in New Issue
Block a user