Compare commits

...

7 Commits

Author SHA1 Message Date
64f503633d feat: unsafely support ?Sized 2025-07-21 20:34:09 +02:00
1b2c8c01a0 chore: canonical copy impl works 2025-07-20 01:38:44 +02:00
af5e5ff19e safety: what I think is a safe macro for linked list creation
+ some docs changes and cleanup
2025-07-20 01:36:48 +02:00
0d8780017b add drop impl 2025-07-19 23:26:22 +02:00
50b35de725 clean docs and way chiller lifetimes and api 2025-07-19 19:44:12 +02:00
4af59d5ae0 fixes: (read comment)
- `remove` lock better synchronization
- `remove` better drop impl
- `remove` fixed potential race condition / deadlock
- `append` forgor to modify first node's `prev`
- feat: add `pop` fn

note: for now test with `cargo test -- --nocapture`
2025-07-18 02:26:16 +02:00
326ad9822d rm: udeps 2025-07-18 01:00:15 +02:00
9 changed files with 801 additions and 366 deletions

57
Cargo.lock generated
View File

@@ -2,17 +2,6 @@
# It is not intended for manual editing.
version = 4
[[package]]
name = "atomic_enum"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99e1aca718ea7b89985790c94aad72d77533063fe00bc497bb79a7c2dae6a661"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "autocfg"
version = "1.5.0"
@@ -35,10 +24,15 @@ checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
name = "concurrent-linked-list"
version = "0.1.0"
dependencies = [
"atomic_enum",
"dst-clone",
"parking_lot",
]
[[package]]
name = "dst-clone"
version = "0.1.0"
source = "git+https://git.javalsai.tuxcord.net/tuxcord/dst-clone#6a32bfa59455adafde07bafd3bc9f2182cd5127b"
[[package]]
name = "libc"
version = "0.2.174"
@@ -78,29 +72,11 @@ dependencies = [
"windows-targets",
]
[[package]]
name = "proc-macro2"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.5.13"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6"
checksum = "7e8af0dde094006011e6a740d4879319439489813bd0bcdc7d821beaeeff48ec"
dependencies = [
"bitflags",
]
@@ -117,23 +93,6 @@ version = "1.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
[[package]]
name = "syn"
version = "2.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "windows-targets"
version = "0.52.6"

View File

@@ -4,5 +4,5 @@ version = "0.1.0"
edition = "2024"
[dependencies]
atomic_enum = "0.3.0"
dst-clone = { git = "https://git.javalsai.tuxcord.net/tuxcord/dst-clone", version = "0" }
parking_lot = { version = "0", default-features = false }

14
README.md Normal file
View File

@@ -0,0 +1,14 @@
A **linked list** implementation avoiding the use of [`Arc`]s in favor of unsafe manual removal of nodes when the caller knows all possible references are left unused.
The point of this crate is to offer [`Pin`] guarantees on the references into the list while allowing it to be modified. The implementation of all this doesn't require mutable access to the linked list itself so as a side effect it's possible to use the list in concurrent manners.
This means that it will try as smartly as possible to allow concurrent modifications to it as long as the nodes affected are unrelated.
# Types
There could be different types of linked list implementations in the future, like safer ones with [`Arc`], single-threaded ones, etc. But right now there's only:
* [`DoublyLinkedList`]: [`crate::double`] doubly linked list only in the heap with manual unsafe removal of items in it.
---
`cargo doc` is supported and is the main documentation of the library, but there is no official hosting of the generated documentation files.

263
src/double/mod.rs Normal file
View File

@@ -0,0 +1,263 @@
//! Doubly non-Arc linked list.
//!
//! Doubly as each node points to the next and previous node.
use std::{marker::PhantomPinned, mem::transmute, ops::Deref, pin::Pin};
use parking_lot::RwLock;
use crate::double::node::{BackNodeWriteLock, Node};
pub mod node;
// # Rules to prevent deadlocks
//
// Left locking must be `try_` and if it fails at any point, the way rightwards must be cleared in
// case the task holding the left lock is moving rightwards.
// Rightwards locking can be blocking.
/// Interior state of the list head: an optional reference to the first node.
pub struct NodeHeadInner<'ll, T: ?Sized> {
    start: Option<&'ll node::Node<'ll, T>>,
}
impl<T: ?Sized> Default for NodeHeadInner<'_, T> {
    fn default() -> Self {
        Self { start: None }
    }
}
/// Head of the list: [`NodeHeadInner`] behind an [`RwLock`], dereferencing to the lock.
pub struct NodeHead<'ll, T: ?Sized>(RwLock<NodeHeadInner<'ll, T>>);
impl<T: ?Sized> Default for NodeHead<'_, T> {
    #[must_use]
    fn default() -> Self {
        Self(RwLock::new(NodeHeadInner::default()))
    }
}
impl<'ll, T: ?Sized> Deref for NodeHead<'ll, T> {
    type Target = RwLock<NodeHeadInner<'ll, T>>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl<'ll, T: ?Sized> NodeHead<'ll, T> {
    /// Creates an empty head (no first node).
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }
    /// Extends a pinned borrow of [`self`] to the full `'ll` lifetime.
    ///
    /// # Safety
    ///
    /// The only context in which it's safe to call this is when [`self`] was pinned immediately
    /// after its creation so that it can be guaranteed that the returned reference is valid for
    /// all the [`LinkedList`]'s lifetime.
    ///
    /// The issue arises from the [`Drop`] implementation. It takes a `&mut` reference, that means
    /// that all previous immutable references are dropped. But most methods of the linked list
    /// require you to promise the borrow of [`self`] is valid for all `'ll` and that's only true
    /// if no destructor runs. This makes [`Drop`] incompatible with the use of methods of the
    /// form `fn(&'myself self)`.
    ///
    /// In turn to this, the [`Drop`] implementation must assume it's not the only reference even
    /// though it's `&mut`. Anyhow it should only be called when the scope of [`self`] is about to
    /// end and the other references would be invalidated after the call to [`Drop`], though in
    /// reality they really have no use before that call but to guarantee the "internal" self
    /// references in the linked list remain valid through the destructor. It's kind of an edge
    /// case in the language with shared structures that require references with lifetimes as long
    /// as self to guarantee their validity but still have [`Drop`] implementations.
    #[must_use]
    pub unsafe fn get_self_ref(myself: Pin<&Self>) -> &'ll Self {
        // SAFETY (caller): `myself` stays pinned and alive for the remainder of `'ll`.
        unsafe { transmute::<&Self, &'ll Self>(myself.get_ref()) }
    }
    /// Pops (and frees) every remaining node.
    ///
    /// # Safety
    ///
    /// Must be at the end of its scope, when there are no references into it left.
    pub unsafe fn manual_drop(&'ll self) {
        // SAFETY: this is the drop impl so we can guarantee the reference is valid for the
        // lifetime of the struct itself and external references would be invalidated right after
        // the [`Drop`] of it (this fn). I don't think there's a way to differentiate the
        // lifetimes by a drop implementation so this would be safe as no external references
        // lifetimes would be valid after drop finishes
        while unsafe { self.pop().is_some() } {}
    }
    /// Inserts `data` as the new first node and returns a reference to it.
    pub fn prepend(&'ll self, data: T) -> &'ll Node<'ll, T>
    where
        T: Sized,
    {
        // Head first, then the old first node: rightwards locking per the module-top rules.
        let self_lock = self.write();
        let next = self_lock.start;
        let next_lock = next.map(|n| n.ptrs.write());
        let new_node = Node::new_leaked(data, node::NodeBackPtr::new_head(self), next);
        // SAFETY: ptrs are surrounding and they've been locked all along
        unsafe { new_node.integrate((BackNodeWriteLock::Head(self_lock), next_lock)) };
        new_node
    }
    /// Like [`Self::prepend`] but copies an unsized `data` payload into the new node.
    pub fn prepend_dst(&'ll self, data: &T) -> &'ll Node<'ll, T>
    where
        T: dst_clone::DstClone,
    {
        let self_lock = self.write();
        let next = self_lock.start;
        let next_lock = next.map(|n| n.ptrs.write());
        let new_node = Node::leaked_from_dst(data, node::NodeBackPtr::new_head(self), next);
        // SAFETY: ptrs are surrounding and they've been locked all along
        unsafe { new_node.integrate((BackNodeWriteLock::Head(self_lock), next_lock)) };
        new_node
    }
    /// Removes and frees the first node. Returns [`None`] if the list is empty.
    ///
    /// # Safety
    ///
    /// There must be no outer references to the first node.
    pub unsafe fn pop(&'ll self) -> Option<()> {
        let self_lock = self.write();
        let pop_node = self_lock.start?;
        let pop_node_lock = pop_node.ptrs.write();
        let next_node_lock = pop_node_lock.next.map(|n| n.ptrs.write());
        // SAFETY: locked all along and consecutive nodes
        unsafe {
            Node::isolate(
                &pop_node_lock,
                (BackNodeWriteLock::Head(self_lock), next_node_lock),
            );
        }
        drop(pop_node_lock);
        // SAFETY: node has been isolated so no references out
        // TODO: return a droppable guard with a ptr to the node instead
        unsafe { pop_node.wait_free() }
        Some(())
    }
    /// Clones every element into a [`Vec`], walking front to back.
    pub fn clone_into_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        let mut total = Vec::new();
        let mut next_node = self.read().start;
        while let Some(node) = next_node {
            let read = node.ptrs.read();
            total.push(node.data.clone());
            next_node = read.next;
        }
        total
    }
}
// Can't quite make this work how I want 😭
/// Attempt to safe wrap around a [`NodeHead`] to allow a sound API with [`Drop`] implementation.
///
/// Please see [`create_ll`] and [`del_ll`]
pub struct LinkedList<'ll, T: ?Sized> {
    head: NodeHead<'ll, T>,
    // Nodes hold `'ll` references back to this head, so the list must never move.
    _pinned: PhantomPinned,
}
impl<'ll, T: ?Sized> LinkedList<'ll, T> {
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }
    /// # Safety
    ///
    /// NEVER EVER EVER extend the lifetime of this beyond the end of scope of the linked list
    /// itself, it's all good and safe UNTIL it's dropped.
    ///
    /// Allows you to associate the lifetime of this to something else to prevent accidental
    /// over-extending of the lifetime.
    pub unsafe fn extend_for<'scope>(&self, _: &'scope impl std::any::Any) -> &'ll NodeHead<'ll, T>
    where
        'scope: 'll,
    {
        unsafe { transmute(&self.head) }
    }
}
impl<T: ?Sized> Default for LinkedList<'_, T> {
    #[must_use]
    fn default() -> Self {
        Self {
            head: NodeHead::new(),
            _pinned: PhantomPinned,
        }
    }
}
impl<T: ?Sized> Drop for LinkedList<'_, T> {
    fn drop(&mut self) {
        // Extend to 'static so the compiler doesn't cry, we know this covers 'll
        let myself = unsafe { self.extend_for(&()) };
        // And this is `Drop` so there shouldn't be any refs, as the end of this function would
        // be where their lifetime ('ll) ends
        unsafe { myself.manual_drop() }
    }
}
// I'm not so sure this is that much bulletproof but behaves solidly enough
/// Unsafe macro that automatically creates a linked list and an extended reference.
///
/// Also creates a `scope = ()` variable at the same level as `$val` to mimic its scope and
/// prevent accidental expansion of the unsafe lifetime beyond the current scope.
///
/// For example:
///
/// ```
/// use concurrent_linked_list::double::{create_ll, del_ll, LinkedList};
///
/// create_ll!(LinkedList::<String>::new(), ll_val, ll);
/// ll.prepend("test".to_string());
/// del_ll!(ll_val, ll);
/// ```
///
/// But trying to use `ll` after the deletion should fail:
///
/// ```compile_fail
/// use concurrent_linked_list::double::{create_ll, del_ll, LinkedList};
///
/// create_ll!(LinkedList::<String>::new(), ll_val, ll);
/// ll.prepend("test".to_string());
/// del_ll!(ll_val, ll);
/// ll.prepend("test2".to_string());
/// ```
///
/// Or trying to expand `ll` beyond the scope it was defined in, even if not deleted:
///
/// ```compile_fail
/// use concurrent_linked_list::double::{create_ll, del_ll, LinkedList};
///
/// let ll = {
///     create_ll!(LinkedList::<String>::new(), ll_val, ll);
///     ll.prepend("test".to_string());
///     ll
/// }
/// ll.prepend("test2".to_string());
/// ```
pub macro create_ll($rhs:expr, $val:ident, $ref:ident) {
    let scope = ();
    let $val = $rhs;
    let $ref = unsafe { $val.extend_for(&scope) };
}
/// Macro that attempts to run some hygiene cleanup on [`create_ll`] to avoid accidental use of
/// the reference afterwards too. Other functions could still extend the lifetime beyond
/// acceptable though.
pub macro del_ll($val:ident, $ref:ident) {
    // Shadow the extended reference so later uses fail to compile, then drop the list itself.
    #[allow(unused_variables)]
    let $ref = ();
    drop($val);
}
#[cfg(test)]
mod tests;

335
src/double/node.rs Normal file
View File

@@ -0,0 +1,335 @@
use std::{
alloc::{Layout, alloc, handle_alloc_error},
ops::Deref,
ptr::{self, metadata},
};
use parking_lot::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard};
use super::NodeHeadInner;
type NodeHead<'ll, T> = RwLock<NodeHeadInner<'ll, T>>;
/// Backwards link of a node: the element before it is either the list head or another node.
pub enum NodeBackPtr<'ll, T: ?Sized> {
    Head(&'ll NodeHead<'ll, T>),
    Node(&'ll Node<'ll, T>),
}
// yes the whole purpose is docs, might add Isolated Guards around nodes here
pub mod topology_safety;
/// The links out of a node, guarded by that node's [`RwLock`].
pub struct NodePtrPair<'ll, T: ?Sized> {
    pub prev: NodeBackPtr<'ll, T>,
    pub next: Option<&'ll Node<'ll, T>>,
}
/// A list node: the lock-guarded pointer pair followed by the payload.
///
/// `#[repr(C)]` fixes the field order, so the offset of `data` matches the offset computed via
/// `Layout::extend` when unsized nodes are allocated manually (see `layout_for`).
#[repr(C)]
pub struct Node<'ll, T: ?Sized> {
    pub ptrs: RwLock<NodePtrPair<'ll, T>>,
    pub data: T,
}
impl<T: ?Sized> Deref for Node<'_, T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.data
    }
}
// Manual impls: deriving would add `T: Copy` / `T: Clone` bounds, but the enum only holds
// references, which are copyable regardless of `T`.
impl<T: ?Sized> Copy for NodeBackPtr<'_, T> {}
impl<T: ?Sized> Clone for NodeBackPtr<'_, T> {
    fn clone(&self) -> Self {
        *self
    }
}
/// Write lock on the previous link plus an upgradable read lock on the node itself.
type WriteAndBackDoublet<'ll, T> = (
    BackNodeWriteLock<'ll, T>,
    RwLockUpgradableReadGuard<'ll, NodePtrPair<'ll, T>>,
);
/// Write locks on (prev, self, next); `next` is [`None`] at the tail.
type WriteSurroundTriplet<'ll, T> = (
    BackNodeWriteLock<'ll, T>,
    RwLockWriteGuard<'ll, NodePtrPair<'ll, T>>,
    Option<RwLockWriteGuard<'ll, NodePtrPair<'ll, T>>>,
);
/// Write locks on just the surrounding (prev, next) pair.
// NOTE(review): despite the name this is a 2-tuple, not a triplet — consider renaming.
type WriteOnlyAroundTriplet<'ll, T> = (
    BackNodeWriteLock<'ll, T>,
    Option<RwLockWriteGuard<'ll, NodePtrPair<'ll, T>>>,
);
impl<'ll, T> Node<'ll, T> {
    /// Creates a new node value that links to `prev` and `next` but is still isolated: it can be
    /// thought of as just its data and the two pointers; having it isolated doesn't guarantee
    /// any integration into the linked list.
    ///
    /// As long as this node exists and is not properly integrated into a linked list, it's
    /// considered that the `prev` and `next` refs are being held.
    #[must_use]
    pub fn new(data: T, prev: NodeBackPtr<'ll, T>, next: Option<&'ll Node<'ll, T>>) -> Self {
        Self {
            ptrs: RwLock::new(NodePtrPair { prev, next }),
            data,
        }
    }
    /// [`Self::new`], but heap-allocated and leaked; the caller must eventually free it.
    pub fn new_leaked(
        data: T,
        prev: NodeBackPtr<'ll, T>,
        next: Option<&'ll Node<'ll, T>>,
    ) -> &'ll mut Self {
        Box::leak(Box::new(Self::new(data, prev, next)))
    }
    /// Boxes [`self`]
    #[must_use]
    pub fn boxed(self) -> Box<Self> {
        Box::new(self)
    }
}
impl<'ll, T: ?Sized> Node<'ll, T> {
    /// Computes the allocation layout for a node whose payload is a copy of `data`, returning
    /// the layout and the byte offset of the `data` field.
    ///
    /// # Panics
    ///
    /// On arithmetic overflow
    fn layout_for(data: &T) -> (Layout, usize) {
        let (layout, offset) = Layout::new::<RwLock<NodePtrPair<'ll, T>>>()
            .extend(Layout::for_value(data))
            .unwrap();
        // `extend` leaves off the trailing padding; `pad_to_align` is required to reproduce the
        // `#[repr(C)]` layout of `Node` — and to match what `Layout::for_value` reports when the
        // allocation is later freed through `Box` (a size mismatch there would be UB).
        (layout.pad_to_align(), offset)
    }
    /// [`Self::new_leaked`] from a DST type
    pub fn leaked_from_dst(
        data: &T,
        prev: NodeBackPtr<'ll, T>,
        next: Option<&'ll Node<'ll, T>>,
    ) -> &'ll mut Self
    where
        T: dst_clone::DstClone,
    {
        let (layout, data_offset) = Self::layout_for(data);
        // SAFETY: the layout is non-zero sized (it contains at least the lock).
        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            handle_alloc_error(layout)
        }
        unsafe {
            // `write` instead of `*ptr = …`: plain assignment through the pointer would treat
            // the uninitialized destination as a live value (dropping it), which is UB.
            ptr.cast::<RwLock<NodePtrPair<'ll, T>>>()
                .write(RwLock::new(NodePtrPair { prev, next }));
            // Copy the unsized payload in place — assumes `DstClone::copy` writes a valid `T`
            // at the destination; TODO confirm against `dst_clone`'s contract.
            data.copy(ptr.add(data_offset));
        }
        Box::leak(unsafe { Box::from_raw(ptr::from_raw_parts_mut(ptr, metadata(data))) })
    }
    /// Leaks [`self`] as a [`Box<Self>`]
    #[must_use]
    pub fn leak(self: Box<Self>) -> &'static mut Self {
        Box::leak(self)
    }
    /// # Safety
    ///
    /// The [`self`] pointer must come from a [`Box`] allocation like [`Self::boxed`] and
    /// [`Self::leak`].
    pub unsafe fn free(self: *mut Self) {
        drop(unsafe { Box::from_raw(self) });
    }
    /// Frees the current node but waits until the inner [`RwLock`] has no waiters.
    ///
    /// # Safety
    ///
    /// There must be no references left to [`self`]
    pub unsafe fn wait_free(&self) {
        // Keep write-locking until the lock is observed free so every queued waiter gets in and
        // back out before the memory disappears.
        loop {
            if self.ptrs.is_locked() {
                drop(self.ptrs.write());
            } else {
                break;
            }
        }
        let myself = ptr::from_ref(self).cast_mut();
        // SAFETY: nodes are boxed allocations and the caller guarantees no references remain.
        unsafe { myself.free() }
    }
    /// Write-locks the `prev` link while holding an upgradable read lock on [`self`], retrying
    /// until both agree on which node is actually behind us.
    pub fn lock_and_back(&'ll self) -> WriteAndBackDoublet<'ll, T> {
        let mut self_read = self.ptrs.upgradable_read();
        // "soft" back lock
        match self_read.prev.try_write() {
            Some(prev_write) => (prev_write, self_read),
            None => {
                // already locked, no worries but we have to clear for the lock before use its
                // possible way forward, we can also wait until `prev` is accessible either case
                // (the task holding it could modify us or if it doesn't we need to lock that
                // same node)
                loop {
                    let old_prev = self_read.prev;
                    let old_prev_write =
                        RwLockUpgradableReadGuard::unlocked_fair(&mut self_read, move || {
                            old_prev.write()
                        });
                    // we reacquire ourselves after `unlocked_fair` so `self_read` could have
                    // changed
                    if NodeBackPtr::ptr_eq(&self_read.prev, &old_prev) {
                        break (old_prev_write, self_read);
                    }
                }
            }
        }
    }
    /// Acquires write locks on the surrounding nodes (and on [`self`]).
    pub fn write_surround(&'ll self) -> WriteSurroundTriplet<'ll, T> {
        // backward blocking must be try
        let (prev_write, self_read) = self.lock_and_back();
        // Now `prev` is write locked and we can block forwards
        let self_write = RwLockUpgradableReadGuard::upgrade(self_read);
        let next_write = self_write.next.map(|n| n.ptrs.write());
        (prev_write, self_write, next_write)
    }
    /// # Safety
    ///
    /// The passed locks must also be consecutive for this to respect topology.
    ///
    /// This node will remain isolated. See [`topology_safety`].
    pub unsafe fn isolate(self_read: &NodePtrPair<'ll, T>, locks: WriteOnlyAroundTriplet<'ll, T>) {
        let (mut back_write, next_write) = locks;
        back_write.set_next(self_read.next);
        if let Some(mut next_write) = next_write {
            next_write.prev = self_read.prev;
        }
    }
    /// # Safety
    ///
    /// The passed locks must be surrounding for this to respect topology.
    ///
    /// This taken node ([`self`]) must be an isolated node. See [`topology_safety`].
    pub unsafe fn integrate(&'ll self, locks: WriteOnlyAroundTriplet<'ll, T>) {
        let (mut back_write, next_write) = locks;
        back_write.set_next(Some(self));
        if let Some(mut next_write) = next_write {
            next_write.prev = NodeBackPtr::new_node(self);
        }
    }
    /// # Safety
    ///
    /// [`self`] must be integrated into the linked list. See [`topology_safety`].
    ///
    /// Assumes there's no other external references into this node when called as it will be
    /// deallocated. This will also wait for all waiters into the node lock to finish before
    /// really freeing it, this includes concurrent calls to this same node.
    pub unsafe fn remove(&'ll self) {
        let surround_locks = self.write_surround();
        let (prev, myself, next) = surround_locks;
        let around_locks = (prev, next);
        // Should be integrated and the surrounding locks are consecutive and locked all along
        unsafe { Self::isolate(&myself, around_locks) }
        // lazy-wait for no readers remaining
        drop(myself);
        // SAFETY: The node is isolated so good to be freed.
        unsafe { self.wait_free() }
    }
}
/// Generic Write Lock of a [`NodeBackPtr`]
pub enum BackNodeWriteLock<'ll, T: ?Sized> {
    /// Write guard over the list head's state.
    Head(RwLockWriteGuard<'ll, NodeHeadInner<'ll, T>>),
    /// Write guard over the previous node's pointer pair.
    Node(RwLockWriteGuard<'ll, NodePtrPair<'ll, T>>),
}
/// Generic Read Lock of a [`NodeBackPtr`]
pub enum BackNodeReadLock<'ll, T: ?Sized> {
    /// Read guard over the list head's state.
    Head(RwLockReadGuard<'ll, NodeHeadInner<'ll, T>>),
    /// Read guard over the previous node's pointer pair.
    Node(RwLockReadGuard<'ll, NodePtrPair<'ll, T>>),
}
#[allow(clippy::enum_glob_use)]
impl<'ll, T: ?Sized> NodeBackPtr<'ll, T> {
#[must_use]
pub fn ptr_eq(&self, other: &Self) -> bool {
use NodeBackPtr::*;
match (self, other) {
(Head(h1), Head(h2)) => ptr::eq(h1, h2),
(Node(n1), Node(n2)) => ptr::eq(n1, n2),
_ => false,
}
}
#[must_use]
pub fn new_node(node: &'ll Node<'ll, T>) -> Self {
Self::Node(node)
}
#[must_use]
pub fn new_head(head: &'ll NodeHead<'ll, T>) -> Self {
Self::Head(head)
}
/// Analogous to [`RwLock::write`]
#[must_use]
pub fn write(&self) -> BackNodeWriteLock<'ll, T> {
use BackNodeWriteLock as WL;
use NodeBackPtr::*;
match self {
Head(h) => WL::Head(h.write()),
Node(n) => WL::Node(n.ptrs.write()),
}
}
/// Analogous to [`RwLock::read`]
#[must_use]
pub fn read(&self) -> BackNodeReadLock<'ll, T> {
use BackNodeReadLock as RL;
use NodeBackPtr::*;
match self {
Head(h) => RL::Head(h.read()),
Node(n) => RL::Node(n.ptrs.read()),
}
}
/// Analogous to [`RwLock::try_write`]
#[must_use]
pub fn try_write(&self) -> Option<BackNodeWriteLock<'ll, T>> {
use BackNodeWriteLock as WL;
use NodeBackPtr::*;
Some(match self {
Head(h) => WL::Head(h.try_write()?),
Node(n) => WL::Node(n.ptrs.try_write()?),
})
}
/// Analogous to [`RwLock::try_read`]
#[must_use]
pub fn try_read(&self) -> Option<BackNodeReadLock<'ll, T>> {
use BackNodeReadLock as RL;
use NodeBackPtr::*;
Some(match self {
Head(h) => RL::Head(h.try_read()?),
Node(n) => RL::Node(n.ptrs.try_read()?),
})
}
}
impl<'ll, T: ?Sized> BackNodeWriteLock<'ll, T> {
#[allow(clippy::enum_glob_use)]
fn set_next(&mut self, next: Option<&'ll Node<'ll, T>>) {
use BackNodeWriteLock::*;
match self {
Head(h) => h.start = next,
Node(n) => n.next = next,
}
}
}
// NOTE(review): empty impl block, apparently left over from a refactor — consider removing.
impl<T> Node<'_, T> {}

View File

@@ -0,0 +1,74 @@
//! The linked list is supposed to have bilinear continuity (can be iterated forwards and
//! backwards for each node and it will be linear).
//!
//! "Topology Safety" is the term I made for functions that if used incorrectly can break this
//! topology.
//!
//! # Side Effects
//!
//! Side effects to breaking the topology could be:
//! * Leaving unrelated nodes out of the linearity of the node.
//! * Creating loops in the linked list.
//!
//! I'm not so sure of these two but it depends on how hard you break the topology.
//!
//! It's completely fine although not recommended to unsafely manipulate the topology of the
//! linked list if you know what you are doing.
//!
//! However, [`LinkedList`]'s [`Drop`] implementation assumes the list can be iterated forward
//! and will attempt to drop each element it finds. If you broke the topology where this is
//! undoable you might want to use [`ManuallyDrop`] on it.
//!
//! # Safety
//!
//! For these functions to be topology safe, if they take locks to nodes, you must make sure
//! any related nodes and adjacent ones (as they hold pointers into that section) are locked
//! for all the operation's duration.
//!
//! If any node to be integrated had the adjacent nodes locked since its creation, it would be
//! safe to integrate in.
//!
//! # Examples
//!
//! Assume the following [`LinkedList`]:
//! ```txt
//! A -> B
//! ```
//!
//! If you then create `C` and `D` isolated after `A`. The [`LinkedList`] would remain the same
//! (as they are isolated) but `C` and `D` would have broken topology views of the list.
//!
//! If you then were to integrate `C` and create another `E` isolated after the newly
//! integrated `C`.
//! ```txt
//! A -> C -> B
//! // But D and E would think
//! D: A -> D -> B
//! E: A -> C -> E -> B
//! ```
//!
//! If you now integrate `D`, `C` would be isolated indirectly without it knowing. But `E` is
//! also unaware of this and thinks `C` is integrated. So finally integrating `E` would lead
//! to:
//! ```txt
//! ╭─> A -> D -> B <╮
//! │   C <- E <──╯  │
//! ╰───┴────────────╯
//! ```
//!
//! Now reading the chain from different points of view leads to:
//! ```txt
//! [A] -> D -> B
//! A -> C -> E -> [B]
//! A -> [C] -> E -> B
//! A -> [D] -> B
//! A -> C -> [E] -> B
//! ```
//!
//! This is more a graph than a linked list.
#[allow(unused_imports)]
use {
    super::{super::LinkedList, NodeHeadInner},
    std::mem::ManuallyDrop,
};

95
src/double/tests.rs Normal file
View File

@@ -0,0 +1,95 @@
#![allow(clippy::items_after_statements)]
use std::{
fmt::Debug,
ptr,
sync::{
Barrier,
atomic::{AtomicUsize, Ordering},
},
thread,
};
use super::*;
#[test]
fn concurrency_and_scoped_drop() {
    // Counts every `StringWithDrop` destructor run during the test.
    static DROP_C: AtomicUsize = AtomicUsize::new(0);
    #[derive(Clone)]
    struct StringWithDrop(String);
    impl From<String> for StringWithDrop {
        fn from(value: String) -> Self {
            Self(value)
        }
    }
    impl Drop for StringWithDrop {
        fn drop(&mut self) {
            DROP_C.fetch_add(1, Ordering::Relaxed);
        }
    }
    impl Debug for StringWithDrop {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            <String as Debug>::fmt(&self.0, f)
        }
    }
    // Two producers prepend CREATE1 + CREATE2 items while a consumer pops all but NOT_POP.
    const CREATE1: usize = 100;
    const CREATE2: usize = 100;
    const NOT_POP: usize = 20;
    {
        create_ll!(LinkedList::<StringWithDrop>::new(), ll_val, ll);
        // All three threads start hammering the list at the same instant.
        let barrier = Barrier::new(3);
        thread::scope(|s| {
            s.spawn(|| {
                barrier.wait();
                for n in 0..CREATE1 {
                    ll.prepend(format!("A {n}").into());
                }
            });
            s.spawn(|| {
                barrier.wait();
                for n in 0..CREATE2 {
                    ll.prepend(format!("B {n}").into());
                }
            });
            s.spawn(|| {
                barrier.wait();
                for _ in 0..(CREATE1 + CREATE2 - NOT_POP) {
                    unsafe {
                        // The consumer can outrun the producers: spin until an item exists.
                        while ll.pop().is_none() {
                            std::thread::yield_now();
                        }
                    }
                }
            });
        });
        // Exactly the popped items have been dropped so far…
        assert_eq!(DROP_C.load(Ordering::Relaxed), CREATE1 + CREATE2 - NOT_POP);
        del_ll!(ll_val, ll);
        // …and deleting the list drops the NOT_POP leftovers.
        assert_eq!(DROP_C.load(Ordering::Relaxed), CREATE1 + CREATE2);
    }
}
#[test]
fn unsized_store() {
    create_ll!(LinkedList::<str>::new(), ll_val, ll);
    let inserted = ll.prepend_dst("test");
    let first_node = ll.0.read().start.expect("first node to be some");
    // The returned reference is the node actually linked in as `start`…
    assert!(ptr::addr_eq(inserted, first_node));
    // …and its unsized payload was copied intact.
    assert_eq!(&**inserted, "test");
    del_ll!(ll_val, ll);
}

View File

@@ -1,305 +1,15 @@
#![feature(arbitrary_self_types)]
#![doc = include_str!("../README.md")]
#![feature(
arbitrary_self_types,
arbitrary_self_types_pointers,
decl_macro,
ptr_metadata
)]
#![warn(clippy::pedantic)]
use std::{
alloc::{Layout, dealloc},
hint::unreachable_unchecked,
mem::MaybeUninit,
ops::Deref,
};
#[cfg(doc)]
use std::{pin::Pin, sync::Arc};
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
pub mod double;
mod docs {
//! Rules for soundness of modifications.
//! To modify the pointer that goes to a node a write lock must be held to it: e.g:
//! - Bidirectional consistency is not guaranteed. If you "walk" a list you must only do so in
//! the same direction, that continuity will be guaranteed.
//! - If N node is to be removed, write lock it. Update adyacent pointers first and keep them
//! locked until the N node is freed, then release the adyacenty locks properly.
//! - The previous prevents deadlocks because by having a write lock of the previous node
//! before locking itself it guaratees that the previous lock can't get read access to
//! itself to get the ptr to the node of ourselves and update our prev ptr.
//! - For every operation only a single item in the list must be write blocked to prevent
//! deadlocks.
}
pub type NodeHead<T> = LinkedList<T>;
pub enum NodeDiscr<T: 'static> {
Head(RwLock<NodeHead<T>>),
Node(Node<T>),
}
impl<T> Default for NodeDiscr<T> {
fn default() -> Self {
Self::Head(RwLock::new(LinkedList::default()))
}
}
impl<T: 'static> NodeDiscr<T> {
#[must_use]
pub fn new(value: LinkedList<T>) -> Self {
Self::Head(RwLock::new(value))
}
/// # Safety
/// UB if [`self`] is not [`Self::Head`].
pub unsafe fn as_head_unchecked(&self) -> &RwLock<NodeHead<T>> {
let Self::Head(head) = self else {
unsafe { unreachable_unchecked() }
};
head
}
}
#[allow(dead_code)] // We dont even read variants, just hold whatever lock
enum NodeDiscrWriteLocks<'a, T: 'static> {
Head(RwLockWriteGuard<'a, NodeHead<T>>),
Node(RwLockWriteGuard<'a, NodeInner<T>>),
}
#[repr(transparent)]
pub struct Node<T: 'static>(RwLock<NodeInner<T>>);
/// It's safe to assume `next` and `prev` are initialized. But any function which would break this
/// assumption should be considered unsafe.
struct NodeInner<T: 'static> {
next: MaybeUninit<Option<&'static Node<T>>>,
prev: MaybeUninit<&'static NodeDiscr<T>>,
data: T,
}
impl<T: 'static> Deref for NodeInner<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T> NodeInner<T> {
fn prev(&self) -> &'static NodeDiscr<T> {
unsafe { self.prev.assume_init() }
}
/// Could also leak memory
///
/// # Safety
/// The `prev` self ptr is valid as long as the write lock is held, as soon as it's dropped it
/// becomes invalid.
fn update_prev(&self) -> NodeDiscrWriteLocks<'static, T> {
match self.prev() {
NodeDiscr::Head(h) => {
let mut lock = h.write();
lock.start = unsafe { self.next.assume_init() };
NodeDiscrWriteLocks::Head(lock)
}
NodeDiscr::Node(n) => {
let mut lock = n.0.write();
lock.next = self.next;
NodeDiscrWriteLocks::Node(lock)
}
}
}
fn next(&self) -> Option<&'static Node<T>> {
unsafe { self.next.assume_init() }
}
/// Could also leak memory
///
/// # Safety
/// The `next`self ptr is valid as long as the write lock is held, as soon as it's dropped it
/// becomes invalid.
fn update_next(&self) -> Option<RwLockWriteGuard<'static, NodeInner<T>>> {
if let Some(next) = self.next() {
let mut lock = next.0.write();
lock.prev = self.prev;
Some(lock)
} else {
None
}
}
}
impl<T> Node<T> {
/// # Safety
///
/// Node is uninitialized.
///
/// Will leak if not handled properly.
#[must_use]
#[allow(dead_code)]
unsafe fn alloc_new(data: T) -> &'static mut Self {
Box::leak(Box::new(Node(RwLock::new(NodeInner {
next: MaybeUninit::uninit(),
prev: MaybeUninit::uninit(),
data,
}))))
}
/// # Safety
///
/// Will leak if not handled properly.
#[must_use]
#[allow(clippy::mut_from_ref)]
fn alloc_new_with_ptrs(
data: T,
next: Option<&'static Node<T>>,
prev: &'static NodeDiscr<T>,
) -> &'static mut Self {
Box::leak(Box::new(Node(RwLock::new(NodeInner {
next: MaybeUninit::new(next),
prev: MaybeUninit::new(prev),
data,
}))))
}
/// Isolates the node from surrounding ones and returns a `ReadGuard` to the dangling node that
/// would leak unless freed or managed. This guard could still have readers or writers
/// awaiting. Adyacent write locks are also sent back to prevent their modification since the
/// isolation and make the pointers of self still valid.
///
/// # Safety
///
/// Its unsafe to access `next` and `prev` ptr's after the adge locks are dropped.
#[allow(clippy::type_complexity)]
fn isolate(
&'_ self,
) -> (
RwLockReadGuard<'_, NodeInner<T>>,
(
NodeDiscrWriteLocks<'static, T>,
Option<RwLockWriteGuard<'static, NodeInner<T>>>,
),
) {
let node = self.0.read();
let edge_locks = (node.update_prev(), node.update_next());
(node, edge_locks)
}
/// # Safety
///
/// Will remove this pointer from memory, there must be no external pointers to this as they
/// will point to invalid data and UB.
///
/// Will busy wait for no read/write locks to this slot and assume it's been completely
/// isolated then. Any access attempts while it's being freed (after waiting for locks) can
/// lead to weird UB.
pub unsafe fn remove(&self) {
unsafe {
let (lock, edge_locks) = self.isolate();
// Drop the allocated data, edge ptrs remain valid meanwhile
drop(lock); // let other readers/writers finish with this item
let data = loop {
if self.0.is_locked() {
std::thread::yield_now();
} else {
break self.0.data_ptr();
}
};
std::ptr::drop_in_place(data);
drop(edge_locks); // edge ptrs become invalid form now on
// Now that we are the only ref to ourselves its ok to take outselves as mutable
let myself = std::ptr::from_ref(self).cast_mut();
// And free this
dealloct(myself);
}
}
}
unsafe fn dealloct<T>(data: *mut T) {
unsafe {
dealloc(data.cast(), Layout::new::<T>());
}
}
pub struct LinkedList<T: 'static> {
start: Option<&'static Node<T>>,
}
impl<T: 'static> Default for LinkedList<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> LinkedList<T> {
#[must_use]
pub fn new() -> Self {
Self { start: None }
}
/// # Safety
///
/// `head_ref` MUST be the [`NodeDiscr`] wrapped around the [`RwLock`] that `self` is locked
/// by. This is asserted in debug mode.
unsafe fn prepend(
mut self: RwLockWriteGuard<'_, Self>,
head_ref: &'static NodeDiscr<T>,
data: T,
) {
#[cfg(debug_assertions)]
{
let NodeDiscr::Head(ll_head) = head_ref else {
panic!("passed head_ref doesnt match lock");
};
debug_assert!(std::ptr::eq(RwLockWriteGuard::rwlock(&self), ll_head));
}
let first_node = self.start;
let new_node = Node::alloc_new_with_ptrs(data, first_node, head_ref);
self.start = Some(new_node);
}
}
pub struct LinkedListWrapper<T: 'static> {
// Safety: MUST be of the `Head` variant at all moments
inner: NodeDiscr<T>,
}
impl<T: 'static> Default for LinkedListWrapper<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: 'static> LinkedListWrapper<T> {
#[must_use]
pub fn new() -> Self {
Self {
inner: NodeDiscr::default(),
}
}
pub fn as_head(&'static self) -> &'static RwLock<LinkedList<T>> {
unsafe { self.inner.as_head_unchecked() }
}
pub fn prepend(&'static self, data: T) {
let lock = self.as_head().write();
unsafe {
LinkedList::prepend(lock, &self.inner, data);
}
}
pub fn clone_into_vec(&'static self) -> Vec<T>
where
T: Clone,
{
let mut total = Vec::new();
let mut next_node = self.as_head().read().start;
while let Some(node) = next_node {
let read = node.0.read();
total.push(read.data.clone());
next_node = read.next();
}
total
}
}
#[cfg(test)]
mod tests;
pub use double::LinkedList as DoublyLinkedList;

View File

@@ -1,15 +0,0 @@
use super::*;
#[test]
fn it_works() {
let ll = Box::leak(Box::new(LinkedListWrapper::new()));
println!("{:#?}", ll.clone_into_vec());
ll.prepend("test");
ll.prepend("another test");
println!("{:#?}", ll.clone_into_vec());
assert_eq!(4, 4);
}