init version

This commit is contained in:
2025-07-17 23:46:07 +02:00
commit 08884ebffb
6 changed files with 645 additions and 0 deletions

305
src/lib.rs Normal file
View File

@@ -0,0 +1,305 @@
#![feature(arbitrary_self_types)]
#![warn(clippy::pedantic)]
use std::{
alloc::{Layout, dealloc},
hint::unreachable_unchecked,
mem::MaybeUninit,
ops::Deref,
};
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
mod docs {
    //! Rules for soundness of modifications.
    //! To modify the pointer that goes to a node, a write lock must be held on it. E.g.:
    //! - Bidirectional consistency is not guaranteed. If you "walk" a list you must only do so in
    //!   the same direction; continuity is guaranteed only along that one direction.
    //! - If node N is to be removed, write lock it. Update the adjacent nodes' pointers first and
    //!   keep them locked until node N is freed, then release the adjacent locks properly.
    //! - The previous rule prevents deadlocks: by holding a write lock on the previous node
    //!   before locking N itself, it guarantees the previous node can't get read access to
    //!   itself to obtain the pointer to N and update N's `prev` pointer concurrently.
    //! - For every operation only a single item in the list must be write blocked to prevent
    //!   deadlocks.
}
/// The list head is itself a [`LinkedList`], so a node's `prev` link can refer
/// to either the head or another node uniformly via [`NodeDiscr`].
pub type NodeHead<T> = LinkedList<T>;

/// A link target inside the list: either the list head or a regular node.
pub enum NodeDiscr<T: 'static> {
    Head(RwLock<NodeHead<T>>),
    Node(Node<T>),
}
impl<T> Default for NodeDiscr<T> {
    /// A `Head` discriminant wrapping an empty list.
    fn default() -> Self {
        Self::Head(RwLock::new(LinkedList::new()))
    }
}
impl<T: 'static> NodeDiscr<T> {
    /// Wraps an existing list head in the discriminated link type.
    #[must_use]
    pub fn new(value: LinkedList<T>) -> Self {
        Self::Head(RwLock::new(value))
    }

    /// # Safety
    /// UB if [`self`] is not [`Self::Head`].
    pub unsafe fn as_head_unchecked(&self) -> &RwLock<NodeHead<T>> {
        match self {
            Self::Head(head) => head,
            // SAFETY: the caller guarantees the variant is `Head`.
            Self::Node(_) => unsafe { unreachable_unchecked() },
        }
    }
}
// Holds a write guard to whichever neighbour (head or regular node) was locked
// during isolation. The variant is never inspected; the guard is only kept
// alive so the neighbour's pointers cannot be modified while we depend on them.
#[allow(dead_code)] // We dont even read variants, just hold whatever lock
enum NodeDiscrWriteLocks<'a, T: 'static> {
    Head(RwLockWriteGuard<'a, NodeHead<T>>),
    Node(RwLockWriteGuard<'a, NodeInner<T>>),
}
/// A list node: a lock around the payload plus the link pointers.
#[repr(transparent)]
pub struct Node<T: 'static>(RwLock<NodeInner<T>>);

/// It's safe to assume `next` and `prev` are initialized. But any function which would break this
/// assumption should be considered unsafe.
struct NodeInner<T: 'static> {
    // `Some(next)` or `None` at the tail; `MaybeUninit` only so `alloc_new` can defer it.
    next: MaybeUninit<Option<&'static Node<T>>>,
    // The previous node, or the list head for the first node.
    prev: MaybeUninit<&'static NodeDiscr<T>>,
    data: T,
}
// Transparent access to the stored payload.
impl<T: 'static> Deref for NodeInner<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.data
    }
}
impl<T> NodeInner<T> {
    /// Reads the `prev` link. Sound per the struct invariant that `prev` is
    /// always initialized (see the `NodeInner` docs).
    fn prev(&self) -> &'static NodeDiscr<T> {
        unsafe { self.prev.assume_init() }
    }
    /// Write-locks the previous element (head or node) and redirects its
    /// forward pointer past `self`, i.e. to `self.next`. The guard is returned
    /// so the caller controls when the neighbour becomes mutable again.
    ///
    /// Could also leak memory.
    ///
    /// # Safety
    /// The `prev` self ptr is valid as long as the write lock is held; as soon
    /// as it's dropped it becomes invalid.
    fn update_prev(&self) -> NodeDiscrWriteLocks<'static, T> {
        match self.prev() {
            NodeDiscr::Head(h) => {
                // We were the first node: the head now starts at our successor.
                let mut lock = h.write();
                lock.start = unsafe { self.next.assume_init() };
                NodeDiscrWriteLocks::Head(lock)
            }
            NodeDiscr::Node(n) => {
                let mut lock = n.0.write();
                // Copies the (initialized) `MaybeUninit` wrapper verbatim.
                lock.next = self.next;
                NodeDiscrWriteLocks::Node(lock)
            }
        }
    }
    /// Reads the `next` link (`None` at the tail). Sound per the struct
    /// invariant that `next` is always initialized.
    fn next(&self) -> Option<&'static Node<T>> {
        unsafe { self.next.assume_init() }
    }
    /// Write-locks the next node (if any) and redirects its `prev` pointer
    /// past `self`, i.e. to `self.prev`. Returns the guard for the caller to
    /// hold, or `None` when `self` is the tail.
    ///
    /// Could also leak memory.
    ///
    /// # Safety
    /// The `next` self ptr is valid as long as the write lock is held; as soon
    /// as it's dropped it becomes invalid.
    fn update_next(&self) -> Option<RwLockWriteGuard<'static, NodeInner<T>>> {
        if let Some(next) = self.next() {
            let mut lock = next.0.write();
            lock.prev = self.prev;
            Some(lock)
        } else {
            None
        }
    }
}
impl<T> Node<T> {
    /// Leaks a fresh node whose link pointers are left uninitialized.
    ///
    /// # Safety
    ///
    /// Node is uninitialized: `next`/`prev` must be written before anything
    /// relies on the `NodeInner` initialization invariant.
    ///
    /// Will leak if not handled properly.
    #[must_use]
    #[allow(dead_code)]
    unsafe fn alloc_new(data: T) -> &'static mut Self {
        Box::leak(Box::new(Node(RwLock::new(NodeInner {
            next: MaybeUninit::uninit(),
            prev: MaybeUninit::uninit(),
            data,
        }))))
    }
    /// Leaks a fresh node with both link pointers already set, so the
    /// `NodeInner` invariant holds from the start.
    ///
    /// # Safety
    ///
    /// Will leak if not handled properly.
    #[must_use]
    #[allow(clippy::mut_from_ref)]
    fn alloc_new_with_ptrs(
        data: T,
        next: Option<&'static Node<T>>,
        prev: &'static NodeDiscr<T>,
    ) -> &'static mut Self {
        Box::leak(Box::new(Node(RwLock::new(NodeInner {
            next: MaybeUninit::new(next),
            prev: MaybeUninit::new(prev),
            data,
        }))))
    }
    /// Isolates the node from surrounding ones and returns a `ReadGuard` to the dangling node that
    /// would leak unless freed or managed. This guard could still have readers or writers
    /// awaiting. Adjacent write locks are also sent back to prevent their modification since the
    /// isolation and to keep the pointers of self still valid.
    ///
    /// # Safety
    ///
    /// It's unsafe to access the `next` and `prev` ptrs after the edge locks are dropped.
    #[allow(clippy::type_complexity)]
    fn isolate(
        &'_ self,
    ) -> (
        RwLockReadGuard<'_, NodeInner<T>>,
        (
            NodeDiscrWriteLocks<'static, T>,
            Option<RwLockWriteGuard<'static, NodeInner<T>>>,
        ),
    ) {
        let node = self.0.read();
        // `prev` first, per the `docs` lock-ordering rules; both neighbours are
        // re-pointed past `self` while their write guards are held.
        let edge_locks = (node.update_prev(), node.update_next());
        (node, edge_locks)
    }
    /// # Safety
    ///
    /// Will remove this pointer from memory, there must be no external pointers to this as they
    /// will point to invalid data and UB.
    ///
    /// Will busy wait for no read/write locks to this slot and assume it's been completely
    /// isolated then. Any access attempts while it's being freed (after waiting for locks) can
    /// lead to weird UB.
    pub unsafe fn remove(&self) {
        unsafe {
            // Unlink from both neighbours; their guards pin our own ptrs.
            let (lock, edge_locks) = self.isolate();
            // Drop the allocated data, edge ptrs remain valid meanwhile
            drop(lock); // let other readers/writers finish with this item
            // Busy-wait until no guard (reader or writer) references this slot.
            let data = loop {
                if self.0.is_locked() {
                    std::thread::yield_now();
                } else {
                    break self.0.data_ptr();
                }
            };
            // Run the inner value's destructor in place; the allocation
            // itself survives until the explicit dealloc below.
            std::ptr::drop_in_place(data);
            drop(edge_locks); // edge ptrs become invalid from now on
            // Now that we are the only ref to ourselves its ok to take ourselves as mutable
            let myself = std::ptr::from_ref(self).cast_mut();
            // And free this
            dealloct(myself);
        }
    }
}
/// Returns `data`'s allocation to the global allocator WITHOUT running `T`'s
/// destructor (callers drop the pointee in place beforehand — see `Node::remove`).
///
/// # Safety
/// `data` must point to a live allocation of exactly `Layout::new::<T>()`
/// obtained from the global allocator (e.g. via `Box::new`), and must not be
/// accessed after this call.
unsafe fn dealloct<T>(data: *mut T) {
    unsafe {
        dealloc(data.cast(), Layout::new::<T>());
    }
}
/// List head: just the pointer to the first node (`None` when empty).
pub struct LinkedList<T: 'static> {
    start: Option<&'static Node<T>>,
}
impl<T: 'static> Default for LinkedList<T> {
    /// An empty list; identical to [`LinkedList::new`].
    fn default() -> Self {
        Self { start: None }
    }
}
impl<T> LinkedList<T> {
    /// Creates an empty list.
    #[must_use]
    pub fn new() -> Self {
        Self { start: None }
    }
    /// Pushes `data` at the front while the list is write-locked: allocates a
    /// node whose `next` is the old first node and whose `prev` is the head
    /// itself, then swings `start` to it.
    ///
    /// # Safety
    ///
    /// `head_ref` MUST be the [`NodeDiscr`] wrapped around the [`RwLock`] that `self` is locked
    /// by. This is asserted in debug mode.
    unsafe fn prepend(
        mut self: RwLockWriteGuard<'_, Self>,
        head_ref: &'static NodeDiscr<T>,
        data: T,
    ) {
        #[cfg(debug_assertions)]
        {
            let NodeDiscr::Head(ll_head) = head_ref else {
                panic!("passed head_ref doesnt match lock");
            };
            // Pointer-identity check: the guard must originate from this very lock.
            debug_assert!(std::ptr::eq(RwLockWriteGuard::rwlock(&self), ll_head));
        }
        let first_node = self.start;
        let new_node = Node::alloc_new_with_ptrs(data, first_node, head_ref);
        self.start = Some(new_node);
    }
}
/// Owning wrapper pairing a list head with its `NodeDiscr` identity, so that
/// first-node `prev` pointers can refer back to the head.
pub struct LinkedListWrapper<T: 'static> {
    // Safety: MUST be of the `Head` variant at all moments
    inner: NodeDiscr<T>,
}
impl<T: 'static> Default for LinkedListWrapper<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: 'static> LinkedListWrapper<T> {
    /// Creates a wrapper around an empty list.
    #[must_use]
    pub fn new() -> Self {
        Self {
            inner: NodeDiscr::default(),
        }
    }
    /// Returns the lock guarding the list head.
    pub fn as_head(&'static self) -> &'static RwLock<LinkedList<T>> {
        // SAFETY: `inner` is always the `Head` variant (field invariant).
        unsafe { self.inner.as_head_unchecked() }
    }
    /// Pushes `data` at the front of the list.
    pub fn prepend(&'static self, data: T) {
        let lock = self.as_head().write();
        // SAFETY: `lock` is taken from the very `RwLock` inside `self.inner`.
        unsafe {
            LinkedList::prepend(lock, &self.inner, data);
        }
    }
    /// Snapshots the list front-to-back by cloning every element.
    ///
    /// Locks are held one node at a time (the head guard is a temporary,
    /// dropped before the first node is read), so concurrent mutation may be
    /// observed mid-walk; per the `docs` rules only forward continuity holds.
    pub fn clone_into_vec(&'static self) -> Vec<T>
    where
        T: Clone,
    {
        let mut total = Vec::new();
        let mut next_node = self.as_head().read().start;
        while let Some(node) = next_node {
            let read = node.0.read();
            total.push(read.data.clone());
            next_node = read.next();
        }
        total
    }
}
#[cfg(test)]
mod tests;

15
src/tests.rs Normal file
View File

@@ -0,0 +1,15 @@
use super::*;

#[test]
fn it_works() {
    // `LinkedListWrapper`'s API requires `&'static self`, so leak the wrapper.
    let ll = Box::leak(Box::new(LinkedListWrapper::new()));
    // A fresh list is empty.
    assert_eq!(ll.clone_into_vec(), Vec::<&str>::new());
    ll.prepend("test");
    ll.prepend("another test");
    // Prepend puts the newest element at the front.
    assert_eq!(ll.clone_into_vec(), vec!["another test", "test"]);
}