Compare commits

...

3 Commits

Author SHA1 Message Date
64f503633d feat: unsafely support ?Sized 2025-07-21 20:34:09 +02:00
1b2c8c01a0 chore: canonical copy impl works 2025-07-20 01:38:44 +02:00
af5e5ff19e safety: what I think is a safe macro for linked list creation
+ some docs changes and cleanup
2025-07-20 01:36:48 +02:00
8 changed files with 307 additions and 133 deletions

10
Cargo.lock generated
View File

@@ -24,9 +24,15 @@ checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
name = "concurrent-linked-list"
version = "0.1.0"
dependencies = [
"dst-clone",
"parking_lot",
]
[[package]]
name = "dst-clone"
version = "0.1.0"
source = "git+https://git.javalsai.tuxcord.net/tuxcord/dst-clone#6a32bfa59455adafde07bafd3bc9f2182cd5127b"
[[package]]
name = "libc"
version = "0.2.174"
@@ -68,9 +74,9 @@ dependencies = [
[[package]]
name = "redox_syscall"
version = "0.5.13"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6"
checksum = "7e8af0dde094006011e6a740d4879319439489813bd0bcdc7d821beaeeff48ec"
dependencies = [
"bitflags",
]

View File

@@ -4,4 +4,5 @@ version = "0.1.0"
edition = "2024"
[dependencies]
dst-clone = { git = "https://git.javalsai.tuxcord.net/tuxcord/dst-clone", version = "0" }
parking_lot = { version = "0", default-features = false }

View File

@@ -1,9 +1,14 @@
A [`LinkedList`] implementation avoiding the use of [`Arc`]s in favor of unsafe manual removal of nodes when the caller knows all possible references are left unused.
A **linked list** implementation avoiding the use of [`Arc`]s in favor of unsafe manual removal of nodes when the caller knows all possible references are left unused.
The point of this crate is to offer [`Pin`] guarantees on the references into the list while allowing it to be modified. The implementation of all this doesn't require mutable access to the linked list itself so as a side effect it's possible to use the list in concurrent manners.
This means that it will try as smartly as possible to allow concurrent modifications to it as long as the nodes affected are unrelated.
# Types
There could be different types of linked list implementations in the future, like safer ones with [`Arc`], single-threaded ones, etc. But right now there's only:
* [`DoublyLinkedList`]: [`crate::double`] doubly linked list only in the heap with manual unsafe removal of items in it.
---
`cargo doc` is supported and is the main documentation of the library. But there's no official hosting of the document files.

View File

@@ -2,11 +2,11 @@
//!
//! Doubly as each node points to the next and previous node.
use std::{mem::transmute, ops::Deref, pin::Pin};
use std::{marker::PhantomPinned, mem::transmute, ops::Deref, pin::Pin};
use parking_lot::RwLock;
use crate::double::node::{BackNodeWriteLock, Node, NodeBackPtr};
use crate::double::node::{BackNodeWriteLock, Node};
pub mod node;
@@ -16,38 +16,26 @@ pub mod node;
// case the task holding the left lock is moving rightwards.
// Rightwards locking can be blocking.
pub struct NodeHeadInner<'ll, T> {
pub struct NodeHeadInner<'ll, T: ?Sized> {
start: Option<&'ll node::Node<'ll, T>>,
}
impl<T> Default for NodeHeadInner<'_, T> {
impl<T: ?Sized> Default for NodeHeadInner<'_, T> {
fn default() -> Self {
Self { start: None }
}
}
pub struct LinkedList<'ll, T>(RwLock<NodeHeadInner<'ll, T>>);
pub struct NodeHead<'ll, T: ?Sized>(RwLock<NodeHeadInner<'ll, T>>);
impl<'ll, T> Drop for LinkedList<'ll, T> {
fn drop(&mut self) {
// SAFETY: this is the drop impl so we can guarantee the reference is valid for the
// lifetime of the struct itself and external references would be invalidated right after
// the [`Drop`] of it (this fn). I don't think there's a way to differentiate the lifetimes
// by a drop implementation so this would be safe as no external references lifetimes would
// be valid after drop finishes
let myself = unsafe { transmute::<&mut Self, &'ll mut Self>(self) };
while unsafe { myself.pop().is_some() } {}
}
}
impl<T> Default for LinkedList<'_, T> {
impl<T: ?Sized> Default for NodeHead<'_, T> {
#[must_use]
fn default() -> Self {
Self(RwLock::new(NodeHeadInner::default()))
}
}
impl<'ll, T> Deref for LinkedList<'ll, T> {
impl<'ll, T: ?Sized> Deref for NodeHead<'ll, T> {
type Target = RwLock<NodeHeadInner<'ll, T>>;
fn deref(&self) -> &Self::Target {
@@ -55,7 +43,7 @@ impl<'ll, T> Deref for LinkedList<'ll, T> {
}
}
impl<'ll, T> LinkedList<'ll, T> {
impl<'ll, T: ?Sized> NodeHead<'ll, T> {
#[must_use]
pub fn new() -> Self {
Self::default()
@@ -85,13 +73,42 @@ impl<'ll, T> LinkedList<'ll, T> {
unsafe { transmute::<&Self, &'ll Self>(myself.get_ref()) }
}
pub fn prepend(&'ll self, data: T) {
/// # Safety
///
/// Must be called at the end of its scope, when there are no references into it left.
pub unsafe fn manual_drop(&'ll self) {
// SAFETY: this is the drop impl so we can guarantee the reference is valid for the
// lifetime of the struct itself and external references would be invalidated right after
// the [`Drop`] of it (this fn). I don't think there's a way to differentiate the lifetimes
// by a drop implementation so this would be safe as no external references lifetimes would
// be valid after drop finishes
while unsafe { self.pop().is_some() } {}
}
pub fn prepend(&'ll self, data: T) -> &'ll Node<'ll, T>
where
T: Sized,
{
let self_lock = self.write();
let next = self_lock.start;
let next_lock = next.map(|n| n.write());
let new_node = Node::new_leaked(data, NodeBackPtr::new_head(self), next);
let next_lock = next.map(|n| n.ptrs.write());
let new_node = Node::new_leaked(data, node::NodeBackPtr::new_head(self), next);
// SAFETY: ptrs are surrounding and they've been locked all along
unsafe { new_node.integrate((BackNodeWriteLock::Head(self_lock), next_lock)) };
new_node
}
pub fn prepend_dst(&'ll self, data: &T) -> &'ll Node<'ll, T>
where
T: dst_clone::DstClone,
{
let self_lock = self.write();
let next = self_lock.start;
let next_lock = next.map(|n| n.ptrs.write());
let new_node = Node::leaked_from_dst(data, node::NodeBackPtr::new_head(self), next);
// SAFETY: ptrs are surrounding and they've been locked all along
unsafe { new_node.integrate((BackNodeWriteLock::Head(self_lock), next_lock)) };
new_node
}
/// Returns [`None`] if there's no next node.
@@ -102,8 +119,8 @@ impl<'ll, T> LinkedList<'ll, T> {
pub unsafe fn pop(&'ll self) -> Option<()> {
let self_lock = self.write();
let pop_node = self_lock.start?;
let pop_node_lock = pop_node.write();
let next_node_lock = pop_node_lock.next.map(|n| n.write());
let pop_node_lock = pop_node.ptrs.write();
let next_node_lock = pop_node_lock.next.map(|n| n.ptrs.write());
// SAFETY: locked all along and consecutive nodes
unsafe {
@@ -115,6 +132,7 @@ impl<'ll, T> LinkedList<'ll, T> {
drop(pop_node_lock);
// SAFETY: node has been isolated so no references out
// TODO: return a droppable guard with a ptr to the node instead
unsafe { pop_node.wait_free() }
Some(())
@@ -127,13 +145,119 @@ impl<'ll, T> LinkedList<'ll, T> {
let mut total = Vec::new();
let mut next_node = self.read().start;
while let Some(node) = next_node {
let read = node.read();
total.push(read.data.clone());
let read = node.ptrs.read();
total.push(node.data.clone());
next_node = read.next;
}
total
}
}
// Can't quite make this work how I want 😭
/// Attempt to safe wrap around a [`NodeHead`] to allow a sound API with [`Drop`] implementation.
///
/// Please see [`create_ll`] and [`del_ll`]
pub struct LinkedList<'ll, T: ?Sized> {
head: NodeHead<'ll, T>,
_pinned: PhantomPinned,
}
impl<'ll, T: ?Sized> LinkedList<'ll, T> {
#[must_use]
pub fn new() -> Self {
Self::default()
}
/// # Safety
///
/// NEVER EVER EVER extend the lifetime of this beyond the end of scope of the linked list
/// itself, it's all good and safe UNTIL it's dropped.
///
/// Allows you to associate the lifetime of this to something else to prevent accidental
/// over-extending the lifetime.
pub unsafe fn extend_for<'scope>(&self, _: &'scope impl std::any::Any) -> &'ll NodeHead<'ll, T>
where
'scope: 'll,
{
unsafe { transmute(&self.head) }
}
}
impl<T: ?Sized> Default for LinkedList<'_, T> {
#[must_use]
fn default() -> Self {
Self {
head: NodeHead::new(),
_pinned: PhantomPinned,
}
}
}
impl<T: ?Sized> Drop for LinkedList<'_, T> {
fn drop(&mut self) {
// Extend to 'static so the compiler doesn't cry, we know this covers 'll
let myself = unsafe { self.extend_for(&()) };
// And this is `Drop` so there shouldn't be any refs as the end of this function would be
// where their lifetime ('ll) ends
unsafe { myself.manual_drop() }
}
}
// I'm not so sure this is all that bulletproof, but it behaves solidly enough
/// Unsafe macro that automatically creates a linked list and an extended reference.
///
/// Also creates a `scope = ()` variable at the same level as `$val` to mimic its scope and prevent
/// accidental expansion of the unsafe lifetime beyond the current scope.
///
/// For example:
///
/// ```
/// use concurrent_linked_list::double::{create_ll, del_ll, LinkedList};
///
/// create_ll!(LinkedList::<String>::new(), ll_val, ll);
/// ll.prepend("test".to_string());
/// del_ll!(ll_val, ll);
/// ```
///
/// But trying to use `ll` after the deletion should fail:
///
/// ```compile_fail
/// use concurrent_linked_list::double::{create_ll, del_ll, LinkedList};
///
/// create_ll!(LinkedList::<String>::new(), ll_val, ll);
/// ll.prepend("test".to_string());
/// del_ll!(ll_val, ll);
/// ll.prepend("test2".to_string());
/// ```
///
/// Or trying to expand `ll` beyond the scope it was defined in, even if not deleted:
///
/// ```compile_fail
/// use concurrent_linked_list::double::{create_ll, del_ll, LinkedList};
///
/// let ll = {
/// create_ll!(LinkedList::<String>::new(), ll_val, ll);
/// ll.prepend("test".to_string());
/// ll
/// }
/// ll.prepend("test2".to_string());
/// ```
pub macro create_ll($rhs:expr, $val:ident, $ref:ident) {
let scope = ();
let $val = $rhs;
let $ref = unsafe { $val.extend_for(&scope) };
}
/// Macro that attempts to run some hygiene cleanup on [`create_ll`] to avoid accidental use of the
/// reference further too. Other functions could still extend the lifetime beyond acceptable
/// though.
pub macro del_ll($val:ident, $ref:ident) {
#[allow(unused_variables)]
let $ref = ();
drop($val);
}
#[cfg(test)]
mod tests;

View File

@@ -1,13 +1,15 @@
use std::{ops::Deref, ptr};
use std::{
alloc::{Layout, alloc, handle_alloc_error},
ops::Deref,
ptr::{self, metadata},
};
use parking_lot::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard};
use super::NodeHeadInner;
#[repr(transparent)]
pub struct Node<'ll, T>(RwLock<NodeInner<'ll, T>>);
type NodeHead<'ll, T> = RwLock<NodeHeadInner<'ll, T>>;
pub enum NodeBackPtr<'ll, T> {
pub enum NodeBackPtr<'ll, T: ?Sized> {
Head(&'ll NodeHead<'ll, T>),
Node(&'ll Node<'ll, T>),
}
@@ -15,27 +17,18 @@ pub enum NodeBackPtr<'ll, T> {
// yes the whole purpose is docs, might add Isolated Guards around nodes here
pub mod topology_safety;
// TODO: RwLock the ptrs only instead of the node
// Box<(RwLock<(&prev, &next)>, T)>
// instead of
// Box<RwLock<(&prev, &next, T)>>
// allows user to opt out of RwLock, allowing changes to adjacent nodes while T is being externally
// used and enables T: ?Sized
pub struct NodeInner<'ll, T> {
pub(crate) prev: NodeBackPtr<'ll, T>,
pub(crate) next: Option<&'ll Node<'ll, T>>,
pub struct NodePtrPair<'ll, T: ?Sized> {
pub prev: NodeBackPtr<'ll, T>,
pub next: Option<&'ll Node<'ll, T>>,
}
#[repr(C)]
pub struct Node<'ll, T: ?Sized> {
pub ptrs: RwLock<NodePtrPair<'ll, T>>,
pub data: T,
}
impl<'ll, T> Deref for Node<'ll, T> {
type Target = RwLock<NodeInner<'ll, T>>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> Deref for NodeInner<'_, T> {
impl<T: ?Sized> Deref for Node<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
@@ -43,36 +36,27 @@ impl<T> Deref for NodeInner<'_, T> {
}
}
impl<T> Copy for NodeBackPtr<'_, T> {}
impl<T> Clone for NodeBackPtr<'_, T> {
// # TODO: check if this works as expected with the canonical clone impl as I'm not sure if
// Copy would make it recursive or not
#[allow(clippy::enum_glob_use, clippy::non_canonical_clone_impl)]
impl<T: ?Sized> Copy for NodeBackPtr<'_, T> {}
impl<T: ?Sized> Clone for NodeBackPtr<'_, T> {
fn clone(&self) -> Self {
use NodeBackPtr::*;
match self {
Head(h) => Head(h),
Node(n) => Node(n),
}
*self
}
}
type WriteAndBackDoublet<'ll, T> = (
BackNodeWriteLock<'ll, T>,
RwLockUpgradableReadGuard<'ll, NodeInner<'ll, T>>,
RwLockUpgradableReadGuard<'ll, NodePtrPair<'ll, T>>,
);
type WriteSurroundTriplet<'ll, T> = (
BackNodeWriteLock<'ll, T>,
RwLockWriteGuard<'ll, NodeInner<'ll, T>>,
Option<RwLockWriteGuard<'ll, NodeInner<'ll, T>>>,
RwLockWriteGuard<'ll, NodePtrPair<'ll, T>>,
Option<RwLockWriteGuard<'ll, NodePtrPair<'ll, T>>>,
);
type WriteOnlyAroundTriplet<'ll, T> = (
BackNodeWriteLock<'ll, T>,
Option<RwLockWriteGuard<'ll, NodeInner<'ll, T>>>,
Option<RwLockWriteGuard<'ll, NodePtrPair<'ll, T>>>,
);
impl<'ll, T> Node<'ll, T> {
// TODO: think about the isolated state of the following 3 fn's
impl<'ll, T> Node<'ll, T> {
/// Creates a new node in the heap, will link to `prev` and `next` but will be isolated, it can
/// be thought of as just its data and the two pointers; having it isolated doesn't guarantee any
/// integration into the linked list.
@@ -81,19 +65,10 @@ impl<'ll, T> Node<'ll, T> {
/// considered that the `prev` and `next` refs are being held.
#[must_use]
pub fn new(data: T, prev: NodeBackPtr<'ll, T>, next: Option<&'ll Node<'ll, T>>) -> Self {
Self(RwLock::new(NodeInner { prev, next, data }))
Self {
ptrs: RwLock::new(NodePtrPair { prev, next }),
data,
}
/// Boxes [`self`]
#[must_use]
pub fn boxed(self) -> Box<Self> {
Box::new(self)
}
/// Leaks [`self`] as a [`Box<Self>`]
#[must_use]
pub fn leak(self: Box<Self>) -> &'static mut Self {
Box::leak(self)
}
pub fn new_leaked(
@@ -104,6 +79,52 @@ impl<'ll, T> Node<'ll, T> {
Box::leak(Box::new(Self::new(data, prev, next)))
}
/// Boxes [`self`]
#[must_use]
pub fn boxed(self) -> Box<Self> {
Box::new(self)
}
}
impl<'ll, T: ?Sized> Node<'ll, T> {
/// # Panics
///
/// On arithmetic overflow
fn layout_for(data: &T) -> (Layout, usize) {
Layout::new::<RwLock<NodePtrPair<'ll, T>>>()
.extend(Layout::for_value(data))
.unwrap()
}
/// [`Self::new_leaked`] from a DST type
pub fn leaked_from_dst(
data: &T,
prev: NodeBackPtr<'ll, T>,
next: Option<&'ll Node<'ll, T>>,
) -> &'ll mut Self
where
T: dst_clone::DstClone,
{
let (layout, data_offset) = Self::layout_for(data);
let ptr = unsafe { alloc(layout) };
if ptr.is_null() {
handle_alloc_error(layout)
}
unsafe {
*(ptr.cast::<RwLock<NodePtrPair<'ll, T>>>()) = RwLock::new(NodePtrPair { prev, next });
data.copy(ptr.add(data_offset));
}
Box::leak(unsafe { Box::from_raw(ptr::from_raw_parts_mut(ptr, metadata(data))) })
}
/// Leaks [`self`] as a [`Box<Self>`]
#[must_use]
pub fn leak(self: Box<Self>) -> &'static mut Self {
Box::leak(self)
}
/// # Safety
///
/// The [`self`] pointer must come from a [`Box`] allocation like [`Self::boxed`] and
@@ -119,8 +140,8 @@ impl<'ll, T> Node<'ll, T> {
/// There must be no references left to [`self`]
pub unsafe fn wait_free(&self) {
loop {
if self.is_locked() {
drop(self.write());
if self.ptrs.is_locked() {
drop(self.ptrs.write());
} else {
break;
}
@@ -131,7 +152,7 @@ impl<'ll, T> Node<'ll, T> {
}
pub fn lock_and_back(&'ll self) -> WriteAndBackDoublet<'ll, T> {
let mut self_read = self.upgradable_read();
let mut self_read = self.ptrs.upgradable_read();
// "soft" back lock
match self_read.prev.try_write() {
Some(prev_write) => (prev_write, self_read),
@@ -162,7 +183,7 @@ impl<'ll, T> Node<'ll, T> {
let (prev_write, self_read) = self.lock_and_back();
// Now `prev` is write locked and we can block forwards
let self_write = RwLockUpgradableReadGuard::upgrade(self_read);
let next_write = self_write.next.map(|n| n.write());
let next_write = self_write.next.map(|n| n.ptrs.write());
(prev_write, self_write, next_write)
}
@@ -172,7 +193,7 @@ impl<'ll, T> Node<'ll, T> {
/// The passed locks must also be consecutive for this to respect topology.
///
/// This node will remain isolated. See [`topology_safety`].
pub unsafe fn isolate(self_read: &NodeInner<'ll, T>, locks: WriteOnlyAroundTriplet<'ll, T>) {
pub unsafe fn isolate(self_read: &NodePtrPair<'ll, T>, locks: WriteOnlyAroundTriplet<'ll, T>) {
let (mut back_write, next_write) = locks;
back_write.set_next(self_read.next);
@@ -217,19 +238,19 @@ impl<'ll, T> Node<'ll, T> {
}
/// Generic Write Lock of a [`NodeBackPtr`]
pub enum BackNodeWriteLock<'ll, T> {
pub enum BackNodeWriteLock<'ll, T: ?Sized> {
Head(RwLockWriteGuard<'ll, NodeHeadInner<'ll, T>>),
Node(RwLockWriteGuard<'ll, NodeInner<'ll, T>>),
Node(RwLockWriteGuard<'ll, NodePtrPair<'ll, T>>),
}
/// Generic Read Lock of a [`NodeBackPtr`]
pub enum BackNodeReadLock<'ll, T> {
pub enum BackNodeReadLock<'ll, T: ?Sized> {
Head(RwLockReadGuard<'ll, NodeHeadInner<'ll, T>>),
Node(RwLockReadGuard<'ll, NodeInner<'ll, T>>),
Node(RwLockReadGuard<'ll, NodePtrPair<'ll, T>>),
}
#[allow(clippy::enum_glob_use)]
impl<'ll, T> NodeBackPtr<'ll, T> {
impl<'ll, T: ?Sized> NodeBackPtr<'ll, T> {
#[must_use]
pub fn ptr_eq(&self, other: &Self) -> bool {
use NodeBackPtr::*;
@@ -258,7 +279,7 @@ impl<'ll, T> NodeBackPtr<'ll, T> {
match self {
Head(h) => WL::Head(h.write()),
Node(n) => WL::Node(n.write()),
Node(n) => WL::Node(n.ptrs.write()),
}
}
@@ -270,7 +291,7 @@ impl<'ll, T> NodeBackPtr<'ll, T> {
match self {
Head(h) => RL::Head(h.read()),
Node(n) => RL::Node(n.read()),
Node(n) => RL::Node(n.ptrs.read()),
}
}
@@ -282,7 +303,7 @@ impl<'ll, T> NodeBackPtr<'ll, T> {
Some(match self {
Head(h) => WL::Head(h.try_write()?),
Node(n) => WL::Node(n.try_write()?),
Node(n) => WL::Node(n.ptrs.try_write()?),
})
}
@@ -294,12 +315,12 @@ impl<'ll, T> NodeBackPtr<'ll, T> {
Some(match self {
Head(h) => RL::Head(h.try_read()?),
Node(n) => RL::Node(n.try_read()?),
Node(n) => RL::Node(n.ptrs.try_read()?),
})
}
}
impl<'ll, T> BackNodeWriteLock<'ll, T> {
impl<'ll, T: ?Sized> BackNodeWriteLock<'ll, T> {
#[allow(clippy::enum_glob_use)]
fn set_next(&mut self, next: Option<&'ll Node<'ll, T>>) {
use BackNodeWriteLock::*;
@@ -311,4 +332,4 @@ impl<'ll, T> BackNodeWriteLock<'ll, T> {
}
}
impl<T> NodeInner<'_, T> {}
impl<T> Node<'_, T> {}

View File

@@ -68,4 +68,7 @@
//! This is more a graph than a linked list.
#[allow(unused_imports)]
use {super::NodeHeadInner, std::mem::ManuallyDrop};
use {
super::{super::LinkedList, NodeHeadInner},
std::mem::ManuallyDrop,
};

View File

@@ -2,7 +2,7 @@
use std::{
fmt::Debug,
pin::pin,
ptr,
sync::{
Barrier,
atomic::{AtomicUsize, Ordering},
@@ -12,40 +12,37 @@ use std::{
use super::*;
static DROP_C: AtomicUsize = AtomicUsize::new(0);
#[test]
fn concurrency_and_scoped_drop() {
static DROP_C: AtomicUsize = AtomicUsize::new(0);
#[derive(Clone)]
struct StringWithDrop(String);
#[derive(Clone)]
struct StringWithDrop(String);
impl From<String> for StringWithDrop {
impl From<String> for StringWithDrop {
fn from(value: String) -> Self {
Self(value)
}
}
}
impl Drop for StringWithDrop {
impl Drop for StringWithDrop {
fn drop(&mut self) {
DROP_C.fetch_add(1, Ordering::Relaxed);
}
}
}
impl Debug for StringWithDrop {
impl Debug for StringWithDrop {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
<String as Debug>::fmt(&self.0, f)
}
}
}
#[test]
fn concurrency_and_scoped_drop() {
const CREATE1: usize = 100;
const CREATE2: usize = 100;
const NOT_POP: usize = 20;
{
// TODO: make this a macro or a "guard" struct that can be dropped
let ll = pin!(LinkedList::<StringWithDrop>::new());
let llref = unsafe { LinkedList::get_self_ref(ll.as_ref()) };
create_ll!(LinkedList::<StringWithDrop>::new(), ll_val, ll);
let barrier = Barrier::new(3);
thread::scope(|s| {
@@ -53,14 +50,14 @@ fn concurrency_and_scoped_drop() {
barrier.wait();
for n in 0..CREATE1 {
llref.prepend(format!("A {n}").into());
ll.prepend(format!("A {n}").into());
}
});
s.spawn(|| {
barrier.wait();
for n in 0..CREATE2 {
llref.prepend(format!("B {n}").into());
ll.prepend(format!("B {n}").into());
}
});
s.spawn(|| {
@@ -68,7 +65,7 @@ fn concurrency_and_scoped_drop() {
for _ in 0..(CREATE1 + CREATE2 - NOT_POP) {
unsafe {
while llref.pop().is_none() {
while ll.pop().is_none() {
std::thread::yield_now();
}
}
@@ -77,7 +74,22 @@ fn concurrency_and_scoped_drop() {
});
assert_eq!(DROP_C.load(Ordering::Relaxed), CREATE1 + CREATE2 - NOT_POP);
}
del_ll!(ll_val, ll);
assert_eq!(DROP_C.load(Ordering::Relaxed), CREATE1 + CREATE2);
}
}
#[test]
fn unsized_store() {
create_ll!(LinkedList::<str>::new(), ll_val, ll);
let inserted = ll.prepend_dst("test");
let first_node = ll.0.read().start.expect("first node to be some");
assert!(ptr::addr_eq(inserted, first_node));
assert_eq!(&**inserted, "test");
del_ll!(ll_val, ll);
}

View File

@@ -2,6 +2,8 @@
#![feature(
arbitrary_self_types,
arbitrary_self_types_pointers,
decl_macro,
ptr_metadata
)]
#![warn(clippy::pedantic)]
@@ -10,4 +12,4 @@ use std::{pin::Pin, sync::Arc};
pub mod double;
pub use double::NodeHeadInner as DoublyLinkedList;
pub use double::LinkedList as DoublyLinkedList;