pub trait LinkedList: Sized {
    // Required method
    fn link_ref(&self) -> &AtomicShared<Self>;

    // Provided methods
    fn is_clear(&self, order: Ordering) -> bool { ... }
    fn mark(&self, order: Ordering) -> bool { ... }
    fn unmark(&self, order: Ordering) -> bool { ... }
    fn is_marked(&self, order: Ordering) -> bool { ... }
    fn delete_self(&self, order: Ordering) -> bool { ... }
    fn is_deleted(&self, order: Ordering) -> bool { ... }
    fn push_back<'g>(
        &self,
        entry: Shared<Self>,
        mark: bool,
        order: Ordering,
        guard: &'g Guard,
    ) -> Result<Ptr<'g, Self>, Shared<Self>> { ... }
    fn next_ptr<'g>(&self, order: Ordering, guard: &'g Guard) -> Ptr<'g, Self> { ... }
    fn next_shared(
        &self,
        order: Ordering,
        guard: &Guard,
    ) -> Option<Shared<Self>> { ... }
}
LinkedList is a type trait implementing a lock-free singly linked list.
Required Methods§
fn link_ref(&self) -> &AtomicShared<Self>
Returns a reference to the forward link.
The pointer value may be tagged if Self::mark or Self::delete_self has been
invoked. The AtomicShared must only be updated through LinkedList in order to keep
the linked list consistent.
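A typical implementation stores the forward link as a field and returns a reference to it; the minimal sketch below mirrors the illustrative node type `L` used in the provided-method examples.
§Examples
use scc::LinkedList;
use scc::ebr::AtomicShared;
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        // The first field is the forward link managed through `LinkedList`.
        &self.0
    }
}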
Provided Methods§
fn is_clear(&self, order: Ordering) -> bool
Returns true if self is reachable and not marked.
§Examples
use scc::LinkedList;
use scc::ebr::{AtomicShared, Tag};
use std::sync::atomic::Ordering::Relaxed;
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        &self.0
    }
}
let head: L = L::default();
assert!(head.is_clear(Relaxed));
assert!(head.mark(Relaxed));
assert!(!head.is_clear(Relaxed));
assert!(head.delete_self(Relaxed));
assert!(!head.is_clear(Relaxed));
fn mark(&self, order: Ordering) -> bool
Marks self with an internal flag to denote that self is in a special state.
Returns false if self has already been marked.
§Examples
use scc::LinkedList;
use scc::ebr::AtomicShared;
use std::sync::atomic::Ordering::Relaxed;
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        &self.0
    }
}
let head: L = L::default();
assert!(head.mark(Relaxed));
fn unmark(&self, order: Ordering) -> bool
Removes any mark from self.
Returns false if self has not been marked.
§Examples
use scc::LinkedList;
use scc::ebr::AtomicShared;
use std::sync::atomic::Ordering::Relaxed;
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        &self.0
    }
}
let head: L = L::default();
assert!(!head.unmark(Relaxed));
assert!(head.mark(Relaxed));
assert!(head.unmark(Relaxed));
assert!(!head.is_marked(Relaxed));
fn is_marked(&self, order: Ordering) -> bool
Returns true if self has a mark on it.
§Examples
use scc::LinkedList;
use scc::ebr::AtomicShared;
use std::sync::atomic::Ordering::Relaxed;
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        &self.0
    }
}
let head: L = L::default();
assert!(!head.is_marked(Relaxed));
assert!(head.mark(Relaxed));
assert!(head.is_marked(Relaxed));
fn delete_self(&self, order: Ordering) -> bool
Deletes self.
Returns false if self has already been marked as deleted.
§Examples
use scc::LinkedList;
use scc::ebr::{AtomicShared, Guard, Shared};
use std::sync::atomic::Ordering::Relaxed;
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        &self.0
    }
}
let guard = Guard::new();
let head: L = L::default();
let tail: Shared<L> = Shared::new(L::default());
assert!(head.push_back(tail.clone(), false, Relaxed, &guard).is_ok());
tail.delete_self(Relaxed);
assert!(head.next_ptr(Relaxed, &guard).as_ref().is_none());
fn is_deleted(&self, order: Ordering) -> bool
Returns true if self has been deleted.
§Examples
use scc::LinkedList;
use scc::ebr::AtomicShared;
use std::sync::atomic::Ordering::Relaxed;
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        &self.0
    }
}
let entry: L = L::default();
assert!(!entry.is_deleted(Relaxed));
entry.delete_self(Relaxed);
assert!(entry.is_deleted(Relaxed));
fn push_back<'g>(
    &self,
    entry: Shared<Self>,
    mark: bool,
    order: Ordering,
    guard: &'g Guard,
) -> Result<Ptr<'g, Self>, Shared<Self>>
Appends the given entry to self and returns a pointer to the entry.
If mark is true, an internal flag on self is atomically marked while the linked list is updated; otherwise, any existing mark is removed.
§Errors
Returns the supplied Shared when it finds self deleted.
§Examples
use scc::LinkedList;
use scc::ebr::{AtomicShared, Guard, Shared};
use std::sync::atomic::Ordering::{Relaxed, Release};
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        &self.0
    }
}
let guard = Guard::new();
let head: L = L::default();
assert!(head.push_back(Shared::new(L::default()), true, Release, &guard).is_ok());
assert!(head.is_marked(Relaxed));
assert!(head.push_back(Shared::new(L::default()), false, Release, &guard).is_ok());
assert!(!head.is_marked(Relaxed));
head.delete_self(Relaxed);
assert!(!head.is_marked(Relaxed));
assert!(head.push_back(Shared::new(L::default()), false, Release, &guard).is_err());
fn next_ptr<'g>(&self, order: Ordering, guard: &'g Guard) -> Ptr<'g, Self>
Returns the closest next valid entry.
It unlinks deleted entries until it reaches a valid one.
§Examples
use scc::LinkedList;
use scc::ebr::{AtomicShared, Guard, Shared};
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        &self.0
    }
}
let guard = Guard::new();
let head: L = L::default();
assert!(
    head.push_back(Shared::new(L(AtomicShared::null(), 1)), false, Release, &guard).is_ok()
);
head.mark(Relaxed);
let next_ptr = head.next_ptr(Acquire, &guard);
assert_eq!(next_ptr.as_ref().unwrap().1, 1);
assert!(head.is_marked(Relaxed));
fn next_shared(&self, order: Ordering, guard: &Guard) -> Option<Shared<Self>>
Returns a Shared handle to the closest next valid entry.
It unlinks deleted entries until it reaches a valid one.
§Examples
use scc::LinkedList;
use scc::ebr::{AtomicShared, Guard, Shared};
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        &self.0
    }
}
let guard = Guard::new();
let head: L = L::default();
assert!(
    head.push_back(Shared::new(L(AtomicShared::null(), 1)), false, Release, &guard).is_ok()
);
head.mark(Relaxed);
let next_shared = head.next_shared(Acquire, &guard);
assert_eq!(next_shared.unwrap().1, 1);
assert!(head.is_marked(Relaxed));
Dyn Compatibility§
This trait is not dyn compatible.
In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.
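Putting the pieces together, the minimal sketch below builds a short list with push_back and walks it by calling next_ptr on each reachable entry; it assumes the same illustrative node type `L` as the examples above, and the payload sum is used so the assertion does not depend on the relative order of the two entries.
use scc::LinkedList;
use scc::ebr::{AtomicShared, Guard, Shared};
use std::sync::atomic::Ordering::{Acquire, Release};
#[derive(Default)]
struct L(AtomicShared<L>, usize);
impl LinkedList for L {
    fn link_ref(&self) -> &AtomicShared<L> {
        &self.0
    }
}
let guard = Guard::new();
let head: L = L::default();
// Append two entries to `head`.
assert!(head.push_back(Shared::new(L(AtomicShared::null(), 1)), false, Release, &guard).is_ok());
assert!(head.push_back(Shared::new(L(AtomicShared::null(), 2)), false, Release, &guard).is_ok());
// Walk the list from `head`, summing the payload of every reachable entry.
let mut sum = 0;
let mut cursor = head.next_ptr(Acquire, &guard);
while let Some(entry) = cursor.as_ref() {
    sum += entry.1;
    cursor = entry.next_ptr(Acquire, &guard);
}
assert_eq!(sum, 3);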