From 56b4c2d46bd13ebf36ef231dcd77ef084169e059 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tin=20=C5=A0vagelj?=
Date: Fri, 22 Mar 2024 04:24:40 +0100
Subject: [PATCH] Ready 0.5 release.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Tin Švagelj
---
 Cargo.toml               |   4 +-
 README.md                |   9 +-
 doc/crate.md             |   2 +-
 doc/struct.md            | 247 ---------------------------------------
 examples/default_impl.rs |   4 +
 examples/unsafe_impl.rs  |   7 +-
 src/error.rs             |  25 ++--
 src/lib.rs               | 110 +++++++----------
 src/memory.rs            | 212 ++++++++++++++------------------
 src/range.rs             |   7 +-
 src/raw.rs               |  44 ++++++-
 src/reference.rs         |  84 ++++++-------
 src/types.rs             |  78 +++++++++----
 test.rs                  |   3 -
 14 files changed, 302 insertions(+), 534 deletions(-)
 delete mode 100644 doc/struct.md
 delete mode 100644 test.rs

diff --git a/Cargo.toml b/Cargo.toml
index ed78f37..04b8b14 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -15,10 +15,10 @@ path = "examples/ptr_metadata.rs"
 required-features = ["ptr_metadata"]
 
 [features]
-default = ["unsafe_impl", "ptr_metadata", "debug"]
+default = ["unsafe_impl", "debug"]
 no_std = [] # No-std support
-debug = [] # Enable debug attributes
+debug = [] # Enable debug attributes
 
 # Implementations
 unsafe_impl = []
diff --git a/README.md b/README.md
index d57aeca..c566764 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 # contiguous_mem
 
 contiguous_mem is a vector like collection that can store entries with
-heterogeneous layouts while retaining type safety at the reference level.
+heterogeneous layouts while retaining type information at the reference level.
 
 [![Crate](https://img.shields.io/crates/v/contiguous_mem?style=for-the-badge&logo=docs.rs)](https://crates.io/crates/contiguous_mem)
 [![Documentation](https://img.shields.io/docsrs/contiguous-mem?style=for-the-badge&logo=rust)](https://docs.rs/contiguous-mem)
@@ -32,7 +32,8 @@ over when your scope and requirements shift.
 ## Use cases
 
 - Storing differently typed/sized data. ([example](./examples/default_impl.rs))
-- Ensuring stored data is placed adjacently in memory. ([example](./examples/game_loading.rs))
+- Ensuring stored data is placed adjacently in memory.
+  ([example](./examples/game_loading.rs))
   - Note that returned references are **not** contiguous, only data they
     refer to is.
 
@@ -42,14 +43,14 @@ Add the crate to your dependencies:
 
 ```toml
 [dependencies]
-contiguous_mem = { version = "0.4" }
+contiguous_mem = { version = "0.5" }
 ```
 
 Optionally enable `no_std` feature to use in `no_std` environment:
 
 ```toml
 [dependencies]
-contiguous_mem = { version = "0.4", features = ["no_std"] }
+contiguous_mem = { version = "0.5", features = ["no_std"] }
 ```
 
 ### Features
diff --git a/doc/crate.md b/doc/crate.md
index f75179d..e16a2b6 100644
--- a/doc/crate.md
+++ b/doc/crate.md
@@ -1,5 +1,5 @@
 contiguous_mem is a vector like collection that can store entries with
-heterogeneous layouts while retaining type safety at the reference level.
+heterogeneous layouts while retaining type information at the reference level.
 
 ## Features
diff --git a/doc/struct.md b/doc/struct.md
deleted file mode 100644
index f672103..0000000
--- a/doc/struct.md
+++ /dev/null
@@ -1,247 +0,0 @@
-
- A contiguous growable array type, written as `Vec`, short for 'vector'.
- - # Examples - - ``` - let mut vec = Vec::new(); - vec.push(1); - vec.push(2); - - assert_eq!(vec.len(), 2); - assert_eq!(vec[0], 1); - - assert_eq!(vec.pop(), Some(2)); - assert_eq!(vec.len(), 1); - - vec[0] = 7; - assert_eq!(vec[0], 7); - - vec.extend([1, 2, 3]); - - for x in &vec { - println!("{x}"); - } - assert_eq!(vec, [7, 1, 2, 3]); - ``` - - The [`vec!`] macro is provided for convenient initialization: - - ``` - let mut vec1 = vec![1, 2, 3]; - vec1.push(4); - let vec2 = Vec::from([1, 2, 3, 4]); - assert_eq!(vec1, vec2); - ``` - - It can also initialize each element of a `Vec` with a given value. - This may be more efficient than performing allocation and initialization - in separate steps, especially when initializing a vector of zeros: - - ``` - let vec = vec![0; 5]; - assert_eq!(vec, [0, 0, 0, 0, 0]); - - // The following is equivalent, but potentially slower: - let mut vec = Vec::with_capacity(5); - vec.resize(5, 0); - assert_eq!(vec, [0, 0, 0, 0, 0]); - ``` - - For more information, see - [Capacity and Reallocation](#capacity-and-reallocation). - - Use a `Vec` as an efficient stack: - - ``` - let mut stack = Vec::new(); - - stack.push(1); - stack.push(2); - stack.push(3); - - while let Some(top) = stack.pop() { - // Prints 3, 2, 1 - println!("{top}"); - } - ``` - - # Indexing - - The `Vec` type allows access to values by index, because it implements the - [`Index`] trait. An example will be more explicit: - - ``` - let v = vec![0, 2, 4, 6]; - println!("{}", v[1]); // it will display '2' - ``` - - However be careful: if you try to access an index which isn't in the `Vec`, - your software will panic! You cannot do this: - - ```should_panic - let v = vec![0, 2, 4, 6]; - println!("{}", v[6]); // it will panic! - ``` - - Use [`get`] and [`get_mut`] if you want to check whether the index is in - the `Vec`. - - # Slicing - - A `Vec` can be mutable. On the other hand, slices are read-only objects. - To get a [slice][prim@slice], use [`&`]. Example: - - ``` - fn read_slice(slice: &[usize]) { - // ... - } - - let v = vec![0, 1]; - read_slice(&v); - - // ... and that's all! - // you can also do it like this: - let u: &[usize] = &v; - // or like this: - let u: &[_] = &v; - ``` - - In Rust, it's more common to pass slices as arguments rather than vectors - when you just want to provide read access. The same goes for [`String`] and - [`&str`]. - - # Capacity and reallocation - - The capacity of a vector is the amount of space allocated for any future - elements that will be added onto the vector. This is not to be confused with - the *length* of a vector, which specifies the number of actual elements - within the vector. If a vector's length exceeds its capacity, its capacity - will automatically be increased, but its elements will have to be - reallocated. - - For example, a vector with capacity 10 and length 0 would be an empty vector - with space for 10 more elements. Pushing 10 or fewer elements onto the - vector will not change its capacity or cause reallocation to occur. However, - if the vector's length is increased to 11, it will have to reallocate, which - can be slow. For this reason, it is recommended to use [`Vec::with_capacity`] - whenever possible to specify how big the vector is expected to get. - - # Guarantees - - Due to its incredibly fundamental nature, `Vec` makes a lot of guarantees - about its design. This ensures that it's as low-overhead as possible in - the general case, and can be correctly manipulated in primitive ways - by unsafe code. 
Note that these guarantees refer to an unqualified `Vec`. - If additional type parameters are added (e.g., to support custom allocators), - overriding their defaults may change the behavior. - - Most fundamentally, `Vec` is and always will be a (pointer, capacity, length) - triplet. No more, no less. The order of these fields is completely - unspecified, and you should use the appropriate methods to modify these. - The pointer will never be null, so this type is null-pointer-optimized. - - However, the pointer might not actually point to allocated memory. In particular, - if you construct a `Vec` with capacity 0 via [`Vec::new`], [`vec![]`][`vec!`], - [`Vec::with_capacity(0)`][`Vec::with_capacity`], or by calling [`shrink_to_fit`] - on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized - types inside a `Vec`, it will not allocate space for them. *Note that in this case - the `Vec` might not report a [`capacity`] of 0*. `Vec` will allocate if and only - if [mem::size_of::\]\() * [capacity]\() > 0. In general, `Vec`'s allocation - details are very subtle --- if you intend to allocate memory using a `Vec` - and use it for something else (either to pass to unsafe code, or to build your - own memory-backed collection), be sure to deallocate this memory by using - `from_raw_parts` to recover the `Vec` and then dropping it. - - If a `Vec` *has* allocated memory, then the memory it points to is on the heap - (as defined by the allocator Rust is configured to use by default), and its - pointer points to [`len`] initialized, contiguous elements in order (what - you would see if you coerced it to a slice), followed by [capacity] - [len] - logically uninitialized, contiguous elements. - - A vector containing the elements `'a'` and `'b'` with capacity 4 can be - visualized as below. The top part is the `Vec` struct, it contains a - pointer to the head of the allocation in the heap, length and capacity. - The bottom part is the allocation on the heap, a contiguous memory block. - - ```text - ptr len capacity - +--------+--------+--------+ - | 0x0123 | 2 | 4 | - +--------+--------+--------+ - | - v - Heap +--------+--------+--------+--------+ - | 'a' | 'b' | uninit | uninit | - +--------+--------+--------+--------+ - ``` - - - **uninit** represents memory that is not initialized, see [`MaybeUninit`]. - - Note: the ABI is not stable and `Vec` makes no guarantees about its memory - layout (including the order of fields). - - `Vec` will never perform a "small optimization" where elements are actually - stored on the stack for two reasons: - - * It would make it more difficult for unsafe code to correctly manipulate - a `Vec`. The contents of a `Vec` wouldn't have a stable address if it were - only moved, and it would be more difficult to determine if a `Vec` had - actually allocated memory. - - * It would penalize the general case, incurring an additional branch - on every access. - - `Vec` will never automatically shrink itself, even if completely empty. This - ensures no unnecessary allocations or deallocations occur. Emptying a `Vec` - and then filling it back up to the same [`len`] should incur no calls to - the allocator. If you wish to free up unused memory, use - [`shrink_to_fit`] or [`shrink_to`]. - - [`push`] and [`insert`] will never (re)allocate if the reported capacity is - sufficient. [`push`] and [`insert`] *will* (re)allocate if - [len] == [capacity]. That is, the reported capacity is completely - accurate, and can be relied on. 
It can even be used to manually free the memory - allocated by a `Vec` if desired. Bulk insertion methods *may* reallocate, even - when not necessary. - - `Vec` does not guarantee any particular growth strategy when reallocating - when full, nor when [`reserve`] is called. The current strategy is basic - and it may prove desirable to use a non-constant growth factor. Whatever - strategy is used will of course guarantee *O*(1) amortized [`push`]. - - `vec![x; n]`, `vec![a, b, c, d]`, and - [`Vec::with_capacity(n)`][`Vec::with_capacity`], will all produce a `Vec` - with exactly the requested capacity. If [len] == [capacity], - (as is the case for the [`vec!`] macro), then a `Vec` can be converted to - and from a [`Box<[T]>`][owned slice] without reallocating or moving the elements. - - `Vec` will not specifically overwrite any data that is removed from it, - but also won't specifically preserve it. Its uninitialized memory is - scratch space that it may use however it wants. It will generally just do - whatever is most efficient or otherwise easy to implement. Do not rely on - removed data to be erased for security purposes. Even if you drop a `Vec`, its - buffer may simply be reused by another allocation. Even if you zero a `Vec`'s memory - first, that might not actually happen because the optimizer does not consider - this a side-effect that must be preserved. There is one case which we will - not break, however: using `unsafe` code to write to the excess capacity, - and then increasing the length to match, is always valid. - - Currently, `Vec` does not guarantee the order in which elements are dropped. - The order has changed in the past and may change again. - - [`get`]: slice::get - [`get_mut`]: slice::get_mut - [`String`]: crate::string::String - [`&str`]: type@str - [`shrink_to_fit`]: Vec::shrink_to_fit - [`shrink_to`]: Vec::shrink_to - [capacity]: Vec::capacity - [`capacity`]: Vec::capacity - [mem::size_of::\]: core::mem::size_of - [len]: Vec::len - [`len`]: Vec::len - [`push`]: Vec::push - [`insert`]: Vec::insert - [`reserve`]: Vec::reserve - [`MaybeUninit`]: core::mem::MaybeUninit - [owned slice]: Box \ No newline at end of file diff --git a/examples/default_impl.rs b/examples/default_impl.rs index b33e71d..b3afac2 100644 --- a/examples/default_impl.rs +++ b/examples/default_impl.rs @@ -17,4 +17,8 @@ fn main() { // Retrieve and use the stored data assert_eq!(*stored_data.get(), data); assert_eq!(*stored_number.get(), 22); + + // All stored data gets cleaned up once `memory` goes out of scope, or we + // can forget it existed: + memory.forget(); } diff --git a/examples/unsafe_impl.rs b/examples/unsafe_impl.rs index bfd8d5f..208bb90 100644 --- a/examples/unsafe_impl.rs +++ b/examples/unsafe_impl.rs @@ -6,7 +6,8 @@ struct Data { } fn main() { - // Create a ContiguousMemory instance with a capacity of 1024 bytes and 1-byte alignment + // Create a ContiguousMemory instance with a capacity of 1024 bytes and + // 1-byte alignment let mut memory = ContiguousMemory::::with_capacity(1024); // Store data in the memory container @@ -22,4 +23,8 @@ fn main() { assert!(!stored_number.is_null()); assert_eq!(*stored_number, 22); } + + // All stored data gets cleaned up once `memory` goes out of scope, or we + // can forget it existed: + memory.forget(); } diff --git a/src/error.rs b/src/error.rs index 91c8407..d7d3db0 100644 --- a/src/error.rs +++ b/src/error.rs @@ -3,19 +3,14 @@ #[cfg(any(feature = "error_in_core", not(feature = "no_std")))] use crate::types::Error; +use core::fmt::Debug; 
 #[cfg(any(not(feature = "no_std"), feature = "error_in_core"))]
 use core::fmt::{Display, Formatter, Result as FmtResult};
-use core::{cell::Ref, fmt::Debug};
-use crate::{
-    memory::ManageMemory,
-    range::ByteRange,
-    reference::BorrowState,
-    types::{ImplDetails, ImplReferencing, ReadableInner, WritableInner},
-};
+use crate::{range::ByteRange, reference::BorrowState};
 
 /// Represents a class of errors returned by invalid memory operations and
-/// allocator failiure.
+/// allocator failure.
 #[derive(Debug, Clone, Copy)]
 pub enum MemoryError {
     /// Tried allocating container capacity larger than `isize::MAX`
     TooLarge,
     /// Allocation failure caused by either resource exhaustion or invalid
     /// arguments being provided to an allocator.
     Allocator(
-        #[cfg(feature = "allocator_api")] core::alloc::AllocError,
+        /// The underlying allocator error.
+        #[cfg(feature = "allocator_api")]
+        core::alloc::AllocError,
         #[cfg(not(feature = "allocator_api"))] (),
     ),
 }
 
+impl From<core::alloc::LayoutError> for MemoryError {
+    fn from(_: core::alloc::LayoutError) -> Self {
+        Self::TooLarge
+    }
+}
+
 #[cfg(any(not(feature = "no_std"), feature = "error_in_core"))]
 impl Display for MemoryError {
     fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
@@ -59,8 +62,8 @@ impl From for MemoryError {
     }
 }
 
-/// Error returned when concurrent mutable access is attempted to the same
-/// memory region.
+/// Error returned when concurrent mutable access to the same memory region is
+/// attempted.
 #[derive(Debug)]
 pub struct RegionBorrowError {
     /// Range that was attempted to be borrowed.
diff --git a/src/lib.rs b/src/lib.rs
index 84f6a93..a7f5fab 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -20,7 +20,7 @@ pub mod types;
 
 // Re-exports
 pub use error::*;
 use reference::ConstructReference;
-pub use reference::{CERef, EntryRef};
+pub use reference::EntryRef;
 
 use core::mem::align_of;
 use core::{
@@ -41,9 +41,16 @@ use types::*;
 ///
 /// # Examples
 ///
+/// ## Default Implementation
+///
 /// ```
 #[doc = include_str!("../examples/default_impl.rs")]
 /// ```
+///
+/// ## Unsafe Implementation
+/// ```
+#[doc = include_str!("../examples/unsafe_impl.rs")]
+/// ```
 pub struct ContiguousMemory<
     Impl: ImplDetails<A> = ImplDefault,
     A: ManageMemory = DefaultMemoryManager,
@@ -471,24 +478,15 @@ impl<Impl: ImplDetails<A>, A: ManageMemory> ContiguousMemory<Impl, A> {
     pub fn try_grow_to(&mut self, new_capacity: usize) -> Result<Option<BasePtr>, MemoryError> {
         let mut base = WritableInner::write(&self.inner.base).unwrap();
 
-        let old_capacity = base.size();
         let new_capacity = WritableInner::write(&self.inner.tracker)
             .unwrap()
             .grow(new_capacity);
 
-        if new_capacity == old_capacity {
+        if new_capacity == base.size() {
             return Ok(None);
         };
 
-        let old_layout = base.layout();
-        let new_layout = Layout::from_size_align(new_capacity, base.alignment())
-            .map_err(|_| MemoryError::TooLarge)?;
-
         let prev_base = *base;
-        base.address = unsafe {
-            self.inner
-                .alloc
-                .grow(prev_base.address, old_layout, new_layout)?
-        };
+        base.address = unsafe { self.inner.alloc.grow(prev_base, new_capacity)? };
 
         Ok(if base.address != prev_base.address {
             Some(*base)
@@ -560,9 +558,8 @@ impl<Impl: ImplDetails<A>, A: ManageMemory> ContiguousMemory<Impl, A> {
     }
 
     /// Grows the underlying memory to ensure container has a free segment that
-    /// can store `capacity`.
-    /// This function might allocate more than requested amount of memory to
-    /// reduce number of reallocations.
+    /// can store `capacity`. This function might allocate more than requested
+    /// amount of memory to reduce number of reallocations.
/// /// If the base address changed due to reallocation, new [`BasePtr`] is /// returned as `Ok(Some(BasePtr))`, if base address stayed the same the @@ -585,9 +582,8 @@ impl, A: ManageMemory> ContiguousMemory { } /// Tries growing the underlying memory to ensure container has a free - /// segment that can store `capacity`. - /// This function might allocate more than requested amount of memory to - /// reduce number of reallocations. + /// segment that can store `capacity`. This function might allocate more + /// than requested amount of memory to reduce number of reallocations. /// /// If the base address changed due to reallocation, new [`BasePtr`] is /// returned as `Ok(Some(BasePtr))`, if base address stayed the same the @@ -612,8 +608,8 @@ impl, A: ManageMemory> ContiguousMemory { /// returned as `Ok(Some(BasePtr))`, if base address stayed the same the /// result is `Ok(None)`. /// - /// After calling this function, new capacity will be equal to: - /// `self.size() + capacity`. + /// After calling this function, new capacity will be equal to: `self.size() + /// + capacity`. /// /// # Panics /// @@ -654,8 +650,8 @@ impl, A: ManageMemory> ContiguousMemory { /// If the new capacity exceeds `isize::MAX` or the allocator couldn't /// allocate required memory, a [`MemoryError`] is returned. /// - /// After calling this function, new capacity will be equal to: - /// `self.size() + capacity`. + /// After calling this function, new capacity will be equal to: `self.size() + /// + capacity`. /// /// # Examples /// ``` @@ -686,9 +682,8 @@ impl, A: ManageMemory> ContiguousMemory { } /// Grows the underlying memory to ensure container has a free segment that - /// can store a value with provided `layout`. - /// This function might allocate more than requested amount of memory to - /// reduce number of reallocations. + /// can store a value with provided `layout`. This function might allocate + /// more than requested amount of memory to reduce number of reallocations. /// /// If the base address changed due to reallocation, new [`BasePtr`] is /// returned as `Ok(Some(BasePtr))`, if base address stayed the same the @@ -711,9 +706,9 @@ impl, A: ManageMemory> ContiguousMemory { } /// Tries growing the underlying memory to ensure container has a free - /// segment that can store a value with provided `layout`. - /// This function might allocate more than requested amount of memory to - /// reduce number of reallocations. + /// segment that can store a value with provided `layout`. This function + /// might allocate more than requested amount of memory to reduce number of + /// reallocations. /// /// If the base address changed due to reallocation, new [`BasePtr`] is /// returned as `Ok(Some(BasePtr))`, if base address stayed the same the @@ -768,8 +763,8 @@ impl, A: ManageMemory> ContiguousMemory { /// If the new capacity exceeds `isize::MAX` or the allocator couldn't /// allocate required memory, a [`MemoryError`] is returned. /// - /// After calling this function, new capacity will be equal to: - /// `self.size() + padding + layout.size()`. + /// After calling this function, new capacity will be equal to: `self.size() + /// + padding + layout.size()`. 
pub fn try_reserve_layout_exact( &mut self, layout: impl HasLayout, @@ -791,24 +786,12 @@ impl, A: ManageMemory> ContiguousMemory { let mut tracker = WritableInner::write(&self.inner.tracker).unwrap(); let new_capacity = tracker.shrink(new_capacity); let mut base = WritableInner::write(&self.inner.base).unwrap(); - - let old_layout = self.layout(); - if new_capacity == old_layout.size() { + if new_capacity == base.size() { return *base; } - let new_layout = unsafe { - // SAFETY: Previous layout was valid and had valid alignment, - // new one is smaller with same alignment so it must be - // valid as well. - Layout::from_size_align_unchecked(new_capacity, old_layout.align()) - }; - base.address = unsafe { - self.inner - .alloc - .shrink(base.address, self.layout(), new_layout) - } - .expect("unable to shrink the container"); + base.address = unsafe { self.inner.alloc.shrink(*base, new_capacity) } + .expect("unable to shrink the container"); *base } @@ -824,19 +807,8 @@ impl, A: ManageMemory> ContiguousMemory { Some(it) => it, None => return *base, }; - let old_layout = self.layout(); - let new_layout = unsafe { - // SAFETY: Previous layout was valid and had valid alignment, - // new one is smaller with same alignment so it must be - // valid as well. - Layout::from_size_align_unchecked(new_capacity, old_layout.align()) - }; - base.address = unsafe { - self.inner - .alloc - .shrink(base.address, self.layout(), new_layout) - } - .expect("unable to shrink the container"); + base.address = unsafe { self.inner.alloc.shrink(*base, new_capacity) } + .expect("unable to shrink the container"); *base } @@ -967,14 +939,14 @@ impl, A: ManageMemory> ContiguousMemory { result } - /// Assumes value is stored at the provided _relative_ `position` in - /// managed memory and returns a pointer or a reference to it. + /// Assumes value is stored at the provided _relative_ `position` in managed + /// memory and returns a pointer or a reference to it. /// /// # Safety /// /// This function isn't unsafe because creating an invalid pointer isn't - /// considered unsafe. Responsibility for guaranteeing safety falls on - /// code that's dereferencing the pointer. + /// considered unsafe. Responsibility for guaranteeing safety falls on code + /// that's dereferencing the pointer. pub fn assume_stored(&self, position: usize) -> Impl::PushResult { ConstructReference::new(&self.inner, ByteRange(position, position + size_of::())) } @@ -994,7 +966,7 @@ impl, A: ManageMemory> ContiguousMemory { Some(base) => unsafe { core::ptr::copy_nonoverlapping( base.as_ptr() as *const (), - result.base().as_ptr_mut_unchecked() as *mut (), + result.base().as_ptr_mut_unchecked(), current_layout.size(), ); }, @@ -1006,8 +978,8 @@ impl, A: ManageMemory> ContiguousMemory { result } - /// Marks the entire contents of the container as free, allowing new data - /// to be stored in place of previously stored data. + /// Marks the entire contents of the container as free, allowing new data to + /// be stored in place of previously stored data. /// /// This allows clearing persisted entries created with /// [`ContiguousMemory::push_persisted`] and @@ -1056,12 +1028,12 @@ impl, A: ManageMemory> ContiguousMemory { /// to state will not be dropped even when all of the created references go /// out of scope. As this method takes ownership of the container, calling /// it also ensures that dereferencing pointers created by - /// [`as_ptr`](refs::EntryRef::as_ptr) and related - /// `EntryRef` functions is guaranteed to be safe. 
+ /// [`as_ptr`](EntryRef::as_ptr) and related [`EntryRef`] functions is + /// guaranteed to be safe. /// /// This method isn't unsafe as leaking data doesn't cause undefined - /// behavior. - /// ([_see details_](https://doc.rust-lang.org/nomicon/leaking.html)) + /// behavior. ([_see + /// details_](https://doc.rust-lang.org/nomicon/leaking.html)) pub fn forget(self) -> MemoryBase { let base = self.base(); core::mem::forget(self); diff --git a/src/memory.rs b/src/memory.rs index 36eca7f..833b948 100644 --- a/src/memory.rs +++ b/src/memory.rs @@ -3,8 +3,7 @@ use core::cmp; use core::{alloc::Layout, ptr::NonNull}; -use crate::raw::MemoryBase; -pub use crate::raw::{BaseAddress, BasePtr}; +pub use crate::raw::{BaseAddress, BasePtr, MemoryBase}; use crate::types::HasLayout; #[cfg(feature = "no_std")] @@ -21,9 +20,9 @@ use alloc::Allocator; /// A structure that keeps track of unoccupied regions of memory. /// -/// This is used by [`ContiguousMemory`] to manage -/// positions of stored items while preventing overlap of assigned regions and -/// proper alignment of stored data. +/// This is used by [`ContiguousMemory`] to manage positions of stored items +/// while preventing overlap of assigned regions and proper alignment of stored +/// data. /// /// # Placement strategy /// @@ -112,7 +111,7 @@ impl SegmentTracker { let reduction = self.size - new_size; let reduction = cmp::min(reduction, last.len()); last.1 -= reduction; - if last.len() == 0 { + if last.is_empty() { self.unoccupied.pop(); } self.size -= reduction; @@ -137,9 +136,9 @@ impl SegmentTracker { Some(self.size) } - /// Returns `true` if the provided type `layout` can ne stored within any + /// Returns `true` if the provided type `layout` can be stored within any /// unused segments of the represented memory region. - pub fn can_store(&self, base_address: MemoryBase, layout: impl HasLayout) -> bool { + pub fn can_store(&self, base: MemoryBase, layout: impl HasLayout) -> bool { let layout = layout.as_layout(); if layout.size() == 0 { return true; @@ -148,7 +147,7 @@ impl SegmentTracker { } self.unoccupied.iter().enumerate().any(|(_, it)| { - it.offset(base_address.pos_or_align()) // absolute range + it.offset(base.pos_or_align()) // absolute range .aligned(layout.align()) // aligned to value .len() >= layout.size() @@ -162,8 +161,8 @@ impl SegmentTracker { /// represented memory region, `None` is returned instead. /// /// This function mutably borrows because the returned `Location` is only - /// valid until this tracker gets mutated from somewhere else. - /// The returned value can also apply mutation on `self` via a call to + /// valid until this tracker gets mutated from somewhere else. The returned + /// value can also apply mutation on `self` via a call to /// [`Location::mark_occupied`]. 
    pub fn peek_next(&mut self, base_pos: usize, layout: impl HasLayout) -> Option<Location<'_>> {
        let layout = layout.as_layout();
@@ -189,12 +188,7 @@
         let available = found_range.aligned(layout.align()).cap_size(layout.size());
 
-        Some(Location::new(
-            self,
-            found_position,
-            found_range.clone(),
-            available,
-        ))
+        Some(Location::new(self, found_position, *found_range, available))
     }
 
     /// Returns either a start position of a free byte range at the end of the
@@ -265,7 +259,7 @@
             .iter()
             .enumerate()
             .find(|it| it.0 > region.0)
-        {
+        {
             self.unoccupied.insert(i, region);
         } else {
             self.unoccupied.push(region);
@@ -297,7 +291,7 @@ mod tests {
     fn new_allocation_tracker() {
         let tracker = SegmentTracker::new(1024);
         assert_eq!(tracker.size(), 1024);
-        assert_eq!(tracker.is_full(), false);
+        assert!(!tracker.is_full());
         assert_eq!(tracker.whole_range(), ByteRange(0, 1024));
     }
 
@@ -311,7 +305,7 @@ mod tests {
         assert_eq!(range, ByteRange(0, 32));
 
         tracker.release(range);
-        assert_eq!(tracker.is_full(), false);
+        assert!(!tracker.is_full());
     }
 
     #[test]
@@ -324,9 +318,8 @@
     }
 }
 
-/// A result of [`SegmentTracker::peek_next`] which contains information
-/// about available allocation slot and wherein a certain [`Layout`] could be
-/// placed.
+/// A result of [`SegmentTracker::peek_next`] which contains information about
+/// an available allocation slot wherein a certain [`Layout`] could be placed.
 ///
 /// `'a` is the lifetime of the [`SegmentTracker`] that produced this struct.
 /// The reference is stored because it prevents any mutations from occurring on
@@ -382,13 +375,13 @@ impl<'a> Location<'a> {
         self.usable
     }
 
-    /// Returns `true` if the pointed to location is zero-sized.
+    /// Returns `true` if the pointed-to location is zero-sized.
     #[inline]
     pub fn is_zero_sized(&self) -> bool {
-        self.usable.len() == 0
+        self.usable.is_empty()
     }
 
-    /// Marks the pointed to location as occupied.
+    /// Marks the pointed-to location as occupied.
     pub fn mark_occupied(&mut self) {
         if self.is_zero_sized() {
             return;
@@ -438,16 +431,14 @@ pub trait ManageMemory {
     /// `layout` argument.
     fn allocate(&self, layout: Layout) -> Result<BaseAddress, MemoryError>;
 
-    /// Deallocates a block of memory of provided `layout` at the specified
-    /// `address`.
+    /// Deallocates the block of memory described by the provided `base`.
     ///
     /// # Safety
     ///
     /// See: [alloc::Allocator::deallocate]
-    unsafe fn deallocate(&self, address: BaseAddress, layout: Layout);
+    unsafe fn deallocate(&self, base: MemoryBase);
 
-    /// Shrinks the container underlying memory from `old_layout` size to
-    /// `new_layout`.
+    /// Shrinks the provided memory slice to `new_size`.
     ///
     /// Generally doesn't cause a move, but an implementation can choose to do
     /// so.
     ///
     /// # Safety
     ///
     /// See: [alloc::Allocator::shrink]
-    unsafe fn shrink(
-        &self,
-        address: BaseAddress,
-        old_layout: Layout,
-        new_layout: Layout,
-    ) -> Result<BaseAddress, MemoryError>;
-
-    /// Grows the container underlying memory from `old_layout` size to
-    /// `new_layout`.
+    unsafe fn shrink(&self, base: MemoryBase, new_size: usize) -> Result<BaseAddress, MemoryError>;
+
+    /// Grows the provided memory slice to `new_size`.
/// /// # Safety /// /// See: [alloc::Allocator::grow] - unsafe fn grow( - &self, - address: BaseAddress, - old_layout: Layout, - new_layout: Layout, - ) -> Result; + unsafe fn grow(&self, base: MemoryBase, new_size: usize) -> Result; } /// Default [memory manager](ManageMemory) that uses the methods exposed by @@ -494,50 +474,46 @@ impl ManageMemory for DefaultMemoryManager { } } - unsafe fn deallocate(&self, address: BaseAddress, layout: Layout) { - if let Some(it) = address { - alloc::dealloc(it.as_ptr() as *mut u8, layout); + unsafe fn deallocate(&self, base: MemoryBase) { + if let MemoryBase { + address: Some(it), .. + } = base + { + alloc::dealloc(it.as_ptr() as *mut u8, base.layout()); } } - unsafe fn shrink( - &self, - address: BaseAddress, - old_layout: Layout, - new_layout: Layout, - ) -> Result { - match address { - Some(it) => Ok(if new_layout.size() > 0 { - Some(NonNull::from(core::slice::from_raw_parts( - alloc::realloc(it.as_ptr() as *mut u8, old_layout, new_layout.size()), - new_layout.size(), - ))) - } else { - alloc::dealloc(it.as_ptr() as *mut u8, old_layout); - None + unsafe fn shrink(&self, base: MemoryBase, new_size: usize) -> Result { + match base.address { + Some(it) => Ok({ + if new_size > 0 { + Some(NonNull::from(core::slice::from_raw_parts( + alloc::realloc(it.as_ptr() as *mut u8, base.layout(), new_size), + new_size, + ))) + } else { + alloc::dealloc(it.as_ptr() as *mut u8, base.layout()); + None + } }), None => Ok(None), } } - unsafe fn grow( - &self, - address: BaseAddress, - old_layout: Layout, - new_layout: Layout, - ) -> Result { - match address { + unsafe fn grow(&self, base: MemoryBase, new_size: usize) -> Result { + match base.address { Some(it) => Ok(Some(NonNull::from(core::slice::from_raw_parts( - alloc::realloc(it.as_ptr() as *mut u8, old_layout, new_layout.size()), - new_layout.size(), + alloc::realloc(it.as_ptr() as *mut u8, base.layout(), new_size), + new_size, )))), None => Ok({ - if new_layout.size() == 0 { + if new_size == 0 { None } else { + let new_layout = Layout::from_size_align(new_size, base.alignment())?; Some(NonNull::from(core::slice::from_raw_parts( alloc::alloc(new_layout), - new_layout.size(), + new_size, ))) } }), @@ -557,29 +533,27 @@ impl ManageMemory for A { } } - unsafe fn deallocate(&self, address: BaseAddress, layout: Layout) { - if let Some(allocated) = address { - Allocator::deallocate( - self, - NonNull::new_unchecked(allocated.as_ptr() as *mut u8), - layout, - ) + unsafe fn deallocate(&self, base: MemoryBase) { + if base.is_allocated() { + unsafe { + Allocator::deallocate( + self, + NonNull::new_unchecked(base.as_ptr_mut()), + base.layout(), + ) + } } } - unsafe fn shrink( - &self, - address: BaseAddress, - old_layout: Layout, - new_layout: Layout, - ) -> Result { - match address { + unsafe fn shrink(&self, base: MemoryBase, new_size: usize) -> Result { + match base.address { Some(it) => { - if new_layout.size() > 0 { + if new_size > 0 { + let new_layout = Layout::from_size_align(new_size, base.alignment())?; Allocator::shrink( self, NonNull::new_unchecked(it.as_ptr() as *mut u8), - old_layout, + base.layout(), new_layout, ) .map(Some) @@ -588,7 +562,7 @@ impl ManageMemory for A { Allocator::deallocate( self, NonNull::new_unchecked(it.as_ptr() as *mut u8), - old_layout, + base.layout(), ); Ok(None) } @@ -597,25 +571,24 @@ impl ManageMemory for A { } } - unsafe fn grow( - &self, - address: BaseAddress, - old_layout: Layout, - new_layout: Layout, - ) -> Result { - match address { - Some(it) => Allocator::grow( - self, - 
NonNull::new_unchecked(it.as_ptr() as *mut u8), - old_layout, - new_layout, - ) - .map(Some) - .map_err(MemoryError::from), + unsafe fn grow(&self, base: MemoryBase, new_size: usize) -> Result { + match base.address { + Some(it) => { + let new_layout = Layout::from_size_align(new_size, base.alignment())?; + Allocator::grow( + self, + NonNull::new_unchecked(it.as_ptr() as *mut u8), + base.layout(), + new_layout, + ) + .map(Some) + .map_err(MemoryError::from) + } None => { - if new_layout.size() == 0 { + if new_size == 0 { Ok(None) } else { + let new_layout = Layout::from_size_align(new_size, base.alignment())?; Allocator::allocate(self, new_layout) .map(Some) .map_err(MemoryError::from) @@ -625,6 +598,7 @@ impl ManageMemory for A { } } +#[cfg(not(feature = "allocator_api"))] impl ManageMemory for D where D::Target: ManageMemory, @@ -633,25 +607,15 @@ where self.deref().allocate(layout) } - unsafe fn deallocate(&self, address: BaseAddress, layout: Layout) { - self.deref().deallocate(address, layout) + unsafe fn deallocate(&self, base: MemoryBase) { + self.deref().deallocate(base) } - unsafe fn shrink( - &self, - address: BaseAddress, - old_layout: Layout, - new_layout: Layout, - ) -> Result { - self.deref().shrink(address, old_layout, new_layout) + unsafe fn shrink(&self, base: MemoryBase, new_size: usize) -> Result { + self.deref().shrink(base, new_size) } - unsafe fn grow( - &self, - address: BaseAddress, - old_layout: Layout, - new_layout: Layout, - ) -> Result { - self.deref().grow(address, old_layout, new_layout) + unsafe fn grow(&self, base: MemoryBase, new_size: usize) -> Result { + self.deref().grow(base, new_size) } } diff --git a/src/range.rs b/src/range.rs index e4c931a..9861013 100644 --- a/src/range.rs +++ b/src/range.rs @@ -15,6 +15,7 @@ pub struct ByteRange( #[allow(unused)] impl ByteRange { + /// An empty byte range. pub const EMPTY: ByteRange = ByteRange(0, 0); /// Constructs a new byte range, ensuring that `from` and `to` are ordered @@ -80,7 +81,7 @@ impl ByteRange { self.1 - self.0 } - /// Returns true if this byte range is zero-sized. + /// Returns `true` if this byte range is zero-sized. #[inline] pub fn is_empty(&self) -> bool { self.0 == self.1 @@ -106,8 +107,8 @@ impl ByteRange { ByteRange(self.0.min(other.0), self.1.max(other.1)) } - /// Merges another `other` byte range into this one, resulting in a byte - /// range that contains both. + /// Merges `other` byte range into this one, resulting in a byte range that + /// contains both. pub fn apply_union_unchecked(&mut self, other: Self) { self.0 = self.0.min(other.0); self.1 = self.1.max(other.1); diff --git a/src/raw.rs b/src/raw.rs index 4e07375..f7fd2c5 100644 --- a/src/raw.rs +++ b/src/raw.rs @@ -57,7 +57,7 @@ impl, A: ManageMemory> MemoryState { impl, A: ManageMemory + Clone> Clone for MemoryState { fn clone(&self) -> Self { MemoryState { - base: Impl::Base::from(ReadableInner::read(&self.base).unwrap().clone()), + base: Impl::Base::from(*ReadableInner::read(&self.base).unwrap()), tracker: Impl::Tracker::from(ReadableInner::read(&self.tracker).unwrap().clone()), alloc: self.alloc.clone(), } @@ -67,30 +67,48 @@ impl, A: ManageMemory + Clone> Clone for MemoryState, A: ManageMemory> Drop for MemoryState { fn drop(&mut self) { if let Ok(base) = ReadableInner::read(&self.base) { - unsafe { A::deallocate(&self.alloc, base.address, base.layout()) } + unsafe { A::deallocate(&self.alloc, *base) } } } } +/// Memory allocation details. 
+///
+/// Unlike a fat pointer, this struct also stores information on the expected
+/// alignment the slice was allocated with, unifying [`Layout`] and pointer
+/// types.
+///
+/// It contains _any_ information required for allocation and deallocation;
+/// the exact details and layout of that data are an internal implementation
+/// detail.
 #[cfg_attr(feature = "debug", derive(Debug))]
 #[derive(Clone, Copy, PartialEq, Eq)]
 pub struct MemoryBase {
-    pub address: BaseAddress,
+    pub(crate) address: BaseAddress,
     alignment: usize,
 }
 
 impl MemoryBase {
+    /// Returns a const raw pointer to the first byte or `null` if not
+    /// allocated.
     #[inline]
     pub fn as_ptr(&self) -> *const u8 {
         self.address
             .map(|it| it.as_ptr() as *const u8)
             .unwrap_or_else(core::ptr::null)
     }
 
+    /// Returns a const raw pointer to the first byte.
+    ///
+    /// # Safety
+    ///
+    /// This method assumes the base address exists (has been allocated).
+    /// Calling it is UB if the slice wasn't yet allocated.
     #[inline]
     pub unsafe fn as_ptr_unchecked<T>(&self) -> *const T {
         self.address.unwrap_unchecked().as_ptr() as *const T
     }
 
+    /// Returns a mutable raw pointer to the first byte or `null` if not
+    /// allocated.
     #[inline]
     pub fn as_ptr_mut(&self) -> *mut u8 {
         self.address
@@ -98,16 +116,26 @@ impl MemoryBase {
             .unwrap_or_else(core::ptr::null_mut)
     }
 
+    /// Returns a mutable raw pointer to the first byte.
+    ///
+    /// # Safety
+    ///
+    /// This method assumes the base address exists (slice has been allocated).
+    /// Calling it is UB if the slice wasn't yet allocated.
     #[inline]
     pub unsafe fn as_ptr_mut_unchecked<T>(&self) -> *mut T {
         self.address.unwrap_unchecked().as_ptr() as *mut T
     }
 
+    /// Returns the absolute position of the slice in memory or 0 if not
+    /// allocated.
     #[inline]
    pub fn as_pos(&self) -> usize {
         self.as_ptr() as usize
     }
 
+    /// Returns the absolute position of the slice in memory or the targeted
+    /// alignment if not allocated.
     #[inline]
     pub fn pos_or_align(&self) -> usize {
         self.address
@@ -115,6 +143,14 @@ impl MemoryBase {
             .unwrap_or(self.alignment)
     }
 
+    /// Returns `true` if the slice has been allocated.
+    #[inline]
+    pub fn is_allocated(&self) -> bool {
+        self.address.is_some()
+    }
+
+    /// Returns the size of the allocation, or 0 if the slice hasn't been
+    /// allocated.
     #[inline]
     pub fn size(&self) -> usize {
         match self.address {
@@ -123,11 +159,13 @@ impl MemoryBase {
         }
     }
 
+    /// Returns the (targeted) alignment of the memory slice.
     #[inline]
     pub fn alignment(&self) -> usize {
         self.alignment
     }
 
+    /// Returns the layout of the memory slice.
     #[inline]
     pub fn layout(&self) -> Layout {
         unsafe { Layout::from_size_align_unchecked(self.size(), self.alignment) }
diff --git a/src/reference.rs b/src/reference.rs
index 7f6da37..266c4f1 100644
--- a/src/reference.rs
+++ b/src/reference.rs
@@ -1,12 +1,12 @@
 //! Returned reference types and read/write guards.
 //!
-//! See [`ContiguousMemoryStorage::push`](crate::ContiguousMemory::push)
-//! for information on implementation specific return values.
+//! See [`ContiguousMemory::push`](crate::ContiguousMemory::push) for
+//! information on implementation specific return values.
 
 use core::{
     marker::PhantomData,
     ops::{Deref, DerefMut},
-    ptr::{null, null_mut},
+    ptr::null_mut,
 };
 
 use crate::{
@@ -19,16 +19,13 @@ use core::marker::Unsize;
 use core::ptr::Pointee;
 
 /// A reference to an entry of type `T` stored in
-/// [`ContiguousMemoryStorage`](crate::ContiguousMemory).
+/// [`ContiguousMemory`](crate::ContiguousMemory).
pub struct EntryRef = ImplDefault> { pub(crate) inner: Impl::SharedRef>, #[cfg(feature = "ptr_metadata")] pub(crate) metadata: ::Metadata, } -/// A shorter type name for [`EntryRef`]. -pub type CERef = EntryRef; - impl> EntryRef { /// Returns a byte range within container memory this reference points to. pub fn range(&self) -> ByteRange { @@ -57,7 +54,7 @@ impl> EntryRef Ok(MemoryReadGuard { state: self.inner.clone(), #[cfg(not(feature = "ptr_metadata"))] - value: &*pos, + value: &*(pos as *const T), #[cfg(feature = "ptr_metadata")] value: &*core::ptr::from_raw_parts::(pos as *const (), self.metadata), }) @@ -79,8 +76,8 @@ impl> EntryRef } /// Returns a reference to data at its current location or a - /// [`RegionBorrowError`] error if the represented memory region is - /// mutably borrowed. + /// [`RegionBorrowError`] error if the represented memory region is mutably + /// borrowed. pub fn try_get(&self) -> Result, RegionBorrowError> where T: RefSizeReq, @@ -110,7 +107,7 @@ impl> EntryRef Ok(MemoryWriteGuard { state: self.inner.clone(), #[cfg(not(feature = "ptr_metadata"))] - value: &mut *(pos), + value: &mut *(pos as *mut T), #[cfg(feature = "ptr_metadata")] value: &mut *core::ptr::from_raw_parts_mut::(pos as *mut (), self.metadata), }) @@ -132,8 +129,8 @@ impl> EntryRef } /// Returns a mutable reference to data at its current location or a - /// [`RegionBorrowError`] error if the represented memory region is - /// already borrowed. + /// [`RegionBorrowError`] error if the represented memory region is already + /// borrowed. pub fn try_get_mut(&mut self) -> Result, RegionBorrowError> where T: RefSizeReq, @@ -152,13 +149,13 @@ impl> EntryRef EntryRef { inner: unsafe { - // SAFETY: Reinterpretation of T to R is safe because both EntryRefs - // are equally sized bc T is phantom. As A and Impl of the result - // are the same, and Unsize requirement is satisfied, this pointer - // cast is safe. + // SAFETY: Reinterpretation of T to R is safe because both + // EntryRefs are equally sized bc T is phantom. As A and Impl of + // the result are the same, and Unsize requirement is satisfied, + // this pointer cast is safe. // - // Transform would be used, but it can't see the types are equally - // sized due to use of type arguments. + // Transform would be used, but it can't see the types are + // equally sized due to use of type arguments. std::ptr::read( &self.inner as *const Impl::SharedRef> as *const Impl::SharedRef>, @@ -194,10 +191,10 @@ impl> EntryRef /// /// # Safety /// - /// This function is unsafe because it assumes any `T` to implement `R`, - /// as the original type of stored data can be erased through - /// [`into_dyn`](EntryRef::into_dyn) it's impossible to check - /// whether the initial struct actually implements `R`. + /// This function is unsafe because it assumes any `T` to implement `R`, as + /// the original type of stored data can be erased through + /// [`into_dyn`](EntryRef::into_dyn) it's impossible to check whether the + /// initial struct actually implements `R`. /// /// Calling methods from an incorrect vtable will cause undefined behavior. #[cfg(feature = "ptr_metadata")] @@ -226,10 +223,10 @@ impl> EntryRef /// segment to be moved. /// /// When the reference goes out of scope, its region will be marked as free - /// which means that a subsequent call to [`ContiguousMemoryStorage::push`] + /// which means that a subsequent call to [`ContiguousMemory::push`] /// or friends can cause undefined behavior when dereferencing the pointer. 
     ///
-    /// [`ContiguousMemoryStorage::push`]: crate::ContiguousMemory::push
+    /// [`ContiguousMemory::push`]: crate::ContiguousMemory::push
     pub unsafe fn as_ptr(&self) -> *const T
     where
         T: RefSizeReq,
@@ -241,9 +238,9 @@ impl<T: ?Sized, A: ManageMemory, Impl: ImplReferencing<A>> EntryRef<T, A, Impl>
 
     /// # Safety
     ///
-    /// In addition to concerns noted in [`EntryRef::as_ptr`],
-    /// this function also provides mutable access to the underlying data
-    /// allowing potential data races.
+    /// In addition to concerns noted in [`EntryRef::as_ptr`], this function
+    /// also provides mutable access to the underlying data allowing potential
+    /// data races.
     pub unsafe fn as_ptr_mut(&self) -> *mut T
     where
         T: RefSizeReq,
@@ -253,7 +250,7 @@
 
         #[cfg(not(feature = "ptr_metadata"))]
         {
-            pos
+            pos as *mut T
         }
         #[cfg(feature = "ptr_metadata")]
         {
@@ -276,14 +273,14 @@
         self.into_ptr_mut() as *const T
     }
 
-    /// Creates a mutable pointer to underlying data while also preventing
-    /// the occupied memory region from being marked as free.
+    /// Creates a mutable pointer to underlying data while also preventing the
+    /// occupied memory region from being marked as free.
     ///
     /// # Safety
     ///
-    /// In addition to concerns noted in
-    /// [`EntryRef::into_ptr`], this function also provides
-    /// mutable access to the underlying data allowing potential data races.
+    /// In addition to concerns noted in [`EntryRef::into_ptr`], this function
+    /// also provides mutable access to the underlying data allowing potential
+    /// data races.
     pub unsafe fn into_ptr_mut(self) -> *mut T
     where
         T: RefSizeReq,
@@ -304,8 +301,6 @@ impl Clone for EntryRef {
             inner: self.inner.clone(),
             #[cfg(feature = "ptr_metadata")]
             metadata: self.metadata,
-            #[cfg(not(feature = "ptr_metadata"))]
-            _phantom: PhantomData,
         }
     }
 }
@@ -368,14 +363,19 @@ use state::*;
 
 /// Used for modelling XOR borrow semantics at runtime.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum BorrowState {
+    /// The memory is being immutably accessed.
+    ///
+    /// The contained value is the number of immutable references created to
+    /// the memory location.
     Read(usize),
+    /// The memory is being mutably accessed.
     Write,
 }
 
 /// Size requirements for types pointed to by references
 ///
-/// This is a sealed marker trait that allows `ptr_metadata` to control
-/// whether Reference
+/// This is a sealed marker trait that allows `ptr_metadata` to control whether
+/// references may point to unsized types.
 #[cfg(feature = "ptr_metadata")]
 pub trait RefSizeReq: Sealed {}
 #[cfg(feature = "ptr_metadata")]
@@ -430,8 +430,8 @@ impl ConstructReference for *mut T {
     }
 }
 
-/// A smart reference wrapper responsible for tracking and managing a flag
-/// that indicates whether the memory segment is actively being written to.
+/// A smart reference wrapper responsible for tracking and managing a flag that
+/// indicates whether the memory segment is actively being written to.
 #[cfg_attr(feature = "debug", derive(Debug))]
 pub struct MemoryWriteGuard<'a, T: ?Sized, A: ManageMemory, Impl: ImplReferencing<A> = ImplDefault>
 {
@@ -466,8 +466,8 @@ impl<'a, T: ?Sized, Impl: ImplReferencing<A>, A: ManageMemory> Drop
     }
 }
 
-/// A smart reference wrapper responsible for tracking and managing a flag
-/// that indicates whether the memory segment is actively being read from.
+/// A smart reference wrapper responsible for tracking and managing a flag that
+/// indicates whether the memory segment is actively being read from.
#[cfg_attr(feature = "debug", derive(Debug))] pub struct MemoryReadGuard<'a, T: ?Sized, A: ManageMemory, Impl: ImplReferencing = ImplDefault> { state: Impl::SharedRef>, diff --git a/src/types.rs b/src/types.rs index 959500a..5cdeee7 100644 --- a/src/types.rs +++ b/src/types.rs @@ -34,7 +34,7 @@ use crate::{reference::{state::ReferenceState, BorrowState, EntryRef}, memory::{ /// All implemented functions should be `#[inline]`d to ensure that final code /// behaves as if this abstraction didn't exist. /// -/// This allows different [`ContigousMemory`](crate::ContigousMemory) +/// This allows different [`ContigousMemory`](crate::ContiguousMemory) /// implementation details to use the same code base while staying correct for /// the strictest one. pub trait ReadableInner { @@ -47,13 +47,27 @@ pub trait ReadableInner { where Self: 'a; - + /// Error returned when calling [`read`](ReadableInner::read) or + /// [`try_read`](ReadableInner::try_read) fails. #[cfg(not(any(feature = "error_in_core", not(feature = "no_std"))))] type BorrowError; + /// Error returned when calling [`read`](ReadableInner::read) or + /// [`try_read`](ReadableInner::try_read) fails. #[cfg(any(feature = "error_in_core", not(feature = "no_std")))] type BorrowError: Error; + /// Returns the [read guard](ReadableInner::ReadGuard) for `T` if the + /// wrapped readable can be read, or an [error](ReadableInner::BorrowError) + /// if that's not possible (usually due to container being poisoned). + /// + /// This method will block for implementations of concurrent containers + /// (such as `Mutex`), for non-blocking access use + /// [`try_read`](ReadableInner::try_read). fn read(&self) -> Result, Self::BorrowError>; + /// Returns the [read guard](ReadableInner::ReadGuard) for `T` if the + /// wrapped readable can be read, or an [error](ReadableInner::BorrowError) + /// if it's being mutably accessed from somewhere else or if read isn't + /// possible (usually due to container being poisoned). fn try_read(&self) -> Result, Self::BorrowError> { self.read() } @@ -72,13 +86,30 @@ pub trait WritableInner: ReadableInner { where Self: 'a; + /// Error returned when calling [`write`](WritableInner::write) or + /// [`try_write`](WritableInner::try_write) fails. #[cfg(not(any(feature = "error_in_core", not(feature = "no_std"))))] type MutBorrowError; + /// Error returned when calling [`write`](WritableInner::write) or + /// [`try_write`](WritableInner::try_write) fails. #[cfg(any(feature = "error_in_core", not(feature = "no_std")))] type MutBorrowError: Error; + /// Returns the [write guard](WritableInner::WriteGuard) for `T` if the + /// wrapped writable can be written to, or an + /// [error](WritableInner::MutBorrowError) if that's not possible (usually + /// due to container being poisoned). + /// + /// This method will block for implementations of concurrent containers + /// (such as `Mutex`), for non-blocking access use + /// [`try_write`](WritableInner::try_write). fn write(&self) -> Result, Self::MutBorrowError>; + /// Returns the [write guard](WritableInner::WriteGuard) for `T` if the + /// wrapped writable can be written to, or an + /// [error](WritableInner::MutBorrowError) if it's being mutably accessed + /// from somewhere else or if write isn't possible (usually due to container + /// being poisoned). 
fn try_write( &self, ) -> Result, Self::MutBorrowError> { @@ -112,7 +143,7 @@ impl WritableInner for core::cell::Cell { fn write( &self, ) -> Result, Infallible> { - Ok(CellWriteGuard { parent: &self, value: self.get() }) + Ok(CellWriteGuard { parent: self, value: self.get() }) } } impl ReadableInner for core::cell::RefCell { @@ -173,9 +204,8 @@ impl WritableInner for UnsafeCell { /// A fake [`Reference`]-like wrapper for [unsafe implementation](ImplUnsafe) /// state. /// -/// As a directly owned value doesn't implement a [`Deref`], this wrapper fills -/// that gap so no matter the implementation details, the state wrapper can be -/// dereferenced into inner value. +/// As an owned value `T` doesn't implement a [`Deref`], this wrapper fills that +/// gap in order to unify implementation details. #[derive(Debug)] #[repr(transparent)] #[cfg(feature = "unsafe_impl")] @@ -259,10 +289,9 @@ pub(crate) mod sealed { } pub(crate) use sealed::Sealed; -/// Implementation details shared between [storage](StorageDetails) and -/// [`reference`](ReferenceDetails) implementations. +/// Implementation details shared between memory container and reference types. pub trait ImplDetails: Sized { - /// A reference to internal state + /// A reference to internal state. type StateRef: Reference; /// A wrapper for [`MemoryBase`]. @@ -271,15 +300,15 @@ pub trait ImplDetails: Sized { /// A wrapper for [`SegmentTracker`]. type Tracker: WritableInner + From; + /// Reference type returned when data is pushed into this implementation. type PushResult: ConstructReference; + /// Indicates whether this implementation is allowed to grow. const GROW: bool = true; } -/// Implementation that's not thread-safe but performs faster as it avoids -/// mutexes and locks. -/// -/// For example usage of default implementation see: [`ContiguousMemory`](crate::ContiguousMemory) +/// Default implementation that uses [`std::cell::RefCell`] for storage and +/// [`Rc`] for state references. #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Clone, Copy, PartialEq, Eq)] pub struct ImplDefault; @@ -291,9 +320,9 @@ impl ImplDetails for ImplDefault { } /// Implementation which provides direct (unsafe) access to stored entries. -/// -/// For example usage of default implementation see: -/// [`UnsafeContiguousMemory`](crate::UnsafeContiguousMemory) +/// +/// Uses [`Cell`](std::cell::Cell) for storage and the stored data is [`Owned`] +/// by the caller. #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Clone, Copy, PartialEq, Eq)] #[cfg(feature = "unsafe_impl")] @@ -308,11 +337,12 @@ impl ImplDetails for ImplUnsafe { const GROW: bool = false; } +/// Represents contigous memory that uses smart references. pub trait ImplReferencing: ImplDetails { /// The type handling concurrent mutable access exclusion. type BorrowLock: WritableInner; - /// A shared reference to data + /// A shared reference to data. type SharedRef: Reference + Clone; /// Marks reference state as no longer being borrowed. @@ -333,12 +363,12 @@ impl ImplReferencing for ImplDefault { } } -/// Returns [`Pointee`] metadata for provided pair of struct `S` and some -/// unsized type (e.g. a trait) `T`. +/// Returns [`Pointee`](core::ptr::Pointee) metadata for provided pair of struct +/// `S` and some unsized type (e.g. a trait) `T`. /// -/// This metadata is usually a pointer to vtable of `T` implementation for -/// `S`, but can be something else and the value is considered internal to -/// the compiler. 
+/// This metadata is usually a pointer to vtable of `T` implementation for `S`,
+/// but can be something else and the value is considered internal to the
+/// compiler.
 #[cfg(feature = "ptr_metadata")]
 pub const fn static_metadata<S, T: ?Sized>() -> <T as Pointee>::Metadata
 where
@@ -365,8 +395,8 @@ pub(crate) const fn is_layout_valid(size: usize, align: usize) -> bool {
     size <= isize::MAX as usize - (align - 1)
 }
 
-/// Trait that unifies passing either a [`Layout`] directly or a `&T` where
-/// `T: Sized` as an argument to a function which requires a type layout.
+/// Trait that unifies passing either a [`Layout`] directly or a `&T` where `T:
+/// Sized` as an argument to a function which requires a type layout.
 ///
 /// This trait is sealed to prevent users from implementing it for arbitrary
 /// types which would violate its intention and cause bloat.
diff --git a/test.rs b/test.rs
deleted file mode 100644
index f4e368e..0000000
--- a/test.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-fn main() -> Result<(), std::fmt::Error> {
-    Ok(())
-}
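A note on the reworked `ManageMemory` surface in src/memory.rs: since `deallocate`/`shrink`/`grow` now take a `MemoryBase` (address plus alignment) instead of separate address and `Layout` pairs, a downstream manager only threads one value through. The sketch below is illustrative, not part of the patch: it assumes the `Result<BaseAddress, MemoryError>` return types reconstructed above, that `DefaultMemoryManager` is a unit struct, and that the `memory` module paths shown in this diff are public; `CountingManager` is a hypothetical name.

```rust
use core::alloc::Layout;
use core::cell::Cell;

use contiguous_mem::memory::{BaseAddress, DefaultMemoryManager, ManageMemory, MemoryBase};
use contiguous_mem::MemoryError;

/// Hypothetical manager that counts backend calls while delegating the
/// actual allocation work to `DefaultMemoryManager`.
struct CountingManager {
    calls: Cell<usize>,
}

impl ManageMemory for CountingManager {
    fn allocate(&self, layout: Layout) -> Result<BaseAddress, MemoryError> {
        self.calls.set(self.calls.get() + 1);
        DefaultMemoryManager.allocate(layout)
    }

    unsafe fn deallocate(&self, base: MemoryBase) {
        self.calls.set(self.calls.get() + 1);
        // `base` carries both the address and the alignment, so no separate
        // `Layout` argument is needed anymore.
        DefaultMemoryManager.deallocate(base)
    }

    unsafe fn shrink(&self, base: MemoryBase, new_size: usize) -> Result<BaseAddress, MemoryError> {
        self.calls.set(self.calls.get() + 1);
        DefaultMemoryManager.shrink(base, new_size)
    }

    unsafe fn grow(&self, base: MemoryBase, new_size: usize) -> Result<BaseAddress, MemoryError> {
        self.calls.set(self.calls.get() + 1);
        DefaultMemoryManager.grow(base, new_size)
    }
}
```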
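The new `From<core::alloc::LayoutError>` conversion in src/error.rs is what lets `try_grow_to` drop its explicit `map_err(|_| MemoryError::TooLarge)`. The same pattern in isolation, with a stand-in error type so the snippet is self-contained and does not clash with the crate:

```rust
use core::alloc::{Layout, LayoutError};

#[derive(Debug)]
enum SketchError {
    TooLarge,
}

// Mirrors the patch: a failed layout computation maps onto the existing
// "too large" error variant.
impl From<LayoutError> for SketchError {
    fn from(_: LayoutError) -> Self {
        Self::TooLarge
    }
}

// Growth paths can now bubble up layout failures with `?` instead of an
// explicit map_err, as try_grow_to does after this change.
fn grown_layout(new_size: usize, align: usize) -> Result<Layout, SketchError> {
    Ok(Layout::from_size_align(new_size, align)?)
}

fn main() {
    assert!(grown_layout(1024, 8).is_ok());
    // Rounding usize::MAX up to the alignment overflows isize::MAX.
    assert!(grown_layout(usize::MAX, 2).is_err());
}
```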