diff --git a/crates/db/src/tb_worldstate.rs b/crates/db/src/tb_worldstate.rs index 1796120d..4b044515 100644 --- a/crates/db/src/tb_worldstate.rs +++ b/crates/db/src/tb_worldstate.rs @@ -22,7 +22,7 @@ use tracing::warn; use uuid::Uuid; use crate::tuplebox::TupleError; -use moor_values::model::defset::HasUuid; +use moor_values::model::defset::{HasUuid, Named}; use moor_values::model::objects::{ObjAttrs, ObjFlag}; use moor_values::model::objset::ObjSet; use moor_values::model::propdef::{PropDef, PropDefs}; @@ -1099,7 +1099,7 @@ mod tests { use strum::{EnumCount, IntoEnumIterator}; - use moor_values::model::defset::HasUuid; + use moor_values::model::defset::{HasUuid, Named}; use moor_values::model::objects::ObjAttrs; use moor_values::model::objset::ObjSet; use moor_values::model::r#match::VerbArgsSpec; @@ -1135,7 +1135,7 @@ mod tests { #[tokio::test] async fn test_create_object() { let db = test_db().await; - let tx = TupleBoxTransaction::new(db); + let tx = TupleBoxTransaction::new(db.clone()); let oid = tx .create_object( None, @@ -1156,6 +1156,11 @@ mod tests { assert_eq!(tx.get_object_location(oid).await.unwrap(), NOTHING); assert_eq!(tx.get_object_name(oid).await.unwrap(), "test"); assert_eq!(tx.commit().await, Ok(CommitResult::Success)); + + // Verify existence in a new transaction. + let tx = TupleBoxTransaction::new(db); + assert!(tx.object_valid(oid).await.unwrap()); + assert_eq!(tx.get_object_owner(oid).await.unwrap(), NOTHING); } #[tokio::test] @@ -1607,7 +1612,7 @@ mod tests { #[tokio::test] async fn test_verb_add_update() { let db = test_db().await; - let tx = TupleBoxTransaction::new(db); + let tx = TupleBoxTransaction::new(db.clone()); let oid = tx .create_object( None, @@ -1658,6 +1663,13 @@ mod tests { // resolve with the new name. let vh = tx.resolve_verb(oid, "test2".into(), None).await.unwrap(); assert_eq!(vh.names(), vec!["test2"]); + + // Now commit, and try to resolve again. + assert_eq!(tx.commit().await, Ok(CommitResult::Success)); + let tx = TupleBoxTransaction::new(db); + let vh = tx.resolve_verb(oid, "test2".into(), None).await.unwrap(); + assert_eq!(vh.names(), vec!["test2"]); + assert_eq!(tx.commit().await, Ok(CommitResult::Success)); } #[tokio::test] @@ -1797,7 +1809,7 @@ mod tests { #[tokio::test] async fn test_verb_resolve() { let db = test_db().await; - let tx = TupleBoxTransaction::new(db); + let tx = TupleBoxTransaction::new(db.clone()); let a = tx .create_object( @@ -1847,6 +1859,46 @@ mod tests { .unwrap() .uuid(); assert_eq!(tx.get_verb_binary(a, v_uuid).await.unwrap(), vec![]); + + // Add a second verb with a different name + tx.add_object_verb( + a, + a, + vec!["test2".into()], + vec![], + BinaryType::LambdaMoo18X, + BitEnum::new(), + VerbArgsSpec::this_none_this(), + ) + .await + .unwrap(); + + // Verify we can get it + assert_eq!( + tx.resolve_verb(a, "test2".into(), None) + .await + .unwrap() + .names(), + vec!["test2"] + ); + assert_eq!(tx.commit().await, Ok(CommitResult::Success)); + + // Verify existence in a new transaction. 
+        let tx = TupleBoxTransaction::new(db);
+        assert_eq!(
+            tx.resolve_verb(a, "test".into(), None)
+                .await
+                .unwrap()
+                .names(),
+            vec!["test"]
+        );
+        assert_eq!(
+            tx.resolve_verb(a, "test2".into(), None)
+                .await
+                .unwrap()
+                .names(),
+            vec!["test2"]
+        );
         assert_eq!(tx.commit().await, Ok(CommitResult::Success));
     }
 
diff --git a/crates/db/src/tuplebox/tuples/mod.rs b/crates/db/src/tuplebox/tuples/mod.rs
index dff63765..71814912 100644
--- a/crates/db/src/tuplebox/tuples/mod.rs
+++ b/crates/db/src/tuplebox/tuples/mod.rs
@@ -15,13 +15,13 @@ use thiserror::Error;
 
 pub use slotbox::{PageId, SlotBox, SlotBoxError, SlotId};
-pub use tuple::TupleRef;
+pub use tuple_ref::TupleRef;
 pub use tx_tuple::TxTuple;
 
-mod slot_ptr;
 mod slotbox;
 mod slotted_page;
-mod tuple;
+mod tuple_ptr;
+mod tuple_ref;
 mod tx_tuple;
 
 #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
diff --git a/crates/db/src/tuplebox/tuples/slotbox.rs b/crates/db/src/tuplebox/tuples/slotbox.rs
index 75274bef..c7bba51a 100644
--- a/crates/db/src/tuplebox/tuples/slotbox.rs
+++ b/crates/db/src/tuplebox/tuples/slotbox.rs
@@ -40,11 +40,11 @@ use thiserror::Error;
 use tracing::error;
 
 use crate::tuplebox::pool::{Bid, BufferPool, PagerError};
-use crate::tuplebox::tuples::slot_ptr::SlotPtr;
 pub use crate::tuplebox::tuples::slotted_page::SlotId;
 use crate::tuplebox::tuples::slotted_page::{
     slot_index_overhead, slot_page_empty_size, SlottedPage,
 };
+use crate::tuplebox::tuples::tuple_ptr::TuplePtr;
 use crate::tuplebox::tuples::{TupleId, TupleRef};
 use crate::tuplebox::RelationId;
 
@@ -104,11 +104,11 @@ impl SlotBox {
         let mut refs = vec![];
         for (slot, buflen, addr) in slot_ids.into_iter() {
             let tuple_id = TupleId { page: id, slot };
-            let swizref = Box::pin(SlotPtr::create(self.clone(), tuple_id, addr, buflen));
+            let swizref = Box::pin(TuplePtr::create(self.clone(), tuple_id, addr, buflen));
             inner.swizrefs.insert(tuple_id, swizref);
             let swizref = inner.swizrefs.get_mut(&tuple_id).unwrap();
             let sp = unsafe { Pin::into_inner_unchecked(swizref.as_mut()) };
-            let ptr = sp as *mut SlotPtr;
+            let ptr = sp as *mut TuplePtr;
             let tuple_ref = TupleRef::at_ptr(ptr);
             refs.push(tuple_ref);
         }
@@ -214,7 +214,7 @@ struct Inner {
     /// pointers in the TupleRefs themselves.
     // TODO: This needs to be broken down by page id, too, so that we can manage swap-in/swap-out at
     //  the page granularity.
-    swizrefs: HashMap<TupleId, Pin<Box<SlotPtr>>>,
+    swizrefs: HashMap<TupleId, Pin<Box<TuplePtr>>>,
 }
 
 impl Inner {
@@ -236,46 +236,42 @@
         let tuple_size = size + slot_index_overhead();
         let page_size = max(32768, tuple_size.next_power_of_two());
-        // Check if we have a free spot for this relation that can fit the tuple.
-        let (page, offset) =
-            { self.find_space(relation_id, tuple_size, slot_page_empty_size(page_size))? };
-
-        let mut page_handle = self.page_for(page)?;
-
-        let free_space = page_handle.available_content_bytes();
-        let mut page_write_lock = page_handle.write_lock();
-        if let Ok((slot, page_remaining, mut buf)) = page_write_lock.allocate(size, initial_value) {
-            self.finish_alloc(page, relation_id, offset, page_remaining);
-
-            // Make a swizzlable ptr reference and shove it in our set, and then return a tuple ref
-            // which has a ptr to it.
-            let buflen = buf.as_ref().len();
-            let bufaddr = buf.as_mut_ptr();
-            let tuple_id = TupleId { page, slot };
-
-            // Heap allocate the swizref, and and pin it, take the address of it, then stick the swizref
-            // in our set.
-            let mut swizref = Box::pin(SlotPtr::create(sb.clone(), tuple_id, bufaddr, buflen));
-            let swizaddr = unsafe { swizref.as_mut().get_unchecked_mut() } as *mut SlotPtr;
-            self.swizrefs.insert(tuple_id, swizref);
-
-            // Establish initial refcount using this existing lock.
-            page_write_lock.upcount(slot).unwrap();
-
-            return Ok(TupleRef::at_ptr(swizaddr));
+        // Our selected page should not, in theory, get taken while we're holding this allocation lock,
+        // but to be paranoid, we'll loop around and try again if it does.
+        let mut tries = 0;
+        loop {
+            // Check if we have a free spot for this relation that can fit the tuple.
+            let (page, offset) =
+                { self.find_space(relation_id, tuple_size, slot_page_empty_size(page_size))? };
+            let mut page_handle = self.page_for(page)?;
+            let mut page_write_lock = page_handle.write_lock();
+            if let Ok((slot, page_remaining, mut buf)) =
+                page_write_lock.allocate(size, initial_value)
+            {
+                self.finish_alloc(page, relation_id, offset, page_remaining);
+
+                // Make a swizzlable ptr reference and shove it in our set, and then return a tuple ref
+                // which has a ptr to it.
+                let buflen = buf.as_ref().len();
+                let bufaddr = buf.as_mut_ptr();
+                let tuple_id = TupleId { page, slot };
+
+                // Heap allocate the swizref and pin it, take the address of it, then stick the swizref
+                // in our set.
+                let mut swizref = Box::pin(TuplePtr::create(sb.clone(), tuple_id, bufaddr, buflen));
+                let swizaddr = unsafe { swizref.as_mut().get_unchecked_mut() } as *mut TuplePtr;
+                self.swizrefs.insert(tuple_id, swizref);
+
+                // Establish initial refcount using this existing lock.
+                page_write_lock.upcount(slot).unwrap();
+
+                return Ok(TupleRef::at_ptr(swizaddr));
+            }
+            tries += 1;
+            if tries > 50 {
+                panic!("Could not allocate tuple after {tries} tries");
+            }
         }
-
-        // If we get here, then we failed to allocate on the page we wanted to, which means there's
-        // data coherence issues between the pages last-reported free space and the actual free
-        panic!(
-            "Page {} failed to allocate, we wanted {} bytes, but it only has {},\
-            but our records show it has {}, and its pid in that offset is {:?}",
-            page,
-            size,
-            free_space,
-            self.available_page_space[relation_id.0].entries[offset] >> 64,
-            self.available_page_space[relation_id.0].entries[offset] & 0xFFFF_FFFF_FFFF
-        );
     }
 
     fn do_restore_page<'a>(&mut self, id: PageId) -> Result<SlottedPage<'a>, SlotBoxError> {
@@ -601,8 +597,18 @@ mod tests {
             tuples.push((tuple_id, tuple));
         }
         for (tuple, expected_value) in tuples {
-            let retrieved = tuple.slot_buffer();
-            assert_eq!(expected_value, retrieved.as_slice());
+            let retrieved_domain = tuple.slot_buffer();
+            let retrieved_codomain = tuple.slot_buffer();
+            assert_eq!(
+                expected_value,
+                retrieved_domain.as_slice(),
+                "Tuple domain mismatch"
+            );
+            assert_eq!(
+                expected_value,
+                retrieved_codomain.as_slice(),
+                "Tuple codomain mismatch"
+            );
         }
     }
 
@@ -645,10 +651,17 @@
         let mut sb = Arc::new(SlotBox::new(32768 * 32));
         let mut tuples = fill_until_full(&mut sb);
         for (i, (tuple, expected_value)) in tuples.iter().enumerate() {
-            let retrieved = tuple.domain();
+            let retrieved_domain = tuple.domain();
+            let retrieved_codomain = tuple.codomain();
+            assert_eq!(
+                *expected_value,
+                retrieved_domain.as_slice(),
+                "Mismatch at {}th tuple",
+                i
+            );
             assert_eq!(
                 *expected_value,
-                retrieved.as_slice(),
+                retrieved_codomain.as_slice(),
                 "Mismatch at {}th tuple",
                 i
             );
@@ -698,8 +711,10 @@
 
         // What we expected to still be there is there.
         for (tuple, expected_value) in &tuples {
-            let retrieved = tuple.domain();
-            assert_eq!(*expected_value, retrieved.as_slice());
+            let retrieved_domain = tuple.domain();
+            let retrieved_codomain = tuple.codomain();
+            assert_eq!(*expected_value, retrieved_domain.as_slice());
+            assert_eq!(*expected_value, retrieved_codomain.as_slice());
         }
         // What we expected to not be there is not there.
         for (id, _) in freed_tuples {
@@ -709,17 +724,21 @@
         let new_tuples = fill_until_full(&mut sb);
         // Verify both the new tuples and the old tuples are there.
         for (tuple, expected) in new_tuples {
-            let retrieved = tuple.domain();
-            assert_eq!(expected, retrieved.as_slice());
+            let retrieved_domain = tuple.domain();
+            let retrieved_codomain = tuple.codomain();
+            assert_eq!(expected, retrieved_domain.as_slice());
+            assert_eq!(expected, retrieved_codomain.as_slice());
         }
         for (tuple, expected) in tuples {
-            let retrieved = tuple.domain();
-            assert_eq!(expected, retrieved.as_slice());
+            let retrieved_domain = tuple.domain();
+            let retrieved_codomain = tuple.codomain();
+            assert_eq!(expected, retrieved_domain.as_slice());
+            assert_eq!(expected, retrieved_codomain.as_slice());
         }
     }
 
     #[test]
-    fn encode_decode() {
+    fn alloc_encode_decode() {
         let pid = 12345;
         let available = 54321;
         let encoded = super::encode(pid, available);
diff --git a/crates/db/src/tuplebox/tuples/slotted_page.rs b/crates/db/src/tuplebox/tuples/slotted_page.rs
index 07133594..03b2a1fa 100644
--- a/crates/db/src/tuplebox/tuples/slotted_page.rs
+++ b/crates/db/src/tuplebox/tuples/slotted_page.rs
@@ -44,7 +44,7 @@ pub type SlotId = u32;
 // In this way both a madvise DONTNEEDed and an otherwise empty page generally are effectively
 // identical, and we can just start using them right away in a null-state without doing any
 // initialization.
-#[repr(C)]
+#[repr(C, align(8))]
 struct SlottedPageHeader {
     // The number of bytes used in the page
     used_bytes: u32,
@@ -92,12 +92,13 @@ impl SlottedPageHeader {
 
     // Update accounting for the presence of a new entry.
     fn add_entry(mut self: Pin<&mut Self>, size: usize) -> SlotId {
+        let padded_size = (size + 7) & !7;
         unsafe {
             let new_slot = self.num_slots as SlotId;
             let header = self.as_mut().get_unchecked_mut();
             header.used_bytes += size as u32;
             header.num_slots += 1;
-            header.content_length += size as u32;
+            header.content_length += padded_size as u32;
             header.index_length += std::mem::size_of::<SlotIndexEntry>() as u32;
             new_slot
         }
@@ -114,7 +115,7 @@
     }
 }
 
-#[repr(C)]
+#[repr(C, align(8))]
 struct SlotIndexEntry {
     used: bool,
     // The number of live references to this slot
@@ -132,14 +133,32 @@ struct SlotIndexEntry {
 
 impl SlotIndexEntry {
     // Update accounting for the presence of a new entry.
-    fn alloc(mut self: Pin<&mut Self>, content_position: usize, size: usize) {
+    fn alloc(
+        mut self: Pin<&mut Self>,
+        content_position: usize,
+        content_length: usize,
+        tuple_size: usize,
+    ) {
+        // The content must always be rounded up to the nearest 8-byte boundary.
+        assert_eq!(
+            content_position % 8,
+            0,
+            "content position {} is not 8-byte aligned",
+            content_position
+        );
+        assert_eq!(
+            content_length % 8,
+            0,
+            "content length {} is not 8-byte aligned",
+            content_length
+        );
         unsafe {
             let index_entry = self.as_mut().get_unchecked_mut();
             index_entry.offset = content_position as u32;
 
             // Net-new slots always have their full size used in their new index entry.
-            index_entry.used_bytes = size as u32;
-            index_entry.allocated = size as u32;
+            index_entry.used_bytes = tuple_size as u32;
+            index_entry.allocated = content_length as u32;
             index_entry.refcount = 0;
             index_entry.used = true;
         }
     }
@@ -207,7 +226,7 @@ impl<'a> SlottedPage<'a> {
         let used = (header.num_slots * std::mem::size_of::<SlotIndexEntry>() as u32) as usize
             + header.used_bytes as usize
             + std::mem::size_of::<SlottedPageHeader>();
-        self.page_size as usize - used
+        (self.page_size as usize).saturating_sub(used)
     }
 
     /// How many bytes are available for appending to this page (i.e. not counting the space
@@ -218,7 +237,8 @@
         let index_length = header.index_length as usize;
         let header_size = std::mem::size_of::<SlottedPageHeader>();
 
-        self.page_size as usize - (index_length + content_length + header_size)
+        let avail = index_length + content_length + header_size;
+        (self.page_size as usize).saturating_sub(avail)
     }
 
     /// Add the slot into the page, copying it into the memory region, and returning the slot id
@@ -233,6 +253,7 @@
         if !can_fit {
             return Err(SlotBoxError::BoxFull(size, self.available_content_bytes()));
         }
+        let header = self.header_mut();
         if let Some(fit_slot) = fit_slot {
             let content_position = self.offset_of(fit_slot).unwrap().0;
             let memory_as_slice = unsafe {
@@ -250,7 +271,7 @@
             index_entry.as_mut().mark_used(size);
 
             // Update used bytes in the header
-            let header = self.header_mut();
+            let header = header;
             header.add_used(size);
 
             let slc = unsafe {
@@ -259,40 +280,49 @@
             return Ok((fit_slot, self.available_content_bytes(), slc));
         }
 
-        // Do we have enough room to add new content?
-        let avail = self.available_content_bytes();
-        if avail < size + std::mem::size_of::<SlotIndexEntry>() {
+        // Find position and verify that we can fit the slot.
+        let current_content_length = header.content_length as usize;
+        let current_index_end = header.index_length as usize;
+        let content_size = (size + 7) & !7;
+        let content_start_position =
+            self.page_size as usize - current_content_length - content_size;
+
+        // Align to 8-byte boundary cuz that's what we'll actually need.
+        let content_start_position = (content_start_position + 7) & !7;
+
+        // If the content start bleeds over into the index (+ our new entry), then we can't fit the slot.
+        let index_entry_size = std::mem::size_of::<SlotIndexEntry>();
+        if content_start_position <= current_index_end + index_entry_size {
             return Err(SlotBoxError::BoxFull(
-                size + std::mem::size_of::<SlotIndexEntry>(),
-                avail,
+                size + index_entry_size,
+                self.available_content_bytes(),
             ));
         }
 
-        // Add the slot to the content region. The start offset is PAGE_SIZE - content_length -
-        // slot_length. So first thing, copy the bytes into the content region at that position.
-        let content_length = self.header_mut().content_length as usize;
-        let content_position = self.page_size as usize - content_length - size;
         let memory_as_slice =
             unsafe { std::slice::from_raw_parts_mut(self.base_address, self.page_size as usize) };
 
         // If there's an initial value provided, copy it in.
         if let Some(initial_value) = initial_value {
-            assert_eq!(initial_value.len(), size);
-            memory_as_slice[content_position..content_position + size]
+            memory_as_slice[content_start_position..content_start_position + size]
                 .copy_from_slice(initial_value);
         }
 
         // Add the index entry and expand the index region.
-        let mut index_entry = self.get_index_entry_mut(self.header_mut().num_slots as SlotId);
-        index_entry.as_mut().alloc(content_position, size);
+        let mut index_entry = self.get_index_entry_mut(header.num_slots as SlotId);
+        index_entry
+            .as_mut()
+            .alloc(content_start_position, content_size, size);
 
-        // Update the header
-        let header = self.header_mut();
+        // Update the header to subtract the used space.
+        let header = header;
         let new_slot = header.add_entry(size);
 
         // Return the slot id and the number of bytes remaining to append at the end.
         let slc = unsafe {
-            Pin::new_unchecked(&mut memory_as_slice[content_position..content_position + size])
+            Pin::new_unchecked(
+                &mut memory_as_slice[content_start_position..content_start_position + size],
+            )
         };
         Ok((new_slot, self.available_content_bytes(), slc))
     }
@@ -399,6 +429,9 @@
         let offset = index_entry.offset as usize;
         let length = index_entry.used_bytes as usize;
 
+        // Must be 8-byte aligned.
+        assert_eq!(offset % 8, 0, "slot {} is not 8-byte aligned", slot_id);
+
         let memory_as_slice =
             unsafe { std::slice::from_raw_parts(self.base_address, self.page_size as usize) };
         Ok(unsafe { Pin::new_unchecked(&memory_as_slice[offset..offset + length]) })
@@ -419,6 +452,9 @@
         let offset = index_entry.offset as usize;
         let length = index_entry.used_bytes as usize;
 
+        // Must be 8-byte aligned.
+        assert_eq!(offset % 8, 0, "slot {} is not 8-byte aligned", slot_id);
+
         assert!(
             offset + length <= self.page_size as usize,
             "slot {} is out of bounds",
@@ -561,7 +597,12 @@
         let index_length = header.index_length as isize;
         let content_length = header.content_length as isize;
         let header_size = std::mem::size_of::<SlottedPageHeader>() as isize;
-        let avail = (self.page_size as isize) - (index_length + content_length + header_size);
+        let total_needed = index_length + content_length + header_size;
+
+        // Align to 8-byte boundary cuz that's what we'll actually need.
+        let total_needed = (total_needed + 7) & !7;
+
+        let avail = (self.page_size as isize) - total_needed;
         if avail < size as isize {
             return (true, None);
         }
@@ -576,6 +617,13 @@
         unsafe {
             let slot_address = base_address.add(index_offset);
+
+            assert_eq!(
+                slot_address as usize % 8,
+                0,
+                "slot {} is not 8-byte aligned",
+                slot_id
+            );
             Pin::new_unchecked(&*(slot_address as *const SlotIndexEntry))
         }
     }
@@ -587,6 +635,13 @@
         unsafe {
             let slot_address = base_address.add(index_offset);
+
+            assert_eq!(
+                slot_address as usize % 8,
+                0,
+                "slot {} is not 8-byte aligned",
+                slot_id
+            );
             Pin::new_unchecked(&mut *(slot_address as *mut SlotIndexEntry))
         }
     }
@@ -714,6 +769,10 @@ mod tests {
             assert!(matches!(result, Err(SlotBoxError::BoxFull(_, _))));
             break;
         }
+        // Sometimes we can cease allocation because that's how the cookie crumbles with padding.
+        if matches!(result, Err(SlotBoxError::BoxFull(_, _))) {
+            break;
+        }
         // Otherwise, we should be getting a slot id and a new avail that is smaller than the
         // previous avail.
         let (tid, new_avail, _) = result.unwrap();
@@ -839,16 +898,21 @@
         // Fill the page with random slots.
         let collected_slots = random_fill(&page);
         // Then remove them all and verify it is now completely empty.
-        let mut remaining = page.free_space_bytes();
+        let mut old_remaining = page.available_content_bytes();
         let mut last_is_empty = false;
-        for (tid, _) in &collected_slots {
+        for (i, (tid, _)) in collected_slots.iter().enumerate() {
             let (new_remaining, _, empty) = page.remove_slot(*tid).unwrap();
-            assert!(new_remaining >= remaining);
-            remaining = new_remaining;
+            assert!(
+                new_remaining >= old_remaining,
+                "new_remaining {} should be >= old_remaining {} on {i}th iteration",
+                new_remaining,
+                old_remaining
+            );
+            old_remaining = new_remaining;
             last_is_empty = empty;
         }
         assert!(last_is_empty);
-        assert_eq!(remaining, slot_page_empty_size(4096));
+        assert_eq!(old_remaining, slot_page_empty_size(4096));
         assert_eq!(page.free_space_bytes(), slot_page_empty_size(4096));
     }
 }
diff --git a/crates/db/src/tuplebox/tuples/slot_ptr.rs b/crates/db/src/tuplebox/tuples/tuple_ptr.rs
similarity index 91%
rename from crates/db/src/tuplebox/tuples/slot_ptr.rs
rename to crates/db/src/tuplebox/tuples/tuple_ptr.rs
index d943e3a1..602afa4e 100644
--- a/crates/db/src/tuplebox/tuples/slot_ptr.rs
+++ b/crates/db/src/tuplebox/tuples/tuple_ptr.rs
@@ -23,7 +23,7 @@ use crate::tuplebox::tuples::{SlotBox, TupleId};
 /// which allows the SlotBox to manage the lifetime of the tuple, swizzling it in and out of memory as needed.
 /// Adds a layer of indirection to each tuple access, but is better than passing around tuple ids + slotbox
 /// references.
-pub struct SlotPtr {
+pub struct TuplePtr {
     sb: Arc<SlotBox>,
     id: TupleId,
     buflen: u32,
@@ -32,17 +32,17 @@
     _pin: std::marker::PhantomPinned,
 }
 
-unsafe impl Send for SlotPtr {}
-unsafe impl Sync for SlotPtr {}
+unsafe impl Send for TuplePtr {}
+unsafe impl Sync for TuplePtr {}
 
-impl SlotPtr {
+impl TuplePtr {
     pub(crate) fn create(
         sb: Arc<SlotBox>,
         tuple_id: TupleId,
         bufaddr: *mut u8,
         buflen: usize,
     ) -> Self {
-        SlotPtr {
+        TuplePtr {
             sb: sb.clone(),
             id: tuple_id,
             bufaddr,
@@ -52,21 +52,21 @@
     }
 }
 
-impl PartialEq for SlotPtr {
+impl PartialEq for TuplePtr {
     fn eq(&self, other: &Self) -> bool {
         self.id == other.id
     }
 }
 
-impl Eq for SlotPtr {}
+impl Eq for TuplePtr {}
 
-impl Hash for SlotPtr {
+impl Hash for TuplePtr {
     fn hash<H: Hasher>(&self, state: &mut H) {
         self.id.hash(state)
     }
 }
 
-impl SlotPtr {
+impl TuplePtr {
     #[inline]
     pub fn id(&self) -> TupleId {
         self.id
@@ -91,7 +91,7 @@
     #[inline]
     pub fn byte_source(&self) -> SlotByteSource {
         SlotByteSource {
-            ptr: self as *const SlotPtr,
+            ptr: self as *const TuplePtr,
         }
     }
 
@@ -108,7 +108,7 @@
 /// So we can build SliceRefs off of SlotPtrs
 pub struct SlotByteSource {
-    ptr: *const SlotPtr,
+    ptr: *const TuplePtr,
 }
 
 unsafe impl Send for SlotByteSource {}
diff --git a/crates/db/src/tuplebox/tuples/tuple.rs b/crates/db/src/tuplebox/tuples/tuple_ref.rs
similarity index 86%
rename from crates/db/src/tuplebox/tuples/tuple.rs
rename to crates/db/src/tuplebox/tuples/tuple_ref.rs
index b17877e0..091ee3bf 100644
--- a/crates/db/src/tuplebox/tuples/tuple.rs
+++ b/crates/db/src/tuplebox/tuples/tuple_ref.rs
@@ -18,18 +18,20 @@ use std::sync::Arc;
 
 use moor_values::util::slice_ref::SliceRef;
 
-use crate::tuplebox::tuples::slot_ptr::SlotPtr;
+use crate::tuplebox::tuples::tuple_ptr::TuplePtr;
 use crate::tuplebox::tuples::{SlotBox, SlotBoxError, TupleId};
 use crate::tuplebox::RelationId;
 
 pub struct TupleRef {
-    sp: *mut SlotPtr,
+    // Yo dawg I heard you like pointers, so I put a pointer in your pointer.
+    sp: *mut TuplePtr,
 }
 
-#[repr(C)]
+#[repr(C, align(8))]
 struct Header {
     ts: u64,
     domain_size: u32,
+    codomain_size: u32,
 }
 
 unsafe impl Send for TupleRef {}
@@ -38,7 +40,7 @@ impl TupleRef {
     // Wrap an existing SlotPtr.
     // Note: to avoid deadlocking at construction, assumes that the tuple is already upcounted by the
     // caller.
-    pub(crate) fn at_ptr(sp: *mut SlotPtr) -> Self {
+    pub(crate) fn at_ptr(sp: *mut TuplePtr) -> Self {
         Self { sp }
     }
     /// Allocate the given tuple in a slotbox.
@@ -53,15 +55,19 @@ impl TupleRef {
         let tuple_ref = sb.clone().allocate(total_size, relation_id, None)?;
         sb.update_with(tuple_ref.id(), |mut buffer| {
             let domain_len = domain.len();
+            let codomain_len = codomain.len();
             {
                 let header_ptr = buffer.as_mut().as_mut_ptr() as *mut Header;
                 let header = unsafe { &mut *header_ptr };
                 header.ts = ts;
                 header.domain_size = domain_len as u32;
+                header.codomain_size = codomain_len as u32;
             }
             let start_pos = std::mem::size_of::<Header>();
+            let codomain_start = start_pos + domain_len;
+            let codomain_end = codomain_start + codomain_len;
             buffer[start_pos..start_pos + domain_len].copy_from_slice(domain);
-            buffer[start_pos + domain_len..].copy_from_slice(codomain);
+            buffer[codomain_start..codomain_end].copy_from_slice(codomain);
         })?;
         Ok(tuple_ref)
     }
@@ -101,9 +107,10 @@ impl TupleRef {
     pub fn codomain(&self) -> SliceRef {
         let header = self.header();
         let domain_size = header.domain_size as usize;
+        let codomain_size = header.codomain_size as usize;
         let buffer = self.slot_buffer();
         let codomain_start = std::mem::size_of::<Header>() + domain_size;
-        buffer.slice(codomain_start..)
+        buffer.slice(codomain_start..codomain_start + codomain_size)
     }
 
     /// The raw buffer of the tuple, including the header, not dividing up the domain and codomain.
@@ -129,7 +136,7 @@ impl TupleRef {
     }
 
     #[inline]
-    fn resolve_slot_ptr(&self) -> Pin<&mut SlotPtr> {
+    fn resolve_slot_ptr(&self) -> Pin<&mut TuplePtr> {
         unsafe { Pin::new_unchecked(&mut *self.sp) }
     }
 
diff --git a/crates/db/src/tuplebox/tuples/tx_tuple.rs b/crates/db/src/tuplebox/tuples/tx_tuple.rs
index 520802c8..7bbde439 100644
--- a/crates/db/src/tuplebox/tuples/tx_tuple.rs
+++ b/crates/db/src/tuplebox/tuples/tx_tuple.rs
@@ -17,7 +17,7 @@ use moor_values::util::slice_ref::SliceRef;
 use crate::tuplebox::tuples::TupleId;
 use crate::tuplebox::tuples::TupleRef;
 
-/// Possible operations on tuples, in the context of a transaction .
+/// Possible operations on tuples, in the local context of a transaction.
 #[derive(Clone)]
 pub enum TxTuple {
     /// Insert tuple into the relation.
diff --git a/crates/kernel/src/builtins/bf_objects.rs b/crates/kernel/src/builtins/bf_objects.rs
index b0045e31..10500d4c 100644
--- a/crates/kernel/src/builtins/bf_objects.rs
+++ b/crates/kernel/src/builtins/bf_objects.rs
@@ -33,6 +33,7 @@ use crate::tasks::VerbCall;
 use crate::vm::ExecutionResult::ContinueVerb;
 use crate::vm::VM;
 use moor_compiler::builtins::offset_for_builtin;
+use moor_values::model::defset::Named;
 
 /*
 Function: int valid (obj object)
diff --git a/crates/kernel/src/builtins/bf_verbs.rs b/crates/kernel/src/builtins/bf_verbs.rs
index 57ea132d..2e00ea1d 100644
--- a/crates/kernel/src/builtins/bf_verbs.rs
+++ b/crates/kernel/src/builtins/bf_verbs.rs
@@ -15,7 +15,7 @@ use std::sync::Arc;
 
 use async_trait::async_trait;
-use moor_values::model::defset::HasUuid;
+use moor_values::model::defset::{HasUuid, Named};
 use moor_values::model::objects::ObjFlag;
 use moor_values::AsByteBuffer;
 use strum::EnumCount;
diff --git a/crates/kernel/src/vm/vm_unwind.rs b/crates/kernel/src/vm/vm_unwind.rs
index d7b245b2..fd6d3824 100644
--- a/crates/kernel/src/vm/vm_unwind.rs
+++ b/crates/kernel/src/vm/vm_unwind.rs
@@ -25,6 +25,7 @@ use crate::vm::activation::{Activation, HandlerType};
 use crate::vm::{ExecutionResult, VMExecState, VM};
 use moor_compiler::builtins::BUILTIN_DESCRIPTORS;
 use moor_compiler::labels::{Label, Offset};
+use moor_values::model::defset::Named;
 
 #[derive(Clone, Eq, PartialEq, Debug, Decode, Encode)]
 pub struct UncaughtException {
diff --git a/crates/kernel/testsuite/basic/basic_suite.rs b/crates/kernel/testsuite/basic/basic_suite.rs
index a3ccb496..55f7d10c 100644
--- a/crates/kernel/testsuite/basic/basic_suite.rs
+++ b/crates/kernel/testsuite/basic/basic_suite.rs
@@ -19,9 +19,11 @@ use moor_db::Database;
 use moor_kernel::tasks::sessions::NoopClientSession;
 use moor_kernel::tasks::vm_test_utils::call_verb;
 use moor_kernel::textdump::load_db::textdump_load;
+use moor_values::model::defset::Named;
 use moor_values::model::r#match::VerbArgsSpec;
 use moor_values::model::verbs::{BinaryType, VerbFlag};
 use moor_values::model::world_state::WorldStateSource;
+use moor_values::model::CommitResult;
 use moor_values::var::objid::Objid;
 use moor_values::var::Var;
 use moor_values::{AsByteBuffer, SYSTEM_OBJECT};
@@ -34,7 +36,7 @@ fn testsuite_dir() -> PathBuf {
 }
 
 /// Create a minimal Db to support the test harness.
-async fn load_db(db: &mut TupleBoxWorldStateSource) {
+async fn load_textdump(db: &mut TupleBoxWorldStateSource) {
     let mut tx = db.loader_client().unwrap();
     textdump_load(
         tx.as_mut(),
@@ -42,7 +44,7 @@ async fn load_db(db: &mut TupleBoxWorldStateSource) {
     )
     .await
    .expect("Could not load textdump");
-    tx.commit().await.unwrap();
+    assert_eq!(tx.commit().await.unwrap(), CommitResult::Success);
 }
 
 async fn compile_verbs(db: &mut TupleBoxWorldStateSource, verbs: &[(&str, &Program)]) {
@@ -61,8 +63,26 @@ async fn compile_verbs(db: &mut TupleBoxWorldStateSource, verbs: &[(&str, &Program)]) {
     )
     .await
     .unwrap();
+
+        // Verify it was added.
+        let verb = tx
+            .get_verb(Objid(3), SYSTEM_OBJECT, verb_name)
+            .await
+            .unwrap();
+        assert!(verb.matches_name(verb_name));
+    }
+    assert_eq!(tx.commit().await.unwrap(), CommitResult::Success);
+
+    // And then verify that again in a new transaction.
+    let mut tx = db.new_world_state().await.unwrap();
+    for (verb_name, _) in verbs {
+        let verb = tx
+            .get_verb(Objid(3), SYSTEM_OBJECT, verb_name)
+            .await
+            .unwrap();
+        assert!(verb.matches_name(verb_name));
     }
-    tx.commit().await.unwrap();
+    assert_eq!(tx.commit().await.unwrap(), CommitResult::Success);
 }
 
 async fn eval(db: &mut TupleBoxWorldStateSource, expression: &str) -> Var {
@@ -104,7 +124,7 @@ async fn run_basic_test(test_dir: &str) {
     // Frustratingly the individual test lines are not independent, so we need to run them in a
     // single database.
     let (mut db, _) = TupleBoxWorldStateSource::open(None, 1 << 30).await;
-    load_db(&mut db).await;
+    load_textdump(&mut db).await;
     for (line_num, (input, expected_output)) in zipped.enumerate() {
         let evaluated = eval(&mut db, input).await;
         let output = eval(&mut db, expected_output).await;
diff --git a/crates/values/src/model/defset.rs b/crates/values/src/model/defset.rs
index 1009d801..ca9726ea 100644
--- a/crates/values/src/model/defset.rs
+++ b/crates/values/src/model/defset.rs
@@ -15,7 +15,9 @@
 use crate::util::slice_ref::SliceRef;
 use crate::AsByteBuffer;
 use bytes::BufMut;
+use itertools::Itertools;
 use std::convert::TryInto;
+use std::fmt::{Debug, Display, Formatter};
 use uuid::Uuid;
 
 pub trait HasUuid {
@@ -24,16 +26,34 @@ pub trait HasUuid {
 
 pub trait Named {
     fn matches_name(&self, name: &str) -> bool;
+    fn names(&self) -> Vec<&str>;
 }
 
 /// A container for verb or property defs.
 /// Immutable, and can be iterated over in sequence, or searched by name.
-#[derive(Debug, Clone, Eq, PartialEq)]
+#[derive(Clone, Eq, PartialEq)]
 pub struct Defs<T: AsByteBuffer + Clone + Sized + HasUuid + Named + 'static> {
     bytes: SliceRef,
     _phantom: std::marker::PhantomData<T>,
 }
 
+impl<T: AsByteBuffer + Clone + Sized + HasUuid + Named + 'static> Display for Defs<T> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let names = self
+            .iter()
+            .map(|p| p.names().iter().map(|s| s.to_string()).join(":"))
+            .collect::<Vec<String>>()
+            .join(", ");
+        f.write_fmt(format_args!("{{{}}}", names))
+    }
+}
+
+impl<T: AsByteBuffer + Clone + Sized + HasUuid + Named + 'static> Debug for Defs<T> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        // Just use the display
+        Display::fmt(self, f)
+    }
+}
 pub struct DefsIter<T: AsByteBuffer> {
     position: usize,
     buffer: SliceRef,
 }
diff --git a/crates/values/src/model/propdef.rs b/crates/values/src/model/propdef.rs
index 3d724262..99948bde 100644
--- a/crates/values/src/model/propdef.rs
+++ b/crates/values/src/model/propdef.rs
@@ -129,6 +129,10 @@ impl Named for PropDef {
     fn matches_name(&self, name: &str) -> bool {
         self.name().to_lowercase() == name.to_lowercase().as_str()
     }
+
+    fn names(&self) -> Vec<&str> {
+        vec![self.name()]
+    }
 }
 
 impl HasUuid for PropDef {
diff --git a/crates/values/src/model/verbdef.rs b/crates/values/src/model/verbdef.rs
index 71a550ec..fa0c8c8d 100644
--- a/crates/values/src/model/verbdef.rs
+++ b/crates/values/src/model/verbdef.rs
@@ -117,9 +117,17 @@ impl VerbDef {
         let view = self.get_header_view();
         view.args().read()
     }
+}
+
+impl Named for VerbDef {
+    fn matches_name(&self, name: &str) -> bool {
+        self.names()
+            .iter()
+            .any(|verb| verbname_cmp(verb.to_lowercase().as_str(), name.to_lowercase().as_str()))
+    }
 
     #[must_use]
-    pub fn names(&self) -> Vec<&str> {
+    fn names(&self) -> Vec<&str> {
         let view = self.get_header_view();
         let num_names = view.num_names().read() as usize;
         let offset = verbdef::names::OFFSET;
@@ -137,14 +145,6 @@ impl VerbDef {
     }
 }
 
-impl Named for VerbDef {
-    fn matches_name(&self, name: &str) -> bool {
-        self.names()
-            .iter()
-            .any(|verb| verbname_cmp(verb.to_lowercase().as_str(), name.to_lowercase().as_str()))
-    }
-}
-
 impl HasUuid for VerbDef {
     fn uuid(&self) -> Uuid {
         let view = self.get_header_view();
@@ -178,7 +178,7 @@
 pub type VerbDefs = Defs<VerbDef>;
 
 #[cfg(test)]
 mod tests {
-    use crate::model::defset::HasUuid;
+    use crate::model::defset::{HasUuid, Named};
     use crate::model::r#match::VerbArgsSpec;
     use crate::model::verbdef::{VerbDef, VerbDefs};
     use crate::model::verbs::VerbFlag;
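
Editor's note: the slotted_page.rs changes above all lean on one round-up trick: `(n + 7) & !7` pads a size up to the next multiple of 8, which is what `add_entry`, `SlotIndexEntry::alloc`, and the new `% 8 == 0` assertions agree on. A minimal standalone sketch of that arithmetic, not part of the patch (the values below are illustrative only):

    /// Round `n` up to the nearest multiple of 8, as the patched
    /// slotted_page.rs does for slot content sizes and offsets.
    const fn pad8(n: usize) -> usize {
        (n + 7) & !7
    }

    fn main() {
        assert_eq!(pad8(0), 0); // already-aligned sizes are unchanged
        assert_eq!(pad8(8), 8);
        assert_eq!(pad8(1), 8); // everything else rounds up
        assert_eq!(pad8(9), 16);
        // Any offset built purely from padded sizes stays 8-byte aligned,
        // which is what the new assert_eq!(offset % 8, 0, ...) checks enforce.
        assert_eq!(pad8(9) % 8, 0);
    }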
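Editor's note: relatedly, tuple_ref.rs now stores an explicit `codomain_size` in the tuple header because a padded slot buffer can be longer than the tuple it holds, so slicing "to the end" would pick up padding bytes. A standalone sketch of the [Header][domain][codomain] layout, using a plain Vec<u8> in place of the page-backed buffer (the names and HEADER_SIZE constant here are illustrative assumptions, mirroring the patch's repr(C, align(8)) Header { ts: u64, domain_size: u32, codomain_size: u32 }):

    const HEADER_SIZE: usize = 16; // u64 + u32 + u32, 8-byte aligned

    fn main() {
        let (domain, codomain) = (b"key".as_slice(), b"val".as_slice());

        // Allocate the slot padded to 8 bytes, the way the page now over-allocates.
        let len = HEADER_SIZE + domain.len() + codomain.len();
        let mut buf = vec![0u8; (len + 7) & !7];

        // Mirror TupleRef::allocate: domain then codomain at fixed offsets.
        let codomain_start = HEADER_SIZE + domain.len();
        let codomain_end = codomain_start + codomain.len();
        buf[HEADER_SIZE..codomain_start].copy_from_slice(domain);
        buf[codomain_start..codomain_end].copy_from_slice(codomain);

        // With an explicit end bound we get the codomain back exactly...
        assert_eq!(&buf[codomain_start..codomain_end], codomain);
        // ...whereas slicing to the buffer's end would include the padding.
        assert_ne!(buf[codomain_start..].len(), codomain.len());
    }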