From f090ce634c74e6e8dcc976fab4869e6516c3e8e8 Mon Sep 17 00:00:00 2001
From: Ryan Daum
Date: Sun, 7 Jan 2024 10:56:42 -0500
Subject: [PATCH] A round of changes related to problems & style violations
 found by clippy

(Note that in the process a bug was found in the slotbox reference counts
established during initial textdump load, which will have to be resolved;
so far a "harmless" warning seems sufficient.)

Clippy should now produce clean output for an --all-targets --all-features
run.
---
 crates/compiler/src/parse.rs | 20 ++++++------
 crates/daemon/src/connections_tb.rs | 2 ++
 crates/daemon/src/rpc_server.rs | 6 ++--
 crates/db/benches/tb_single_thread.rs | 5 ---
 crates/db/src/db_tx.rs | 2 ++
 crates/db/src/tuplebox/base_relation.rs | 2 +-
 crates/db/src/tuplebox/coldstorage.rs | 8 ++---
 crates/db/src/tuplebox/page_storage.rs | 12 +++----
 crates/db/src/tuplebox/tb.rs | 10 +++---
 crates/db/src/tuplebox/tuples/slotbox.rs | 32 +++++++++++--------
 crates/db/src/tuplebox/tuples/slotted_page.rs | 10 ++++--
 crates/db/src/tuplebox/tuples/tuple_ptr.rs | 7 +++-
 crates/db/src/tuplebox/tuples/tuple_ref.rs | 18 ++++++++---
 crates/db/src/tuplebox/tx/transaction.rs | 2 +-
 crates/db/src/tuplebox/tx/working_set.rs | 4 +++
 crates/kernel/src/tasks/scheduler.rs | 4 +++
 crates/kernel/src/tasks/task.rs | 2 ++
 crates/kernel/src/tasks/vm_host.rs | 8 ++---
 crates/kernel/src/vm/activation.rs | 2 +-
 crates/kernel/src/vm/vm_execute.rs | 2 +-
 crates/values/src/model/world_state.rs | 4 +++
 crates/values/src/util/bitarray.rs | 2 +-
 crates/web-host/src/host/mod.rs | 4 +--
 23 files changed, 101 insertions(+), 67 deletions(-)

diff --git a/crates/compiler/src/parse.rs b/crates/compiler/src/parse.rs
index 374b32f7..ccb61312 100644
--- a/crates/compiler/src/parse.rs
+++ b/crates/compiler/src/parse.rs
@@ -143,17 +143,15 @@ fn parse_arglist(
     names: Rc>,
     pairs: pest::iterators::Pairs,
 ) -> Result, CompileError> {
-    for pair in pairs {
-        match pair.as_rule() {
-            Rule::exprlist => {
-                return parse_exprlist(names, pair.into_inner());
-            }
-            _ => {
-                panic!("Unimplemented arglist: {:?}", pair);
-            }
-        }
-    }
-    Ok(vec![])
+    let Some(first) = pairs.peek() else {
+        return Ok(vec![]);
+    };
+
+    let Rule::exprlist = first.as_rule() else {
+        panic!("Unimplemented arglist: {:?}", first);
+    };
+
+    return parse_exprlist(names, first.into_inner());
 }
 
 fn parse_except_codes(
diff --git a/crates/daemon/src/connections_tb.rs b/crates/daemon/src/connections_tb.rs
index 0bd7d2c8..2f301923 100644
--- a/crates/daemon/src/connections_tb.rs
+++ b/crates/daemon/src/connections_tb.rs
@@ -60,6 +60,8 @@ impl ConnectionsTb {
 
 #[repr(usize)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, EnumIter, EnumCount, Display)]
+// Don't warn about same-prefix, "I did that on purpose"
+#[allow(clippy::enum_variant_names)]
 enum ConnectionRelation {
     // One to many, client id <-> connection/player object. Secondary index will seek on object id.
ClientConnection = 0, diff --git a/crates/daemon/src/rpc_server.rs b/crates/daemon/src/rpc_server.rs index fcee8e7a..cc98eac8 100644 --- a/crates/daemon/src/rpc_server.rs +++ b/crates/daemon/src/rpc_server.rs @@ -1003,8 +1003,7 @@ impl RpcServer { client_id: Uuid, ) -> Result<(), SessionError> { let key: Key<32> = Key::from(&self.keypair[32..]); - let pk: PasetoAsymmetricPublicKey = - PasetoAsymmetricPublicKey::try_from(&key).unwrap(); + let pk: PasetoAsymmetricPublicKey = PasetoAsymmetricPublicKey::from(&key); let verified_token = Paseto::::try_verify( token.0.as_str(), &pk, @@ -1059,8 +1058,7 @@ impl RpcServer { objid: Option, ) -> Result { let key: Key<32> = Key::from(&self.keypair[32..]); - let pk: PasetoAsymmetricPublicKey = - PasetoAsymmetricPublicKey::try_from(&key).unwrap(); + let pk: PasetoAsymmetricPublicKey = PasetoAsymmetricPublicKey::from(&key); let verified_token = Paseto::::try_verify( token.0.as_str(), &pk, diff --git a/crates/db/benches/tb_single_thread.rs b/crates/db/benches/tb_single_thread.rs index 6df68c96..2d921525 100644 --- a/crates/db/benches/tb_single_thread.rs +++ b/crates/db/benches/tb_single_thread.rs @@ -45,11 +45,6 @@ async fn test_db() -> Arc { fn from_val(value: i64) -> SliceRef { SliceRef::from_bytes(&value.to_le_bytes()[..]) } -fn to_val(value: SliceRef) -> i64 { - let mut bytes = [0; 8]; - bytes.copy_from_slice(value.as_slice()); - i64::from_le_bytes(bytes) -} fn load_history() -> Vec { let lines = include_str!("list-append-dataset.json") diff --git a/crates/db/src/db_tx.rs b/crates/db/src/db_tx.rs index 6a6eac5b..a46339bb 100644 --- a/crates/db/src/db_tx.rs +++ b/crates/db/src/db_tx.rs @@ -127,6 +127,8 @@ pub trait DbTransaction { ) -> Result<(), WorldStateError>; /// Define a new verb on the given object. + // Yes yes I know it's a lot of arguments, but wrapper object here is redundant. + #[allow(clippy::too_many_arguments)] async fn add_object_verb( &self, location: Objid, diff --git a/crates/db/src/tuplebox/base_relation.rs b/crates/db/src/tuplebox/base_relation.rs index a4deb80b..26019ae4 100644 --- a/crates/db/src/tuplebox/base_relation.rs +++ b/crates/db/src/tuplebox/base_relation.rs @@ -90,7 +90,7 @@ impl BaseRelation { /// Establish indexes for a tuple initial-loaded from secondary storage. Basically a, "trust us, /// this exists" move. - pub fn index_tuple(&mut self, tuple: TupleRef) { + pub fn index_tuple(&mut self, mut tuple: TupleRef) { self.tuples.insert(tuple.clone()); // Reset timestamp to 0, since this is a tuple initial-loaded from secondary storage. diff --git a/crates/db/src/tuplebox/coldstorage.rs b/crates/db/src/tuplebox/coldstorage.rs index 5bb70854..89b92a5d 100644 --- a/crates/db/src/tuplebox/coldstorage.rs +++ b/crates/db/src/tuplebox/coldstorage.rs @@ -443,7 +443,7 @@ impl WalManager { to_evict: &mut Vec, ) { // The first N bytes have to be WAL_MAGIC or this is an invalid chunk. - if chunk.len() < wal_entry::header::OFFSET { + if chunk.len() < wal_entry::data::OFFSET { warn!("Chunk is too small to be valid"); return; } @@ -470,7 +470,7 @@ impl WalManager { // page number. let relation_id = RelationId(wal_entry.header().relation_id().read() as usize); - write_mutations.push(PageStoreMutation::SyncRelationPage( + write_mutations.push(PageStoreMutation::SyncRelation( relation_id, pid as PageId, data, @@ -479,13 +479,13 @@ impl WalManager { WalEntryType::SequenceSync => { // Write current state of sequences to the sequence page. Ignores page id, slot id. // Data is the contents of the sequence page. 
- write_mutations.push(PageStoreMutation::SyncSequencePage(data)); + write_mutations.push(PageStoreMutation::SyncSequence(data)); } WalEntryType::Delete => { // Delete let relation_id = RelationId(wal_entry.header().relation_id().read() as usize); let slot_id = wal_entry.header().slot_id().read(); - write_mutations.push(PageStoreMutation::DeleteRelationPage( + write_mutations.push(PageStoreMutation::DeleteRelation( pid as PageId, relation_id, )); diff --git a/crates/db/src/tuplebox/page_storage.rs b/crates/db/src/tuplebox/page_storage.rs index 45bd495b..02aadd69 100644 --- a/crates/db/src/tuplebox/page_storage.rs +++ b/crates/db/src/tuplebox/page_storage.rs @@ -29,9 +29,9 @@ use std::thread::yield_now; use tokio_eventfd::EventFd; pub(crate) enum PageStoreMutation { - SyncRelationPage(RelationId, PageId, Box<[u8]>), - SyncSequencePage(Box<[u8]>), - DeleteRelationPage(PageId, RelationId), + SyncRelation(RelationId, PageId, Box<[u8]>), + SyncSequence(Box<[u8]>), + DeleteRelation(PageId, RelationId), } /// Manages the directory of pages, one file per page. @@ -183,7 +183,7 @@ impl PageStore { let request_id = self.next_request_id; self.next_request_id += 1; match mutation { - PageStoreMutation::SyncRelationPage(relation_id, page_id, data) => { + PageStoreMutation::SyncRelation(relation_id, page_id, data) => { let path = self.dir.join(format!("{}_{}.page", page_id, relation_id.0)); let len = data.len(); let mut options = OpenOptions::new(); @@ -217,7 +217,7 @@ impl PageStore { .expect("Unable to push fsync to submission queue"); } } - PageStoreMutation::SyncSequencePage(data) => { + PageStoreMutation::SyncSequence(data) => { let path = self.dir.join("sequences.page"); let len = data.len(); @@ -250,7 +250,7 @@ impl PageStore { .expect("Unable to push fsync to submission queue"); } } - PageStoreMutation::DeleteRelationPage(_, _) => { + PageStoreMutation::DeleteRelation(_, _) => { // TODO } } diff --git a/crates/db/src/tuplebox/tb.rs b/crates/db/src/tuplebox/tb.rs index 950a7602..3d353f6d 100644 --- a/crates/db/src/tuplebox/tb.rs +++ b/crates/db/src/tuplebox/tb.rs @@ -178,17 +178,17 @@ impl TupleBox { pub(crate) async fn prepare_commit_set<'a>( &self, commit_ts: u64, - tx_working_set: &WorkingSet, + tx_working_set: &mut WorkingSet, ) -> Result { let mut commitset = CommitSet::new(commit_ts); - for (_, local_relation) in tx_working_set.relations.iter() { + for (_, local_relation) in tx_working_set.relations.iter_mut() { let relation_id = local_relation.id; // scan through the local working set, and for each tuple, check to see if it's safe to // commit. If it is, then we'll add it to the commit set. // note we're not actually committing yet, just producing a candidate commit set let canonical = &self.canonical.read().await[relation_id.0]; - for tuple in local_relation.tuples() { + for mut tuple in local_relation.tuples_mut() { let canon_tuple = canonical.seek_by_domain(tuple.domain().clone()); // If there's no value there, and our local is not tombstoned and we're not doing @@ -197,7 +197,7 @@ impl TupleBox { // TODO: it should be possible to do this without having the fork logic exist twice // here. let Some(cv) = canon_tuple else { - match &tuple { + match &mut tuple { TxTuple::Insert(t) => { t.update_timestamp(commit_ts); let forked_relation = commitset.fork(relation_id, canonical); @@ -232,7 +232,7 @@ impl TupleBox { // Otherwise apply the change into a new canonical relation, which is a CoW // branching of the old one. 
let forked_relation = commitset.fork(relation_id, canonical); - match &tuple { + match &mut tuple { TxTuple::Insert(t) | TxTuple::Update(t) => { t.update_timestamp(commit_ts); let forked_relation = commitset.fork(relation_id, canonical); diff --git a/crates/db/src/tuplebox/tuples/slotbox.rs b/crates/db/src/tuplebox/tuples/slotbox.rs index 043a6527..1d32157c 100644 --- a/crates/db/src/tuplebox/tuples/slotbox.rs +++ b/crates/db/src/tuplebox/tuples/slotbox.rs @@ -37,7 +37,7 @@ use std::sync::{Arc, Mutex}; use moor_values::util::{BitArray, Bitset64}; use thiserror::Error; -use tracing::error; +use tracing::{error, warn}; use crate::tuplebox::pool::{Bid, BufferPool, PagerError}; pub use crate::tuplebox::tuples::slotted_page::SlotId; @@ -122,6 +122,12 @@ impl SlotBox { inner.page_for(id) } + pub fn refcount(&self, id: TupleId) -> Result { + let inner = self.inner.lock().unwrap(); + let page_handle = inner.page_for(id.page)?; + page_handle.refcount(id.slot) + } + pub fn upcount(&self, id: TupleId) -> Result<(), SlotBoxError> { let inner = self.inner.lock().unwrap(); let page_handle = inner.page_for(id.page)?; @@ -188,7 +194,7 @@ impl SlotBox { } pub fn num_pages(&self) -> usize { - let mut inner = self.inner.lock().unwrap(); + let inner = self.inner.lock().unwrap(); inner.available_page_space.len() } @@ -394,8 +400,6 @@ impl Inner { } fn report_free(&mut self, pid: PageId, new_size: usize, is_empty: bool) { - // Seek the page in the available_page_space vectors, and add the bytes back to its free space. - // We don't know the relation id here, so we have to linear scan all of them. for (_, available_page_space) in self.available_page_space.iter_mut() { if available_page_space.update_page(pid, new_size, is_empty) { if is_empty { @@ -405,10 +409,11 @@ impl Inner { } return; } - return; } - error!( + // TODO: initial textdump load seems to have a problem with initial inserts having a too-low refcount? + // but once the DB is established, it's fine. So maybe this is a problem with insert tuple allocation? + warn!( "Page not found in used pages in allocator on free; pid {}; could be double-free, dangling weak reference?", pid ); @@ -460,6 +465,7 @@ impl PageSpace { /// Update the allocation record for the page. fn update_page(&mut self, pid: PageId, available: usize, is_empty: bool) -> bool { + // Page does not exist in this relation, so we can't update it. let Some(index) = self.seek(pid) else { return false; }; @@ -642,8 +648,8 @@ mod tests { // and then scan back and verify their presence/equality. #[test] fn test_basic_add_fill_etc() { - let mut sb = Arc::new(SlotBox::new(32768 * 32)); - let mut tuples = fill_until_full(&mut sb); + let sb = Arc::new(SlotBox::new(32768 * 32)); + let mut tuples = fill_until_full(&sb); for (i, (tuple, expected_value)) in tuples.iter().enumerate() { let retrieved_domain = tuple.domain(); let retrieved_codomain = tuple.codomain(); @@ -671,8 +677,8 @@ mod tests { // everything mmap DONTNEED'd, and we should be able to re-fill it again, too. #[test] fn test_full_fill_and_empty() { - let mut sb = Arc::new(SlotBox::new(32768 * 64)); - let mut tuples = fill_until_full(&mut sb); + let sb = Arc::new(SlotBox::new(32768 * 64)); + let mut tuples = fill_until_full(&sb); // Collect the manual ids of the tuples we've allocated, so we can check them for refcount goodness. let ids = tuples.iter().map(|(t, _)| t.id()).collect::>(); @@ -688,8 +694,8 @@ mod tests { // fill back up again and verify the new presence. 
#[test] fn test_fill_and_free_and_refill_etc() { - let mut sb = Arc::new(SlotBox::new(32768 * 64)); - let mut tuples = fill_until_full(&mut sb); + let sb = Arc::new(SlotBox::new(32768 * 64)); + let mut tuples = fill_until_full(&sb); let mut rng = thread_rng(); let mut freed_tuples = Vec::new(); @@ -715,7 +721,7 @@ mod tests { assert!(sb.get(id).is_err()); } // Now fill back up again. - let new_tuples = fill_until_full(&mut sb); + let new_tuples = fill_until_full(&sb); // Verify both the new tuples and the old tuples are there. for (tuple, expected) in new_tuples { let retrieved_domain = tuple.domain(); diff --git a/crates/db/src/tuplebox/tuples/slotted_page.rs b/crates/db/src/tuplebox/tuples/slotted_page.rs index 03b2a1fa..939c6241 100644 --- a/crates/db/src/tuplebox/tuples/slotted_page.rs +++ b/crates/db/src/tuplebox/tuples/slotted_page.rs @@ -271,7 +271,6 @@ impl<'a> SlottedPage<'a> { index_entry.as_mut().mark_used(size); // Update used bytes in the header - let header = header; header.add_used(size); let slc = unsafe { @@ -315,7 +314,6 @@ impl<'a> SlottedPage<'a> { .alloc(content_start_position, content_size, size); // Update the header to subtract the used space. - let header = header; let new_slot = header.add_entry(size); // Return the slot id and the number of bytes remaining to append at the end. @@ -394,6 +392,14 @@ impl<'a> SlottedPage<'a> { Ok((self.available_content_bytes(), slot_size, is_empty)) } + pub(crate) fn refcount(&self, slot_id: SlotId) -> Result { + let index_entry = self.get_index_entry(slot_id); + if !index_entry.used { + return Err(SlotBoxError::TupleNotFound(slot_id as usize)); + } + Ok(index_entry.refcount) + } + pub(crate) fn upcount(&self, slot_id: SlotId) -> Result<(), SlotBoxError> { let mut index_entry = self.get_index_entry_mut(slot_id); unsafe { index_entry.as_mut().get_unchecked_mut() }.refcount += 1; diff --git a/crates/db/src/tuplebox/tuples/tuple_ptr.rs b/crates/db/src/tuplebox/tuples/tuple_ptr.rs index 602afa4e..8ab30055 100644 --- a/crates/db/src/tuplebox/tuples/tuple_ptr.rs +++ b/crates/db/src/tuplebox/tuples/tuple_ptr.rs @@ -78,7 +78,7 @@ impl TuplePtr { } #[inline] - pub(crate) fn as_mut_ptr(&self) -> *mut T { + pub(crate) fn as_mut_ptr(&mut self) -> *mut T { self.bufaddr as *mut T } @@ -95,6 +95,11 @@ impl TuplePtr { } } + #[inline] + pub fn refcount(&self) -> u16 { + self.sb.refcount(self.id).unwrap() + } + #[inline] pub fn upcount(&self) { self.sb.upcount(self.id).unwrap(); diff --git a/crates/db/src/tuplebox/tuples/tuple_ref.rs b/crates/db/src/tuplebox/tuples/tuple_ref.rs index 091ee3bf..afe13334 100644 --- a/crates/db/src/tuplebox/tuples/tuple_ref.rs +++ b/crates/db/src/tuplebox/tuples/tuple_ref.rs @@ -69,6 +69,9 @@ impl TupleRef { buffer[start_pos..start_pos + domain_len].copy_from_slice(domain); buffer[codomain_start..codomain_end].copy_from_slice(codomain); })?; + + // Initial refcount should be 1, because we have a reference to it. + assert_eq!(tuple_ref.resolve_slot_ptr().refcount(), 1); Ok(tuple_ref) } @@ -80,7 +83,7 @@ impl TupleRef { /// Update the timestamp of the tuple. 
#[inline] - pub fn update_timestamp(&self, ts: u64) { + pub fn update_timestamp(&mut self, ts: u64) { let header = self.header_mut(); header.ts = ts; } @@ -129,14 +132,19 @@ impl TupleRef { } #[inline] - fn header_mut(&self) -> &mut Header { - let slot_ptr = self.resolve_slot_ptr(); - let header: *mut Header = slot_ptr.as_mut_ptr(); + fn header_mut(&mut self) -> &mut Header { + let slot_ptr = self.resolve_slot_ptr_mut(); + let header: *mut Header = unsafe { slot_ptr.get_unchecked_mut() }.as_mut_ptr(); unsafe { &mut *header } } #[inline] - fn resolve_slot_ptr(&self) -> Pin<&mut TuplePtr> { + fn resolve_slot_ptr(&self) -> Pin<&TuplePtr> { + unsafe { Pin::new_unchecked(&*self.sp) } + } + + #[inline] + fn resolve_slot_ptr_mut(&mut self) -> Pin<&mut TuplePtr> { unsafe { Pin::new_unchecked(&mut *self.sp) } } diff --git a/crates/db/src/tuplebox/tx/transaction.rs b/crates/db/src/tuplebox/tx/transaction.rs index 606097a3..6c2f4ee7 100644 --- a/crates/db/src/tuplebox/tx/transaction.rs +++ b/crates/db/src/tuplebox/tx/transaction.rs @@ -89,7 +89,7 @@ impl Transaction { let mut working_set = self.working_set.write().await; let commit_set = self .db - .prepare_commit_set(commit_ts, working_set.as_ref().unwrap()) + .prepare_commit_set(commit_ts, working_set.as_mut().unwrap()) .await?; match self.db.try_commit(commit_set).await { Ok(_) => { diff --git a/crates/db/src/tuplebox/tx/working_set.rs b/crates/db/src/tuplebox/tx/working_set.rs index 3ba4ec09..ed46f3b8 100644 --- a/crates/db/src/tuplebox/tx/working_set.rs +++ b/crates/db/src/tuplebox/tx/working_set.rs @@ -488,6 +488,10 @@ impl TxBaseRelation { self.tuples.iter() } + pub fn tuples_mut(&mut self) -> impl Iterator { + self.tuples.iter_mut() + } + pub(crate) fn clear(&mut self) { self.tuples.clear(); self.domain_index.clear(); diff --git a/crates/kernel/src/tasks/scheduler.rs b/crates/kernel/src/tasks/scheduler.rs index dc5fa596..954a9dd1 100644 --- a/crates/kernel/src/tasks/scheduler.rs +++ b/crates/kernel/src/tasks/scheduler.rs @@ -285,6 +285,8 @@ impl Scheduler { /// (This path is really only used for the invocations from the serving processes like login, /// user_connected, or the do_command invocation which precedes an internal parser attempt.) #[instrument(skip(self, session))] + // Yes yes I know it's a lot of arguments, but wrapper object here is redundant. + #[allow(clippy::too_many_arguments)] pub async fn submit_verb_task( &self, player: Objid, @@ -1160,6 +1162,8 @@ impl Inner { } } + // Yes yes I know it's a lot of arguments, but wrapper object here is redundant. + #[allow(clippy::too_many_arguments)] async fn new_task( &mut self, task_start: TaskStart, diff --git a/crates/kernel/src/tasks/task.rs b/crates/kernel/src/tasks/task.rs index ab23208d..31a5a755 100644 --- a/crates/kernel/src/tasks/task.rs +++ b/crates/kernel/src/tasks/task.rs @@ -151,6 +151,8 @@ async fn max_vm_values(_ws: &mut dyn WorldState, is_background: bool) -> (usize, } impl Task { + // Yes yes I know it's a lot of arguments, but wrapper object here is redundant. 
+ #[allow(clippy::too_many_arguments)] pub async fn run( task_id: TaskId, task_start: TaskStart, diff --git a/crates/kernel/src/tasks/vm_host.rs b/crates/kernel/src/tasks/vm_host.rs index cf10e139..0b9cb188 100644 --- a/crates/kernel/src/tasks/vm_host.rs +++ b/crates/kernel/src/tasks/vm_host.rs @@ -206,7 +206,7 @@ impl VmHost { if self.vm_exec_state.tick_count >= self.max_ticks { return AbortLimit(AbortLimitReason::Ticks(self.vm_exec_state.tick_count)); } - let mut exec_params = VmExecParams { + let exec_params = VmExecParams { scheduler_sender: self.scheduler_control_sender.clone(), max_stack_depth: self.max_stack_depth, ticks_left: self.max_ticks - self.vm_exec_state.tick_count, @@ -218,7 +218,7 @@ impl VmHost { let mut result = self .vm .exec( - &mut exec_params, + &exec_params, &mut self.vm_exec_state, world_state, self.sessions.clone(), @@ -281,7 +281,7 @@ impl VmHost { bf_func_num: bf_offset, arguments: args, } => { - let mut exec_params = VmExecParams { + let exec_params = VmExecParams { max_stack_depth: self.max_stack_depth, scheduler_sender: self.scheduler_control_sender.clone(), ticks_left: self.max_ticks - self.vm_exec_state.tick_count, @@ -296,7 +296,7 @@ impl VmHost { &mut self.vm_exec_state, bf_offset, &args, - &mut exec_params, + &exec_params, world_state, self.sessions.clone(), ) diff --git a/crates/kernel/src/vm/activation.rs b/crates/kernel/src/vm/activation.rs index de0f6f6d..6c72042e 100644 --- a/crates/kernel/src/vm/activation.rs +++ b/crates/kernel/src/vm/activation.rs @@ -359,7 +359,7 @@ impl Activation { #[inline] pub(crate) fn peek_abs(&self, amt: usize) -> &Var { - return &self.valstack[amt]; + &self.valstack[amt] } #[inline] diff --git a/crates/kernel/src/vm/vm_execute.rs b/crates/kernel/src/vm/vm_execute.rs index f149b5b5..f24f8b84 100644 --- a/crates/kernel/src/vm/vm_execute.rs +++ b/crates/kernel/src/vm/vm_execute.rs @@ -347,7 +347,7 @@ impl VM { }; // TODO: quota check SVO_MAX_LIST_CONCAT -> E_QUOTA - let new_list = list.append(&tail); + let new_list = list.append(tail); state.update(0, &new_list); } Op::IndexSet => { diff --git a/crates/values/src/model/world_state.rs b/crates/values/src/model/world_state.rs index 399310b9..14e92f85 100644 --- a/crates/values/src/model/world_state.rs +++ b/crates/values/src/model/world_state.rs @@ -155,6 +155,8 @@ pub trait WorldState: Send + Sync { ) -> Result<(), WorldStateError>; /// Add a property for the given object. + // Yes yes I know it's a lot of arguments, but wrapper object here is redundant. + #[allow(clippy::too_many_arguments)] async fn define_property( &mut self, perms: Objid, @@ -174,6 +176,8 @@ pub trait WorldState: Send + Sync { ) -> Result<(), WorldStateError>; /// Add a verb to the given object. + // Yes yes I know it's a lot of arguments, but wrapper object here is redundant. 
+    #[allow(clippy::too_many_arguments)]
     async fn add_verb(
         &mut self,
         perms: Objid,
diff --git a/crates/values/src/util/bitarray.rs b/crates/values/src/util/bitarray.rs
index c3ae6de2..634bce40 100644
--- a/crates/values/src/util/bitarray.rs
+++ b/crates/values/src/util/bitarray.rs
@@ -169,7 +169,7 @@ where
     }
 
     #[inline]
-    pub fn len(&mut self) -> usize {
+    pub fn len(&self) -> usize {
         self.bitset.size()
     }
 
diff --git a/crates/web-host/src/host/mod.rs b/crates/web-host/src/host/mod.rs
index a52f0743..a2046b45 100644
--- a/crates/web-host/src/host/mod.rs
+++ b/crates/web-host/src/host/mod.rs
@@ -27,7 +27,7 @@ pub use web_host::{
 };
 
 #[derive(Serialize, Deserialize)]
-struct OID {
+struct Oid {
     oid: i64,
 }
 
@@ -42,7 +42,7 @@ pub fn var_as_json(v: &Var) -> serde_json::Value {
     match v.variant() {
         Variant::None => serde_json::Value::Null,
         Variant::Str(s) => serde_json::Value::String(s.to_string()),
-        Variant::Obj(o) => json!(OID { oid: o.0 }),
+        Variant::Obj(o) => json!(Oid { oid: o.0 }),
         Variant::Int(i) => serde_json::Value::Number(Number::from(*i)),
         Variant::Float(f) => json!(*f),
         Variant::Err(e) => json!(Error {