Skip to content

Commit

Permalink
the (rdaum) suffix in the TODOs didn't do what I thought it would do
Browse files Browse the repository at this point in the history
  • Loading branch information
rdaum committed Feb 10, 2024
1 parent b39aa12 commit f0ad4fd
Show file tree
Hide file tree
Showing 50 changed files with 89 additions and 89 deletions.
2 changes: 1 addition & 1 deletion crates/compiler/src/ast.rs
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ pub struct Stmt {
/// Note that this is not necessarily the same as the line number that will be reported into
/// codegen, and may not correspond to what shows as a result of `unparse`; that line number
/// is derived from the AST, not the parser.
/// TODO(rdaum): I may or may not keep this field around.
/// TODO(): I may or may not keep this field around.
pub parser_line_no: usize,
/// This line number is generated during a second pass over the tree, and is used to generate
/// the line number spans in the bytecode.
Expand Down
2 changes: 1 addition & 1 deletion crates/compiler/src/builtins.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ pub struct Builtin {
}

// Originally generated using ./generate_bf_list.py
// TODO(rdaum): this list is inconsistently used, and falls out of date. It's only used for generating
// TODO(): this list is inconsistently used, and falls out of date. It's only used for generating
// the list of functions for the `function_info` built-in right now. It could be used for
// validating arguments, and could be part of the registration process for the actual builtin
// implementations.
Expand Down
2 changes: 1 addition & 1 deletion crates/compiler/src/codegen.rs
Original file line number Diff line number Diff line change
Expand Up @@ -734,7 +734,7 @@ impl CodegenState {
}

fn generate_arg_list(&mut self, args: &Vec<Arg>) -> Result<(), CompileError> {
// TODO(rdaum): Check recursion down to see if all literal values, and if so reduce to a Imm value with the full list,
// TODO(): Check recursion down to see if all literal values, and if so reduce to a Imm value with the full list,
// instead of concatenation with MkSingletonList.
if args.is_empty() {
self.emit(Op::ImmEmptyList);
Expand Down
6 changes: 3 additions & 3 deletions crates/compiler/src/decompile.rs
Original file line number Diff line number Diff line change
Expand Up @@ -297,7 +297,7 @@ impl Decompile {
));
}
// Same as above, but with id.
// TODO(rdaum): we may want to consider collapsing these two VM opcodes
// TODO(): we may want to consider collapsing these two VM opcodes
Op::WhileId {
id,
end_label: loop_end_label,
Expand Down Expand Up @@ -654,7 +654,7 @@ impl Decompile {
}
// Decompile the body.
// Means decompiling until we hit EndExcept, so scan forward for that.
// TODO(rdaum): make sure that this doesn't fail with nested try/excepts?
// TODO(): make sure that this doesn't fail with nested try/excepts?
let (body, end_except) =
self.decompile_statements_until_match(|_, o| matches!(o, Op::EndExcept(_)))?;
let Op::EndExcept(end_label) = end_except else {
Expand Down Expand Up @@ -822,7 +822,7 @@ impl Decompile {
}
Op::EndCatch(_) | Op::Continue | Op::EndExcept(_) | Op::EndFinally => {
// Early exit; main logic is in TRY_FINALLY or CATCH etc case, above
// TODO(rdaum): MOO has "return ptr - 2;" -- doing something with the iteration, that
// TODO(): MOO has "return ptr - 2;" -- doing something with the iteration, that
// I may not be able to do with the current structure. See if I need to
unreachable!("should have been handled other decompilation branches")
}
Expand Down
2 changes: 1 addition & 1 deletion crates/compiler/src/opcode.rs
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ mod tests {
use crate::{Label, Name, Offset};

/// Verify we don't go over our 16 byte budget for opcodes.
// TODO(rdaum): This is still rather bloated.
// TODO(): This is still rather bloated.
#[test]
fn size_opcode() {
use crate::opcode::Op;
Expand Down
2 changes: 1 addition & 1 deletion crates/compiler/src/parse.rs
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ fn parse_expr(
.op(Op::infix(Rule::lor, Assoc::Left))
// 11. Logical and.
.op(Op::infix(Rule::land, Assoc::Left))
// TODO(rdaum): bitwise operators here (| 10, ^ XOR 9, & 8) if we ever get them.
// TODO(): bitwise operators here (| 10, ^ XOR 9, & 8) if we ever get them.
// 7
// Equality/inequality
.op(Op::infix(Rule::eq, Assoc::Left) | Op::infix(Rule::neq, Assoc::Left))
Expand Down
4 changes: 2 additions & 2 deletions crates/compiler/src/program.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ pub struct Program {
pub fork_vectors: Vec<Vec<Op>>,
/// As each statement is pushed, the line number is recorded, along with its offset in the main
/// vector.
/// TODO(rdaum): fork vector offsets... Have to think about that one.
/// TODO(): fork vector offsets... Have to think about that one.
pub line_number_spans: Vec<(usize, usize)>,
}

Expand Down Expand Up @@ -98,7 +98,7 @@ impl Display for Program {
writeln!(f, "V{}: {}", i, v)?;
}

// TODO(rdaum): print fork vectors
// TODO(): print fork vectors

// Display main vector (program); opcodes are indexed by their offset
for (i, op) in self.main_vector.iter().enumerate() {
Expand Down
2 changes: 1 addition & 1 deletion crates/compiler/src/unparse.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ impl Expr {
// directly on http://en.cppreference.com/w/cpp/language/operator_precedence
// Should be kept in sync with the pratt parser in `parse.rs`
// Starting from lowest to highest precedence...
// TODO(rdaum): drive Pratt and this from one common precedence table.
// TODO(): drive Pratt and this from one common precedence table.
let cpp_ref_prep = match self {
Expr::Scatter(_, _) | Expr::Assign { .. } => 14,
Expr::Cond { .. } => 13,
Expand Down
2 changes: 1 addition & 1 deletion crates/console-host/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ fn console_loop(

let mut rl = DefaultEditor::new().unwrap();
loop {
// TODO(rdaum): unprovoked output from the narrative stream screws up the prompt midstream,
// TODO(): unprovoked output from the narrative stream screws up the prompt midstream,
// but we have no real way to signal to this loop that it should newline for
// cleanliness. Need to figure out something for this.
let input_request_id = input_request_id.lock().unwrap().take();
Expand Down
8 changes: 4 additions & 4 deletions crates/daemon/src/rpc_server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -445,7 +445,7 @@ impl RpcServer {
args: Vec<String>,
attach: bool,
) -> Result<RpcResponse, RpcRequestError> {
// TODO(rdaum): change result of login to return this information, rather than just Objid, so
// TODO(): change result of login to return this information, rather than just Objid, so
// we're not dependent on this.
let connect_type = if args.first() == Some(&"create".to_string()) {
ConnectType::Created
Expand Down Expand Up @@ -590,7 +590,7 @@ impl RpcServer {
};

// Try to submit to do_command as a verb call first and only parse_command after that fails.
// TODO(rdaum): fold this functionality into Task.
// TODO(): fold this functionality into Task.

let arguments = parse_into_words(command.as_str());
if let Ok(task_id) = self.clone().scheduler.submit_verb_task(
Expand Down Expand Up @@ -660,7 +660,7 @@ impl RpcServer {
return Err(RpcRequestError::InternalError(e.to_string()));
}

// TODO(rdaum): do we need a new response for this? Maybe just a "Thanks"?
// TODO(): do we need a new response for this? Maybe just a "Thanks"?
Ok(RpcResponse::InputThanks)
}

Expand Down Expand Up @@ -994,7 +994,7 @@ impl RpcServer {
}
}

// TODO(rdaum): we will need to verify that the player object id inside the token is valid inside
// TODO(): we will need to verify that the player object id inside the token is valid inside
// moor itself. And really only something with a WorldState can do that. So it's not
// enough to have validated the auth token here, we will need to pepper the scheduler/task
// code with checks to make sure that the player objid is valid before letting it go
Expand Down
4 changes: 2 additions & 2 deletions crates/daemon/src/rpc_session.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,9 +29,9 @@ pub struct RpcSession {
client_id: Uuid,
rpc_server: Arc<RpcServer>,
player: Objid,
// TODO(rdaum): manage this buffer better -- e.g. if it grows too big, for long-running tasks, etc. it
// TODO(): manage this buffer better -- e.g. if it grows too big, for long-running tasks, etc. it
// should be mmap'd to disk or something.
// TODO(rdaum): We could also use Boxcar or other append-only lockless container for this, since we only
// TODO(): We could also use Boxcar or other append-only lockless container for this, since we only
// ever append.
session_buffer: Mutex<Vec<(Objid, NarrativeEvent)>>,
}
Expand Down
2 changes: 1 addition & 1 deletion crates/db/src/db_tx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ pub trait DbTransaction {
fn get_verbs(&self, obj: Objid) -> Result<VerbDefs, WorldStateError>;

/// Get the binary of the given verb.
// TODO(rdaum): "binaries" returned from the db should be SliceRefs, not Vecs.
// TODO(): "binaries" returned from the db should be SliceRefs, not Vecs.
fn get_verb_binary(&self, obj: Objid, uuid: Uuid) -> Result<Vec<u8>, WorldStateError>;

/// Find & get the verb with the given name on the given object.
Expand Down
2 changes: 1 addition & 1 deletion crates/db/src/db_worldstate.rs
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ impl WorldState for DbTxWorldState {

let owner = (owner != NOTHING).then_some(owner);

// TODO(rdaum): ownership_quota support
// TODO(): ownership_quota support
// If the intended owner of the new object has a property named `ownership_quota' and the value of that property is an integer, then `create()' treats that value
// as a "quota". If the quota is less than or equal to zero, then the quota is considered to be exhausted and `create()' raises `E_QUOTA' instead of creating an
// object. Otherwise, the quota is decremented and stored back into the `ownership_quota' property as a part of the creation of the new object.
Expand Down
2 changes: 1 addition & 1 deletion crates/db/src/odb/rb_worldstate.rs
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ impl DbTransaction for RelBoxTransaction {
}

fn get_players(&self) -> Result<ObjSet, WorldStateError> {
// TODO(rdaum): Improve get_players retrieval in world state
// TODO(): Improve get_players retrieval in world state
// this is going to be not-at-all performant in the long run, and we'll need a way to
// cache this or index it better
get_all_object_keys_matching(
Expand Down
8 changes: 4 additions & 4 deletions crates/db/src/rdb/base_relation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,16 +26,16 @@ use crate::rdb::RelationId;
stored and managed as raw byte-arrays and it is up to layers above to interpret the values
/// correctly.
///
// TODO(rdaum): Add some kind of 'type' flag to the relation & tuple values,
// TODO(): Add some kind of 'type' flag to the relation & tuple values,
// so that we can do type-checking on the values, though for our purposes this may be overkill at this time.
// TODO(rdaum): Base relations right now are not true binary relations, they are key-value pairs.
// TODO(): Base relations right now are not true binary relations, they are key-value pairs.
// all the 'seek' type operations should be returning a *set* of tuples that match, not
// a single one. right now this is behaving like a key-value pair, not a proper binary relation.
// means changing the indexes here to point to sets of tuples, not single tuples. right now
// for moor's purposes this is irrelevant, but it will be important for proper implementation of
// joins and other relational operations.
// TODO(rdaum): Indexes should be paged.
// TODO(rdaum): support ordered indexes, not just hash indexes.
// TODO(): Indexes should be paged.
// TODO(): support ordered indexes, not just hash indexes.
// if we're staying with in-memory, use an Adaptive Radix Tree; my implementation, but hopefully
// modified to support CoW/shared ownership of the tree nodes, like the im::HashMap does.
// if we're going to support on-disk indexes, use a CoW B+Tree, which I have implemented elsewhere,
Expand Down
6 changes: 3 additions & 3 deletions crates/db/src/rdb/paging/cold_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
// this program. If not, see <https://www.gnu.org/licenses/>.
//

//! TODO(rdaum): replace OkayWAL with our own WAL implementation
//! TODO(): replace OkayWAL with our own WAL implementation
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
Expand All @@ -34,7 +34,7 @@ use crate::rdb::tx::WorkingSet;

use super::backing::{BackingStoreClient, WriterMessage};

// TODO(rdaum): move "cold storage" functionality under the pager rather than above it.
// TODO(): move "cold storage" functionality under the pager rather than above it.

/// Uses WAL + custom page store as the persistent backing store & write-ahead-log for the rdb.
pub struct ColdStorage {}
Expand Down Expand Up @@ -190,7 +190,7 @@ impl ColdStorage {
// Where we stick all the page mutations we're going to write out.
let mut write_batch = vec![];

// TODO(rdaum): sequences shouldn't mutate if they haven't changed during the
// TODO(): sequences shouldn't mutate if they haven't changed during the
// transaction, so we need some kind of signal from above that they have
// changed.

Expand Down
16 changes: 8 additions & 8 deletions crates/db/src/rdb/paging/page_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
// this program. If not, see <https://www.gnu.org/licenses/>.
//

// TODO(rdaum): Robustness testing and proofing for page storage and WAL.
// TODO(): Robustness testing and proofing for page storage and WAL.
// A battery of tests is going to be needed on this, to verify the ACIDity.

use std::collections::{HashMap, HashSet};
Expand All @@ -37,7 +37,7 @@ use crate::rdb::RelationId;
/// The size of the submission queue for the io_uring, in requests.
/// We currently do not have any way to handle backpressure, so we will not be able to handle WAL
/// writes faster than this so this is set very large.
// TODO(rdaum): we should probably have a way to handle io_uring backpressure.
// TODO(): we should probably have a way to handle io_uring backpressure.
const IO_URING_SUBMISSION_Q_SIZE: u32 = 4096;

#[derive(Debug)]
Expand All @@ -62,13 +62,13 @@ pub enum PageStoreMutation {
/// Each page is a fixed size.
/// Uses io_uring to do the writes async. Reads are synchronous
///
/// TODO(rdaum): deleted pages are not destroyed, they are just left on disk, which means if the same
/// TODO(): deleted pages are not destroyed, they are just left on disk, which means if the same
/// page id is re-used, the old data could be read.
/// TODO(rdaum): right now page storage is page-per-file which is maybe not the most efficient.
/// TODO(rdaum): verify the fsync chained to writes via io_uring is actually working, and that
/// TODO(): right now page storage is page-per-file which is maybe not the most efficient.
/// TODO(): verify the fsync chained to writes via io_uring is actually working, and that
/// the durability guarantees are, at least approximately, correct.
/// TODO(rdaum): we'll need reads once eviction/paging is implemented.
/// TODO(rdaum): we should have CRCs on disk-bound pages, and verify them on reads.
/// TODO(): we'll need reads once eviction/paging is implemented.
/// TODO(): we should have CRCs on disk-bound pages, and verify them on reads.
/// could live in the page header maybe
pub struct PageStore {
Expand Down Expand Up @@ -297,7 +297,7 @@ impl PageStore {

// Open all the pages mentioned in the batch and index them by page id so we only have one file descriptor
// open per page file.
// TODO(rdaum): it's possible to preregister file descriptors with io_uring to potentially speed things up, but
// TODO(): it's possible to preregister file descriptors with io_uring to potentially speed things up, but
// I had some tricky issues with that, so for now we'll just open a new file descriptor for each batch.
let mut pages = HashMap::new();
for mutation in &batch {
Expand Down
4 changes: 2 additions & 2 deletions crates/db/src/rdb/paging/slotted_page.rs
Original file line number Diff line number Diff line change
Expand Up @@ -362,7 +362,7 @@ impl<'a> SlottedPage<'a> {
}

fn remove_slot(&self, slot_id: SlotId) -> Result<(usize, usize, bool), TupleBoxError> {
// TODO(rdaum): slots at start of content-length can be removed by shrinking the content-length
// TODO(): slots at start of content-length can be removed by shrinking the content-length
// portion.

let mut index_entry = self.get_index_entry_mut(slot_id);
Expand All @@ -376,7 +376,7 @@ impl<'a> SlottedPage<'a> {
let mut header = self.header_mut();
header.as_mut().sub_used(slot_size);

// TODO(rdaum): join adjacent free tuple slots.
// TODO(): join adjacent free tuple slots.
// Likewise at insert, support splitting slots.
let is_empty = header.used_bytes == 0;
if is_empty {
Expand Down
12 changes: 6 additions & 6 deletions crates/db/src/rdb/paging/tuple_box.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,16 +12,16 @@
// this program. If not, see <https://www.gnu.org/licenses/>.
//

// TODO(rdaum): add fixed-size slotted page impl for Sized items,
// TODO(): add fixed-size slotted page impl for Sized items,
// should be way more efficient for the most common case of fixed-size tuples.
// TODO(rdaum): implement the ability to expire and page-out tuples
// TODO(): implement the ability to expire and page-out tuples
// based on LRU or random/second
// chance eviction (ala leanstore). will require separate PageIds from Bids, and will
// involve rewriting TuplePtr on the fly to point to a new page when restored.
// TuplePtr will also get a new field for last-access-time, so that we can do our eviction
// TODO(rdaum): verify locking/concurrency safety of the pager & tuple storage
// TODO(): verify locking/concurrency safety of the pager & tuple storage
// loom test, stateright, or jepsen, etc.
// TODO(rdaum): improve dynamic slot allocation packing in slotted page
// TODO(): improve dynamic slot allocation packing in slotted page
// there is still some really gross stuff in here about the management of free space in
// pages in the allocator list. It's probably causing excessive fragmentation because we're
// considering only the reported available "content" area when fitting slots, and there seems
Expand Down Expand Up @@ -196,7 +196,7 @@ impl TupleBox {
}

struct Inner {
// TODO(rdaum): Consolidate page allocation vs buffer locking
// TODO(): Consolidate page allocation vs buffer locking
// buffer pool has its own locks per size class, so we might not need this inside another lock
// *but* the other two items here are not thread-safe, and we need to maintain consistency across the three.
// so we can maybe get rid of the locks in the buffer pool...
Expand Down Expand Up @@ -432,7 +432,7 @@ impl Inner {
}
}

// TODO(rdaum): initial textdump load seems to have a problem with initial inserts having a too-low refcount?
// TODO(): initial textdump load seems to have a problem with initial inserts having a too-low refcount?
// but once the DB is established, it's fine. So maybe this is a problem with insert tuple allocation?
warn!(
"Page not found in used pages in allocator on free; pid {}; could be double-free, dangling weak reference?",
Expand Down
4 changes: 2 additions & 2 deletions crates/db/src/rdb/paging/tuple_ptr.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ use crate::rdb::tuples::TupleId;
/// Adds a layer of indirection to each tuple access, but is better than passing around tuple ids + TupleBox
/// references.
// TODO(rdaum): rather than decoding a tuple out of a buffer in a slot, the slot should just hold the tuple structure
// TODO(): rather than decoding a tuple out of a buffer in a slot, the slot should just hold the tuple structure
pub struct TuplePtr {
tb: Arc<TupleBox>,
id: TupleId,
Expand Down Expand Up @@ -101,7 +101,7 @@ impl TuplePtr {

#[inline]
pub(crate) fn as_mut_ptr<T>(&mut self) -> *mut T {
// TODO(rdaum): if the ptr is null, this is a page fault, and we'll
// TODO(): if the ptr is null, this is a page fault, and we'll
// need to ask the tuplebox to ask the pager to page us in
self.bufaddr.load(std::sync::atomic::Ordering::SeqCst) as *mut T
}
Expand Down
2 changes: 1 addition & 1 deletion crates/db/src/rdb/paging/wal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ binary_layout!(wal_entry_header, LittleEndian, {
relation_id: u8,
});

// TODO(rdaum): use builder pattern for WAL entry construction
// TODO(): use builder pattern for WAL entry construction
#[allow(clippy::too_many_arguments)]
pub fn make_wal_entry<BF: FnMut(&mut [u8])>(
typ: WalEntryType,
Expand Down
2 changes: 1 addition & 1 deletion crates/db/src/rdb/pool/buffer_pool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ use crate::rdb::pool::size_class::SizeClass;
use crate::rdb::pool::{Bid, PagerError};

// 32k -> 1MB page sizes supported.
// TODO(rdaum): Handle storage of big-values / big-pages / blobs
// TODO(): Handle storage of big-values / big-pages / blobs
// If we end up with values bigger than 1MB, they should probably be handled by "external" pages,
// that is, pages that are not part of the buffer pool, but are instead read directly from file
// references as needed, because they are likely to just thrash the crap out of the buffer pool
Expand Down
Loading

0 comments on commit f0ad4fd

Please sign in to comment.