diff --git a/.noindex b/.noindex new file mode 100644 index 0000000..0211da7 --- /dev/null +++ b/.noindex @@ -0,0 +1,11 @@ +ui/node_modules/** +ui/dist/** +ui/workdir/** +ui/pkg/** +ui/public/** +wasm/target/** +rec/target/** +cmp/target/** +parser/target/** +mcp/target/** +wasm/pkg/** diff --git a/README.md b/README.md index 545ebfe..ce09ad8 100644 --- a/README.md +++ b/README.md @@ -61,6 +61,18 @@ echo $? The exit code will be 0 if the test passed (outputs match) or 1 if there are differences. +## Web-based UI + +CLT includes a web-based user interface for editing and running tests. It provides an intuitive way to manage test files, with real-time pattern matching and diff comparison. To learn more about the UI, its features, and how to set it up, see the [UI documentation](ui/README.md). + +Key UI features include: +- File tree explorer for navigating test files +- Real-time comparison with pattern matching +- Docker image configuration for test validation +- GitHub authentication for access control + +We use bash to launch an interactive environment when you record a test. Note that we reset the environment to ensure maximum compatibility across operating systems. As of now, there is no way to pass environment variables from outside into the test environment. + ### Refining Tests After recording, you can refine your test to handle dynamic outputs: diff --git a/bin/aarch64/clt-mcp b/bin/aarch64/clt-mcp new file mode 100755 index 0000000..2fccf72 Binary files /dev/null and b/bin/aarch64/clt-mcp differ diff --git a/bin/aarch64/cmp b/bin/aarch64/cmp index 1d06cbb..25a0cd2 100755 Binary files a/bin/aarch64/cmp and b/bin/aarch64/cmp differ diff --git a/bin/aarch64/rec b/bin/aarch64/rec index 10755e4..3ad8619 100755 Binary files a/bin/aarch64/rec and b/bin/aarch64/rec differ diff --git a/bin/cross-build b/bin/cross-build index 0c267e7..05ae6ff 100755 --- a/bin/cross-build +++ b/bin/cross-build @@ -1,17 +1,17 @@ #!/usr/bin/env bash set -e - -for folder in rec cmp; do - cd $folder +declare -A bin_map +bin_map=([rec]="rec" [cmp]="cmp" [mcp]="clt-mcp") +for folder in rec cmp mcp; do # https://github.com/joseluisq/rust-linux-darwin-builder - parser_src="$(pwd)/../parser" - docker run --network host --rm -v "${parser_src}:/root/parser" -v "$(pwd):/root/src" -w /root/src -it joseluisq/rust-linux-darwin-builder:1.84.1 bash -c "\ + docker run --network host --rm -v "$(pwd):/root/src" -w /root/src -it joseluisq/rust-linux-darwin-builder:1.84.1 bash -c "\ + cd $folder; \ cargo clean; \ cargo build --target=x86_64-unknown-linux-musl --release; \ cargo build --target=aarch64-unknown-linux-musl --release; \ " - cd ..
+ binary=${bin_map[$folder]} # Copy binaries - cp "$folder/target/x86_64-unknown-linux-musl/release/$folder" bin/x86_64/ - cp "$folder/target/aarch64-unknown-linux-musl/release/$folder" bin/aarch64/ + cp "$folder/target/x86_64-unknown-linux-musl/release/$binary" bin/x86_64/ + cp "$folder/target/aarch64-unknown-linux-musl/release/$binary" bin/aarch64/ done diff --git a/bin/x86_64/clt-mcp b/bin/x86_64/clt-mcp new file mode 100755 index 0000000..0dee942 Binary files /dev/null and b/bin/x86_64/clt-mcp differ diff --git a/bin/x86_64/cmp b/bin/x86_64/cmp index 0e31ef6..8207b7b 100755 Binary files a/bin/x86_64/cmp and b/bin/x86_64/cmp differ diff --git a/bin/x86_64/rec b/bin/x86_64/rec index d02b0da..3329dc0 100755 Binary files a/bin/x86_64/rec and b/bin/x86_64/rec differ diff --git a/cmp/Cargo.lock b/cmp/Cargo.lock index 141cd8d..2f05b86 100644 --- a/cmp/Cargo.lock +++ b/cmp/Cargo.lock @@ -107,6 +107,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + [[package]] name = "lazy_static" version = "1.4.0" @@ -154,6 +160,26 @@ version = "0.1.0" dependencies = [ "anyhow", "regex", + "serde", + "serde_json", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", ] [[package]] @@ -198,6 +224,55 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "syn" +version = "2.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "tempfile" version = "3.16.0" @@ -221,6 +296,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + [[package]] name = "wasi" version = "0.13.3+wasi-0.2.2" diff --git a/cmp/src/lib.rs b/cmp/src/lib.rs index b7edd82..67863fa 100644 --- a/cmp/src/lib.rs +++ 
b/cmp/src/lib.rs @@ -102,7 +102,7 @@ impl PatternMatcher { let mut config: HashMap<String, String> = HashMap::new(); let file_path = std::path::Path::new(&file_name); - let file = File::open(&file_path)?; + let file = File::open(file_path)?; let reader = BufReader::new(file); for line in reader.lines() { diff --git a/cmp/src/main.rs b/cmp/src/main.rs index 2a8c732..87a081a 100644 --- a/cmp/src/main.rs +++ b/cmp/src/main.rs @@ -20,7 +20,6 @@ use std::env; use std::path::Path; use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}; use std::io::Write; -use tempfile; // Import from lib pub use cmp::{PatternMatcher, MatchingPart}; @@ -118,7 +117,7 @@ fn main() { }, parser::Statement::Output => { - let pos = file1_reader.seek(SeekFrom::Current(0)).unwrap(); + let pos = file1_reader.stream_position().unwrap(); let mut line = String::new(); file1_reader.read_line(&mut line).unwrap(); @@ -203,24 +202,22 @@ fn main() { print_diff(&mut stdout, l1, Diff::Minus); print_diff(&mut stdout, l2, Diff::Plus); } - } else if debug_mode { + } else { writeln!(stdout, "{}", l1).unwrap(); } }, _ => {}, } } - } else { - if debug_mode { - // Show all lines when in debug mode - for line in &lines1 { - writeln!(stdout, "{}", line).unwrap(); - } - } else { - // Just show OK when no differences and not in debug mode - writeln!(stdout, "OK").unwrap(); - } - } } + } else if debug_mode { + // Show all lines when in debug mode + for line in &lines1 { + writeln!(stdout, "{}", line).unwrap(); + } + } else { + // Just show OK when no differences and not in debug mode + writeln!(stdout, "OK").unwrap(); + } } } _ => { // For any other section we simply print the next line from either file. @@ -241,7 +238,7 @@ fn move_cursor_to_line(reader: &mut R, command_prefix: &str) let mut line = String::new(); loop { - let pos = reader.seek(SeekFrom::Current(0))?; + let pos = reader.stream_position()?; let len = reader.read_line(&mut line)?; if len == 0 { @@ -261,7 +258,7 @@ /// Peek the statement and return it from the given reader fn peek_statement<R: BufRead + Seek>(reader: &mut R) -> io::Result<Option<parser::Statement>> { - let pos = reader.seek(SeekFrom::Current(0))?; + let pos = reader.stream_position()?; let mut line = String::new(); let len = reader.read_line(&mut line)?; reader.seek(SeekFrom::Start(pos))?; @@ -281,7 +278,7 @@ fn buffer_block<R: BufRead + Seek>(reader: &mut R) -> io::Result<Vec<String>> { let mut parsed = false; loop { - let pos = reader.seek(SeekFrom::Current(0))?; + let pos = reader.stream_position()?; line.clear(); let len = reader.read_line(&mut line)?; if len == 0 { @@ -315,7 +312,7 @@ /// fn reader_at_end<R: BufRead>(reader: &mut R) -> bool { match reader.fill_buf() { - Ok(buf) if buf.is_empty() => true, + Ok([]) => true, _ => false, } } @@ -428,7 +425,7 @@ fn block_has_differences(lines1: &[String], lines2: &[String], pattern_matcher: // A helper to skip non-Input/Output blocks in the reader. fn skip_non_command_blocks<R: BufRead + Seek>(reader: &mut R) -> io::Result<()> { loop { - let pos = reader.seek(SeekFrom::Current(0))?; + let pos = reader.stream_position()?; if let Some(stmt) = peek_statement(reader)? { if stmt == parser::Statement::Input || stmt == parser::Statement::Output { // Found a valid command block, so rewind and exit. diff --git a/lib/container.sh b/lib/container.sh index ac40451..2f02d7d 100644 --- a/lib/container.sh +++ b/lib/container.sh @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -e +# !
We are handling exit codes so we cannot use set -e here # Detect proper path to the binary to run ARCH=$(arch) @@ -27,11 +27,13 @@ container_exec() { directory=${3:-tests} interactive=${4:-} if [ ! -d "$directory" ]; then - >&2 echo "Directory with tests does not exist: $directory" && exit 1 + >&2 echo "Directory with tests does not exist: $directory" + return 1 fi if [ -z "$image" ] || [ -z "$command" ]; then - >&2 echo 'Usage: container_exec "image" "command"' && exit 1 + >&2 echo 'Usage: container_exec "image" "command"' + return 1 fi # Merge base of patterns @@ -63,14 +65,21 @@ container_exec() { if [ -n "$interactive" ]; then eval "$process" + exit_code=$? else eval "$process" & pid=$! trap "kill -s INT '$pid'; exit 130" SIGINT trap "kill -s TERM '$pid'; exit 143" SIGTERM wait "$pid" + exit_code=$? trap - SIGINT SIGTERM - wait "$pid" fi + + # Clean up temp file + rm -f "$temp_file" + + return $exit_code } + diff --git a/lib/rec.sh b/lib/rec.sh index a6e6eee..83a8826 100644 --- a/lib/rec.sh +++ b/lib/rec.sh @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -e +# ! We are handling exit codes so we cannot use set -e here source "$PROJECT_DIR/lib/container.sh" # Run recording of a new test in container with specified Docker image @@ -23,7 +23,8 @@ record() { image=$1 record_file=$2 if [ -z "$image" ] || [ -z "$record_file" ]; then - >&2 echo 'Usage: record "image" "record_file"' && exit 1 + >&2 echo 'Usage: record "image" "record_file"' + return 1 fi # Validate that record_file dir exists and create if not @@ -34,13 +35,15 @@ record() { # Validate that record file does not exist if [ -f "$record_file" ]; then - >&2 echo "File to record exists, please, remove it first: $record_file" && exit 1 + >&2 echo "File to record exists, please, remove it first: $record_file" + return 1 fi echo "Recording data to file: $record_file" echo "Run commands one by one and after you finish press ^D to save" container_exec "$image" "clt-rec -O '$record_file'" "$record_dir" "1" + return $? } # Replay recorded test from the file @@ -49,11 +52,13 @@ replay() { record_file=$2 delay=${3:-$DEFAULT_DELAY} if [ -z "$image" ] || [ -z "$record_file" ]; then - >&2 echo 'Usage: replay "image" "record_file"' && exit 1 + >&2 echo 'Usage: replay "image" "record_file"' + return 1 fi if [ ! -f "$record_file" ]; then - >&2 echo "The record file does not exist: $record_file" && exit 1 + >&2 echo "The record file does not exist: $record_file" + return 5 # Return validation error exit code fi record_dir=$(dirname "${record_file}" | cut -d/ -f1) @@ -69,6 +74,7 @@ replay() { cmd+=("-p" "$prompt") done container_exec "$image" "${cmd[*]}" "$record_dir" + return $? } # Run compare binary @@ -78,7 +84,8 @@ compare() { replay_file=$3 no_color=$4 if [ -z "$image" ] || [ -z "$record_file" ] || [ -z "$replay_file" ]; then - >&2 echo 'Usage: compare "image" "record_file" "replay_file"' && exit 1 + >&2 echo 'Usage: compare "image" "record_file" "replay_file"' + return 1 fi prefix= @@ -89,6 +96,7 @@ compare() { record_dir=$(dirname "${record_file}" | cut -d/ -f1) # We validate file existence in cmp tool, so it's fine to skip it here container_exec "$image" "${prefix}clt-cmp '$record_file' '$replay_file'" "$record_dir" + return $? 
} # Replay recorded test and launch refine @@ -101,26 +109,37 @@ refine() { fi if [ -z "$editor" ]; then - >&2 echo 'You need an editor installed to run refine process' && exit 1 + >&2 echo 'You need an editor installed to run refine process' + return 1 fi # Validate input args image=$1 record_file=$2 if [ -z "$image" ] || [ -z "$record_file" ]; then - >&2 echo 'Usage: refine "image" "record_file"' && exit 1 + >&2 echo 'Usage: refine "image" "record_file"' + return 1 fi if [ ! -f "$record_file" ]; then - >&2 echo "The record file does not exist: $record_file" && exit 1 + >&2 echo "The record file does not exist: $record_file" + return 5 # Return validation error exit code fi replay_file="${record_file%.*}.rep" replay "$image" "$record_file" - compare "$image" "$record_file" "$replay_file" "1" > "$record_file.cmp" 2>&1 || true - mv -f "$record_file.cmp" "$record_file" - $editor "$record_file" + replay_exit_code=$? + + # Only proceed with compare if replay was successful + if [ $replay_exit_code -eq 0 ]; then + compare "$image" "$record_file" "$replay_file" "1" > "$record_file.cmp" 2>&1 || true + mv -f "$record_file.cmp" "$record_file" + $editor "$record_file" + return $? + else + return $replay_exit_code + fi } # Replay and test against record file with cmp tool @@ -131,20 +150,43 @@ test() { show_diff=${3:-0} delay=${4:-$DEFAULT_DELAY} if [ -z "$image" ] || [ -z "$record_file" ]; then - >&2 echo 'Usage: test "image" "record_file"' && exit 1 + >&2 echo 'Usage: test "image" "record_file"' + return 1 fi if [ ! -f "$record_file" ]; then - >&2 echo "The record file does not exist: $record_file" && exit 1 + >&2 echo "The record file does not exist: $record_file" + return 5 # Return validation error exit code fi replay_file="${record_file%.*}.rep" replay "$image" "$record_file" "$delay" + replay_exit_code=$? + + # Only proceed with compare if replay was successful + if [ $replay_exit_code -ne 0 ]; then + return $replay_exit_code + fi + output="${record_file%.*}.cmp" + + # Detect if the terminal supports color + if [ -t 1 ] && [ "$(tput colors 2>/dev/null || echo 0)" -ge 8 ]; then + no_color= + else + no_color="1" + fi + + if [ -n "$CLT_NO_COLOR" ]; then + no_color="1" + fi + if [ "$show_diff" -eq 1 ]; then - compare "$image" "$record_file" "$replay_file" 2>&1 + compare "$image" "$record_file" "$replay_file" "$no_color" 2>&1 + return $? else - compare "$image" "$record_file" "$replay_file" > "$output" 2>&1 + compare "$image" "$record_file" "$replay_file" "$no_color" > "$output" 2>&1 + return $?
fi } diff --git a/mcp/.gitignore b/mcp/.gitignore new file mode 100644 index 0000000..54466f5 --- /dev/null +++ b/mcp/.gitignore @@ -0,0 +1,2 @@ +/target + diff --git a/mcp/Cargo.lock b/mcp/Cargo.lock index a9565fe..6dc0e15 100644 --- a/mcp/Cargo.lock +++ b/mcp/Cargo.lock @@ -441,6 +441,8 @@ version = "0.1.0" dependencies = [ "anyhow", "regex", + "serde", + "serde_json", ] [[package]] diff --git a/mcp/Cargo.toml b/mcp/Cargo.toml index 3cdfdc0..876e663 100644 --- a/mcp/Cargo.toml +++ b/mcp/Cargo.toml @@ -43,17 +43,4 @@ tokio-test = "0.4" strip = true opt-level = "z" lto = true -codegen-units = 1 - -# Static linking configuration -[target.x86_64-unknown-linux-musl] -rustflags = ["-C", "target-feature=+crt-static"] - -[target.aarch64-unknown-linux-musl] -rustflags = ["-C", "target-feature=+crt-static"] - -[target.x86_64-apple-darwin] -rustflags = ["-C", "target-feature=+crt-static"] - -[target.aarch64-apple-darwin] -rustflags = ["-C", "target-feature=+crt-static"] \ No newline at end of file +codegen-units = 1 \ No newline at end of file diff --git a/mcp/src/main.rs b/mcp/src/main.rs index 9dcf386..8dfc12f 100644 --- a/mcp/src/main.rs +++ b/mcp/src/main.rs @@ -1,19 +1,15 @@ mod mcp_protocol; mod pattern_refiner; -mod structured_test; mod test_runner; +mod server; -use mcp_protocol::*; -use pattern_refiner::PatternRefiner; -use test_runner::TestRunner; +// External crates +use cmp; +use parser; use anyhow::Result; use clap::Parser; -use serde::Deserialize; -use serde_json::{json, Value}; -use std::collections::HashMap; -use std::fs; -use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader as AsyncBufReader}; +use server::McpServer; /// CLT MCP Server - Model Context Protocol server for Command Line Tester #[derive(Parser, Debug)] @@ -50,2028 +46,6 @@ struct Args { workdir_path: Option, } -#[derive(Debug)] -struct McpServer { - #[allow(dead_code)] - docker_image: String, - clt_binary_path: Option, - workdir_path: String, - test_runner: TestRunner, - pattern_refiner: PatternRefiner, -} - -impl McpServer { - fn new( - docker_image: String, - clt_binary_path: Option, - workdir_path: Option, - ) -> Result { - // Resolve working directory - use provided path or current directory - let workdir_path = match workdir_path { - Some(path) => { - let path_buf = std::path::PathBuf::from(&path); - if !path_buf.exists() { - return Err(anyhow::anyhow!( - "Working directory does not exist: {}", - path - )); - } - if !path_buf.is_dir() { - return Err(anyhow::anyhow!( - "Working directory path is not a directory: {}", - path - )); - } - // Convert to absolute path - std::fs::canonicalize(path_buf) - .map_err(|e| { - anyhow::anyhow!("Failed to resolve working directory path: {}", e) - })? - .to_string_lossy() - .to_string() - } - None => { - // Use current working directory - std::env::current_dir() - .map_err(|e| anyhow::anyhow!("Failed to get current working directory: {}", e))? 
- .to_string_lossy() - .to_string() - } - }; - - let test_runner = TestRunner::new( - docker_image.clone(), - clt_binary_path.clone(), - workdir_path.clone(), - )?; - let pattern_refiner = PatternRefiner::new()?; - - Ok(Self { - docker_image, - clt_binary_path, - workdir_path, - test_runner, - pattern_refiner, - }) - } - - async fn run(&mut self) -> Result<()> { - let stdin = tokio::io::stdin(); - let mut reader = AsyncBufReader::new(stdin); - let mut stdout = tokio::io::stdout(); - - let mut line = String::new(); - loop { - line.clear(); - - // Handle EOF or read errors gracefully - let bytes_read = match reader.read_line(&mut line).await { - Ok(0) => break, // EOF - client disconnected - Ok(n) => n, - Err(e) => { - // Check if it's a broken pipe or connection reset - if e.kind() == std::io::ErrorKind::BrokenPipe - || e.kind() == std::io::ErrorKind::ConnectionReset - || e.kind() == std::io::ErrorKind::ConnectionAborted - { - // Client disconnected - exit gracefully - break; - } - // For other errors, continue trying - continue; - } - }; - - if bytes_read == 0 { - break; // EOF - } - - // Parse JSON and handle errors properly - let response = match serde_json::from_str::(line.trim()) { - Ok(request) => self.handle_request(request).await, - Err(_) => { - // Send error response for malformed JSON - McpResponse::error(None, -32700, "Parse error: Invalid JSON".to_string()) - } - }; - - // Send response with proper error handling - if let Err(e) = self.send_response(&mut stdout, &response).await { - // Check if it's a broken pipe or connection issue - if e.kind() == std::io::ErrorKind::BrokenPipe - || e.kind() == std::io::ErrorKind::ConnectionReset - || e.kind() == std::io::ErrorKind::ConnectionAborted - { - // Client disconnected - exit gracefully - break; - } - // For other errors, continue trying - continue; - } - } - - Ok(()) - } - - async fn send_response( - &self, - stdout: &mut tokio::io::Stdout, - response: &McpResponse, - ) -> std::io::Result<()> { - let response_json = serde_json::to_string(response) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; - - stdout.write_all(response_json.as_bytes()).await?; - stdout.write_all(b"\n").await?; - stdout.flush().await?; - - Ok(()) - } - - async fn handle_request(&mut self, request: McpRequest) -> McpResponse { - match request.method.as_str() { - "initialize" => self.handle_initialize(request.id, request.params), - "tools/list" => self.handle_tools_list(request.id), - "tools/call" => self.handle_tools_call(request.id, request.params).await, - _ => McpResponse::error( - request.id, - -32601, - format!("Method not found: {}", request.method), - ), - } - } - - fn handle_initialize(&self, id: Option, _params: Option) -> McpResponse { - let result = InitializeResult { - protocol_version: "2024-11-05".to_string(), - capabilities: ServerCapabilities { - tools: Some(HashMap::new()), - }, - server_info: ServerInfo { - name: "CLT MCP Server".to_string(), - version: "0.1.0 - Command Line Tester integration for automated testing of CLI applications in Docker containers with pattern matching support".to_string(), - }, - }; - - McpResponse::success(id, json!(result)) - } - - fn handle_tools_list(&self, id: Option) -> McpResponse { - let tools = vec![ - McpTool { - name: "run_test".to_string(), - description: format!("Execute a CLT test file in a Docker container and return the results. Compares actual output with expected output and reports success/failure. 
The docker_image parameter is optional and defaults to '{}' (configured when the MCP server was started).", self.docker_image), - input_schema: json!({ - "type": "object", - "properties": { - "test_file": { - "type": "string", - "description": "Path to the test file to execute" - }, - "docker_image": { - "type": "string", - "description": format!("Docker image to use for test execution. Optional - defaults to '{}' if not specified.", self.docker_image), - "default": self.docker_image - } - }, - "required": ["test_file"], - "additionalProperties": false - }), - }, - McpTool { - name: "refine_output".to_string(), - description: "Analyze differences between expected and actual command outputs, then suggest patterns to handle dynamic content. This tool uses diff analysis to identify parts that change between test runs (like timestamps, PIDs, version numbers) and suggests compatible patterns to make tests more robust. Use this when test outputs contain dynamic data that changes between runs.".to_string(), - input_schema: json!({ - "type": "object", - "properties": { - "expected": { - "type": "string", - "description": "The expected output string from your test. This can already contain patterns for dynamic content. Example: 'Process started with PID 1234'" - }, - "actual": { - "type": "string", - "description": "The actual output string that was produced during test execution. This is what you want to compare against the expected output. Example: 'Process started with PID 5678'" - } - }, - "required": ["expected", "actual"], - "additionalProperties": false - }), - }, - McpTool { - name: "test_match".to_string(), - description: "Compare expected vs actual output strings using pattern matching. This tool understands pattern syntax and performs intelligent matching that can handle dynamic content. It returns a clear line-by-line diff showing exactly what differs between expected and actual output, similar to git diff format. Use this to validate if test outputs match expectations, especially when they contain patterns for dynamic data.".to_string(), - input_schema: json!({ - "type": "object", - "properties": { - "expected": { - "type": "string", - "description": "Expected output string with optional patterns. Patterns can match dynamic content like version numbers, IP addresses, dates, times, and custom regex patterns. Example: 'Server started on %{IPADDR} at %{TIME}'" - }, - "actual": { - "type": "string", - "description": "Actual output string to compare against the expected pattern. This should be the literal text output from your command or application. Example: 'Server started on 192.168.1.100 at 14:30:22'" - } - }, - "required": ["expected", "actual"], - "additionalProperties": false - }), - }, - McpTool { - name: "clt_help".to_string(), - description: "Get comprehensive documentation about CLT (Command Line Tester) concepts, testing workflows, pattern syntax, and examples. This tool provides detailed explanations of how CLT works and step-by-step examples for common testing scenarios. Use this to understand CLT concepts before using other tools.".to_string(), - input_schema: json!({ - "type": "object", - "properties": { - "topic": { - "type": "string", - "description": "Help topic to explain. 
Options: 'overview' (CLT introduction), 'test_format' (structured test format guide), 'patterns' (pattern syntax guide), 'blocks' (reusable test blocks), 'workflow' (testing workflow), 'examples' (practical examples), 'troubleshooting' (common issues), 'structured_tests' (AI-friendly JSON format)", - "enum": ["overview", "test_format", "patterns", "blocks", "workflow", "examples", "troubleshooting", "structured_tests"] - } - }, - "required": ["topic"], - "additionalProperties": false - }), - }, - McpTool { - name: "get_patterns".to_string(), - description: "Get all available patterns for the current CLT project. Returns predefined patterns that can be used in test outputs for dynamic content matching.".to_string(), - input_schema: json!({ - "type": "object", - "properties": {}, - "additionalProperties": false - }), - }, - McpTool { - name: "read_test".to_string(), - description: "Read a CLT test file and return its structured representation. The test is returned as a sequence of steps including commands, expected outputs, comments, and reusable blocks.".to_string(), - input_schema: json!({ - "type": "object", - "properties": { - "test_file": { - "type": "string", - "description": "Path to the test file to read" - } - }, - "required": ["test_file"], - "additionalProperties": false - }), - }, - McpTool { - name: "write_test".to_string(), - description: "Write a CLT test file from structured format. Creates a test file that can be executed with run_test. Supports commands, expected outputs, comments, and reusable blocks.".to_string(), - input_schema: json!({ - "type": "object", - "properties": { - "test_file": { - "type": "string", - "description": "Path where the test file should be written" - }, - "test_structure": { - "type": "object", - "description": "Structured test definition", - "properties": { - "description": { - "type": "string", - "description": "Optional description text that appears at the beginning of the test file. Can be multiline." - }, - "steps": { - "type": "array", - "description": "Sequence of test steps", - "items": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": ["input", "output", "comment", "block"], - "description": "Type of step: input (command to execute), output (expected result), comment (documentation), block (reusable test sequence)" - }, - "args": { - "type": "array", - "items": {"type": "string"}, - "description": "Arguments for the statement. For output: optional custom checker name. For block: relative path to block file." - }, - "content": { - "type": ["string", "null"], - "description": "Content of the step. Command text for input, expected output for output, comment text for comment, null for block." - }, - "steps": { - "type": "array", - "description": "Nested steps for block types (resolved block content)" - } - }, - "required": ["type", "args"] - } - } - }, - "required": ["steps"] - } - }, - "required": ["test_file", "test_structure"], - "additionalProperties": false - }), - }, - McpTool { - name: "update_test".to_string(), - description: "Replace specific test steps in an existing CLT test file. Finds the old test structure and replaces it with the new test structure. Returns error if old structure is not found or matches multiple locations.".to_string(), - input_schema: json!({ - "type": "object", - "properties": { - "test_file": { - "type": "string", - "description": "Path to the test file to modify" - }, - "old_test_structure": { - "type": "object", - "description": "Test structure to find and replace. 
Must match exactly in the original file.", - "properties": { - "description": { - "type": "string", - "description": "Optional description text (not used for matching, only for context)" - }, - "steps": { - "type": "array", - "description": "Sequence of test steps to find and replace. Must match exactly.", - "items": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": ["input", "output", "comment", "block"], - "description": "Type of step" - }, - "args": { - "type": "array", - "items": {"type": "string"}, - "description": "Arguments for the step" - }, - "content": { - "type": ["string", "null"], - "description": "Content of the step" - }, - "steps": { - "type": "array", - "description": "Nested steps for block types" - } - }, - "required": ["type", "args"] - } - } - }, - "required": ["steps"] - }, - "new_test_structure": { - "type": "object", - "description": "Test structure to replace the old structure with", - "properties": { - "description": { - "type": "string", - "description": "Optional description text. If provided, will replace the file's description." - }, - "steps": { - "type": "array", - "description": "New sequence of test steps", - "items": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": ["input", "output", "comment", "block"], - "description": "Type of step" - }, - "args": { - "type": "array", - "items": {"type": "string"}, - "description": "Arguments for the step" - }, - "content": { - "type": ["string", "null"], - "description": "Content of the step" - }, - "steps": { - "type": "array", - "description": "Nested steps for block types" - } - }, - "required": ["type", "args"] - } - } - }, - "required": ["steps"] - } - }, - "required": ["test_file", "old_test_structure", "new_test_structure"], - "additionalProperties": false - }), - }, - McpTool { - name: "append_test".to_string(), - description: "Append new test steps to an existing CLT test file. Adds the new steps to the end of the existing test file while preserving the original content.".to_string(), - input_schema: json!({ - "type": "object", - "properties": { - "test_file": { - "type": "string", - "description": "Path to the test file to modify" - }, - "test_structure": { - "type": "object", - "description": "Test structure to append to the existing file", - "properties": { - "description": { - "type": "string", - "description": "Optional description text. Only used if the original file has no description." 
- }, - "steps": { - "type": "array", - "description": "Sequence of test steps to append", - "items": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": ["input", "output", "comment", "block"], - "description": "Type of step" - }, - "args": { - "type": "array", - "items": {"type": "string"}, - "description": "Arguments for the step" - }, - "content": { - "type": ["string", "null"], - "description": "Content of the step" - }, - "steps": { - "type": "array", - "description": "Nested steps for block types" - } - }, - "required": ["type", "args"] - } - } - }, - "required": ["steps"] - } - }, - "required": ["test_file", "test_structure"], - "additionalProperties": false - }), - }, - ]; - - let result = json!({ - "tools": tools - }); - - McpResponse::success(id, result) - } - - async fn handle_tools_call(&mut self, id: Option, params: Option) -> McpResponse { - let params = match params { - Some(p) => p, - None => return McpResponse::error(id, -32602, "Missing parameters".to_string()), - }; - - let tool_call: ToolCallParams = match serde_json::from_value(params) { - Ok(tc) => tc, - Err(e) => return McpResponse::error(id, -32602, format!("Invalid parameters: {}", e)), - }; - - let result = match self - .execute_tool(&tool_call.name, tool_call.arguments) - .await - { - Ok(content) => ToolCallResult { - content: vec![ToolContent { - content_type: "text".to_string(), - text: content, - }], - is_error: None, - }, - Err(e) => ToolCallResult { - content: vec![ToolContent { - content_type: "text".to_string(), - text: format!("Error: {}", e), - }], - is_error: Some(true), - }, - }; - - McpResponse::success(id, json!(result)) - } - - async fn execute_tool(&mut self, tool_name: &str, arguments: Option) -> Result { - // Wrap the entire tool execution in a comprehensive error handler - let result = match tool_name { - "run_test" => { - let input: RunTestInput = serde_json::from_value( - arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, - )?; - - // Safely resolve test path with proper error handling - let resolved_test_path = match self.resolve_test_path(&input.test_file) { - Ok(path) => path, - Err(e) => { - // Return a structured error response instead of crashing - let error_output = json!({ - "tool": "run_test", - "description": "CLT test execution failed during path resolution", - "test_file": input.test_file, - "result": { - "success": false, - "errors": [{ - "command": "path_resolution", - "expected": "Valid test file path", - "actual": format!("Path resolution failed: {}", e), - "step": 0 - }], - "summary": format!("Path resolution error: {}", e) - }, - "help": { - "error_type": "path_resolution", - "suggestion": "Check that the test file path is correct and accessible", - "working_directory": self.workdir_path - } - }); - return Ok(serde_json::to_string_pretty(&error_output)?); - } - }; - - // Safely execute test with proper error handling - let output = match self - .test_runner - .run_test(&resolved_test_path, input.docker_image.as_deref()) - { - Ok(result) => result, - Err(e) => { - // Convert test runner errors to structured output - let error_output = json!({ - "tool": "run_test", - "description": "CLT test execution failed", - "test_file": input.test_file, - "result": { - "success": false, - "errors": [{ - "command": "test_execution", - "expected": "Successful test execution", - "actual": format!("Test execution failed: {}", e), - "step": 0 - }], - "summary": format!("Test execution error: {}", e) - }, - "help": { - "error_type": "test_execution", - 
"suggestion": "Check CLT binary path, Docker availability, and test file format", - "working_directory": self.workdir_path - } - }); - return Ok(serde_json::to_string_pretty(&error_output)?); - } - }; - - // Add helpful context to the output - let docker_image_used = input.docker_image.as_deref().unwrap_or(&self.docker_image); - let enhanced_output = json!({ - "tool": "run_test", - "description": "CLT test execution results", - "test_file": input.test_file, - "docker_image": docker_image_used, - "result": output, - "help": { - "success_meaning": "true = test passed, all commands executed and outputs matched expectations", - "errors_meaning": "Array of specific mismatches between expected and actual outputs. step refers to the position in the test steps array (0-based)", - "next_steps": "If test failed, use 'refine_output' tool to suggest patterns for dynamic content", - "docker_image_info": format!("Test executed in Docker image: {} (default: {})", docker_image_used, self.docker_image) - } - }); - - Ok(serde_json::to_string_pretty(&enhanced_output)?) - } - "refine_output" => { - let input: RefineOutputInput = serde_json::from_value( - arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, - )?; - let output = self - .pattern_refiner - .refine_output(&input.expected, &input.actual)?; - - // Add helpful context and examples - let enhanced_output = json!({ - "tool": "refine_output", - "description": "Pattern suggestions for handling dynamic content in test outputs", - "result": output, - "help": { - "pattern_types": { - "named_patterns": "Use %{PATTERN_NAME} syntax. Available: SEMVER, IPADDR, DATE, TIME, NUMBER, PATH", - "regex_patterns": "Use #!/regex/!# syntax. Example: #!/[0-9]+/!# for any number", - "examples": { - "version": "Replace '1.2.3' with '%{SEMVER}' or '#!/[0-9]+\\.[0-9]+\\.[0-9]+/!#'", - "timestamp": "Replace '2023-12-25 14:30:22' with '%{DATE} %{TIME}' or '#!/[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/!#'", - "process_id": "Replace 'PID: 1234' with 'PID: %{NUMBER}' or 'PID: #!/[0-9]+/!#'" - } - }, - "usage": "Copy the 'refined_output' and use it as the expected output in your .rec test file" - } - }); - - Ok(serde_json::to_string_pretty(&enhanced_output)?) - } - "test_match" => { - let input: TestMatchInput = serde_json::from_value( - arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, - )?; - let output = self.execute_test_match(&input.expected, &input.actual)?; - - // Add helpful context - let enhanced_output = json!({ - "tool": "test_match", - "description": "Pattern matching results using CLT's intelligent comparison engine", - "comparison": { - "expected": input.expected, - "actual": input.actual - }, - "result": output, - "help": { - "matches_meaning": "true = strings match (considering patterns), false = mismatch found", - "diff_lines_details": "Git-style diff showing line-by-line differences between expected and actual output", - "pattern_support": "Understands %{PATTERN} and #!/regex/!# syntax for dynamic content", - "next_steps": "If match fails, check diff_lines array for specific differences, then use 'refine_output' to suggest patterns" - } - }); - - Ok(serde_json::to_string_pretty(&enhanced_output)?) - } - "clt_help" => { - #[derive(Deserialize)] - struct HelpInput { - topic: String, - } - - let input: HelpInput = serde_json::from_value( - arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, - )?; - - let help_content = self.get_help_content(&input.topic); - Ok(serde_json::to_string_pretty(&help_content)?) 
- } - "get_patterns" => { - let patterns = structured_test::get_patterns(self.clt_binary_path.as_deref())?; - - let enhanced_output = json!({ - "tool": "get_patterns", - "description": "Available patterns for CLT tests", - "patterns": patterns, - "help": { - "usage": "Use these patterns in test outputs like %{PATTERN_NAME}", - "example": "Replace '1.2.3' with '%{SEMVER}' to match any semantic version" - } - }); - - Ok(serde_json::to_string_pretty(&enhanced_output)?) - } - "read_test" => { - let input: mcp_protocol::ReadTestInput = serde_json::from_value( - arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, - )?; - - let test_structure = - structured_test::read_test_file(&self.resolve_test_path(&input.test_file)?)?; - - let enhanced_output = json!({ - "tool": "read_test", - "description": "Structured representation of CLT test file", - "test_file": input.test_file, - "result": test_structure, - "help": { - "structure": "JSON format with 'steps' array containing test steps", - "step_types": "input (commands), output (expected results), comment (documentation), block (reusable components)", - "nested_blocks": "Block steps contain resolved content in 'steps' field", - "usage": "Modify this structure and use 'write_test' to save changes" - } - }); - - Ok(serde_json::to_string_pretty(&enhanced_output)?) - } - "write_test" => { - let input: mcp_protocol::WriteTestInput = serde_json::from_value( - arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, - )?; - - // Safely resolve test path with proper error handling - let resolved_test_path = match self.resolve_test_path(&input.test_file) { - Ok(path) => path, - Err(e) => { - let error_output = json!({ - "tool": "write_test", - "description": "CLT test file write failed during path resolution", - "test_file": input.test_file, - "result": { - "success": false, - "error": format!("Path resolution failed: {}", e) - }, - "help": { - "error_type": "path_resolution", - "suggestion": "Check that the test file path is valid and the directory is writable", - "working_directory": self.workdir_path - } - }); - return Ok(serde_json::to_string_pretty(&error_output)?); - } - }; - - // Safely write test file with proper error handling - match structured_test::write_test_file(&resolved_test_path, &input.test_structure) { - Ok(()) => { - let enhanced_output = json!({ - "tool": "write_test", - "description": "CLT test file written successfully", - "test_file": input.test_file, - "result": { - "success": true - }, - "help": { - "next_steps": "Use 'run_test' to execute the written test file" - } - }); - Ok(serde_json::to_string_pretty(&enhanced_output)?) - } - Err(e) => { - let error_output = json!({ - "tool": "write_test", - "description": "CLT test file write failed", - "test_file": input.test_file, - "result": { - "success": false, - "error": format!("Write operation failed: {}", e) - }, - "help": { - "error_type": "write_failure", - "suggestion": "Check file permissions and disk space", - "working_directory": self.workdir_path - } - }); - Ok(serde_json::to_string_pretty(&error_output)?) 
- } - } - } - "update_test" => { - let input: mcp_protocol::TestReplaceInput = serde_json::from_value( - arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, - )?; - - // Safely resolve test path with proper error handling - let resolved_test_path = match self.resolve_test_path(&input.test_file) { - Ok(path) => path, - Err(e) => { - let error_output = json!({ - "tool": "update_test", - "description": "Test structure update failed during path resolution", - "test_file": input.test_file, - "result": { - "success": false, - "message": format!("Path resolution failed: {}", e) - }, - "help": { - "error_type": "path_resolution", - "suggestion": "Check that the test file path is correct and accessible", - "working_directory": self.workdir_path - } - }); - return Ok(serde_json::to_string_pretty(&error_output)?); - } - }; - - match structured_test::replace_test_structure( - &resolved_test_path, - &input.old_test_structure, - &input.new_test_structure, - ) { - Ok(()) => { - let enhanced_output = json!({ - "tool": "update_test", - "description": "Test structure replaced successfully", - "test_file": input.test_file, - "result": { - "success": true, - "message": "Old test structure found and replaced with new structure" - }, - "help": { - "next_steps": "Use 'run_test' to execute the modified test file", - "replacement_info": "The old test structure was found and replaced exactly once" - } - }); - Ok(serde_json::to_string_pretty(&enhanced_output)?) - } - Err(e) => { - let enhanced_output = json!({ - "tool": "update_test", - "description": "Test structure replacement failed", - "test_file": input.test_file, - "result": { - "success": false, - "message": e.to_string() - }, - "help": { - "common_errors": { - "not_found": "Old test structure not found in file - check exact match of steps, content, and args", - "ambiguous": "Old test structure matches multiple locations - make it more specific", - "file_not_found": "Test file doesn't exist - check the path" - }, - "matching_rules": "Steps must match exactly: type, args, content, and nested steps (if any)" - } - }); - Ok(serde_json::to_string_pretty(&enhanced_output)?) - } - } - } - "append_test" => { - let input: mcp_protocol::TestAppendInput = serde_json::from_value( - arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, - )?; - - match structured_test::append_test_structure( - &self.resolve_test_path(&input.test_file)?, - &input.test_structure, - ) { - Ok(steps_added) => { - let enhanced_output = json!({ - "tool": "append_test", - "description": "Test steps appended successfully", - "test_file": input.test_file, - "result": { - "success": true, - "message": format!("Successfully appended {} test steps to the file", steps_added), - "steps_added": steps_added - }, - "help": { - "next_steps": "Use 'run_test' to execute the updated test file", - "append_info": "New steps were added to the end of the existing test file" - } - }); - Ok(serde_json::to_string_pretty(&enhanced_output)?) - } - Err(e) => { - let enhanced_output = json!({ - "tool": "append_test", - "description": "Test append operation failed", - "test_file": input.test_file, - "result": { - "success": false, - "message": e.to_string(), - "steps_added": 0 - }, - "help": { - "common_errors": { - "file_not_found": "Test file doesn't exist - check the path", - "permission_denied": "Cannot write to file - check file permissions", - "invalid_structure": "Test structure is invalid - check step format" - } - } - }); - Ok(serde_json::to_string_pretty(&enhanced_output)?) 
- } - } - } - _ => { - // Return a proper error response instead of panicking - let error_output = json!({ - "tool": tool_name, - "description": "Unknown tool requested", - "result": { - "success": false, - "error": format!("Unknown tool: {}", tool_name) - }, - "help": { - "available_tools": [ - "run_test", "refine_output", "test_match", "clt_help", - "get_patterns", "read_test", "write_test", "update_test", "append_test" - ], - "suggestion": "Use one of the available tools listed above" - } - }); - return Ok(serde_json::to_string_pretty(&error_output)?); - } - }; - - // If we get here, one of the tools above should have returned a result - result - } - - fn get_help_content(&self, topic: &str) -> Value { - match topic { - "overview" => json!({ - "topic": "CLT Overview", - "description": "CLT (Command Line Tester) is a testing framework for command-line applications", - "content": { - "what_is_clt": "CLT allows you to record interactive command sessions, save them as test files, and replay them to verify consistent behavior. All commands run inside Docker containers for reproducible environments.", - "key_features": [ - "Record interactive command sessions", - "Replay tests to verify behavior", - "Pattern matching for dynamic content (timestamps, IDs, etc.)", - "Docker container isolation", - "Structured error reporting" - ], - "typical_workflow": [ - "1. Record a test session: clt record ubuntu:20.04", - "2. Execute commands interactively (all recorded)", - "3. Exit with Ctrl+D to save the test file", - "4. Replay test: clt test -t mytest -d ubuntu:20.04", - "5. Refine patterns if dynamic content causes failures" - ], - "file_types": { - "test_files": "Test recording files with input/output sections", - "result_files": "Test replay results (generated during test execution)", - "block_files": "Reusable test blocks that can be included in test files" - } - } - }), - "test_format" => json!({ - "topic": "Structured Test Format", - "description": "Complete guide to CLT's structured JSON test format for AI-friendly test creation", - "content": { - "overview": "CLT uses a structured JSON format that makes it easy for AI to create, read, and modify tests. 
This format abstracts away complex syntax and provides a clear, hierarchical representation of test steps.", - "basic_structure": { - "description": "A test consists of an optional description and an array of steps", - "schema": { - "description": "Optional text description of what the test does (appears at top of test file)", - "steps": "Array of test steps to execute in sequence" - }, - "minimal_example": { - "description": "Simple test with one command", - "steps": [ - { - "type": "input", - "args": [], - "content": "echo 'Hello World'" - }, - { - "type": "output", - "args": [], - "content": "Hello World" - } - ] - } - }, - "step_types": { - "input": { - "purpose": "Command to execute in the test environment", - "structure": { - "type": "input", - "args": "Always empty array []", - "content": "Command string to execute" - }, - "example": { - "type": "input", - "args": [], - "content": "ls -la /tmp" - } - }, - "output": { - "purpose": "Expected result from the previous command", - "structure": { - "type": "output", - "args": "Empty [] or [\"checker-name\"] for custom validation", - "content": "Expected output string (can contain patterns)" - }, - "examples": { - "basic": { - "type": "output", - "args": [], - "content": "total 0" - }, - "with_patterns": { - "type": "output", - "args": [], - "content": "Process started with PID %{NUMBER}" - }, - "with_custom_checker": { - "type": "output", - "args": ["json-validator"], - "content": "{\"status\": \"success\"}" - } - } - }, - "comment": { - "purpose": "Documentation and notes within the test (ignored during execution)", - "structure": { - "type": "comment", - "args": "Always empty array []", - "content": "Comment text" - }, - "example": { - "type": "comment", - "args": [], - "content": "This test validates the file listing functionality" - } - }, - "block": { - "purpose": "Reference to reusable test sequence from another file", - "structure": { - "type": "block", - "args": "[\"relative/path/to/block\"]", - "content": "Always null", - "steps": "Array of resolved steps from the block file" - }, - "example": { - "type": "block", - "args": ["auth/login"], - "content": null, - "steps": [ - { - "type": "input", - "args": [], - "content": "login admin" - }, - { - "type": "output", - "args": [], - "content": "Login successful" - } - ] - } - } - }, - "complete_examples": { - "simple_test": { - "description": "Basic command test with description", - "test": { - "description": "Test the echo command functionality", - "steps": [ - { - "type": "comment", - "args": [], - "content": "Test basic echo command" - }, - { - "type": "input", - "args": [], - "content": "echo 'Hello CLT'" - }, - { - "type": "output", - "args": [], - "content": "Hello CLT" - } - ] - } - }, - "test_with_patterns": { - "description": "Test using patterns for dynamic content", - "test": { - "description": "Application startup test with dynamic content", - "steps": [ - { - "type": "input", - "args": [], - "content": "./myapp --version" - }, - { - "type": "output", - "args": [], - "content": "MyApp version %{SEMVER}" - }, - { - "type": "input", - "args": [], - "content": "./myapp start" - }, - { - "type": "output", - "args": [], - "content": "Server started on %{IPADDR}:%{NUMBER}" - } - ] - } - }, - "test_with_blocks": { - "description": "Test using reusable blocks", - "test": { - "description": "Database integration test", - "steps": [ - { - "type": "comment", - "args": [], - "content": "Setup database connection" - }, - { - "type": "block", - "args": ["database/connect"], - "content": 
null, - "steps": [ - { - "type": "input", - "args": [], - "content": "mysql -u testuser -p" - }, - { - "type": "output", - "args": [], - "content": "Enter password:" - } - ] - }, - { - "type": "input", - "args": [], - "content": "SELECT COUNT(*) FROM users;" - }, - { - "type": "output", - "args": [], - "content": "%{NUMBER}" - } - ] - } - }, - "test_with_custom_checker": { - "description": "Test using custom output validation", - "test": { - "description": "API response validation test", - "steps": [ - { - "type": "input", - "args": [], - "content": "curl -s http://api.example.com/status" - }, - { - "type": "output", - "args": ["json-validator"], - "content": "{\"status\": \"healthy\", \"timestamp\": \"%{NUMBER}\"}" - } - ] - } - } - }, - "workflow_example": { - "description": "Complete workflow from creation to execution", - "steps": [ - "1. Create test structure using JSON format", - "2. Use write_test tool to save as test file", - "3. Use run_test tool to execute the test", - "4. Use read_test tool to load existing tests for modification", - "5. Use get_patterns tool to see available patterns for dynamic content" - ], - "example_workflow": { - "step1_create": { - "description": "Create test structure", - "json": { - "description": "Test file operations", - "steps": [ - { - "type": "input", - "args": [], - "content": "touch /tmp/testfile.txt" - }, - { - "type": "output", - "args": [], - "content": "" - }, - { - "type": "input", - "args": [], - "content": "ls -la /tmp/testfile.txt" - }, - { - "type": "output", - "args": [], - "content": "-rw-r--r-- 1 %{USERNAME} %{USERNAME} 0 %{DATE} %{TIME} /tmp/testfile.txt" - } - ] - } - }, - "step2_write": "Use write_test with test_file='/tmp/mytest.rec' and test_structure=", - "step3_run": "Use run_test with test_file='/tmp/mytest.rec' and docker_image='ubuntu:20.04' (or omit docker_image to use server default)", - "step4_modify": "Use read_test with test_file='/tmp/mytest.rec' to load for modifications" - } - }, - "best_practices": { - "structure": [ - "Always include a descriptive 'description' field for your tests", - "Use comment statements to document complex test sections", - "Group related commands and outputs together logically", - "Use meaningful names for block references" - ], - "patterns": [ - "Use %{PATTERN_NAME} for common dynamic content (dates, numbers, IPs)", - "Prefer named patterns over custom regex when available", - "Use get_patterns tool to see all available patterns", - "Test pattern matching with test_match tool before using in tests" - ], - "blocks": [ - "Create reusable blocks for common sequences (login, setup, cleanup)", - "Use relative paths for block references", - "Keep block files focused on single responsibilities", - "Document block purposes with comments" - ], - "validation": [ - "Use custom checkers for complex output validation (JSON, XML, etc.)", - "Test your structured format with write_test before execution", - "Use read_test to verify your written tests parse correctly" - ] - }, - "common_patterns": { - "test_sequence": "input β†’ output β†’ input β†’ output (commands and their expected results)", - "setup_test_cleanup": "block(setup) β†’ test_steps β†’ block(cleanup)", - "documented_test": "comment β†’ input β†’ output (with documentation)", - "conditional_validation": "input β†’ output(with_custom_checker) (for complex validation)" - }, - "troubleshooting": { - "invalid_structure": { - "symptom": "write_test fails with structure errors", - "solutions": [ - "Ensure all steps have required 'statement' and 
'args' fields", - "Check that 'statement' values are: input, output, comment, or block", - "Verify 'args' is always an array (even if empty)", - "For blocks: ensure 'content' is null and 'args' contains path" - ] - }, - "block_resolution": { - "symptom": "Block references not working", - "solutions": [ - "Check block path is relative to the test file location", - "Ensure block file exists at the specified path", - "Verify block file contains valid test structure", - "Use forward slashes (/) in paths on all platforms" - ] - }, - "pattern_issues": { - "symptom": "Patterns not matching as expected", - "solutions": [ - "Use get_patterns to see available pattern names", - "Test patterns with test_match tool first", - "Ensure pattern syntax is %{PATTERN_NAME}", - "Check pattern is appropriate for the content type" - ] - } - } - } - }), - "patterns" => json!({ - "topic": "CLT Pattern Syntax", - "description": "How to handle dynamic content in test outputs using patterns", - "content": { - "why_patterns": "Command outputs often contain dynamic data (timestamps, process IDs, version numbers) that change between test runs. Patterns allow tests to match the structure while ignoring variable content.", - "named_patterns": { - "syntax": "%{PATTERN_NAME}", - "description": "Predefined patterns from .clt/patterns file", - "examples": { - "%{SEMVER}": "Semantic versions like 1.2.3, 10.0.1", - "%{IPADDR}": "IP addresses like 192.168.1.1, 10.0.0.1", - "%{DATE}": "Dates like 2023-12-25", - "%{TIME}": "Times like 14:30:22", - "%{NUMBER}": "Any number like 42, 1234", - "%{PATH}": "File paths like /usr/bin/app", - "%{YEAR}": "4-digit years like 2023" - } - }, - "regex_patterns": { - "syntax": "#!/regex/!#", - "description": "Custom regular expressions for specific matching", - "examples": { - "#!/[0-9]+/!#": "Any number (same as %{NUMBER})", - "#!/[0-9]{4}-[0-9]{2}-[0-9]{2}/!#": "Date format YYYY-MM-DD", - "#!/[a-f0-9]{40}/!#": "SHA1 hash (40 hex characters)", - "#!/PID: [0-9]+/!#": "Process ID with prefix", - "#!/v[0-9]+\\.[0-9]+\\.[0-9]+/!#": "Version with 'v' prefix" - } - }, - "pattern_examples": [ - { - "scenario": "Process started with varying PID", - "original_output": "Process started with PID 1234", - "with_pattern": "Process started with PID %{NUMBER}", - "alternative": "Process started with PID #!/[0-9]+/!#" - }, - { - "scenario": "Application version in output", - "original_output": "MyApp version 2.1.3 starting", - "with_pattern": "MyApp version %{SEMVER} starting", - "alternative": "MyApp version #!/[0-9]+\\.[0-9]+\\.[0-9]+/!# starting" - } - ] - } - }), - "blocks" => json!({ - "topic": "CLT Reusable Blocks", - "description": "How to create and use reusable test blocks with .recb files", - "content": { - "what_are_blocks": "Blocks are reusable test sequences stored in .recb files that can be included in multiple .rec test files. 
They help avoid duplication and create modular test components.", - "key_concepts": { - "block_files": "Files with .recb extension containing reusable test sequences", - "relative_paths": "Block files must be located relative to the .rec file that includes them", - "nested_blocks": "Block files can include other blocks, creating hierarchical test structures", - "same_format": "Block files use the same format as .rec files (input/output sections)" - }, - "file_organization": { - "basic_structure": [ - "tests/", - "├── main-test.rec # Main test file", - "├── login.recb # Block in same directory", - "├── setup.recb # Another block", - "└── auth/", - " ├── admin-login.recb # Block in subdirectory", - " └── user-login.recb # Another auth block" - ], - "path_rules": [ - "Always relative to the .rec file containing the block statement", - "Same directory: ––– block: login –––", - "Subdirectory: ––– block: auth/admin-login –––", - "Parent directory: ––– block: ../common/setup –––", - "Multiple levels: ––– block: shared/auth/login –––" - ] - }, - "block_syntax": { - "inclusion": "––– block: relative-path-to-block –––", - "examples": [ - "––– block: login ––– # login.recb in same directory", - "––– block: auth/admin-login ––– # auth/admin-login.recb", - "––– block: ../common/setup ––– # ../common/setup.recb", - "––– block: shared/database/connect ––– # shared/database/connect.recb" - ], - "important_notes": [ - "Do not include .recb extension in block statement", - "Use forward slashes (/) for path separators on all platforms", - "Paths are always relative, never absolute", - "Block files must exist at the specified relative path" - ] - }, - "creating_blocks": { - "step1": "Create a .recb file with reusable test sequence", - "step2": "Use same format as .rec files (input/output sections)", - "step3": "Place file relative to where it will be used", - "step4": "Include in .rec files using ––– block: path –––", - "example_block_file": { - "filename": "database-connect.recb", - "content": [ - "––– comment –––", - "Reusable database connection sequence", - "––– input –––", - "mysql -h localhost -u testuser -p", - "––– output –––", - "Enter password:", - "––– input –––", - "testpass123", - "––– output –––", - "Welcome to the MySQL monitor.", - "––– input –––", - "USE testdb;", - "––– output –––", - "Database changed." 
- ] - } - }, - "using_blocks": { - "in_main_test": [ - "––– comment –––", - "Main test using database connection block", - "––– block: database-connect –––", - "––– input –––", - "SELECT COUNT(*) FROM users;", - "––– output –––", - "%{NUMBER}", - "––– input –––", - "EXIT;", - "––– output –––", - "Bye" - ], - "multiple_blocks": [ - "––– comment –––", - "Test using multiple blocks", - "––– block: setup/environment –––", - "––– block: auth/login –––", - "––– input –––", - "echo 'Custom command after blocks'", - "––– output –––", - "Custom command after blocks", - "––– block: cleanup/teardown –––" - ] - }, - "nested_blocks": { - "description": "Blocks can include other blocks, creating hierarchical test structures", - "example_structure": [ - "tests/", - "├── full-integration-test.rec", - "├── full-setup.recb # Includes multiple setup blocks", - "├── auth/", - "│ ├── login.recb # Basic login", - "│ └── permissions.recb # Permission setup", - "└── database/", - " ├── connect.recb # Database connection", - " └── schema.recb # Schema setup" - ], - "full_setup_block": [ - "––– comment –––", - "full-setup.recb - Complete environment setup", - "––– block: auth/login –––", - "––– block: auth/permissions –––", - "––– block: database/connect –––", - "––– block: database/schema –––", - "––– input –––", - "echo 'Environment ready'", - "––– output –––", - "Environment ready" - ], - "main_test_using_nested": [ - "––– comment –––", - "full-integration-test.rec - Uses nested blocks", - "––– block: full-setup –––", - "––– input –––", - "run-integration-tests.sh", - "––– output –––", - "All tests passed: %{NUMBER} tests" - ] - }, - "best_practices": [ - "Keep blocks focused on single responsibilities (login, setup, cleanup)", - "Use descriptive names for block files (database-connect.recb, not db.recb)", - "Organize blocks in logical directory structures", - "Document block purposes with comment sections", - "Test blocks independently before using in main tests", - "Avoid deep nesting of blocks (2-3 levels maximum)", - "Use relative paths consistently across your test suite" - ], - "common_patterns": { - "authentication": "––– block: auth/login –––", - "environment_setup": "––– block: setup/environment –––", - "database_operations": "––– block: database/connect –––", - "cleanup": "––– block: cleanup/teardown –––", - "service_startup": "––– block: services/start-all –––" - }, - "troubleshooting": { - "block_not_found": { - "error": "Block file not found", - "causes": [ - "Incorrect relative path", - "Missing .recb file", - "Wrong directory structure" - ], - "solutions": [ - "Verify .recb file exists at specified path", - "Check path is relative to .rec file location", - "Use forward slashes for path separators" - ] - }, - "circular_dependency": { - "error": "Circular dependency detected", - "cause": "Block A includes Block B, which includes Block A", - "solution": "Restructure blocks to avoid circular references" - }, - "path_issues": { - "common_mistakes": [ - "Using absolute paths instead of relative", - "Including .recb extension in block statement", - "Using backslashes on Windows", - "Incorrect relative path calculation" - ], - "correct_examples": [ - "✓ ––– block: login –––", - "✓ ––– block: auth/admin –––", - "✓ ––– block: ../shared/setup –––", - "✗ ––– block: login.recb –––", - "✗ ––– block: /absolute/path/login –––", - "✗ ––– block: auth\\\\admin –––" - ] - } - } - } - }), - "workflow" => json!({ - "topic": "CLT Testing Workflow", - "description": 
"Step-by-step process for creating and maintaining CLT tests", - "content": { - "initial_recording": { - "step1": "Start recording: clt record ubuntu:20.04", - "step2": "Execute your commands interactively", - "step3": "Exit with Ctrl+D to save the .rec file", - "step4": "Note the saved file path for later use" - }, - "test_execution": { - "step1": "Run test: clt test -t mytest.rec -d ubuntu:20.04", - "step2": "Check exit code: 0 = success, 1 = failure", - "step3": "Review any error output for mismatches" - }, - "handling_failures": { - "step1": "Identify dynamic content causing failures", - "step2": "Use refine_output tool to get pattern suggestions", - "step3": "Edit .rec file to replace dynamic content with patterns", - "step4": "Re-run test to verify fixes" - }, - "maintenance": { - "step1": "Regularly run tests to catch regressions", - "step2": "Update patterns when output formats change", - "step3": "Use blocks (.recb files) for common sequences", - "step4": "Document test purposes with comment sections" - }, - "best_practices": [ - "Keep tests focused on specific functionality", - "Use descriptive names for test files", - "Group related tests in directories", - "Document complex patterns with comments", - "Test both success and failure scenarios" - ] - } - }), - "examples" => json!({ - "topic": "CLT Practical Examples", - "description": "Real-world examples of CLT test files and usage patterns", - "content": { - "basic_command_test": { - "description": "Testing a simple echo command", - "rec_file": [ - "––– comment –––", - "Basic echo test", - "––– input –––", - "echo 'Hello CLT'", - "––– output –––", - "Hello CLT" - ] - }, - "dynamic_content_test": { - "description": "Testing command with dynamic output", - "rec_file": [ - "––– comment –––", - "Test with current date", - "––– input –––", - "date +%Y-%m-%d", - "––– output –––", - "%{DATE}" - ] - }, - "application_startup_test": { - "description": "Testing application startup with version and PID", - "rec_file": [ - "––– comment –––", - "Application startup test", - "––– input –––", - "./myapp --version", - "––– output –––", - "MyApp version %{SEMVER}", - "––– input –––", - "./myapp start &", - "––– output –––", - "Starting MyApp...", - "Process ID: %{NUMBER}", - "Listening on %{IPADDR}:8080" - ] - }, - "file_operations_test": { - "description": "Testing file creation and listing", - "rec_file": [ - "––– input –––", - "touch testfile.txt", - "––– output –––", - "", - "––– input –––", - "ls -la testfile.txt", - "––– output –––", - "#!/-rw-r--r--\\s+1\\s+\\w+\\s+\\w+\\s+0\\s+%{DATE}\\s+%{TIME}\\s+testfile\\.txt/!#" - ] - }, - "using_blocks": { - "description": "Reusable test blocks for common operations", - "block_file_rules": [ - "Block files use .recb extension", - "Must be placed relative to the .rec file that includes them", - "Can be in same directory or subdirectories", - "Block files use same format as .rec files" - ], - "login_block_file": { - "filename": "login-sequence.recb", - "location": "Same directory as main test file", - "content": [ - "––– comment –––", - "Reusable login sequence", - "––– input –––", - "mysql -u root -p", - "––– output –––", - "Enter password:", - "––– input –––", - "password123", - "––– output –––", - "Welcome to the MySQL monitor." 
- ] - }, - "main_test_using_block": { - "filename": "database-test.rec", - "content": [ - "––– comment –––", - "Database test using login block", - "––– block: login-sequence –––", - "––– input –––", - "SHOW DATABASES;", - "––– output –––", - "#!/\\+.*\\+/!#", - "#!/\\|.*Database.*\\|/!#", - "#!/\\+.*\\+/!#" - ] - }, - "nested_blocks_example": { - "description": "Blocks can include other blocks", - "structure": [ - "tests/", - "├── full-test.rec", - "├── setup.recb # Includes login.recb", - "└── auth/", - " └── login.recb # Basic login" - ], - "setup_block": [ - "––– comment –––", - "setup.recb - Full setup including login", - "––– block: auth/login –––", - "––– input –––", - "use testdb;", - "––– output –––", - "Database changed." - ], - "main_test": [ - "––– comment –––", - "full-test.rec - Uses nested blocks", - "––– block: setup –––", - "––– input –––", - "SELECT COUNT(*) FROM users;", - "––– output –––", - "%{NUMBER}" - ] - } - } - } - }), - "troubleshooting" => json!({ - "topic": "CLT Troubleshooting Guide", - "description": "Common issues and solutions when working with CLT", - "content": { - "test_failures": { - "symptom": "Test fails with output mismatch", - "causes": [ - "Dynamic content (timestamps, IDs) in output", - "Whitespace differences", - "Different environment variables", - "Changed application behavior" - ], - "solutions": [ - "Use refine_output tool to identify patterns needed", - "Add patterns for dynamic content (%{DATE}, %{NUMBER}, etc.)", - "Check for trailing whitespace or newlines", - "Verify Docker image and environment consistency" - ] - }, - "pattern_issues": { - "symptom": "Patterns not matching as expected", - "causes": [ - "Incorrect pattern syntax", - "Pattern too restrictive or too broad", - "Special characters not escaped in regex" - ], - "solutions": [ - "Use test_match tool to verify pattern behavior", - "Test patterns with simple examples first", - "Escape special regex characters with backslashes", - "Use named patterns when available instead of custom regex" - ] - }, - "docker_issues": { - "symptom": "Cannot run tests, Docker errors", - "causes": [ - "Docker daemon not running", - "Image not available locally", - "Permission issues" - ], - "solutions": [ - "Start Docker daemon", - "Pull required image: docker pull ubuntu:20.04", - "Check Docker permissions for current user" - ] - }, - "file_format_issues": { - "symptom": "CLT cannot parse .rec file", - "causes": [ - "Using regular hyphens (-) instead of en dashes (–)", - "Malformed section markers", - "Missing newlines" - ], - "solutions": [ - "Use en dashes (–) in section markers: ––– input –––", - "Ensure section markers are on their own lines", - "Check file encoding (should be UTF-8)" - ] - }, - "debugging_tips": [ - "Use test_match tool to isolate pattern matching issues", - "Start with simple tests and gradually add complexity", - "Use comment sections to document test intentions", - "Check .rep files to see actual vs expected output", - "Verify patterns work with refine_output before using in tests" - ] - } - }), - "structured_tests" => json!({ - "topic": "Structured Test Format", - "description": "AI-friendly JSON format for creating and modifying CLT tests", - "content": { - "overview": "The structured test format provides a JSON representation of CLT tests that's easier for AI to work with than the raw .rec format", - "new_tools": { - "get_patterns": { - "purpose": "Get all available patterns for the current project", - "description": "Returns patterns from both system 
(.clt/patterns in CLT binary directory) and project (.clt/patterns in current directory)", - "usage": "Use to see what patterns are available for dynamic content matching" - }, - "read_test": { - "purpose": "Convert .rec file to structured JSON format", - "description": "Parses CLT .rec files and converts them to AI-friendly JSON with nested block resolution", - "usage": "Use to analyze existing tests or prepare them for modification" - }, - "write_test": { - "purpose": "Convert structured JSON to .rec file", - "description": "Takes JSON test structure and writes proper .rec file with CLT syntax. Automatically creates parent directories if needed.", - "usage": "Use to create new tests or save modified tests after working with JSON structure" - } - }, - "json_structure": { - "root": { - "description": "Optional description text for the test file (appears before statements)", - "steps": "Array of test steps to execute" - }, - "step_object": { - "type": "Step type: 'input', 'output', 'comment', or 'block'", - "args": "Array of arguments (checker names for output, block paths for block)", - "content": "Step content (commands, expected output, comment text, null for blocks)", - "steps": "Nested steps array (only for block types with resolved content)" - } - }, - "workflow": [ - "1. Use 'read_test' to convert existing .rec file to JSON", - "2. Modify the JSON structure as needed", - "3. Use 'write_test' to save the JSON back to .rec format", - "4. Use 'run_test' to execute the .rec file", - "5. Use 'get_patterns' to see available patterns for dynamic content" - ], - "advantages": [ - "No need to learn CLT's en-dash syntax", - "Easy programmatic generation and modification", - "Structured representation of nested blocks", - "Full compatibility with existing CLT infrastructure" - ], - "examples": { - "simple_test": { - "description": "A simple test with description", - "steps": [ - { - "type": "input", - "args": [], - "content": "echo 'Hello World'" - }, - { - "type": "output", - "args": [], - "content": "Hello World" - } - ] - }, - "with_checker": { - "steps": [ - { - "type": "output", - "args": ["custom-checker"], - "content": "Expected output" - } - ] - }, - "with_block": { - "steps": [ - { - "type": "block", - "args": ["setup/database"], - "content": null, - "steps": [ - { - "type": "input", - "args": [], - "content": "setup database" - } - ] - } - ] - } - } - } - }), - _ => json!({ - "error": "Unknown help topic", - "available_topics": ["overview", "test_format", "patterns", "blocks", "workflow", "examples", "troubleshooting", "structured_tests"], - "usage": "Use clt_help tool with one of the available topics to get detailed information" - }), - } - } - - /// Helper function to create a line-based diff similar to git diff format - /// This makes the output much more AI-friendly than character-level mismatches - fn create_line_diff( - &self, - expected: &str, - actual: &str, - pattern_matcher: &cmp::PatternMatcher, - ) -> Vec<String> { - let expected_lines: Vec<&str> = expected.lines().collect(); - let actual_lines: Vec<&str> = actual.lines().collect(); - let mut diff_lines = Vec::new(); - - // Check if we have any differences at all - let has_any_diff = expected_lines.len() != actual_lines.len() - || expected_lines - .iter() - .zip(actual_lines.iter()) - .any(|(exp, act)| pattern_matcher.has_diff(exp.to_string(), act.to_string())); - - if !has_any_diff { - return diff_lines; // No differences - } - - // Add diff header - diff_lines.push("--- expected".to_string()); - diff_lines.push("+++ 
actual".to_string()); - - let max_lines = expected_lines.len().max(actual_lines.len()); - - for i in 0..max_lines { - match (expected_lines.get(i), actual_lines.get(i)) { - (Some(exp_line), Some(act_line)) => { - // Both lines exist - check if they differ - if pattern_matcher.has_diff(exp_line.to_string(), act_line.to_string()) { - diff_lines.push(format!("-{}", exp_line)); - diff_lines.push(format!("+{}", act_line)); - } else { - // Lines match (considering patterns) - show as context - diff_lines.push(format!(" {}", exp_line)); - } - } - (Some(exp_line), None) => { - // Line only in expected (deletion) - diff_lines.push(format!("-{}", exp_line)); - } - (None, Some(act_line)) => { - // Line only in actual (addition) - diff_lines.push(format!("+{}", act_line)); - } - (None, None) => break, // Should not happen given max_lines logic - } - } - - diff_lines - } - - /// Generate a clear, human-readable summary of what differs - fn create_diff_summary( - &self, - expected: &str, - actual: &str, - pattern_matcher: &cmp::PatternMatcher, - ) -> String { - let expected_lines: Vec<&str> = expected.lines().collect(); - let actual_lines: Vec<&str> = actual.lines().collect(); - - let mut mismatched_lines = 0; - let mut extra_lines_in_actual = 0; - let mut missing_lines_in_actual = 0; - - let max_lines = expected_lines.len().max(actual_lines.len()); - - for i in 0..max_lines { - match (expected_lines.get(i), actual_lines.get(i)) { - (Some(exp_line), Some(act_line)) => { - if pattern_matcher.has_diff(exp_line.to_string(), act_line.to_string()) { - mismatched_lines += 1; - } - } - (Some(_), None) => missing_lines_in_actual += 1, - (None, Some(_)) => extra_lines_in_actual += 1, - (None, None) => break, - } - } - - let mut summary_parts = Vec::new(); - - if mismatched_lines > 0 { - summary_parts.push(format!( - "{} line(s) with content differences", - mismatched_lines - )); - } - if missing_lines_in_actual > 0 { - summary_parts.push(format!( - "{} line(s) missing in actual output", - missing_lines_in_actual - )); - } - if extra_lines_in_actual > 0 { - summary_parts.push(format!( - "{} extra line(s) in actual output", - extra_lines_in_actual - )); - } - - if summary_parts.is_empty() { - "Output matches expected pattern".to_string() - } else { - format!("Output differences found: {}", summary_parts.join(", ")) - } - } - - /// Execute test_match tool with improved diff-based output - /// - /// This function compares expected vs actual strings using CLT's pattern matching - /// and returns a clear, AI-friendly diff format instead of complex character-level mismatches. 
- /// - Returns: - /// - matches: boolean indicating if strings match (considering patterns) - /// - diff_lines: git-style diff showing line-by-line differences - /// - summary: human-readable explanation of differences - fn execute_test_match(&self, expected: &str, actual: &str) -> Result<TestMatchOutput> { - // Use the same pattern loading logic as get_patterns tool - let patterns = structured_test::get_patterns(self.clt_binary_path.as_deref())?; - - // Create a temporary patterns file for the cmp crate - let temp_patterns_file = if !patterns.is_empty() { - let temp_file = std::env::temp_dir().join("clt_patterns_temp"); - let mut pattern_lines = Vec::new(); - for (name, regex) in &patterns { - pattern_lines.push(format!("{} {}", name, regex)); - } - fs::write(&temp_file, pattern_lines.join("\n"))?; - Some(temp_file.to_string_lossy().to_string()) - } else { - None - }; - - let pattern_matcher = cmp::PatternMatcher::new(temp_patterns_file) - .map_err(|e| anyhow::anyhow!("Failed to create pattern matcher: {}", e))?; - - let has_diff = pattern_matcher.has_diff(expected.to_string(), actual.to_string()); - - let (diff_lines, summary) = if has_diff { - let diff = self.create_line_diff(expected, actual, &pattern_matcher); - let summary = self.create_diff_summary(expected, actual, &pattern_matcher); - (diff, summary) - } else { - (Vec::new(), "Output matches expected pattern".to_string()) - }; - - Ok(TestMatchOutput { - matches: !has_diff, - diff_lines, - summary, - }) - } - - /// Resolve test file path to absolute path based on working directory - fn resolve_test_path(&self, test_file: &str) -> Result<String> { - let test_path = std::path::Path::new(test_file); - - if test_path.is_absolute() { - // Already absolute, validate it exists or can be created - let canonical_path = match std::fs::canonicalize(test_path) { - Ok(path) => path, - Err(_) => { - // If canonicalize fails, check if parent directory exists - if let Some(parent) = test_path.parent() { - if !parent.exists() { - return Err(anyhow::anyhow!( - "Parent directory does not exist for test file: {}", - test_path.display() - )); - } - } - test_path.to_path_buf() - } - }; - Ok(canonical_path.to_string_lossy().to_string()) - } else { - // Resolve relative to working directory - let workdir = std::path::Path::new(&self.workdir_path); - - // Ensure working directory exists - if !workdir.exists() { - return Err(anyhow::anyhow!( - "Working directory does not exist: {}", - workdir.display() - )); - } - - let resolved = workdir.join(test_path); - - // For relative paths, we need to ensure the parent directory exists for write operations - if let Some(parent) = resolved.parent() { - if !parent.exists() { - // This is not necessarily an error for read operations, but we should note it - // The actual file operations will handle this appropriately - } - } - - Ok(resolved.to_string_lossy().to_string()) - } - } -} - #[tokio::main] async fn main() -> Result<()> { let args = Args::parse(); @@ -2088,7 +62,8 @@ mod tests { use super::*; use std::io::Write; use tempfile::NamedTempFile; - use tokio_test; + use serde_json::json; + use crate::mcp_protocol::*; fn create_fake_clt_binary() -> NamedTempFile { let mut temp_file = NamedTempFile::new().unwrap(); @@ -2121,152 +96,6 @@ mod tests { .contains("CLT binary not found")); } - #[test] - fn test_handle_initialize() { - let temp_file = create_fake_clt_binary(); - let temp_path = temp_file.path().to_string_lossy().to_string(); - - let server = McpServer::new("test-image".to_string(), Some(temp_path), None).unwrap(); - - let 
response = server.handle_initialize(Some(json!(1)), None); - - assert_eq!(response.jsonrpc, "2.0"); - assert_eq!(response.id, Some(json!(1))); - assert!(response.result.is_some()); - assert!(response.error.is_none()); - - let result = response.result.unwrap(); - assert_eq!(result["protocolVersion"], "2024-11-05"); - assert_eq!(result["serverInfo"]["name"], "CLT MCP Server"); - } - - #[test] - fn test_handle_tools_list() { - let temp_file = create_fake_clt_binary(); - let temp_path = temp_file.path().to_string_lossy().to_string(); - - let server = McpServer::new("test-image".to_string(), Some(temp_path), None).unwrap(); - - let response = server.handle_tools_list(Some(json!(2))); - - assert_eq!(response.jsonrpc, "2.0"); - assert_eq!(response.id, Some(json!(2))); - assert!(response.result.is_some()); - - let result = response.result.unwrap(); - let tools = result["tools"].as_array().unwrap(); - assert_eq!(tools.len(), 9); - - let tool_names: Vec<&str> = tools.iter().map(|t| t["name"].as_str().unwrap()).collect(); - assert!(tool_names.contains(&"run_test")); - assert!(tool_names.contains(&"refine_output")); - assert!(tool_names.contains(&"test_match")); - assert!(tool_names.contains(&"clt_help")); - assert!(tool_names.contains(&"get_patterns")); - assert!(tool_names.contains(&"read_test")); - assert!(tool_names.contains(&"write_test")); - assert!(tool_names.contains(&"update_test")); - assert!(tool_names.contains(&"append_test")); - } - - #[tokio::test] - async fn test_execute_test_match_tool() { - let temp_file = create_fake_clt_binary(); - let temp_path = temp_file.path().to_string_lossy().to_string(); - - let mut server = McpServer::new("test-image".to_string(), Some(temp_path), None).unwrap(); - - let args = json!({ - "expected": "Hello World", - "actual": "Hello World" - }); - - let result = server.execute_tool("test_match", Some(args)).await.unwrap(); - let parsed: Value = serde_json::from_str(&result).unwrap(); - let test_result = &parsed["result"]; - - assert!(test_result["matches"].as_bool().unwrap()); - assert!(test_result["diff_lines"].as_array().unwrap().is_empty()); - assert_eq!(test_result["summary"], "Output matches expected pattern"); - } - - #[tokio::test] - async fn test_execute_test_match_tool_with_mismatch() { - let temp_file = create_fake_clt_binary(); - let temp_path = temp_file.path().to_string_lossy().to_string(); - - let mut server = McpServer::new("test-image".to_string(), Some(temp_path), None).unwrap(); - - let args = json!({ - "expected": "Hello World", - "actual": "Hello Universe" - }); - - let result = server.execute_tool("test_match", Some(args)).await.unwrap(); - let parsed: Value = serde_json::from_str(&result).unwrap(); - let test_result = &parsed["result"]; - - assert!(!test_result["matches"].as_bool().unwrap()); - assert!(!test_result["diff_lines"].as_array().unwrap().is_empty()); - assert!(test_result["summary"] - .as_str() - .unwrap() - .contains("differences")); - } - - #[tokio::test] - async fn test_execute_refine_output_tool() { - let temp_file = create_fake_clt_binary(); - let temp_path = temp_file.path().to_string_lossy().to_string(); - - let mut server = McpServer::new("test-image".to_string(), Some(temp_path), None).unwrap(); - - let args = json!({ - "expected": "Version: 1.2.3", - "actual": "Version: 2.4.6" - }); - - let result = server - .execute_tool("refine_output", Some(args)) - .await - .unwrap(); - let parsed: Value = serde_json::from_str(&result).unwrap(); - let refine_result = &parsed["result"]; - - // Should provide some suggestions or 
patterns - assert!( - !refine_result["suggestions"].as_array().unwrap().is_empty() - || !refine_result["patterns_applied"] - .as_array() - .unwrap() - .is_empty() - ); - } - - #[tokio::test] - async fn test_execute_run_test_tool_with_nonexistent_file() { - let temp_file = create_fake_clt_binary(); - let temp_path = temp_file.path().to_string_lossy().to_string(); - - let mut server = McpServer::new("test-image".to_string(), Some(temp_path), None).unwrap(); - - let args = json!({ - "test_file": "/nonexistent/test.rec" - }); - - let result = server.execute_tool("run_test", Some(args)).await.unwrap(); - let parsed: Value = serde_json::from_str(&result).unwrap(); - let test_result = &parsed["result"]; - - assert!(!test_result["success"].as_bool().unwrap()); - assert_eq!(test_result["errors"].as_array().unwrap().len(), 1); - assert_eq!(test_result["errors"][0]["command"], "file_check"); - assert!(test_result["errors"][0]["actual"] - .as_str() - .unwrap() - .contains("File not found")); - } - #[tokio::test] async fn test_execute_unknown_tool() { let temp_file = create_fake_clt_binary(); @@ -2280,45 +109,6 @@ mod tests { assert!(result.unwrap_err().to_string().contains("Unknown tool")); } - #[tokio::test] - async fn test_execute_clt_help_tool() { - let temp_file = create_fake_clt_binary(); - let temp_path = temp_file.path().to_string_lossy().to_string(); - - let mut server = McpServer::new("test-image".to_string(), Some(temp_path), None).unwrap(); - - let args = json!({ - "topic": "overview" - }); - - let result = server.execute_tool("clt_help", Some(args)).await.unwrap(); - let parsed: Value = serde_json::from_str(&result).unwrap(); - - assert_eq!(parsed["topic"], "CLT Overview"); - assert!(parsed["content"]["what_is_clt"].is_string()); - assert!(parsed["content"]["key_features"].is_array()); - } - - #[test] - fn test_handle_request_unknown_method() { - let temp_file = create_fake_clt_binary(); - let temp_path = temp_file.path().to_string_lossy().to_string(); - - let mut server = McpServer::new("test-image".to_string(), Some(temp_path), None).unwrap(); - - let request = McpRequest { - jsonrpc: "2.0".to_string(), - id: Some(json!(1)), - method: "unknown_method".to_string(), - params: None, - }; - - let response = tokio_test::block_on(server.handle_request(request)); - - assert!(response.error.is_some()); - assert_eq!(response.error.unwrap().code, -32601); - } - #[test] fn test_mcp_response_constructors() { let success_response = McpResponse::success(Some(json!(1)), json!({"test": "data"})); @@ -2335,57 +125,123 @@ mod tests { assert!(error_response.error.is_some()); assert_eq!(error_response.error.unwrap().code, -32602); } +} - #[tokio::test] - async fn test_run_test_with_custom_docker_image() { - let temp_file = create_fake_clt_binary(); - let temp_path = temp_file.path().to_string_lossy().to_string(); +#[cfg(test)] +mod test_string_format { + use super::*; + use serde_json::json; + use crate::mcp_protocol; - let mut server = - McpServer::new("default-image".to_string(), Some(temp_path), None).unwrap(); + #[test] + fn test_test_structure_object_format() { + let json_input = json!({ + "test_file": "test.rec", + "test_structure": { + "description": "Test description", + "steps": [ + { + "type": "input", + "args": [], + "content": "echo hello" + } + ] + } + }); - // Test with custom docker_image parameter - let args = json!({ - "test_file": "/nonexistent/test.rec", - "docker_image": "custom-image" + let result: mcp_protocol::WriteTestInputWithWarning = + serde_json::from_value(json_input).unwrap(); 
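+ // Object form: the structure arrives as a JSON object, so was_string should remain false.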
+ assert_eq!(result.test_file, "test.rec"); + assert!(!result.test_structure.was_string); + assert_eq!( + result.test_structure.structure.description, + Some("Test description".to_string()) + ); + } + + #[test] + fn test_test_structure_string_format() { + let test_structure_json = json!({ + "description": "Test description", + "steps": [ + { + "type": "input", + "args": [], + "content": "echo hello" + } + ] }); - let result = server.execute_tool("run_test", Some(args)).await.unwrap(); - let parsed: Value = serde_json::from_str(&result).unwrap(); + let json_input = json!({ + "test_file": "test.rec", + "test_structure": serde_json::to_string(&test_structure_json).unwrap() + }); - // Verify the custom docker image is used - assert_eq!(parsed["docker_image"], "custom-image"); - assert!(parsed["help"]["docker_image_info"] - .as_str() - .unwrap() - .contains("custom-image")); - assert!(parsed["help"]["docker_image_info"] - .as_str() - .unwrap() - .contains("default: default-image")); + let result: mcp_protocol::WriteTestInputWithWarning = + serde_json::from_value(json_input).unwrap(); + assert_eq!(result.test_file, "test.rec"); + assert!(result.test_structure.was_string); // Should be true for string format + assert_eq!( + result.test_structure.structure.description, + Some("Test description".to_string()) + ); } - #[tokio::test] - async fn test_run_test_with_default_docker_image() { - let temp_file = create_fake_clt_binary(); - let temp_path = temp_file.path().to_string_lossy().to_string(); + #[test] + fn test_test_replace_input_string_format() { + let test_structure_json = json!({ + "description": "Test description", + "steps": [ + { + "type": "input", + "args": [], + "content": "echo hello" + } + ] }); - let mut server = - McpServer::new("default-image".to_string(), Some(temp_path), None).unwrap(); + let json_input = json!({ + "test_file": "test.rec", + "old_test_structure": serde_json::to_string(&test_structure_json).unwrap(), + "new_test_structure": test_structure_json + }); + + let result: mcp_protocol::TestReplaceInputWithWarning = + serde_json::from_value(json_input).unwrap(); + assert_eq!(result.test_file, "test.rec"); + assert!(result.old_test_structure.was_string); // String format + assert!(!result.new_test_structure.was_string); // Object format + } - // Test without docker_image parameter (should use default) - let args = json!({ - "test_file": "/nonexistent/test.rec" + #[test] + fn test_invalid_json_string() { + let json_input = json!({ + "test_file": "test.rec", + "test_structure": "invalid json string" }); - let result = server.execute_tool("run_test", Some(args)).await.unwrap(); - let parsed: Value = serde_json::from_str(&result).unwrap(); + let result: Result<mcp_protocol::WriteTestInputWithWarning, _> = + serde_json::from_value(json_input); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Invalid JSON string")); + } + + #[test] + fn test_invalid_type() { + let json_input = json!({ + "test_file": "test.rec", + "test_structure": 123 // Invalid type + }); - // Verify the default docker image is used - assert_eq!(parsed["docker_image"], "default-image"); - assert!(parsed["help"]["docker_image_info"] - .as_str() - .unwrap() - .contains("default-image")); + let result: Result<mcp_protocol::WriteTestInputWithWarning, _> = + serde_json::from_value(json_input); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("must be an object or a JSON string")); } -} +} \ No newline at end of file
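The `test_string_format` module above exercises both encodings that `write_test` now accepts for `test_structure`. As a minimal sketch of the two equivalent client payloads (tool name and argument keys come from this diff; the surrounding envelope follows the MCP `tools/call` convention handled in `server/protocol.rs` below; assumes the `serde_json` crate):

```rust
use serde_json::json;

fn main() {
    // Preferred: test_structure passed as a plain JSON object.
    let object_form = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "write_test",
            "arguments": {
                "test_file": "test.rec",
                "test_structure": {
                    "description": "Test description",
                    "steps": [
                        {"type": "input", "args": [], "content": "echo hello"}
                    ]
                }
            }
        }
    });

    // Tolerated: the same structure serialized into a JSON string.
    // The handler still parses it, but attaches a warning to the response.
    let string_form = json!({
        "jsonrpc": "2.0",
        "id": 2,
        "method": "tools/call",
        "params": {
            "name": "write_test",
            "arguments": {
                "test_file": "test.rec",
                "test_structure": "{\"description\":\"Test description\",\"steps\":[{\"type\":\"input\",\"args\":[],\"content\":\"echo hello\"}]}"
            }
        }
    });

    println!("{object_form}\n{string_form}");
}
```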
diff --git a/mcp/src/mcp_protocol.rs b/mcp/src/mcp_protocol.rs index 8217dc0..ba828a2 100644 --- a/mcp/src/mcp_protocol.rs +++ b/mcp/src/mcp_protocol.rs @@ -1,6 +1,52 @@ -use serde::{Deserialize, Serialize}; +pub use parser::{TestStep, TestStructure}; +use serde::{Deserialize, Deserializer, Serialize}; use std::collections::HashMap; +/// Wrapper for TestStructure that tracks if it was parsed from a string +#[derive(Debug)] +pub struct TestStructureWithWarning { + pub structure: TestStructure, + pub was_string: bool, +} + +impl<'de> Deserialize<'de> for TestStructureWithWarning { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + use serde::de::Error; + use serde_json::Value; + + let value = Value::deserialize(deserializer)?; + + match value { + // Try to deserialize as TestStructure object first + Value::Object(_) => { + let structure = TestStructure::deserialize(value).map_err(D::Error::custom)?; + Ok(TestStructureWithWarning { + structure, + was_string: false, + }) + } + // If it's a string, try to parse it as JSON + Value::String(s) => { + let parsed_value: Value = serde_json::from_str(&s).map_err(|e| { + D::Error::custom(format!("Invalid JSON string in test_structure: {}", e)) + })?; + let structure = + TestStructure::deserialize(parsed_value).map_err(D::Error::custom)?; + Ok(TestStructureWithWarning { + structure, + was_string: true, + }) + } + _ => Err(D::Error::custom( + "test_structure must be an object or a JSON string", + )), + } + } +} + /// MCP JSON-RPC 2.0 Request #[derive(Debug, Deserialize)] #[allow(dead_code)] @@ -177,21 +223,23 @@ pub struct ReadTestOutput { pub steps: Vec<TestStep>, } +/// Version of WriteTestInput that tracks if test_structure was parsed from string #[derive(Debug, Deserialize)] -pub struct WriteTestInput { +pub struct WriteTestInputWithWarning { pub test_file: String, - pub test_structure: TestStructure, + pub test_structure: TestStructureWithWarning, } #[derive(Debug, Serialize)] pub struct WriteTestOutput { pub success: bool, } +/// Version of TestReplaceInput that tracks if test_structure was parsed from string #[derive(Debug, Deserialize)] -pub struct TestReplaceInput { +pub struct TestReplaceInputWithWarning { pub test_file: String, - pub old_test_structure: TestStructure, - pub new_test_structure: TestStructure, + pub old_test_structure: TestStructureWithWarning, + pub new_test_structure: TestStructureWithWarning, } #[derive(Debug, Serialize)] @@ -200,10 +248,11 @@ pub struct TestReplaceOutput { pub message: String, } +/// Version of TestAppendInput that tracks if test_structure was parsed from string #[derive(Debug, Deserialize)] -pub struct TestAppendInput { +pub struct TestAppendInputWithWarning { pub test_file: String, - pub test_structure: TestStructure, + pub test_structure: TestStructureWithWarning, } #[derive(Debug, Serialize)] @@ -218,23 +267,7 @@ pub struct GetPatternsOutput { pub patterns: std::collections::HashMap<String, String>, } -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct TestStructure { - #[serde(skip_serializing_if = "Option::is_none")] - pub description: Option<String>, - pub steps: Vec<TestStep>, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct TestStep { - #[serde(rename = "type")] - pub step_type: String, - pub args: Vec<String>, - #[serde(skip_serializing_if = "Option::is_none")] - pub content: Option<String>, - #[serde(skip_serializing_if = "Option::is_none")] - pub steps: Option<Vec<TestStep>>, // For nested blocks -} +// TestStructure and TestStep are now imported from parser crate impl McpResponse { pub fn success(id: Option<serde_json::Value>, result: serde_json::Value) -> Self {
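`TestStructure` and `TestStep` now live in the parser crate; the removed definitions above show the serde attributes they carry. A minimal, self-contained stand-in (illustrative only, not the parser crate's actual code; assumes `serde` with the derive feature and `serde_json`) showing how those attributes shape the wire format:

```rust
use serde::{Deserialize, Serialize};

// Simplified stand-in for parser::TestStep, using the same serde
// attributes as the definitions removed in the hunk above.
#[derive(Serialize, Deserialize)]
struct TestStep {
    // Serialized under the JSON key "type", since `type` is a Rust keyword.
    #[serde(rename = "type")]
    step_type: String,
    args: Vec<String>,
    // Omitted from the JSON entirely when None.
    #[serde(skip_serializing_if = "Option::is_none")]
    content: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    steps: Option<Vec<TestStep>>,
}

fn main() -> serde_json::Result<()> {
    let step = TestStep {
        step_type: "input".to_string(),
        args: vec![],
        content: Some("echo hello".to_string()),
        steps: None,
    };
    // Prints {"type":"input","args":[],"content":"echo hello"} --
    // "steps" is dropped because it is None.
    println!("{}", serde_json::to_string(&step)?);
    Ok(())
}
```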
diff --git a/mcp/src/server/config.rs b/mcp/src/server/config.rs new file mode 100644 index 0000000..7f67c03 --- /dev/null +++ b/mcp/src/server/config.rs @@ -0,0 +1,56 @@ +use anyhow::Result; + +/// Server configuration extracted from command line arguments +#[derive(Debug, Clone)] +pub struct ServerConfig { + pub docker_image: String, + pub clt_binary_path: Option<String>, + pub workdir_path: String, +} + +impl ServerConfig { + pub fn new( + docker_image: String, + clt_binary_path: Option<String>, + workdir_path: Option<String>, + ) -> Result<Self> { + // Resolve working directory - use provided path or current directory + let workdir_path = match workdir_path { + Some(path) => { + let path_buf = std::path::PathBuf::from(&path); + if !path_buf.exists() { + return Err(anyhow::anyhow!( + "Working directory does not exist: {}", + path + )); + } + if !path_buf.is_dir() { + return Err(anyhow::anyhow!( + "Working directory path is not a directory: {}", + path + )); + } + // Convert to absolute path + std::fs::canonicalize(path_buf) + .map_err(|e| { + anyhow::anyhow!("Failed to resolve working directory path: {}", e) + })? + .to_string_lossy() + .to_string() + } + None => { + // Use current working directory + std::env::current_dir() + .map_err(|e| anyhow::anyhow!("Failed to get current working directory: {}", e))? + .to_string_lossy() + .to_string() + } + }; + + Ok(Self { + docker_image, + clt_binary_path, + workdir_path, + }) + } +} \ No newline at end of file diff --git a/mcp/src/server/handlers.rs b/mcp/src/server/handlers.rs new file mode 100644 index 0000000..6979cfc --- /dev/null +++ b/mcp/src/server/handlers.rs @@ -0,0 +1,465 @@ +use anyhow::Result; +use serde::Deserialize; +use serde_json::{json, Value}; +use std::fs; + +use crate::mcp_protocol::{self, *}; +use crate::pattern_refiner::PatternRefiner; +use crate::test_runner::TestRunner; +use crate::{cmp, parser}; + +use super::config::ServerConfig; +use super::help::HelpProvider; +use super::utils::{DiffUtils, PathUtils}; + +/// Individual tool handlers +#[derive(Debug)] +pub struct ToolHandlers { + pub config: ServerConfig, + pub test_runner: TestRunner, + pub pattern_refiner: PatternRefiner, +} + +impl ToolHandlers { + pub fn new(config: ServerConfig) -> Result<Self> { + let test_runner = TestRunner::new( + config.docker_image.clone(), + config.clt_binary_path.clone(), + config.workdir_path.clone(), + )?; + let pattern_refiner = PatternRefiner::new()?; + + Ok(Self { + config, + test_runner, + pattern_refiner, + }) + } + + pub async fn handle_run_test(&mut self, arguments: Option<Value>) -> Result<String> { + let input: RunTestInput = serde_json::from_value( + arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, + )?; + + // Safely resolve test path with proper error handling + let resolved_test_path = match PathUtils::resolve_test_path(&self.config.workdir_path, &input.test_file) { + Ok(path) => path, + Err(e) => { + // Return a structured error response instead of crashing + let error_output = json!({ + "tool": "run_test", + "status": "ERROR", + "test_file": input.test_file, + "docker_image": input.docker_image.as_deref().unwrap_or(&self.config.docker_image), + "result": { + "success": false, + "errors": [{ + "command": "path_resolution", + "expected": "Valid test file path", + "actual": format!("Path error: {}", e), + "step": 0 + }], + "summary": format!("Path error: {}", e) + }, + "working_directory": self.config.workdir_path + }); + return Ok(serde_json::to_string_pretty(&error_output)?); + } + };
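+ // Relative test_file values were joined to the configured working directory above, so the runner always receives an absolute path.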
+ + // Safely execute test with proper error handling + let output = match self .test_runner .run_test(&resolved_test_path, input.docker_image.as_deref()) + { + Ok(result) => result, + Err(e) => { + // Convert test runner errors to structured output + let error_output = json!({ + "tool": "run_test", + "status": "ERROR", + "test_file": input.test_file, + "docker_image": input.docker_image.as_deref().unwrap_or(&self.config.docker_image), + "result": { + "success": false, + "errors": [{ + "command": "test_execution", + "expected": "Successful test execution", + "actual": format!("Execution failed: {}", e), + "step": 0 + }], + "summary": format!("Execution failed: {}", e) + }, + "working_directory": self.config.workdir_path + }); + return Ok(serde_json::to_string_pretty(&error_output)?); + } + }; + + // Add helpful context to the output with better exit code information + let docker_image_used = input.docker_image.as_deref().unwrap_or(&self.config.docker_image); + + let test_status = if output.success { + "PASSED" + } else { + "FAILED" + }; + + let enhanced_output = json!({ + "tool": "run_test", + "status": test_status, + "test_file": input.test_file, + "docker_image": docker_image_used, + "result": output, + "exit_codes": { + "0": "Test passed - all commands executed successfully and outputs matched", + "1": "Test failed - commands ran but outputs didn't match expectations", + "2+": "System error - compilation, setup, validation, or crash occurred" + } + }); + + Ok(serde_json::to_string_pretty(&enhanced_output)?) + } + + pub fn handle_refine_output(&mut self, arguments: Option<Value>) -> Result<String> { + let input: RefineOutputInput = serde_json::from_value( + arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, + )?; + let output = self + .pattern_refiner + .refine_output(&input.expected, &input.actual)?; + + let enhanced_output = json!({ + "tool": "refine_output", + "result": output, + "usage": "Copy 'refined_output' and use as expected output in your .rec test file" + }); + + Ok(serde_json::to_string_pretty(&enhanced_output)?) + } + + pub fn handle_test_match(&self, arguments: Option<Value>) -> Result<String> { + let input: TestMatchInput = serde_json::from_value( + arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, + )?; + let output = self.execute_test_match(&input.expected, &input.actual)?; + + let enhanced_output = json!({ + "tool": "test_match", + "result": output + }); + + Ok(serde_json::to_string_pretty(&enhanced_output)?) + } + + pub fn handle_clt_help(&self, arguments: Option<Value>) -> Result<String> { + #[derive(Deserialize)] + struct HelpInput { + topic: String, + } + + let input: HelpInput = serde_json::from_value( + arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, + )?; + + let help_content = HelpProvider::get_help_content(&input.topic); + Ok(serde_json::to_string_pretty(&help_content)?) + } + + pub fn handle_get_patterns(&self, _arguments: Option<Value>) -> Result<String> { + let patterns = parser::get_patterns(self.config.clt_binary_path.as_deref())?; + + let enhanced_output = json!({ + "tool": "get_patterns", + "patterns": patterns + }); + + Ok(serde_json::to_string_pretty(&enhanced_output)?) 
+ } + + pub fn handle_read_test(&self, arguments: Option<Value>) -> Result<String> { + let input: mcp_protocol::ReadTestInput = serde_json::from_value( + arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, + )?; + + let test_structure = + parser::read_test_file(&PathUtils::resolve_test_path(&self.config.workdir_path, &input.test_file)?)?; + + let enhanced_output = json!({ + "tool": "read_test", + "test_file": input.test_file, + "result": test_structure + }); + + Ok(serde_json::to_string_pretty(&enhanced_output)?) + } + + pub fn handle_write_test(&self, arguments: Option<Value>) -> Result<String> { + let input: mcp_protocol::WriteTestInputWithWarning = serde_json::from_value( + arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, + )?; + + // Check if we need to add a warning about string parsing + let mut warnings = Vec::new(); + if input.test_structure.was_string { + warnings.push("test_structure was provided as a JSON string instead of an object. While this works, it's recommended to pass it as a direct JSON object for better performance and clarity.".to_string()); + } + + // Safely resolve test path with proper error handling + let resolved_test_path = match PathUtils::resolve_test_path(&self.config.workdir_path, &input.test_file) { + Ok(path) => path, + Err(e) => { + let mut error_output = json!({ + "tool": "write_test", + "test_file": input.test_file, + "success": false, + "error": format!("Path error: {}", e) + }); + + if !warnings.is_empty() { + error_output["warnings"] = json!(warnings); + } + + return Ok(serde_json::to_string_pretty(&error_output)?); + } + }; + + // Safely write test file with proper error handling + match parser::write_test_file(&resolved_test_path, &input.test_structure.structure) + { + Ok(()) => { + let mut enhanced_output = json!({ + "tool": "write_test", + "test_file": input.test_file, + "success": true + }); + + if !warnings.is_empty() { + enhanced_output["warnings"] = json!(warnings); + } + + Ok(serde_json::to_string_pretty(&enhanced_output)?) + } + Err(e) => { + let mut error_output = json!({ + "tool": "write_test", + "test_file": input.test_file, + "success": false, + "error": format!("Write failed: {}", e) + }); + + if !warnings.is_empty() { + error_output["warnings"] = json!(warnings); + } + + Ok(serde_json::to_string_pretty(&error_output)?) + } + } + } + + pub fn handle_update_test(&self, arguments: Option<Value>) -> Result<String> { + let input: mcp_protocol::TestReplaceInputWithWarning = serde_json::from_value( + arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, + )?; + + // Check if we need to add warnings about string parsing + let mut warnings = Vec::new(); + if input.old_test_structure.was_string { + warnings.push("old_test_structure was provided as a JSON string instead of an object. While this works, it's recommended to pass it as a direct JSON object for better performance and clarity.".to_string()); + } + if input.new_test_structure.was_string { + warnings.push("new_test_structure was provided as a JSON string instead of an object. 
While this works, it's recommended to pass it as a direct JSON object for better performance and clarity.".to_string()); + } + + // Safely resolve test path with proper error handling + let resolved_test_path = match PathUtils::resolve_test_path(&self.config.workdir_path, &input.test_file) { + Ok(path) => path, + Err(e) => { + let mut error_output = json!({ + "tool": "update_test", + "description": "Test structure update failed during path resolution", + "test_file": input.test_file, + "result": { + "success": false, + "message": format!("Path resolution failed: {}", e) + }, + "help": { + "error_type": "path_resolution", + "suggestion": "Check that the test file path is correct and accessible", + "working_directory": self.config.workdir_path + } + }); + + if !warnings.is_empty() { + error_output["warnings"] = json!(warnings); + } + + return Ok(serde_json::to_string_pretty(&error_output)?); + } + }; + + match parser::replace_test_structure( + &resolved_test_path, + &input.old_test_structure.structure, + &input.new_test_structure.structure, + ) { + Ok(()) => { + let mut enhanced_output = json!({ + "tool": "update_test", + "description": "Test structure replaced successfully", + "test_file": input.test_file, + "result": { + "success": true, + "message": "Old test structure found and replaced with new structure" + }, + "help": { + "next_steps": "Use 'run_test' to execute the modified test file", + "replacement_info": "The old test structure was found and replaced exactly once" + } + }); + + if !warnings.is_empty() { + enhanced_output["warnings"] = json!(warnings); + } + + Ok(serde_json::to_string_pretty(&enhanced_output)?) + } + Err(e) => { + let mut enhanced_output = json!({ + "tool": "update_test", + "description": "Test structure replacement failed", + "test_file": input.test_file, + "result": { + "success": false, + "message": e.to_string() + }, + "help": { + "common_errors": { + "not_found": "Old test structure not found in file - check exact match of steps, content, and args", + "ambiguous": "Old test structure matches multiple locations - make it more specific", + "file_not_found": "Test file doesn't exist - check the path" + }, + "matching_rules": "Steps must match exactly: type, args, content, and nested steps (if any)" + } + }); + + if !warnings.is_empty() { + enhanced_output["warnings"] = json!(warnings); + } + + Ok(serde_json::to_string_pretty(&enhanced_output)?) + } + } + } + + pub fn handle_append_test(&self, arguments: Option<Value>) -> Result<String> { + let input: mcp_protocol::TestAppendInputWithWarning = serde_json::from_value( + arguments.ok_or_else(|| anyhow::anyhow!("Missing arguments"))?, + )?; + + // Check if we need to add a warning about string parsing + let mut warnings = Vec::new(); + if input.test_structure.was_string { + warnings.push("test_structure was provided as a JSON string instead of an object. 
While this works, it's recommended to pass it as a direct JSON object for better performance and clarity.".to_string()); + } + + match parser::append_test_structure( + &PathUtils::resolve_test_path(&self.config.workdir_path, &input.test_file)?, + &input.test_structure.structure, + ) { + Ok(steps_added) => { + let mut enhanced_output = json!({ + "tool": "append_test", + "description": "Test steps appended successfully", + "test_file": input.test_file, + "result": { + "success": true, + "message": format!("Successfully appended {} test steps to the file", steps_added), + "steps_added": steps_added + }, + "help": { + "next_steps": "Use 'run_test' to execute the updated test file", + "append_info": "New steps were added to the end of the existing test file" + } + }); + + if !warnings.is_empty() { + enhanced_output["warnings"] = json!(warnings); + } + + Ok(serde_json::to_string_pretty(&enhanced_output)?) + } + Err(e) => { + let mut enhanced_output = json!({ + "tool": "append_test", + "description": "Test append operation failed", + "test_file": input.test_file, + "result": { + "success": false, + "message": e.to_string(), + "steps_added": 0 + }, + "help": { + "common_errors": { + "file_not_found": "Test file doesn't exist - check the path", + "permission_denied": "Cannot write to file - check file permissions", + "invalid_structure": "Test structure is invalid - check step format" + } + } + }); + + if !warnings.is_empty() { + enhanced_output["warnings"] = json!(warnings); + } + + Ok(serde_json::to_string_pretty(&enhanced_output)?) + } + } + } + + /// Execute test_match tool with improved diff-based output + /// + /// This function compares expected vs actual strings using CLT's pattern matching + /// and returns a clear, AI-friendly diff format instead of complex character-level mismatches. 
+ /// + /// Returns: + /// - matches: boolean indicating if strings match (considering patterns) + /// - diff_lines: git-style diff showing line-by-line differences + /// - summary: human-readable explanation of differences + fn execute_test_match(&self, expected: &str, actual: &str) -> Result<TestMatchOutput> { + // Use the same pattern loading logic as get_patterns tool + let patterns = parser::get_patterns(self.config.clt_binary_path.as_deref())?; + + // Create a temporary patterns file for the cmp crate + let temp_patterns_file = if !patterns.is_empty() { + let temp_file = std::env::temp_dir().join("clt_patterns_temp"); + let mut pattern_lines = Vec::new(); + for (name, regex) in &patterns { + pattern_lines.push(format!("{} {}", name, regex)); + } + fs::write(&temp_file, pattern_lines.join("\n"))?; + Some(temp_file.to_string_lossy().to_string()) + } else { + None + }; + + let pattern_matcher = cmp::PatternMatcher::new(temp_patterns_file) + .map_err(|e| anyhow::anyhow!("Failed to create pattern matcher: {}", e))?; + + let has_diff = pattern_matcher.has_diff(expected.to_string(), actual.to_string()); + + let (diff_lines, summary) = if has_diff { + let diff = DiffUtils::create_line_diff(expected, actual, &pattern_matcher); + let summary = DiffUtils::create_diff_summary(expected, actual, &pattern_matcher); + (diff, summary) + } else { + (Vec::new(), "Output matches expected pattern".to_string()) + }; + + Ok(TestMatchOutput { + matches: !has_diff, + diff_lines, + summary, + }) + } +} \ No newline at end of file diff --git a/mcp/src/server/help.rs b/mcp/src/server/help.rs new file mode 100644 index 0000000..7b7a3ae --- /dev/null +++ b/mcp/src/server/help.rs @@ -0,0 +1,209 @@ +use serde_json::{json, Value}; + +/// Help content provider for CLT documentation +pub struct HelpProvider; + +impl HelpProvider { + pub fn get_help_content(topic: &str) -> Value { + match topic { + "overview" => Self::overview_help(), + "test_format" => Self::test_format_help(), + "patterns" => Self::patterns_help(), + "blocks" => Self::blocks_help(), + "workflow" => Self::workflow_help(), + "examples" => Self::examples_help(), + "troubleshooting" => Self::troubleshooting_help(), + "structured_tests" => Self::structured_tests_help(), + _ => Self::unknown_topic_help(), + } + } + + fn overview_help() -> Value { + json!({ + "topic": "CLT Overview", + "description": "CLT (Command Line Tester) is a testing framework for command-line applications", + "content": { + "what_is_clt": "CLT allows you to record interactive command sessions, save them as test files, and replay them to verify consistent behavior. All commands run inside Docker containers for reproducible environments.", + "key_features": [ + "Record interactive command sessions", + "Replay tests to verify behavior", + "Pattern matching for dynamic content (timestamps, IDs, etc.)", + "Docker container isolation", + "Structured error reporting" + ], + "typical_workflow": [ + "1. Record a test session: clt record ubuntu:20.04", + "2. Execute commands interactively (all recorded)", + "3. Exit with Ctrl+D to save the test file", + "4. Replay test: clt test -t mytest -d ubuntu:20.04", + "5. 
Refine patterns if dynamic content causes failures" + ], + "file_types": { + "test_files": "Test recording files with input/output sections", + "result_files": "Test replay results (generated during test execution)", + "block_files": "Reusable test blocks that can be included in test files" + } + } + }) + } + + fn test_format_help() -> Value { + json!({ + "topic": "Structured Test Format", + "description": "Complete guide to CLT's structured JSON test format for AI-friendly test creation", + "content": { + "overview": "CLT uses a structured JSON format that makes it easy for AI to create, read, and modify tests. This format abstracts away complex syntax and provides a clear, hierarchical representation of test steps.", + "basic_structure": { + "description": "A test consists of an optional description and an array of steps", + "schema": { + "description": "Optional text description of what the test does (appears at top of test file)", + "steps": "Array of test steps to execute in sequence" + }, + "minimal_example": { + "description": "Simple test with one command", + "steps": [ + { + "type": "input", + "args": [], + "content": "echo 'Hello World'" + }, + { + "type": "output", + "args": [], + "content": "Hello World" + } + ] + } + } + } + }) + } + + fn patterns_help() -> Value { + json!({ + "topic": "CLT Pattern Syntax", + "description": "How to handle dynamic content in test outputs using patterns", + "content": { + "why_patterns": "Command outputs often contain dynamic data (timestamps, process IDs, version numbers) that change between test runs. Patterns allow tests to match the structure while ignoring variable content.", + "named_patterns": { + "syntax": "%{PATTERN_NAME}", + "description": "Predefined patterns from .clt/patterns file", + "examples": { + "%{SEMVER}": "Semantic versions like 1.2.3, 10.0.1", + "%{IPADDR}": "IP addresses like 192.168.1.1, 10.0.0.1", + "%{DATE}": "Dates like 2023-12-25", + "%{TIME}": "Times like 14:30:22", + "%{NUMBER}": "Any number like 42, 1234", + "%{PATH}": "File paths like /usr/bin/app", + "%{YEAR}": "4-digit years like 2023" + } + } + } + }) + } + + fn blocks_help() -> Value { + json!({ + "topic": "CLT Reusable Blocks", + "description": "How to create and use reusable test blocks with .recb files", + "content": { + "what_are_blocks": "Blocks are reusable test sequences stored in .recb files that can be included in multiple .rec test files. 
They help avoid duplication and create modular test components.", + "key_concepts": { + "block_files": "Files with .recb extension containing reusable test sequences", + "relative_paths": "Block files must be located relative to the .rec file that includes them", + "nested_blocks": "Block files can include other blocks, creating hierarchical test structures", + "same_format": "Block files use the same format as .rec files (input/output sections)" + } + } + }) + } + + fn workflow_help() -> Value { + json!({ + "topic": "CLT Testing Workflow", + "description": "Step-by-step process for creating and maintaining CLT tests", + "content": { + "initial_recording": { + "step1": "Start recording: clt record ubuntu:20.04", + "step2": "Execute your commands interactively", + "step3": "Exit with Ctrl+D to save the .rec file", + "step4": "Note the saved file path for later use" + }, + "test_execution": { + "step1": "Run test: clt test -t mytest.rec -d ubuntu:20.04", + "step2": "Check exit code: 0 = success, 1 = failure", + "step3": "Review any error output for mismatches" + } + } + }) + } + + fn examples_help() -> Value { + json!({ + "topic": "CLT Practical Examples", + "description": "Real-world examples of CLT test files and usage patterns", + "content": { + "basic_command_test": { + "description": "Testing a simple echo command", + "rec_file": [ + "––– comment –––", + "Basic echo test", + "––– input –––", + "echo 'Hello CLT'", + "––– output –––", + "Hello CLT" + ] + } + } + }) + } + + fn troubleshooting_help() -> Value { + json!({ + "topic": "CLT Troubleshooting Guide", + "description": "Common issues and solutions when working with CLT", + "content": { + "test_failures": { + "symptom": "Test fails with output mismatch", + "causes": [ + "Dynamic content (timestamps, IDs) in output", + "Whitespace differences", + "Different environment variables", + "Changed application behavior" + ], + "solutions": [ + "Use refine_output tool to identify patterns needed", + "Add patterns for dynamic content (%{DATE}, %{NUMBER}, etc.)", + "Check for trailing whitespace or newlines", + "Verify Docker image and environment consistency" + ] + } + } + }) + } + + fn structured_tests_help() -> Value { + json!({ + "topic": "Structured Test Format", + "description": "AI-friendly JSON format for creating and modifying CLT tests", + "content": { + "overview": "The structured test format provides a JSON representation of CLT tests that's easier for AI to work with than the raw .rec format", + "workflow": [ + "1. Use 'read_test' to convert existing .rec file to JSON", + "2. Modify the JSON structure as needed", + "3. Use 'write_test' to save the JSON back to .rec format", + "4. Use 'run_test' to execute the .rec file", + "5. 
Use 'get_patterns' to see available patterns for dynamic content" + ] + } + }) + } + + fn unknown_topic_help() -> Value { + json!({ + "error": "Unknown help topic", + "available_topics": ["overview", "test_format", "patterns", "blocks", "workflow", "examples", "troubleshooting", "structured_tests"], + "usage": "Use clt_help tool with one of the available topics to get detailed information" + }) + } +} \ No newline at end of file diff --git a/mcp/src/server/mod.rs b/mcp/src/server/mod.rs new file mode 100644 index 0000000..b7e9536 --- /dev/null +++ b/mcp/src/server/mod.rs @@ -0,0 +1,8 @@ +pub mod config; +pub mod handlers; +pub mod protocol; +pub mod tools; +pub mod help; +pub mod utils; + +pub use protocol::McpServer; \ No newline at end of file diff --git a/mcp/src/server/protocol.rs b/mcp/src/server/protocol.rs new file mode 100644 index 0000000..4f33537 --- /dev/null +++ b/mcp/src/server/protocol.rs @@ -0,0 +1,193 @@ +use anyhow::Result; +use serde_json::{json, Value}; +use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader as AsyncBufReader}; + +use crate::mcp_protocol::*; + +use super::config::ServerConfig; +use super::handlers::ToolHandlers; +use super::tools::ToolDefinitions; + +/// MCP Protocol server implementation +#[derive(Debug)] +pub struct McpServer { + tool_handlers: ToolHandlers, + tool_definitions: ToolDefinitions, +} + +impl McpServer { + pub fn new( + docker_image: String, + clt_binary_path: Option, + workdir_path: Option, + ) -> Result { + let config = ServerConfig::new(docker_image.clone(), clt_binary_path, workdir_path)?; + let tool_handlers = ToolHandlers::new(config.clone())?; + let tool_definitions = ToolDefinitions::new(docker_image); + + Ok(Self { + tool_handlers, + tool_definitions, + }) + } + + pub async fn run(&mut self) -> Result<()> { + let stdin = tokio::io::stdin(); + let mut reader = AsyncBufReader::new(stdin); + let mut stdout = tokio::io::stdout(); + + let mut line = String::new(); + loop { + line.clear(); + + // Handle EOF or read errors gracefully + let bytes_read = match reader.read_line(&mut line).await { + Ok(0) => break, // EOF - client disconnected + Ok(n) => n, + Err(e) => { + // Check if it's a broken pipe or connection reset + if e.kind() == std::io::ErrorKind::BrokenPipe + || e.kind() == std::io::ErrorKind::ConnectionReset + || e.kind() == std::io::ErrorKind::ConnectionAborted + { + // Client disconnected - exit gracefully + break; + } + // For other errors, continue trying + continue; + } + }; + + if bytes_read == 0 { + break; // EOF + } + + // Parse JSON and handle errors properly + let response = match serde_json::from_str::(line.trim()) { + Ok(request) => self.handle_request(request).await, + Err(_) => { + // Send error response for malformed JSON + McpResponse::error(None, -32700, "Parse error: Invalid JSON".to_string()) + } + }; + + // Send response with proper error handling + if let Err(e) = self.send_response(&mut stdout, &response).await { + // Check if it's a broken pipe or connection issue + if e.kind() == std::io::ErrorKind::BrokenPipe + || e.kind() == std::io::ErrorKind::ConnectionReset + || e.kind() == std::io::ErrorKind::ConnectionAborted + { + // Client disconnected - exit gracefully + break; + } + // For other errors, continue trying + continue; + } + } + + Ok(()) + } + + async fn send_response( + &self, + stdout: &mut tokio::io::Stdout, + response: &McpResponse, + ) -> std::io::Result<()> { + let response_json = serde_json::to_string(response) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + + 
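+        // MCP uses newline-delimited JSON-RPC over stdio: one complete
+        // message per line. Write the body, then a single '\n', then flush
+        // so the client is never left waiting on a buffered reply.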
+        stdout.write_all(response_json.as_bytes()).await?;
+        stdout.write_all(b"\n").await?;
+        stdout.flush().await?;
+
+        Ok(())
+    }
+
+    async fn handle_request(&mut self, request: McpRequest) -> McpResponse {
+        match request.method.as_str() {
+            "initialize" => self.handle_initialize(request.id, request.params),
+            "tools/list" => self.handle_tools_list(request.id),
+            "tools/call" => self.handle_tools_call(request.id, request.params).await,
+            _ => McpResponse::error(
+                request.id,
+                -32601,
+                format!("Method not found: {}", request.method),
+            ),
+        }
+    }
+
+    fn handle_initialize(&self, id: Option<Value>, _params: Option<Value>) -> McpResponse {
+        let result = InitializeResult {
+            protocol_version: "2024-11-05".to_string(),
+            capabilities: ServerCapabilities {
+                tools: Some(std::collections::HashMap::new()),
+            },
+            server_info: ServerInfo {
+                name: "CLT MCP Server".to_string(),
+                version: "0.1.0".to_string(),
+            },
+        };
+
+        McpResponse::success(id, json!(result))
+    }
+
+    fn handle_tools_list(&self, id: Option<Value>) -> McpResponse {
+        let tools = self.tool_definitions.get_tools();
+        let result = json!({
+            "tools": tools
+        });
+
+        McpResponse::success(id, result)
+    }
+
+    async fn handle_tools_call(&mut self, id: Option<Value>, params: Option<Value>) -> McpResponse {
+        let params = match params {
+            Some(p) => p,
+            None => return McpResponse::error(id, -32602, "Missing parameters".to_string()),
+        };
+
+        let tool_call: ToolCallParams = match serde_json::from_value(params) {
+            Ok(tc) => tc,
+            Err(e) => return McpResponse::error(id, -32602, format!("Invalid parameters: {}", e)),
+        };
+
+        let result = match self
+            .execute_tool(&tool_call.name, tool_call.arguments)
+            .await
+        {
+            Ok(content) => ToolCallResult {
+                content: vec![ToolContent {
+                    content_type: "text".to_string(),
+                    text: content,
+                }],
+                is_error: None,
+            },
+            Err(e) => ToolCallResult {
+                content: vec![ToolContent {
+                    content_type: "text".to_string(),
+                    text: format!("Error: {}", e),
+                }],
+                is_error: Some(true),
+            },
+        };
+
+        McpResponse::success(id, json!(result))
+    }
+
+    pub async fn execute_tool(&mut self, tool_name: &str, arguments: Option<Value>) -> Result<String> {
+        // Dispatch by tool name; errors propagate to handle_tools_call,
+        // which reports them as tool results with is_error set
+        match tool_name {
+            "run_test" => self.tool_handlers.handle_run_test(arguments).await,
+            "refine_output" => self.tool_handlers.handle_refine_output(arguments),
+            "test_match" => self.tool_handlers.handle_test_match(arguments),
+            "clt_help" => self.tool_handlers.handle_clt_help(arguments),
+            "get_patterns" => self.tool_handlers.handle_get_patterns(arguments),
+            "read_test" => self.tool_handlers.handle_read_test(arguments),
+            "write_test" => self.tool_handlers.handle_write_test(arguments),
+            "update_test" => self.tool_handlers.handle_update_test(arguments),
+            "append_test" => self.tool_handlers.handle_append_test(arguments),
+            _ => Err(anyhow::anyhow!("Unknown tool: {}", tool_name)),
+        }
+    }
+}
\ No newline at end of file
diff --git a/mcp/src/server/tools.rs b/mcp/src/server/tools.rs
new file mode 100644
index 0000000..648a130
--- /dev/null
+++ b/mcp/src/server/tools.rs
@@ -0,0 +1,357 @@
+use crate::mcp_protocol::*;
+use serde_json::json;
+
+/// Tool definitions and schemas for MCP server
+#[derive(Debug)]
+pub struct ToolDefinitions {
+    pub docker_image: String,
+}
+
+impl ToolDefinitions {
+    pub fn new(docker_image: String) -> Self {
+        Self { docker_image }
+    }
+
+    pub fn get_tools(&self) -> Vec<McpTool> {
+        vec![
+            self.run_test_tool(),
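+            // Each *_tool() helper below returns an McpTool whose
+            // input_schema is a JSON Schema object, so clients can validate
+            // call arguments before dispatching to the server.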
self.refine_output_tool(), + self.test_match_tool(), + self.clt_help_tool(), + self.get_patterns_tool(), + self.read_test_tool(), + self.write_test_tool(), + self.update_test_tool(), + self.append_test_tool(), + ] + } + + fn run_test_tool(&self) -> McpTool { + McpTool { + name: "run_test".to_string(), + description: format!( + "Execute a CLT test file in Docker container. Returns status: PASSED (exit 0), FAILED (exit 1 - test ran but outputs didn't match), or ERROR (exit 2+ - system/validation error). Docker image defaults to '{}' if not specified.", + self.docker_image + ), + input_schema: json!({ + "type": "object", + "properties": { + "test_file": { + "type": "string", + "description": "Path to the test file to execute" + }, + "docker_image": { + "type": "string", + "description": format!("Docker image to use for test execution. Optional - defaults to '{}' if not specified.", self.docker_image), + "default": self.docker_image + } + }, + "required": ["test_file"], + "additionalProperties": false + }), + } + } + + fn refine_output_tool(&self) -> McpTool { + McpTool { + name: "refine_output".to_string(), + description: "Analyze differences between expected and actual command outputs, then suggest patterns to handle dynamic content. This tool uses diff analysis to identify parts that change between test runs (like timestamps, PIDs, version numbers) and suggests compatible patterns to make tests more robust. Use this when test outputs contain dynamic data that changes between runs.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "expected": { + "type": "string", + "description": "The expected output string from your test. This can already contain patterns for dynamic content. Example: 'Process started with PID 1234'" + }, + "actual": { + "type": "string", + "description": "The actual output string that was produced during test execution. This is what you want to compare against the expected output. Example: 'Process started with PID 5678'" + } + }, + "required": ["expected", "actual"], + "additionalProperties": false + }), + } + } + + fn test_match_tool(&self) -> McpTool { + McpTool { + name: "test_match".to_string(), + description: "Compare expected vs actual output strings using pattern matching. This tool understands pattern syntax and performs intelligent matching that can handle dynamic content. It returns a clear line-by-line diff showing exactly what differs between expected and actual output, similar to git diff format. Use this to validate if test outputs match expectations, especially when they contain patterns for dynamic data.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "expected": { + "type": "string", + "description": "Expected output string with optional patterns. Patterns can match dynamic content like version numbers, IP addresses, dates, times, and custom regex patterns. Example: 'Server started on %{IPADDR} at %{TIME}'" + }, + "actual": { + "type": "string", + "description": "Actual output string to compare against the expected pattern. This should be the literal text output from your command or application. Example: 'Server started on 192.168.1.100 at 14:30:22'" + } + }, + "required": ["expected", "actual"], + "additionalProperties": false + }), + } + } + + fn clt_help_tool(&self) -> McpTool { + McpTool { + name: "clt_help".to_string(), + description: "Get comprehensive documentation about CLT (Command Line Tester) concepts, testing workflows, pattern syntax, and examples. 
This tool provides detailed explanations of how CLT works and step-by-step examples for common testing scenarios. Use this to understand CLT concepts before using other tools.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "topic": { + "type": "string", + "description": "Help topic to explain. Options: 'overview' (CLT introduction), 'test_format' (structured test format guide), 'patterns' (pattern syntax guide), 'blocks' (reusable test blocks), 'workflow' (testing workflow), 'examples' (practical examples), 'troubleshooting' (common issues), 'structured_tests' (AI-friendly JSON format)", + "enum": ["overview", "test_format", "patterns", "blocks", "workflow", "examples", "troubleshooting", "structured_tests"] + } + }, + "required": ["topic"], + "additionalProperties": false + }), + } + } + + fn get_patterns_tool(&self) -> McpTool { + McpTool { + name: "get_patterns".to_string(), + description: "Get all available patterns for the current CLT project. Returns predefined patterns that can be used in test outputs for dynamic content matching.".to_string(), + input_schema: json!({ + "type": "object", + "properties": {}, + "additionalProperties": false + }), + } + } + + fn read_test_tool(&self) -> McpTool { + McpTool { + name: "read_test".to_string(), + description: "Read a CLT test file and return its structured representation. The test is returned as a sequence of steps including commands, expected outputs, comments, and reusable blocks.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "test_file": { + "type": "string", + "description": "Path to the test file to read" + } + }, + "required": ["test_file"], + "additionalProperties": false + }), + } + } + + fn write_test_tool(&self) -> McpTool { + McpTool { + name: "write_test".to_string(), + description: "Write a CLT test file from structured format. Creates a test file that can be executed with run_test. Supports commands, expected outputs, comments, and reusable blocks.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "test_file": { + "type": "string", + "description": "Path where the test file should be written" + }, + "test_structure": { + "type": "object", + "description": "Structured test definition", + "properties": { + "description": { + "type": "string", + "description": "Optional description text that appears at the beginning of the test file. Can be multiline." + }, + "steps": { + "type": "array", + "description": "Sequence of test steps", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["input", "output", "comment", "block"], + "description": "Type of step: input (command to execute), output (expected result), comment (documentation), block (reusable test sequence)" + }, + "args": { + "type": "array", + "items": {"type": "string"}, + "description": "Arguments for the statement. For output: optional custom checker name. For block: relative path to block file." + }, + "content": { + "type": ["string", "null"], + "description": "Content of the step. Command text for input, expected output for output, comment text for comment, null for block." 
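+                                // Illustrative step shape, mirroring the
+                                // minimal example from clt_help:
+                                // {"type": "input", "args": [], "content": "echo 'Hello World'"}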
+ }, + "steps": { + "type": "array", + "description": "Nested steps for block types (resolved block content)" + } + }, + "required": ["type", "args"] + } + } + }, + "required": ["steps"] + } + }, + "required": ["test_file", "test_structure"], + "additionalProperties": false + }), + } + } + + fn update_test_tool(&self) -> McpTool { + McpTool { + name: "update_test".to_string(), + description: "Replace specific test steps in an existing CLT test file. Finds the old test structure and replaces it with the new test structure. Returns error if old structure is not found or matches multiple locations.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "test_file": { + "type": "string", + "description": "Path to the test file to modify" + }, + "old_test_structure": { + "type": "object", + "description": "Test structure to find and replace. Must match exactly in the original file.", + "properties": { + "description": { + "type": "string", + "description": "Optional description text (not used for matching, only for context)" + }, + "steps": { + "type": "array", + "description": "Sequence of test steps to find and replace. Must match exactly.", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["input", "output", "comment", "block"], + "description": "Type of step" + }, + "args": { + "type": "array", + "items": {"type": "string"}, + "description": "Arguments for the step" + }, + "content": { + "type": ["string", "null"], + "description": "Content of the step" + }, + "steps": { + "type": "array", + "description": "Nested steps for block types" + } + }, + "required": ["type", "args"] + } + } + }, + "required": ["steps"] + }, + "new_test_structure": { + "type": "object", + "description": "Test structure to replace the old structure with", + "properties": { + "description": { + "type": "string", + "description": "Optional description text. If provided, will replace the file's description." + }, + "steps": { + "type": "array", + "description": "New sequence of test steps", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["input", "output", "comment", "block"], + "description": "Type of step" + }, + "args": { + "type": "array", + "items": {"type": "string"}, + "description": "Arguments for the step" + }, + "content": { + "type": ["string", "null"], + "description": "Content of the step" + }, + "steps": { + "type": "array", + "description": "Nested steps for block types" + } + }, + "required": ["type", "args"] + } + } + }, + "required": ["steps"] + } + }, + "required": ["test_file", "old_test_structure", "new_test_structure"], + "additionalProperties": false + }), + } + } + + fn append_test_tool(&self) -> McpTool { + McpTool { + name: "append_test".to_string(), + description: "Append new test steps to an existing CLT test file. Adds the new steps to the end of the existing test file while preserving the original content.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "test_file": { + "type": "string", + "description": "Path to the test file to modify" + }, + "test_structure": { + "type": "object", + "description": "Test structure to append to the existing file", + "properties": { + "description": { + "type": "string", + "description": "Optional description text. Only used if the original file has no description." 
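+                        // Note: when the target file already has a
+                        // description, this field is ignored rather than
+                        // overwriting it.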
+ }, + "steps": { + "type": "array", + "description": "Sequence of test steps to append", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["input", "output", "comment", "block"], + "description": "Type of step" + }, + "args": { + "type": "array", + "items": {"type": "string"}, + "description": "Arguments for the step" + }, + "content": { + "type": ["string", "null"], + "description": "Content of the step" + }, + "steps": { + "type": "array", + "description": "Nested steps for block types" + } + }, + "required": ["type", "args"] + } + } + }, + "required": ["steps"] + } + }, + "required": ["test_file", "test_structure"], + "additionalProperties": false + }), + } + } +} \ No newline at end of file diff --git a/mcp/src/server/utils.rs b/mcp/src/server/utils.rs new file mode 100644 index 0000000..0626bd1 --- /dev/null +++ b/mcp/src/server/utils.rs @@ -0,0 +1,172 @@ +use anyhow::Result; + +use crate::cmp; + +/// Utility functions for path resolution and diff creation +pub struct PathUtils; + +impl PathUtils { + /// Resolve test file path to absolute path based on working directory + pub fn resolve_test_path(workdir_path: &str, test_file: &str) -> Result { + let test_path = std::path::Path::new(test_file); + + if test_path.is_absolute() { + // Already absolute, validate it exists or can be created + let canonical_path = match std::fs::canonicalize(test_path) { + Ok(path) => path, + Err(_) => { + // If canonicalize fails, check if parent directory exists + if let Some(parent) = test_path.parent() { + if !parent.exists() { + return Err(anyhow::anyhow!( + "Parent directory does not exist for test file: {}", + test_path.display() + )); + } + } + test_path.to_path_buf() + } + }; + Ok(canonical_path.to_string_lossy().to_string()) + } else { + // Resolve relative to working directory + let workdir = std::path::Path::new(workdir_path); + + // Ensure working directory exists + if !workdir.exists() { + return Err(anyhow::anyhow!( + "Working directory does not exist: {}", + workdir.display() + )); + } + + let resolved = workdir.join(test_path); + + // For relative paths, we need to ensure the parent directory exists for write operations + if let Some(parent) = resolved.parent() { + if !parent.exists() { + // This is not necessarily an error for read operations, but we should note it + // The actual file operations will handle this appropriately + } + } + + Ok(resolved.to_string_lossy().to_string()) + } + } +} + +/// Diff utilities for comparing expected vs actual output +pub struct DiffUtils; + +impl DiffUtils { + /// Helper function to create a line-based diff similar to git diff format + /// This makes the output much more AI-friendly than character-level mismatches + pub fn create_line_diff( + expected: &str, + actual: &str, + pattern_matcher: &cmp::PatternMatcher, + ) -> Vec { + let expected_lines: Vec<&str> = expected.lines().collect(); + let actual_lines: Vec<&str> = actual.lines().collect(); + let mut diff_lines = Vec::new(); + + // Check if we have any differences at all + let has_any_diff = expected_lines.len() != actual_lines.len() + || expected_lines + .iter() + .zip(actual_lines.iter()) + .any(|(exp, act)| pattern_matcher.has_diff(exp.to_string(), act.to_string())); + + if !has_any_diff { + return diff_lines; // No differences + } + + // Add diff header + diff_lines.push("--- expected".to_string()); + diff_lines.push("+++ actual".to_string()); + + let max_lines = expected_lines.len().max(actual_lines.len()); + + for i in 0..max_lines { + match 
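+                // Lines are paired positionally (index-for-index), so this is
+                // a positional diff rather than an LCS diff: a single
+                // inserted line shifts every later pair and reports them all
+                // as changed.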
(expected_lines.get(i), actual_lines.get(i)) { + (Some(exp_line), Some(act_line)) => { + // Both lines exist - check if they differ + if pattern_matcher.has_diff(exp_line.to_string(), act_line.to_string()) { + diff_lines.push(format!("-{}", exp_line)); + diff_lines.push(format!("+{}", act_line)); + } else { + // Lines match (considering patterns) - show as context + diff_lines.push(format!(" {}", exp_line)); + } + } + (Some(exp_line), None) => { + // Line only in expected (deletion) + diff_lines.push(format!("-{}", exp_line)); + } + (None, Some(act_line)) => { + // Line only in actual (addition) + diff_lines.push(format!("+{}", act_line)); + } + (None, None) => break, // Should not happen given max_lines logic + } + } + + diff_lines + } + + /// Generate a clear, human-readable summary of what differs + pub fn create_diff_summary( + expected: &str, + actual: &str, + pattern_matcher: &cmp::PatternMatcher, + ) -> String { + let expected_lines: Vec<&str> = expected.lines().collect(); + let actual_lines: Vec<&str> = actual.lines().collect(); + + let mut mismatched_lines = 0; + let mut extra_lines_in_actual = 0; + let mut missing_lines_in_actual = 0; + + let max_lines = expected_lines.len().max(actual_lines.len()); + + for i in 0..max_lines { + match (expected_lines.get(i), actual_lines.get(i)) { + (Some(exp_line), Some(act_line)) => { + if pattern_matcher.has_diff(exp_line.to_string(), act_line.to_string()) { + mismatched_lines += 1; + } + } + (Some(_), None) => missing_lines_in_actual += 1, + (None, Some(_)) => extra_lines_in_actual += 1, + (None, None) => break, + } + } + + let mut summary_parts = Vec::new(); + + if mismatched_lines > 0 { + summary_parts.push(format!( + "{} line(s) with content differences", + mismatched_lines + )); + } + if missing_lines_in_actual > 0 { + summary_parts.push(format!( + "{} line(s) missing in actual output", + missing_lines_in_actual + )); + } + if extra_lines_in_actual > 0 { + summary_parts.push(format!( + "{} extra line(s) in actual output", + extra_lines_in_actual + )); + } + + if summary_parts.is_empty() { + "Output matches expected pattern".to_string() + } else { + format!("Output differences found: {}", summary_parts.join(", ")) + } + } +} \ No newline at end of file diff --git a/mcp/src/structured_test.rs b/mcp/src/structured_test.rs deleted file mode 100644 index 22ff34e..0000000 --- a/mcp/src/structured_test.rs +++ /dev/null @@ -1,746 +0,0 @@ -use crate::mcp_protocol::{TestStep, TestStructure}; -use anyhow::{anyhow, Result}; -use parser::{parse_statement, Statement}; -use std::collections::HashMap; -use std::fs; -use std::path::Path; - -/// Convert a .rec file to structured JSON format -pub fn read_test_file(test_file_path: &str) -> Result { - let content = fs::read_to_string(test_file_path)?; - let test_dir = Path::new(test_file_path) - .parent() - .ok_or_else(|| anyhow!("Cannot determine parent directory of test file"))?; - - parse_rec_content(&content, test_dir) -} - -/// Parse .rec content and convert to structured format -pub fn parse_rec_content(content: &str, base_dir: &Path) -> Result { - let lines: Vec<&str> = content.lines().collect(); - let mut steps = Vec::new(); - let mut i = 0; - - // First, extract description (everything before the first statement) - let mut description_lines = Vec::new(); - - while i < lines.len() { - let line = lines[i].trim(); - - // Check if this is a statement line - if line.starts_with("––– ") && line.ends_with(" –––") { - break; - } - - // Skip empty lines at the beginning if no content yet - if 
description_lines.is_empty() && line.is_empty() { - i += 1; - continue; - } - - description_lines.push(lines[i]); // Keep original line with whitespace - i += 1; - } - - // Trim trailing empty lines from description - while let Some(last) = description_lines.last() { - if last.trim().is_empty() { - description_lines.pop(); - } else { - break; - } - } - - let description = if description_lines.is_empty() { - None - } else { - Some(description_lines.join("\n")) - }; - - // Now parse the statements starting from where we left off - while i < lines.len() { - let line = lines[i].trim(); - - // Skip empty lines - if line.is_empty() { - i += 1; - continue; - } - - // Check if this is a statement line - if line.starts_with("––– ") && line.ends_with(" –––") { - let (statement, arg) = parse_statement(line)?; - let step = match statement { - Statement::Input => { - // Collect input content until next statement - let (content, next_idx) = collect_content(&lines, i + 1)?; - i = next_idx; - TestStep { - step_type: "input".to_string(), - args: vec![], - content: Some(content), - steps: None, - } - } - Statement::Output => { - // Collect output content until next statement - let (content, next_idx) = collect_content(&lines, i + 1)?; - i = next_idx; - let args = if let Some(checker) = arg { - vec![checker] - } else { - vec![] - }; - TestStep { - step_type: "output".to_string(), - args, - content: Some(content), - steps: None, - } - } - Statement::Comment => { - // Collect comment content until next statement - let (content, next_idx) = collect_content(&lines, i + 1)?; - i = next_idx; - TestStep { - step_type: "comment".to_string(), - args: vec![], - content: Some(content), - steps: None, - } - } - Statement::Block => { - let block_path = - arg.ok_or_else(|| anyhow!("Block statement missing path argument"))?; - - // Resolve block file and parse recursively - let nested_steps = resolve_block(&block_path, base_dir)?; - i += 1; // Move past the block statement line - - TestStep { - step_type: "block".to_string(), - args: vec![block_path], - content: None, - steps: Some(nested_steps), - } - } - Statement::Duration => { - // Skip duration statements (they're auto-generated) - i += 1; - continue; - } - }; - steps.push(step); - } else { - // This shouldn't happen in a well-formed .rec file - return Err(anyhow!("Unexpected line format: {}", line)); - } - } - - Ok(TestStructure { description, steps }) -} - -/// Collect content lines until the next statement or end of file -fn collect_content(lines: &[&str], start_idx: usize) -> Result<(String, usize)> { - let mut content_lines = Vec::new(); - let mut i = start_idx; - - while i < lines.len() { - let line = lines[i]; - - // Check if this is a statement line - if line.trim().starts_with("––– ") && line.trim().ends_with(" –––") { - break; - } - - content_lines.push(line); - i += 1; - } - - // Join lines and trim trailing whitespace - let content = content_lines.join("\n").trim_end().to_string(); - Ok((content, i)) -} - -/// Resolve a block reference by loading and parsing the .recb file -fn resolve_block(block_path: &str, base_dir: &Path) -> Result> { - let block_file_path = base_dir.join(format!("{}.recb", block_path)); - - if !block_file_path.exists() { - return Err(anyhow!( - "Block file not found: {}", - block_file_path.display() - )); - } - - let block_content = fs::read_to_string(&block_file_path)?; - let block_dir = block_file_path - .parent() - .ok_or_else(|| anyhow!("Cannot determine parent directory of block file"))?; - - let block_structure = 
parse_rec_content(&block_content, block_dir)?; - Ok(block_structure.steps) -} - -/// Convert structured JSON format back to .rec file content -pub fn write_test_file(test_file_path: &str, test_structure: &TestStructure) -> Result<()> { - // Validate test file path - let test_path = Path::new(test_file_path); - - // Create parent directories if they don't exist - if let Some(parent_dir) = test_path.parent() { - if !parent_dir.exists() { - fs::create_dir_all(parent_dir).map_err(|e| { - anyhow!("Failed to create directory {}: {}", parent_dir.display(), e) - })?; - } - - // Validate that parent directory is writable - if let Err(e) = fs::metadata(parent_dir) { - return Err(anyhow!( - "Cannot access parent directory {}: {}", - parent_dir.display(), - e - )); - } - } - - // Convert structure to REC format with error handling - let rec_content = convert_structure_to_rec(test_structure) - .map_err(|e| anyhow!("Failed to convert test structure to .rec format: {}", e))?; - - // Write file with proper error handling - fs::write(test_file_path, rec_content) - .map_err(|e| anyhow!("Failed to write test file {}: {}", test_file_path, e))?; - - Ok(()) -} - -/// Replace old test structure with new test structure in existing file -pub fn replace_test_structure( - test_file_path: &str, - old_structure: &TestStructure, - new_structure: &TestStructure, -) -> Result<()> { - // Read the current test file - let current_structure = read_test_file(test_file_path)?; - - // Find the old structure in the current structure - let replacement_result = - find_and_replace_structure(¤t_structure, old_structure, new_structure)?; - - // Write the modified structure back to the file - write_test_file(test_file_path, &replacement_result)?; - Ok(()) -} - -/// Append test structure to existing file -pub fn append_test_structure( - test_file_path: &str, - append_structure: &TestStructure, -) -> Result { - // Read the current test file - let mut current_structure = read_test_file(test_file_path)?; - - // If append_structure has a description and current doesn't, use the append description - if current_structure.description.is_none() && append_structure.description.is_some() { - current_structure.description = append_structure.description.clone(); - } - - // Count steps being added - let steps_added = append_structure.steps.len(); - - // Append the new steps - current_structure - .steps - .extend(append_structure.steps.clone()); - - // Write the modified structure back to the file - write_test_file(test_file_path, ¤t_structure)?; - - Ok(steps_added) -} - -/// Find and replace a test structure within another test structure -fn find_and_replace_structure( - current: &TestStructure, - old: &TestStructure, - new: &TestStructure, -) -> Result { - // Simple approach: find exact sequence match in steps - let old_steps = &old.steps; - let current_steps = ¤t.steps; - - if old_steps.is_empty() { - return Err(anyhow!("Old test structure cannot be empty")); - } - - // Look for the sequence of old steps in current steps - let mut found_at = None; - for i in 0..=current_steps.len().saturating_sub(old_steps.len()) { - if steps_match_sequence(¤t_steps[i..i + old_steps.len()], old_steps) { - if found_at.is_some() { - return Err(anyhow!("Ambiguous replacement: old test structure matches multiple locations in the file")); - } - found_at = Some(i); - } - } - - let start_idx = - found_at.ok_or_else(|| anyhow!("Old test structure not found in the current file"))?; - - // Create new structure with replacement - let mut new_steps = Vec::new(); - - // Add 
steps before the match - new_steps.extend_from_slice(¤t_steps[..start_idx]); - - // Add the new steps - new_steps.extend(new.steps.clone()); - - // Add steps after the match - new_steps.extend_from_slice(¤t_steps[start_idx + old_steps.len()..]); - - // Handle description replacement logic - let final_description = if new.description.is_some() { - // If new structure has description, use it - new.description.clone() - } else { - // Otherwise keep current description - current.description.clone() - }; - - Ok(TestStructure { - description: final_description, - steps: new_steps, - }) -} - -/// Check if two step sequences match exactly -fn steps_match_sequence(seq1: &[TestStep], seq2: &[TestStep]) -> bool { - if seq1.len() != seq2.len() { - return false; - } - - for (step1, step2) in seq1.iter().zip(seq2.iter()) { - if !steps_match(step1, step2) { - return false; - } - } - - true -} - -/// Check if two test steps match exactly -fn steps_match(step1: &TestStep, step2: &TestStep) -> bool { - step1.step_type == step2.step_type - && step1.args == step2.args - && step1.content == step2.content - && match (&step1.steps, &step2.steps) { - (None, None) => true, - (Some(s1), Some(s2)) => steps_match_sequence(s1, s2), - _ => false, - } -} - -/// Convert TestStructure to .rec file format -fn convert_structure_to_rec(test_structure: &TestStructure) -> Result { - let mut lines = Vec::new(); - - // Add description at the beginning if present - if let Some(description) = &test_structure.description { - lines.push(description.clone()); - // Add empty line after description if there are steps - if !test_structure.steps.is_empty() { - lines.push("".to_string()); - } - } - - for step in &test_structure.steps { - match step.step_type.as_str() { - "input" => { - lines.push("––– input –––".to_string()); - if let Some(content) = &step.content { - if !content.is_empty() { - lines.push(content.clone()); - } - } - } - "output" => { - if step.args.is_empty() { - lines.push("––– output –––".to_string()); - } else { - lines.push(format!("––– output: {} –––", step.args[0])); - } - if let Some(content) = &step.content { - if !content.is_empty() { - lines.push(content.clone()); - } - } - } - "comment" => { - lines.push("––– comment –––".to_string()); - if let Some(content) = &step.content { - if !content.is_empty() { - lines.push(content.clone()); - } - } - } - "block" => { - if step.args.is_empty() { - return Err(anyhow!("Block step missing path argument")); - } - lines.push(format!("––– block: {} –––", step.args[0])); - - // Note: We don't write the nested steps to the .rec file - // The block reference will be resolved when the file is read - } - _ => { - return Err(anyhow!("Unknown step type: {}", step.step_type)); - } - } - } - - Ok(lines.join("\n")) -} - -/// Get all available patterns from system and project .clt/patterns files -pub fn get_patterns(clt_binary_path: Option<&str>) -> Result> { - let mut patterns = HashMap::new(); - - // First, load system patterns from CLT binary directory - if let Some(binary_path) = clt_binary_path { - let binary_dir = Path::new(binary_path) - .parent() - .ok_or_else(|| anyhow!("Cannot determine CLT binary directory"))?; - let system_patterns_path = binary_dir.join(".clt/patterns"); - - if system_patterns_path.exists() { - load_patterns_from_file(&system_patterns_path, &mut patterns)?; - } - } - - // Then, load project patterns from current directory (these override system patterns) - let project_patterns_path = Path::new(".clt/patterns"); - if project_patterns_path.exists() { - 
load_patterns_from_file(project_patterns_path, &mut patterns)?; - } - - Ok(patterns) -} - -/// Load patterns from a specific file into the patterns map -fn load_patterns_from_file(file_path: &Path, patterns: &mut HashMap) -> Result<()> { - let content = fs::read_to_string(file_path)?; - - for line in content.lines() { - let line = line.trim(); - if line.is_empty() || line.starts_with('#') { - continue; - } - - // Parse pattern line: PATTERN_NAME REGEX_PATTERN - let parts: Vec<&str> = line.splitn(2, ' ').collect(); - if parts.len() == 2 { - patterns.insert(parts[0].to_string(), parts[1].to_string()); - } - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use std::fs; - use tempfile::tempdir; - - #[test] - fn test_parse_simple_rec_content() { - let content = r#"This is a test description -that spans multiple lines - -––– input ––– -echo "hello" -––– output ––– -hello -"#; - - let temp_dir = tempdir().unwrap(); - let result = parse_rec_content(content, temp_dir.path()).unwrap(); - - assert_eq!( - result.description, - Some("This is a test description\nthat spans multiple lines".to_string()) - ); - assert_eq!(result.steps.len(), 2); - assert_eq!(result.steps[0].step_type, "input"); - assert_eq!(result.steps[0].content, Some("echo \"hello\"".to_string())); - assert_eq!(result.steps[1].step_type, "output"); - assert_eq!(result.steps[1].content, Some("hello".to_string())); - } - - #[test] - fn test_convert_structure_to_rec() { - let structure = TestStructure { - description: Some("Test description\nwith multiple lines".to_string()), - steps: vec![ - TestStep { - step_type: "input".to_string(), - args: vec![], - content: Some("echo \"hello\"".to_string()), - steps: None, - }, - TestStep { - step_type: "output".to_string(), - args: vec![], - content: Some("hello".to_string()), - steps: None, - }, - ], - }; - - let result = convert_structure_to_rec(&structure).unwrap(); - let expected = "Test description\nwith multiple lines\n\n––– input –––\necho \"hello\"\n––– output –––\nhello"; - assert_eq!(result, expected); - } - - #[test] - fn test_convert_structure_to_rec_with_empty_content() { - let structure = TestStructure { - description: None, - steps: vec![ - TestStep { - step_type: "input".to_string(), - args: vec![], - content: Some("echo \"hello\"".to_string()), - steps: None, - }, - TestStep { - step_type: "output".to_string(), - args: vec![], - content: Some("".to_string()), // Empty content - steps: None, - }, - TestStep { - step_type: "input".to_string(), - args: vec![], - content: Some("echo \"world\"".to_string()), - steps: None, - }, - ], - }; - - let result = convert_structure_to_rec(&structure).unwrap(); - let expected = - "––– input –––\necho \"hello\"\n––– output –––\n––– input –––\necho \"world\""; - assert_eq!(result, expected); - } - - #[test] - fn test_write_test_file_creates_directories() { - let temp_dir = tempdir().unwrap(); - let test_file_path = temp_dir.path().join("nested/deep/directory/test.rec"); - - let structure = TestStructure { - description: Some("Test with nested directory".to_string()), - steps: vec![TestStep { - step_type: "input".to_string(), - args: vec![], - content: Some("echo \"test\"".to_string()), - steps: None, - }], - }; - - // This should create the nested directory structure - write_test_file(test_file_path.to_str().unwrap(), &structure).unwrap(); - - // Verify the file was created - assert!(test_file_path.exists()); - - // Verify the content is correct - let content = fs::read_to_string(&test_file_path).unwrap(); - 
assert!(content.starts_with("Test with nested directory")); - assert!(content.contains("––– input –––")); - assert!(content.contains("echo \"test\"")); - } - #[test] - fn update_test_test_structure() { - let temp_dir = tempdir().unwrap(); - let test_file_path = temp_dir.path().join("test.rec"); - - // Create initial test file - let initial_structure = TestStructure { - description: Some("Initial test".to_string()), - steps: vec![ - TestStep { - step_type: "input".to_string(), - args: vec![], - content: Some("echo \"hello\"".to_string()), - steps: None, - }, - TestStep { - step_type: "output".to_string(), - args: vec![], - content: Some("hello".to_string()), - steps: None, - }, - TestStep { - step_type: "input".to_string(), - args: vec![], - content: Some("echo \"world\"".to_string()), - steps: None, - }, - ], - }; - - write_test_file(test_file_path.to_str().unwrap(), &initial_structure).unwrap(); - - // Define old structure to replace (middle step) - let old_structure = TestStructure { - description: None, - steps: vec![TestStep { - step_type: "output".to_string(), - args: vec![], - content: Some("hello".to_string()), - steps: None, - }], - }; - - // Define new structure - let new_structure = TestStructure { - description: None, - steps: vec![ - TestStep { - step_type: "output".to_string(), - args: vec![], - content: Some("HELLO WORLD".to_string()), - steps: None, - }, - TestStep { - step_type: "comment".to_string(), - args: vec![], - content: Some("This was replaced".to_string()), - steps: None, - }, - ], - }; - - // Perform replacement - replace_test_structure( - test_file_path.to_str().unwrap(), - &old_structure, - &new_structure, - ) - .unwrap(); - - // Verify the result - let result = read_test_file(test_file_path.to_str().unwrap()).unwrap(); - assert_eq!(result.steps.len(), 4); // Original 3 steps, but middle replaced with 2 - assert_eq!(result.steps[0].content, Some("echo \"hello\"".to_string())); - assert_eq!(result.steps[1].content, Some("HELLO WORLD".to_string())); - assert_eq!( - result.steps[2].content, - Some("This was replaced".to_string()) - ); - assert_eq!(result.steps[3].content, Some("echo \"world\"".to_string())); - } - - #[test] - fn update_test_test_structure_not_found() { - let temp_dir = tempdir().unwrap(); - let test_file_path = temp_dir.path().join("test.rec"); - - // Create initial test file - let initial_structure = TestStructure { - description: None, - steps: vec![TestStep { - step_type: "input".to_string(), - args: vec![], - content: Some("echo \"hello\"".to_string()), - steps: None, - }], - }; - - write_test_file(test_file_path.to_str().unwrap(), &initial_structure).unwrap(); - - // Define old structure that doesn't exist - let old_structure = TestStructure { - description: None, - steps: vec![TestStep { - step_type: "output".to_string(), - args: vec![], - content: Some("nonexistent".to_string()), - steps: None, - }], - }; - - let new_structure = TestStructure { - description: None, - steps: vec![], - }; - - // Should return error - let result = replace_test_structure( - test_file_path.to_str().unwrap(), - &old_structure, - &new_structure, - ); - assert!(result.is_err()); - assert!(result.unwrap_err().to_string().contains("not found")); - } - - #[test] - fn append_test_test_structure() { - let temp_dir = tempdir().unwrap(); - let test_file_path = temp_dir.path().join("test.rec"); - - // Create initial test file - let initial_structure = TestStructure { - description: Some("Initial test".to_string()), - steps: vec![TestStep { - step_type: "input".to_string(), 
- args: vec![], - content: Some("echo \"hello\"".to_string()), - steps: None, - }], - }; - - write_test_file(test_file_path.to_str().unwrap(), &initial_structure).unwrap(); - - // Define structure to append - let append_structure = TestStructure { - description: None, - steps: vec![ - TestStep { - step_type: "output".to_string(), - args: vec![], - content: Some("hello".to_string()), - steps: None, - }, - TestStep { - step_type: "comment".to_string(), - args: vec![], - content: Some("This was appended".to_string()), - steps: None, - }, - ], - }; - - // Perform append - let steps_added = - append_test_structure(test_file_path.to_str().unwrap(), &append_structure).unwrap(); - - // Verify the result - assert_eq!(steps_added, 2); - let result = read_test_file(test_file_path.to_str().unwrap()).unwrap(); - assert_eq!(result.steps.len(), 3); // Original 1 + 2 appended - assert_eq!(result.description, Some("Initial test".to_string())); // Original description preserved - assert_eq!(result.steps[0].content, Some("echo \"hello\"".to_string())); - assert_eq!(result.steps[1].content, Some("hello".to_string())); - assert_eq!( - result.steps[2].content, - Some("This was appended".to_string()) - ); - } -} diff --git a/mcp/src/test_runner.rs b/mcp/src/test_runner.rs index 8bcce45..571a800 100644 --- a/mcp/src/test_runner.rs +++ b/mcp/src/test_runner.rs @@ -1,5 +1,6 @@ -use crate::mcp_protocol::{RunTestOutput, TestError, TestStructure}; +use crate::mcp_protocol::{RunTestOutput, TestError}; use anyhow::{Context, Result}; +use parser::{parse_rec_content, TestStructure}; use std::fs; use std::path::Path; use std::process::Command; @@ -142,41 +143,76 @@ impl TestRunner { } }; - let exit_success = output.status.success(); + let exit_code = output.status.code().unwrap_or(-1); let stderr = String::from_utf8_lossy(&output.stderr); - if exit_success { - Ok(RunTestOutput { - success: true, - errors: vec![], - summary: "Test passed successfully".to_string(), - }) - } else { - // Parse failures from .rep file comparison with error handling - let errors = match self.parse_test_failures_from_rep_file(test_path) { - Ok(errors) => errors, - Err(e) => { - // If we can't parse the rep file, create a generic error - vec![TestError { - command: "rep_file_parsing".to_string(), - expected: "Should be able to parse test results".to_string(), - actual: format!("Failed to parse test results: {}", e), - step: 0, - }] - } - }; + match exit_code { + 0 => { + // Test passed successfully + Ok(RunTestOutput { + success: true, + errors: vec![], + summary: "Test passed successfully".to_string(), + }) + } + 1 => { + // Test failed but ran (expected test failure) + let errors = match self.parse_test_failures_from_rep_file(test_path) { + Ok(errors) => errors, + Err(e) => { + // If we can't parse the rep file, create a generic error + vec![TestError { + command: "rep_file_parsing".to_string(), + expected: "Should be able to parse test results".to_string(), + actual: format!("Failed to parse test results: {}", e), + step: 0, + }] + } + }; - let summary = if errors.is_empty() { - format!("Test failed: {}", stderr.trim()) - } else { - format!("Test failed with {} error(s)", errors.len()) - }; + let summary = if errors.is_empty() { + "Test failed - no specific errors identified".to_string() + } else { + format!("Test failed with {} error(s)", errors.len()) + }; - Ok(RunTestOutput { - success: false, - errors, - summary, - }) + Ok(RunTestOutput { + success: false, + errors, + summary, + }) + } + code => { + // System error, validation error, or 
crash (exit code 2+) + let error_type = match code { + 2 => "compilation_error", + 3 => "setup_error", + 4 => "recording_error", + 5 => "validation_error", + _ if code >= 129 && code <= 143 => "signal_termination", + _ => "system_error", + }; + + let error_description = match code { + 2 => "Compilation or build error occurred".to_string(), + 3 => "Test setup or environment error".to_string(), + 4 => "Recording or file system error".to_string(), + 5 => "Test validation or format error".to_string(), + _ if code >= 129 && code <= 143 => format!("Process terminated by signal {}", code - 128), + _ => format!("System error (exit code {})", code), + }; + + Ok(RunTestOutput { + success: false, + errors: vec![TestError { + command: error_type.to_string(), + expected: "Successful test execution".to_string(), + actual: format!("{}: {}", error_description, stderr.trim()), + step: 0, + }], + summary: format!("System error: {} (exit code {})", error_description, code), + }) + } } } @@ -233,8 +269,7 @@ impl TestRunner { ) })?; - let test_structure = match crate::structured_test::parse_rec_content(&rec_content, base_dir) - { + let test_structure = match parse_rec_content(&rec_content, base_dir) { Ok(structure) => structure, Err(e) => { // If we can't parse the REC file, return a parsing error @@ -319,7 +354,7 @@ impl TestRunner { fn extract_outputs_from_steps( &self, - steps: &[crate::mcp_protocol::TestStep], + steps: &[parser::TestStep], outputs: &mut Vec, global_step_index: &mut usize, ) { diff --git a/parser/Cargo.lock b/parser/Cargo.lock index c6fb07c..eda04fa 100644 --- a/parser/Cargo.lock +++ b/parser/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "aho-corasick" @@ -17,6 +17,12 @@ version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + [[package]] name = "memchr" version = "2.5.0" @@ -29,6 +35,26 @@ version = "0.1.0" dependencies = [ "anyhow", "regex", + "serde", + "serde_json", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", ] [[package]] @@ -59,3 +85,58 @@ name = "regex-syntax" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "syn" +version = "2.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" diff --git a/parser/Cargo.toml b/parser/Cargo.toml index 4d87626..c95ab6c 100644 --- a/parser/Cargo.toml +++ b/parser/Cargo.toml @@ -8,3 +8,5 @@ edition = "2021" [dependencies] regex = "^1.8.4" anyhow = "1.0" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" diff --git a/parser/src/lib.rs b/parser/src/lib.rs index bef793b..5b5612c 100644 --- a/parser/src/lib.rs +++ b/parser/src/lib.rs @@ -1,11 +1,12 @@ use anyhow::{Context, Result}; -use std::collections::HashSet; -use std::fs::File; +use std::collections::{HashSet, HashMap}; +use std::fs::{self, File}; use std::io::{BufRead, BufReader}; use std::error::Error; use std::str::FromStr; use std::path::{Path, PathBuf}; use regex::Regex; +use serde::{Serialize, Deserialize}; pub const BLOCK_REGEX: &str = r"(?m)^––– block: ([\.a-zA-Z0-9\-\/\_]+) –––$"; pub const DURATION_REGEX: &str = r"(?m)^––– duration: ([0-9\.]+)ms \(([0-9\.]+)%\) –––$"; @@ -200,3 +201,1266 @@ macro_rules! 
check_statement { } }}; } + +/// Test validation error structure +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct TestError { + pub command: String, + pub expected: String, + pub actual: String, + pub step: usize, +} + +/// Test validation result +#[derive(Debug, Serialize, Deserialize)] +pub struct ValidationResult { + pub success: bool, + pub errors: Vec, + pub summary: String, +} + +// ===== REC FILE STRUCTURED PARSING ===== + +/// Represents a structured test with description and steps +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub struct TestStructure { + pub description: Option, + pub steps: Vec, +} + +/// Represents a single test step +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub struct TestStep { + #[serde(rename = "type")] + pub step_type: String, + pub args: Vec, + pub content: Option, + pub steps: Option>, // For block types with resolved content +} + +/// Convert a .rec file to structured JSON format +pub fn read_test_file(test_file_path: &str) -> Result { + let content = fs::read_to_string(test_file_path)?; + let test_dir = Path::new(test_file_path) + .parent() + .ok_or_else(|| anyhow::anyhow!("Cannot determine parent directory of test file"))?; + + parse_rec_content(&content, test_dir) +} + +/// Parse .rec content and convert to structured format +pub fn parse_rec_content(content: &str, base_dir: &Path) -> Result { + let lines: Vec<&str> = content.lines().collect(); + let mut steps = Vec::new(); + let mut i = 0; + + // First, extract description (everything before the first statement) + let mut description_lines = Vec::new(); + + while i < lines.len() { + let line = lines[i].trim(); + + // Check if this is a statement line + if line.starts_with("––– ") && line.ends_with(" –––") { + break; + } + + // Skip empty lines at the beginning if no content yet + if description_lines.is_empty() && line.is_empty() { + i += 1; + continue; + } + + description_lines.push(lines[i]); // Keep original line with whitespace + i += 1; + } + + // Trim trailing empty lines from description + while let Some(last) = description_lines.last() { + if last.trim().is_empty() { + description_lines.pop(); + } else { + break; + } + } + + let description = if description_lines.is_empty() { + None + } else { + Some(description_lines.join("\n")) + }; + + // Now parse the statements starting from where we left off + while i < lines.len() { + let line = lines[i].trim(); + + // Skip empty lines + if line.is_empty() { + i += 1; + continue; + } + + // Check if this is a statement line + if line.starts_with("––– ") && line.ends_with(" –––") { + let (statement, arg) = parse_statement(line)?; + let step = match statement { + Statement::Input => { + // Collect input content until next statement + let (content, next_idx) = collect_content(&lines, i + 1)?; + i = next_idx; + TestStep { + step_type: "input".to_string(), + args: vec![], + content: Some(content), + steps: None, + } + } + Statement::Output => { + // Collect output content until next statement + let (content, next_idx) = collect_content(&lines, i + 1)?; + i = next_idx; + let args = if let Some(checker) = arg { + vec![checker] + } else { + vec![] + }; + TestStep { + step_type: "output".to_string(), + args, + content: Some(content), + steps: None, + } + } + Statement::Comment => { + // Collect comment content until next statement + let (content, next_idx) = collect_content(&lines, i + 1)?; + i = next_idx; + TestStep { + step_type: "comment".to_string(), + args: vec![], + content: Some(content), + steps: None, + } + 
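+                // A hypothetical line "––– block: ../blocks/login –––" would
+                // resolve to ../blocks/login.recb relative to this file and
+                // have its steps inlined recursively.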
} + Statement::Block => { + let block_path = + arg.ok_or_else(|| anyhow::anyhow!("Block statement missing path argument"))?; + + // Resolve block file and parse recursively + let nested_steps = resolve_block(&block_path, base_dir)?; + i += 1; // Move past the block statement line + + TestStep { + step_type: "block".to_string(), + args: vec![block_path], + content: None, + steps: Some(nested_steps), + } + } + Statement::Duration => { + // Skip duration statements (they're auto-generated) + i += 1; + continue; + } + }; + steps.push(step); + } else { + // This shouldn't happen in a well-formed .rec file + return Err(anyhow::anyhow!("Unexpected line format: {}", line)); + } + } + + Ok(TestStructure { description, steps }) +} + +/// Collect content lines until the next statement or end of file +fn collect_content(lines: &[&str], start_idx: usize) -> Result<(String, usize)> { + let mut content_lines = Vec::new(); + let mut i = start_idx; + + while i < lines.len() { + let line = lines[i]; + + // Check if this is a statement line + if line.trim().starts_with("––– ") && line.trim().ends_with(" –––") { + break; + } + + content_lines.push(line); + i += 1; + } + + // Join lines and trim trailing whitespace + let content = content_lines.join("\n").trim_end().to_string(); + Ok((content, i)) +} + +/// Resolve a block reference by loading and parsing the .recb file +fn resolve_block(block_path: &str, base_dir: &Path) -> Result> { + let block_file_path = base_dir.join(format!("{}.recb", block_path)); + + if !block_file_path.exists() { + return Err(anyhow::anyhow!( + "Block file not found: {}", + block_file_path.display() + )); + } + + let block_content = fs::read_to_string(&block_file_path)?; + let block_dir = block_file_path + .parent() + .ok_or_else(|| anyhow::anyhow!("Cannot determine parent directory of block file"))?; + + let block_structure = parse_rec_content(&block_content, block_dir)?; + Ok(block_structure.steps) +} + +/// Convert structured JSON format back to .rec file content +pub fn write_test_file(test_file_path: &str, test_structure: &TestStructure) -> Result<()> { + // Validate test file path + let test_path = Path::new(test_file_path); + + // Create parent directories if they don't exist + if let Some(parent_dir) = test_path.parent() { + if !parent_dir.exists() { + fs::create_dir_all(parent_dir).map_err(|e| { + anyhow::anyhow!("Failed to create directory {}: {}", parent_dir.display(), e) + })?; + } + + // Validate that parent directory is writable + if let Err(e) = fs::metadata(parent_dir) { + return Err(anyhow::anyhow!( + "Cannot access parent directory {}: {}", + parent_dir.display(), + e + )); + } + } + + // Convert structure to REC format with error handling + let mut rec_content = convert_structure_to_rec(test_structure) + .map_err(|e| anyhow::anyhow!("Failed to convert test structure to .rec format: {}", e))?; + + // Ensure the file always ends with a newline + if !rec_content.ends_with('\n') { + rec_content.push('\n'); + } + + // Write file with proper error handling + fs::write(test_file_path, rec_content) + .map_err(|e| anyhow::anyhow!("Failed to write test file {}: {}", test_file_path, e))?; + + Ok(()) +} + +/// Replace old test structure with new test structure in existing file +pub fn replace_test_structure( + test_file_path: &str, + old_structure: &TestStructure, + new_structure: &TestStructure, +) -> Result<()> { + // Read the current test file + let current_structure = read_test_file(test_file_path)?; + + // Find the old structure in the current structure + let 
replacement_result = + find_and_replace_structure(&current_structure, old_structure, new_structure)?; + + // Write the modified structure back to the file + write_test_file(test_file_path, &replacement_result)?; + Ok(()) +} + +/// Append test structure to existing file +pub fn append_test_structure( + test_file_path: &str, + append_structure: &TestStructure, +) -> Result<usize> { + // Read the current test file + let mut current_structure = read_test_file(test_file_path)?; + + // If append_structure has a description and current doesn't, use the append description + if current_structure.description.is_none() && append_structure.description.is_some() { + current_structure.description = append_structure.description.clone(); + } + + // Count steps being added + let steps_added = append_structure.steps.len(); + + // Append the new steps + current_structure + .steps + .extend(append_structure.steps.clone()); + + // Write the modified structure back to the file + write_test_file(test_file_path, &current_structure)?; + + Ok(steps_added) +} + +/// Find and replace a test structure within another test structure +fn find_and_replace_structure( + current: &TestStructure, + old: &TestStructure, + new: &TestStructure, +) -> Result<TestStructure> { + // Simple approach: find exact sequence match in steps + let old_steps = &old.steps; + let current_steps = &current.steps; + + if old_steps.is_empty() { + return Err(anyhow::anyhow!("Old test structure cannot be empty")); + } + + // Look for the sequence of old steps in current steps + let mut found_at = None; + for i in 0..=current_steps.len().saturating_sub(old_steps.len()) { + if steps_match_sequence(&current_steps[i..i + old_steps.len()], old_steps) { + if found_at.is_some() { + return Err(anyhow::anyhow!("Ambiguous replacement: old test structure matches multiple locations in the file")); + } + found_at = Some(i); + } + } + + let start_idx = + found_at.ok_or_else(|| anyhow::anyhow!("Old test structure not found in the current file"))?; + + // Create new structure with replacement + let mut new_steps = Vec::new(); + + // Add steps before the match + new_steps.extend_from_slice(&current_steps[..start_idx]); + + // Add the new steps + new_steps.extend(new.steps.clone()); + + // Add steps after the match + new_steps.extend_from_slice(&current_steps[start_idx + old_steps.len()..]); + + // Handle description replacement logic + let final_description = if new.description.is_some() { + // If new structure has description, use it + new.description.clone() + } else { + // Otherwise keep current description + current.description.clone() + }; + + Ok(TestStructure { + description: final_description, + steps: new_steps, + }) +} + +/// Check if two step sequences match exactly +fn steps_match_sequence(seq1: &[TestStep], seq2: &[TestStep]) -> bool { + if seq1.len() != seq2.len() { + return false; + } + + for (step1, step2) in seq1.iter().zip(seq2.iter()) { + if !steps_match(step1, step2) { + return false; + } + } + + true +} + +/// Check if two test steps match exactly +fn steps_match(step1: &TestStep, step2: &TestStep) -> bool { + step1.step_type == step2.step_type + && step1.args == step2.args + && step1.content == step2.content + && match (&step1.steps, &step2.steps) { + (None, None) => true, + (Some(s1), Some(s2)) => steps_match_sequence(s1, s2), + _ => false, + } +} + +/// Convert TestStructure to .rec file format +fn convert_structure_to_rec(test_structure: &TestStructure) -> Result<String> { + let mut lines = Vec::new(); + + // Add description at the beginning if present + if let Some(description) = 
&test_structure.description { + lines.push(description.clone()); + // Add empty line after description if there are steps + if !test_structure.steps.is_empty() { + lines.push("".to_string()); + } + } + + for step in &test_structure.steps { + match step.step_type.as_str() { + "input" => { + lines.push("––– input –––".to_string()); + if let Some(content) = &step.content { + if !content.is_empty() { + lines.push(content.clone()); + } + } + } + "output" => { + if step.args.is_empty() { + lines.push("––– output –––".to_string()); + } else { + lines.push(format!("––– output: {} –––", step.args[0])); + } + if let Some(content) = &step.content { + if !content.is_empty() { + lines.push(content.clone()); + } + } + } + "comment" => { + lines.push("––– comment –––".to_string()); + if let Some(content) = &step.content { + if !content.is_empty() { + lines.push(content.clone()); + } + } + } + "block" => { + if step.args.is_empty() { + return Err(anyhow::anyhow!("Block step missing path argument")); + } + lines.push(format!("––– block: {} –––", step.args[0])); + + // Note: We don't write the nested steps to the .rec file + // The block reference will be resolved when the file is read + } + _ => { + return Err(anyhow::anyhow!("Unknown step type: {}", step.step_type)); + } + } + } + + Ok(lines.join("\n")) +} + +/// Get all available patterns from system and project .clt/patterns files +pub fn get_patterns(clt_binary_path: Option<&str>) -> Result<HashMap<String, String>> { + let mut patterns = HashMap::new(); + + // First, load system patterns from CLT binary directory + if let Some(binary_path) = clt_binary_path { + let binary_dir = Path::new(binary_path) + .parent() + .ok_or_else(|| anyhow::anyhow!("Cannot determine CLT binary directory"))?; + let system_patterns_path = binary_dir.join(".clt/patterns"); + + if system_patterns_path.exists() { + load_patterns_from_file(&system_patterns_path, &mut patterns)?; + } + } + + // Then, load project patterns from current directory (these override system patterns) + let project_patterns_path = Path::new(".clt/patterns"); + if project_patterns_path.exists() { + load_patterns_from_file(project_patterns_path, &mut patterns)?; + } + + Ok(patterns) +} + +// ===== TEST VALIDATION LOGIC ===== + +#[derive(Debug, Clone)] +struct OutputExpectation { + expected_content: String, + command: String, // The input command that should produce this output + command_index: usize, // Index of the step in the test structure (for error reporting) +} + +#[derive(Debug, Clone)] +struct ActualOutput { + actual_content: String, +} + +/// Validate a test by comparing .rec file with its .rep result file +/// Input: path to .rec file, .rep file will be found automatically +pub fn validate_test(rec_file_path: &str) -> Result<ValidationResult> { + let rec_path = Path::new(rec_file_path); + + // Find corresponding .rep file + let rep_path = rec_path.with_extension("rep"); + if !rep_path.exists() { + return Ok(ValidationResult { + success: false, + errors: vec![TestError { + command: "file_check".to_string(), + expected: "Test result file should exist".to_string(), + actual: format!("No .rep file found at: {}", rep_path.display()), + step: 0, + }], + summary: "Test result file not found".to_string(), + }); + } + + // Read both files with proper error handling + let rec_content = fs::read_to_string(rec_path) + .map_err(|e| anyhow::anyhow!("Failed to read .rec file: {}", e))?; + let rep_content = fs::read_to_string(&rep_path) + .map_err(|e| anyhow::anyhow!("Failed to read .rep file: {}", e))?; + + // Parse REC file into structured format + 
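+ // Illustrative example (file names assumed, not from the original sources): + // for a hypothetical tests/login.rec the result twin is tests/login.rep; the + // .rec side may pull in nested .recb blocks, while the .rep side is always a + // flat transcript of what the run actually printed.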
let base_dir = rec_path.parent().ok_or_else(|| { + anyhow::anyhow!("Cannot determine parent directory of .rec file: {}", rec_path.display()) + })?; + + let test_structure = match parse_rec_content(&rec_content, base_dir) { + Ok(structure) => structure, + Err(e) => { + return Ok(ValidationResult { + success: false, + errors: vec![TestError { + command: "rec_file_parsing".to_string(), + expected: "Valid .rec file format".to_string(), + actual: format!("Failed to parse .rec file: {}", e), + step: 0, + }], + summary: "Failed to parse test file".to_string(), + }); + } + }; + + // Extract all expected outputs from structured REC (handles blocks, nesting, etc.) + let expected_outputs = extract_all_outputs_from_structured(&test_structure); + + // Extract all actual outputs from flat REP file + let actual_outputs = match extract_all_outputs_from_rep(&rep_content) { + Ok(outputs) => outputs, + Err(e) => { + return Ok(ValidationResult { + success: false, + errors: vec![TestError { + command: "rep_file_parsing".to_string(), + expected: "Valid .rep file format".to_string(), + actual: format!("Failed to parse .rep file: {}", e), + step: 0, + }], + summary: "Failed to parse test result file".to_string(), + }); + } + }; + + // Find pattern file for comparison (same logic as CLT) + let pattern_file = find_pattern_file(rec_path); + + // Compare output sequences using pattern matching logic + let mut errors = Vec::new(); + match compare_output_sequences(&expected_outputs, &actual_outputs, pattern_file) { + Ok(comparison_errors) => { + errors.extend(comparison_errors); + } + Err(e) => { + errors.push(TestError { + command: "output_comparison".to_string(), + expected: "Successful output comparison".to_string(), + actual: format!("Output comparison failed: {}", e), + step: 0, + }); + } + } + + let success = errors.is_empty(); + let summary = if success { + "All outputs match expected results".to_string() + } else { + format!("{} validation error(s) found", errors.len()) + }; + + Ok(ValidationResult { + success, + errors, + summary, + }) +} + +fn find_pattern_file(rec_path: &Path) -> Option<String> { + // Look for .clt/patterns file in the same way CLT does + if let Some(parent) = rec_path.parent() { + let patterns_path = parent.join(".clt").join("patterns"); + if patterns_path.exists() { + return Some(patterns_path.to_string_lossy().to_string()); + } + } + None +} + +fn extract_all_outputs_from_structured(test_structure: &TestStructure) -> Vec<OutputExpectation> { + let mut outputs = Vec::new(); + let mut global_step_index = 0; + + extract_outputs_from_steps(&test_structure.steps, &mut outputs, &mut global_step_index); + outputs +} + +fn extract_outputs_from_steps( + steps: &[TestStep], + outputs: &mut Vec<OutputExpectation>, + global_step_index: &mut usize, +) { + let mut current_input: Option<(String, usize)> = None; + + for step in steps { + let current_step_index = *global_step_index; + *global_step_index += 1; + + match step.step_type.as_str() { + "input" => { + if let Some(content) = &step.content { + current_input = Some((content.clone(), current_step_index)); + } + } + "output" => { + if let Some(content) = &step.content { + if let Some((input_command, input_step_index)) = &current_input { + outputs.push(OutputExpectation { + expected_content: content.clone(), + command: input_command.clone(), + command_index: *input_step_index, + }); + } + } + } + "block" => { + // Process nested steps in blocks + if let Some(nested_steps) = &step.steps { + extract_outputs_from_steps(nested_steps, outputs, global_step_index); + } + } + _ => {} // Skip comments and other 
step types + } + } +} + +fn extract_all_outputs_from_rep(rep_content: &str) -> Result<Vec<ActualOutput>> { + let mut outputs = Vec::new(); + let mut current_section = None; + let mut current_content = Vec::new(); + + for line in rep_content.lines() { + // Check if this is a section marker + if line.starts_with("––– ") && line.ends_with(" –––") { + // Save previous section if it was an output + if let Some("output") = current_section { + outputs.push(ActualOutput { + actual_content: current_content.join("\n"), + }); + current_content.clear(); + } + + // Determine new section type + current_section = if line.contains("input") { + Some("input") + } else if line.contains("output") { + Some("output") + } else { + None + }; + } else if let Some(section) = current_section { + // Add content to current section + if section == "output" { + current_content.push(line); + } + } + } + + // Handle the last section if it was an output + if let Some("output") = current_section { + outputs.push(ActualOutput { + actual_content: current_content.join("\n"), + }); + } + + Ok(outputs) +} + +fn compare_output_sequences( + expected: &[OutputExpectation], + actual: &[ActualOutput], + pattern_file: Option<String>, +) -> Result<Vec<TestError>> { + let mut errors = Vec::new(); + + // Simple pattern matching logic (extracted from cmp crate to avoid circular dependency) + let patterns = if let Some(pattern_file_path) = pattern_file { + load_patterns_for_validation(&PathBuf::from(pattern_file_path)) + .unwrap_or_default() + } else { + HashMap::new() + }; + + // Compare each expected output with actual output + for (exp, act) in expected.iter().zip(actual.iter()) { + // Use simple pattern matching for comparison + if has_diff_simple(&exp.expected_content, &act.actual_content, &patterns) { + errors.push(TestError { + command: exp.command.clone(), + expected: exp.expected_content.clone(), + actual: act.actual_content.clone(), + step: exp.command_index, + }); + } + } + + // Check for count mismatch + if expected.len() != actual.len() { + errors.push(TestError { + command: "output_count_mismatch".to_string(), + expected: format!("{} outputs expected", expected.len()), + actual: format!("{} outputs found", actual.len()), + step: 0, + }); + } + + Ok(errors) +} + +// COPY the working PatternMatcher from CMP - DON'T REINVENT +#[derive(Debug)] +pub enum MatchingPart { + Static(String), + Pattern(String), +} + +pub struct PatternMatcher { + config: HashMap<String, String>, + var_regex: Regex, +} + +impl PatternMatcher { + /// Initialize with patterns HashMap (for WASM use) + pub fn from_patterns(patterns: HashMap<String, String>) -> Self { + // Convert patterns to CMP format: PATTERN_NAME REGEX -> PATTERN_NAME #!/REGEX/!# + let config: HashMap<String, String> = patterns.iter() + .map(|(name, regex)| (name.clone(), format!("#!/{}/!#", regex))) + .collect(); + + let var_regex = Regex::new(r"%\{[A-Z]{1}[A-Z_0-9]*\}").unwrap(); + Self { config, var_regex } + } + + /// COPY the working has_diff method from CMP + pub fn has_diff(&self, rec_line: String, rep_line: String) -> bool { + let rec_line = self.replace_vars_to_patterns(rec_line); + let parts = self.split_into_parts(&rec_line); + let mut last_index = 0; + + for part in parts { + match part { + MatchingPart::Static(static_part) => { + if rep_line[last_index..].starts_with(&static_part) { + last_index += static_part.len(); + } else { + return true; + } + } + MatchingPart::Pattern(pattern) => { + let pattern_regex = Regex::new(&pattern).unwrap(); + if let Some(mat) = pattern_regex.find(&rep_line[last_index..]) { + last_index += mat.end(); + } else { + return true; + } + 
} + } + } + + last_index != rep_line.len() + } + + /// COPY split_into_parts from CMP + pub fn split_into_parts(&self, rec_line: &str) -> Vec<MatchingPart> { + let mut parts = Vec::new(); + + let first_splits: Vec<&str> = rec_line.split("#!/").collect(); + for first_split in first_splits { + let second_splits: Vec<&str> = first_split.split("/!#").collect(); + if second_splits.len() == 1 { + parts.push(MatchingPart::Static(second_splits.first().unwrap().to_string())); + } else { + for (i, second_split) in second_splits.iter().enumerate() { + if i % 2 == 1 { + parts.push(MatchingPart::Static(second_split.to_string())); + } else { + parts.push(MatchingPart::Pattern(second_split.to_string())); + } + } + } + } + parts + } + + /// COPY replace_vars_to_patterns from CMP + pub fn replace_vars_to_patterns(&self, line: String) -> String { + let result = self.var_regex.replace_all(&line, |caps: &regex::Captures| { + let matched = &caps[0]; + let key = matched[2..matched.len() - 1].to_string(); + self.config.get(&key).unwrap_or(&matched.to_string()).clone() + }); + + result.into_owned() + } +} + +// Use the WORKING CMP PatternMatcher instead of broken logic +fn has_diff_simple(expected: &str, actual: &str, patterns: &HashMap<String, String>) -> bool { + let pattern_matcher = PatternMatcher::from_patterns(patterns.clone()); + pattern_matcher.has_diff(expected.to_string(), actual.to_string()) +} + +/// Load patterns from a specific file into the patterns map +fn load_patterns_for_validation(file_path: &Path) -> Result<HashMap<String, String>> { + let mut patterns = HashMap::new(); + + if !file_path.exists() { + return Ok(patterns); + } + + let content = fs::read_to_string(file_path)?; + + for line in content.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + + // Parse pattern line: PATTERN_NAME REGEX_PATTERN + let parts: Vec<&str> = line.splitn(2, ' ').collect(); + if parts.len() == 2 { + patterns.insert(parts[0].to_string(), parts[1].to_string()); + } + } + + Ok(patterns) +} + +/// Load patterns from a specific file into the patterns map +fn load_patterns_from_file(file_path: &Path, patterns: &mut HashMap<String, String>) -> Result<()> { + let content = fs::read_to_string(file_path)?; + + for line in content.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + + // Parse pattern line: PATTERN_NAME REGEX_PATTERN + let parts: Vec<&str> = line.splitn(2, ' ').collect(); + if parts.len() == 2 { + patterns.insert(parts[0].to_string(), parts[1].to_string()); + } + } + + Ok(()) +} + +// ===== WASM-COMPATIBLE FUNCTIONS (NO FILE SYSTEM OPERATIONS) ===== + +/// WASM-compatible function to read and parse test file using file content map +pub fn read_test_file_from_map( + main_file_path: &str, + file_map: &HashMap<String, String> +) -> Result<TestStructure> { + // Get the main file content from the map + let main_content = file_map.get(main_file_path) + .ok_or_else(|| anyhow::anyhow!("Main file not found in file map: {}", main_file_path))?; + + // Parse using the existing logic but with file map override + parse_rec_content_with_file_map(main_content, file_map) +} + +/// Modified version of parse_rec_content that uses file map instead of file system +fn parse_rec_content_with_file_map(content: &str, file_map: &HashMap<String, String>) -> Result<TestStructure> { + let lines: Vec<&str> = content.lines().collect(); + let mut steps = Vec::new(); + let mut i = 0; + + // First, extract description (everything before the first statement) + let mut description_lines = Vec::new(); + + while i < lines.len() { + let line = lines[i].trim(); + + // Check if 
this is a statement line + if line.starts_with("––– ") && line.ends_with(" –––") { + break; + } + + // Skip empty lines at the beginning if no content yet + if description_lines.is_empty() && line.is_empty() { + i += 1; + continue; + } + + description_lines.push(lines[i]); // Keep original line with whitespace + i += 1; + } + + // Trim trailing empty lines from description + while let Some(last) = description_lines.last() { + if last.trim().is_empty() { + description_lines.pop(); + } else { + break; + } + } + + let description = if description_lines.is_empty() { + None + } else { + Some(description_lines.join("\n")) + }; + + // Now parse the statements starting from where we left off - COPY EXACT LOGIC + while i < lines.len() { + let line = lines[i].trim(); + + // Skip empty lines + if line.is_empty() { + i += 1; + continue; + } + + // Check if this is a statement line + if line.starts_with("––– ") && line.ends_with(" –––") { + let (statement, arg) = parse_statement(line)?; + let step = match statement { + Statement::Input => { + // Collect input content until next statement + let (content, next_idx) = collect_content(&lines, i + 1)?; + i = next_idx; + TestStep { + step_type: "input".to_string(), + args: vec![], + content: Some(content), + steps: None, + } + } + Statement::Output => { + // Collect output content until next statement + let (content, next_idx) = collect_content(&lines, i + 1)?; + i = next_idx; + let args = if let Some(checker) = arg { + vec![checker] + } else { + vec![] + }; + TestStep { + step_type: "output".to_string(), + args, + content: Some(content), + steps: None, + } + } + Statement::Comment => { + // Collect comment content until next statement + let (content, next_idx) = collect_content(&lines, i + 1)?; + i = next_idx; + TestStep { + step_type: "comment".to_string(), + args: vec![], + content: Some(content), + steps: None, + } + } + Statement::Block => { + let block_path = + arg.ok_or_else(|| anyhow::anyhow!("Block statement missing path argument"))?; + + // Resolve block file using file map instead of file system + let nested_steps = resolve_block_with_file_map(&block_path, file_map)?; + i += 1; // Move past the block statement line + + TestStep { + step_type: "block".to_string(), + args: vec![block_path], + content: None, + steps: Some(nested_steps), + } + } + Statement::Duration => { + // Skip duration statements (they're auto-generated) + i += 1; + continue; + } + }; + + steps.push(step); + } else { + return Err(anyhow::anyhow!("Unexpected line: {}", line)); + } + } + + Ok(TestStructure { + description, + steps, + }) +} + +/// Resolve a block reference using file map instead of file system +fn resolve_block_with_file_map(block_path: &str, file_map: &HashMap<String, String>) -> Result<Vec<TestStep>> { + let block_file_key = format!("{}.recb", block_path); + + if let Some(block_content) = file_map.get(&block_file_key) { + let block_structure = parse_rec_content_with_file_map(block_content, file_map)?; + Ok(block_structure.steps) + } else { + Err(anyhow::anyhow!("Block file not found in file map: {}", block_file_key)) + } +} + +/// WASM-compatible function that returns file content map for writing +pub fn write_test_file_to_map( + test_file_path: &str, + test_structure: &TestStructure +) -> Result<HashMap<String, String>> { + // Use the existing convert_structure_to_rec function + let content = convert_structure_to_rec(test_structure)?; + let mut file_map = HashMap::new(); + file_map.insert(test_file_path.to_string(), content); + Ok(file_map) +} + +/// WASM-compatible function to validate a test using file content map 
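+/// (Illustrative shape of file_map, keys assumed for the example: "test/demo.rec", +/// "test/demo.rep", and "test/blocks/init.recb" each mapping to that file's full text.)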
+/// This avoids file system operations that are not supported in WASM +/// Input: rec_file_path (key in file_map), file_map containing all files (.rec, .rep, .recb, patterns) +pub fn validate_test_from_map( + rec_file_path: &str, + file_map: &HashMap<String, String> +) -> Result<ValidationResult> { + // Get REC file content from map + let rec_content = file_map.get(rec_file_path) + .ok_or_else(|| anyhow::anyhow!("REC file not found in file map: {}", rec_file_path))?; + + // Derive REP file path by replacing .rec with .rep + let rep_file_path = rec_file_path.replace(".rec", ".rep"); + + // Get REP file content from map + let rep_content = file_map.get(&rep_file_path) + .ok_or_else(|| anyhow::anyhow!("REP file not found in file map: {}", rep_file_path))?; + + // Parse REC file into structured format using file map for block resolution + let test_structure = match parse_rec_content_with_file_map(rec_content, file_map) { + Ok(structure) => structure, + Err(e) => { + return Ok(ValidationResult { + success: false, + errors: vec![TestError { + command: "rec_file_parsing".to_string(), + expected: "Valid .rec file format".to_string(), + actual: format!("Failed to parse .rec file: {}", e), + step: 0, + }], + summary: "Failed to parse test file".to_string(), + }); + } + }; + + // Extract all expected outputs from structured REC (handles blocks, nesting, etc.) + let expected_outputs = extract_all_outputs_from_structured(&test_structure); + + // Extract all actual outputs from flat REP file + let actual_outputs = match extract_all_outputs_from_rep(rep_content) { + Ok(outputs) => outputs, + Err(e) => { + return Ok(ValidationResult { + success: false, + errors: vec![TestError { + command: "rep_file_parsing".to_string(), + expected: "Valid .rep file format".to_string(), + actual: format!("Failed to parse .rep file: {}", e), + step: 0, + }], + summary: "Failed to parse test result file".to_string(), + }); + } + }; + + // For WASM compatibility, we can't use file system to find pattern files + // Instead, we'll check if a pattern file exists in the file map + let pattern_file = find_pattern_file_from_map(rec_file_path, file_map); + + // Compare output sequences using pattern matching logic + let mut errors = Vec::new(); + match compare_output_sequences(&expected_outputs, &actual_outputs, pattern_file) { + Ok(comparison_errors) => { + errors.extend(comparison_errors); + } + Err(e) => { + errors.push(TestError { + command: "output_comparison".to_string(), + expected: "Successful output comparison".to_string(), + actual: format!("Output comparison failed: {}", e), + step: 0, + }); + } + } + + let success = errors.is_empty(); + let summary = if success { + "All outputs match expected results".to_string() + } else { + format!("{} validation error(s) found", errors.len()) + }; + + Ok(ValidationResult { + success, + errors, + summary, + }) +} + +/// WASM-compatible function to validate a test using file content map with optional patterns +/// This version accepts patterns directly instead of trying to discover them from file map +pub fn validate_test_from_map_with_patterns( + rec_file_path: &str, + file_map: &HashMap<String, String>, + patterns: Option<HashMap<String, String>> +) -> Result<ValidationResult> { + // Get REC file content from map + let rec_content = file_map.get(rec_file_path) + .ok_or_else(|| anyhow::anyhow!("REC file not found in file map: {}", rec_file_path))?; + + // Derive REP file path by replacing .rec with .rep + let rep_file_path = rec_file_path.replace(".rec", ".rep"); + + // Get REP file content from map + let rep_content = file_map.get(&rep_file_path) + .ok_or_else(|| 
anyhow::anyhow!("REP file not found in file map: {}", rep_file_path))?; + + // Parse REC file into structured format using file map for block resolution + let test_structure = match parse_rec_content_with_file_map(rec_content, file_map) { + Ok(structure) => structure, + Err(e) => { + return Ok(ValidationResult { + success: false, + errors: vec![TestError { + command: "rec_file_parsing".to_string(), + expected: "Valid .rec file format".to_string(), + actual: format!("Failed to parse .rec file: {}", e), + step: 0, + }], + summary: "Failed to parse test file".to_string(), + }); + } + }; + + // Extract all expected outputs from structured REC (handles blocks, nesting, etc.) + let expected_outputs = extract_all_outputs_from_structured(&test_structure); + + // Extract all actual outputs from flat REP file + let actual_outputs = match extract_all_outputs_from_rep(rep_content) { + Ok(outputs) => outputs, + Err(e) => { + return Ok(ValidationResult { + success: false, + errors: vec![TestError { + command: "rep_file_parsing".to_string(), + expected: "Valid .rep file format".to_string(), + actual: format!("Failed to parse .rep file: {}", e), + step: 0, + }], + summary: "Failed to parse test result file".to_string(), + }); + } + }; + + // Use provided patterns or fall back to file map discovery + let pattern_file_path = if let Some(patterns_map) = patterns { + // Write to a temporary location that compare_output_sequences can read + // Actually, let's not use files - let's modify the approach + eprintln!("πŸ”₯ USING PROVIDED PATTERNS: {} patterns", patterns_map.len()); + + // Use the working comparison logic directly with our patterns + let mut errors = Vec::new(); + for (exp, act) in expected_outputs.iter().zip(actual_outputs.iter()) { + if has_diff_simple(&exp.expected_content, &act.actual_content, &patterns_map) { + errors.push(TestError { + command: exp.command.clone(), + expected: exp.expected_content.clone(), + actual: act.actual_content.clone(), + step: exp.command_index, + }); + } + } + + // Check for count mismatch + if expected_outputs.len() != actual_outputs.len() { + errors.push(TestError { + command: "output_count_mismatch".to_string(), + expected: format!("{} outputs expected", expected_outputs.len()), + actual: format!("{} outputs found", actual_outputs.len()), + step: 0, + }); + } + + let success = errors.is_empty(); + let summary = if success { + "All outputs match expected results".to_string() + } else { + format!("{} validation error(s) found", errors.len()) + }; + + return Ok(ValidationResult { + success, + errors, + summary, + }); + } else { + // Fallback: try to find patterns in file map (existing behavior) + let pattern_file = find_pattern_file_from_map(rec_file_path, file_map); + pattern_file + }; + + // Use the WORKING compare_output_sequences function + let mut errors = Vec::new(); + match compare_output_sequences(&expected_outputs, &actual_outputs, pattern_file_path) { + Ok(comparison_errors) => { + errors.extend(comparison_errors); + } + Err(e) => { + errors.push(TestError { + command: "output_comparison".to_string(), + expected: "Successful output comparison".to_string(), + actual: format!("Output comparison failed: {}", e), + step: 0, + }); + } + } + + let success = errors.is_empty(); + let summary = if success { + "All outputs match expected results".to_string() + } else { + format!("{} validation error(s) found", errors.len()) + }; + + Ok(ValidationResult { + success, + errors, + summary, + }) +} + +/// Helper function to find pattern file from file map instead of 
filesystem +fn find_pattern_file_from_map(rec_file_path: &str, file_map: &HashMap<String, String>) -> Option<String> { + // Try to find pattern file in the same directory as the rec file + let rec_path = std::path::Path::new(rec_file_path); + let dir = rec_path.parent()?.to_str()?; + + // Look for patterns file in the same directory + let patterns_path = if dir.is_empty() { + "patterns".to_string() + } else { + format!("{}/patterns", dir) + }; + + file_map.get(&patterns_path).cloned() +} diff --git a/rec/Cargo.lock b/rec/Cargo.lock index e95a691..154b059 100644 --- a/rec/Cargo.lock +++ b/rec/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "addr2line" @@ -330,6 +330,8 @@ version = "0.1.0" dependencies = [ "anyhow", "regex", + "serde", + "serde_json", ] [[package]] @@ -509,12 +511,49 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + [[package]] name = "scopeguard" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "serde" +version = "1.0.179" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a5bf42b8d227d4abf38a1ddb08602e229108a517cd4e5bb28f9c7eaafdce5c0" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.179" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "741e124f5485c7e60c03b043f79f320bff3527f4bbf12cf3831750dc46a0ec2c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name = "serde_json" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb0652c533506ad7a2e353cce269330d6afd8bdfb6d75e0ace5b35aacbd7b9e9" +dependencies = [ + "itoa", + "ryu", + "serde", +] + [[package]] name = "signal-hook-registry" version = "1.4.1" diff --git a/rec/src/main.rs b/rec/src/main.rs index 19f44f4..8e12f04 100644 --- a/rec/src/main.rs +++ b/rec/src/main.rs @@ -22,6 +22,67 @@ use tokio::time::Instant; use tokio::process::{Child, Command}; use std::process::Stdio; use std::sync::Arc; + +// Exit code constants +const EXIT_SUCCESS: i32 = 0; // Success (recording completed OR replay completed) +const EXIT_TEST_EXECUTION_FAILED: i32 = 1; // Test execution failed (replay mode crashes) +const EXIT_COMPILATION_ERROR: i32 = 2; // Input file compilation/parsing errors +const EXIT_SETUP_ERROR: i32 = 3; // Environment setup errors (shell, paths) +const EXIT_RECORDING_ERROR: i32 = 4; // Recording infrastructure errors +const EXIT_VALIDATION_ERROR: i32 = 5; // Validation errors (file not found, invalid paths) +// Signal exit codes already defined: 129, 130, 143 + +// Custom error types for better error handling and exit code management +#[derive(Debug)] +pub enum RecError { + // Test execution errors (replay mode) + TestExecutionFailed(String), + + // Compilation and parsing errors + CompilationError(String), + + // Setup and environment errors + SetupError(String), + + // Recording infrastructure errors (recording mode) + RecordingError(String), + + // Validation errors (file not found, invalid paths) + ValidationError(String), +} + +impl RecError { + pub fn exit_code(&self) -> i32 { + match self { + RecError::TestExecutionFailed(_) => 
EXIT_TEST_EXECUTION_FAILED, + RecError::CompilationError(_) => EXIT_COMPILATION_ERROR, + RecError::SetupError(_) => EXIT_SETUP_ERROR, + RecError::RecordingError(_) => EXIT_RECORDING_ERROR, + RecError::ValidationError(_) => EXIT_VALIDATION_ERROR, + } + } +} + +impl std::fmt::Display for RecError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RecError::TestExecutionFailed(msg) => write!(f, "Test execution failed: {}", msg), + RecError::CompilationError(msg) => write!(f, "Compilation error: {}", msg), + RecError::SetupError(msg) => write!(f, "Setup error: {}", msg), + RecError::RecordingError(msg) => write!(f, "Recording error: {}", msg), + RecError::ValidationError(msg) => write!(f, "Validation error: {}", msg), + } + } +} + +impl std::error::Error for RecError {} + +// Helper function for safe string conversion +fn safe_string_conversion(os_string: std::ffi::OsString, context: &str) -> Result<String, RecError> { + os_string.into_string() + .map_err(|_| RecError::SetupError(format!("Failed to convert path to string: {}", context))) +} + #[derive(Debug, structopt::StructOpt)] #[structopt( name = "rec", @@ -78,6 +139,9 @@ async fn async_main(opt: Opt) -> anyhow::Result<()> { let start_time = Instant::now(); let Opt { input_file, output_file, delay } = opt; + // Determine mode early for proper error handling + let _is_replay_mode = input_file.is_some(); + let mut binding = Command::new("bash"); let process = binding .arg("--noprofile") @@ -90,7 +154,10 @@ async fn async_main(opt: Opt) -> anyhow::Result<()> { let mut child_stdin = child.stdin.take().expect("Failed to get stdin"); let child_stdout = child.stdout.take().expect("Failed to get stdout"); - child_stdin.write_all(INIT_CMD).await.unwrap(); + child_stdin.write_all(INIT_CMD).await + .map_err(|e| RecError::SetupError( + format!("Failed to initialize shell environment: {}", e) + ))?; let child_arc = Arc::new(Mutex::new(child)); @@ -104,10 +171,24 @@ async fn async_main(opt: Opt) -> anyhow::Result<()> { // If we have input file passed, we replay, otherwise – record // Replay the input_file and save results in output_file if let Some(input_file) = input_file { - let input_file = input_file.into_string().unwrap(); + let input_file = safe_string_conversion(input_file, "input file path")?; let input_content = match parser::compile(&input_file) { Ok(content) => content, - Err(e) => panic!("Failed to compile input file: {}", e), + Err(e) => { + // Check if this is a file not found error (validation) vs parsing error (compilation) + let error_msg = e.to_string(); + if error_msg.contains("No such file or directory") || + error_msg.contains("not found") || + error_msg.contains("does not exist") { + return Err(RecError::ValidationError( + format!("The record file does not exist: {}", input_file) + ).into()); + } else { + return Err(RecError::CompilationError( + format!("Failed to compile test file '{}': {}", input_file, e) + ).into()); + } + } }; // Split compiled file into lines to process it next @@ -148,8 +229,14 @@ async fn async_main(opt: Opt) -> anyhow::Result<()> { let mut stdout_reader = BufReader::new(child_stdout); for command in commands { let command_with_marker = format!("{}\necho '{}'\n", command, END_MARKER); - child_stdin.write_all(command_with_marker.as_bytes()).await.unwrap(); - child_stdin.flush().await.unwrap(); + child_stdin.write_all(command_with_marker.as_bytes()).await + .map_err(|e| RecError::TestExecutionFailed( + format!("Failed to execute test command: {}", e) + ))?; + child_stdin.flush().await + 
.map_err(|e| RecError::TestExecutionFailed( + format!("Failed to flush command to shell: {}", e) + ))?; let input_line = parser::get_statement_line(parser::Statement::Input, None); let output_line = parser::get_statement_line(parser::Statement::Output, None); @@ -167,9 +254,10 @@ async fn async_main(opt: Opt) -> anyhow::Result<()> { // Check for end marker let read_str = String::from_utf8_lossy(read_data); if read_str.contains(END_MARKER) { - let end_pos = read_str.find(END_MARKER).unwrap(); - output.push_str(&read_str[..end_pos]); - break; + if let Some(end_pos) = read_str.find(END_MARKER) { + output.push_str(&read_str[..end_pos]); + break; + } } // Append the raw bytes to output @@ -246,8 +334,14 @@ async fn async_main(opt: Opt) -> anyhow::Result<()> { // In the stdout handler let mut stdout = tokio::io::stdout(); - stdout.write_all(SHELL_PROMPT.as_bytes()).await.unwrap(); - stdout.flush().await.unwrap(); + stdout.write_all(SHELL_PROMPT.as_bytes()).await + .map_err(|e| RecError::RecordingError( + format!("Failed to write shell prompt during recording: {}", e) + ))?; + stdout.flush().await + .map_err(|e| RecError::RecordingError( + format!("Failed to flush stdout during recording: {}", e) + ))?; stdout_handle = Some(tokio::spawn(async move { let mut reader = BufReader::new(child_stdout); @@ -299,8 +393,14 @@ async fn async_main(opt: Opt) -> anyhow::Result<()> { // Update command start time for next command command_start = Instant::now(); - stdout.write_all(SHELL_PROMPT.as_bytes()).await.unwrap(); - stdout.flush().await.unwrap(); + if let Err(e) = stdout.write_all(SHELL_PROMPT.as_bytes()).await { + eprintln!("Failed to write shell prompt: {}", e); + break; + } + if let Err(e) = stdout.flush().await { + eprintln!("Failed to flush stdout: {}", e); + break; + } } else { // Write to stdout and store in buffer @@ -330,14 +430,14 @@ async fn async_main(opt: Opt) -> anyhow::Result<()> { child_guard.wait().await?; // Cancel the I/O handlers - if stdin_handle.is_some() { - stdin_handle.unwrap().abort(); + if let Some(handle) = stdin_handle { + handle.abort(); } - if stdout_handle.is_some() { - stdout_handle.unwrap().abort(); + if let Some(handle) = stdout_handle { + handle.abort(); } - if signal_handle.is_some() { - signal_handle.unwrap().abort(); + if let Some(handle) = signal_handle { + handle.abort(); } flush_output_file(output_file, start_time).await; @@ -348,13 +448,22 @@ async fn async_main(opt: Opt) -> anyhow::Result<()> { #[paw::main] fn main(opt: Opt) { - match async_main(opt) { - Ok(_) => (), - Err(e) => { - eprintln!("rec: {}", e); - std::process::exit(1); - } - }; + let exit_code = match async_main(opt) { + Ok(_) => EXIT_SUCCESS, + Err(e) => { + eprintln!("rec: {}", e); + + // Try to downcast to RecError to get specific exit code + if let Some(rec_error) = e.downcast_ref::<RecError>() { + rec_error.exit_code() + } else { + // For any other anyhow::Error, use general error code + EXIT_TEST_EXECUTION_FAILED + } + } + }; + + std::process::exit(exit_code); } /// This function cleans up all empty lines and removes the last line containing "exit" to make the consistent output @@ -419,9 +528,31 @@ fn get_duration_line(duration: parser::Duration) -> String { /// Handle signals async fn handle_signals(child: &Arc<Mutex<Child>>) { - let mut sigterm = signal(SignalKind::terminate()).unwrap(); - let mut sigint = signal(SignalKind::interrupt()).unwrap(); - let mut sighup = signal(SignalKind::hangup()).unwrap(); + let sigterm = match signal(SignalKind::terminate()) { + Ok(sig) => sig, + Err(e) => { + eprintln!("Warning: 
Failed to setup SIGTERM handler: {}", e); + return; + } + }; + let sigint = match signal(SignalKind::interrupt()) { + Ok(sig) => sig, + Err(e) => { + eprintln!("Warning: Failed to setup SIGINT handler: {}", e); + return; + } + }; + let sighup = match signal(SignalKind::hangup()) { + Ok(sig) => sig, + Err(e) => { + eprintln!("Warning: Failed to setup SIGHUP handler: {}", e); + return; + } + }; + + let mut sigterm = sigterm; + let mut sigint = sigint; + let mut sighup = sighup; tokio::select! { _ = sigterm.recv() => { @@ -446,7 +577,15 @@ async fn handle_signals(child: &Arc<Mutex<Child>>) { } async fn flush_output_file(output_file: std::ffi::OsString, start_time: Instant) { - let file_path = output_file.into_string().unwrap(); + let file_path = match output_file.into_string() { + Ok(path) => path, + Err(_) => { + eprintln!("Warning: Failed to convert output file path to string"); + return; + } + }; let total_duration = Instant::now() - start_time; - cleanup_file(file_path, total_duration.as_millis()).await.unwrap(); + if let Err(e) = cleanup_file(file_path, total_duration.as_millis()).await { + eprintln!("Warning: Failed to cleanup output file: {}", e); + } } diff --git a/src/record.sh b/src/record.sh index 694c6c4..4e2a72c 100644 --- a/src/record.sh +++ b/src/record.sh @@ -15,8 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -e - +# ! We are handling exit codes so we cannot use set -e here source "$PROJECT_DIR/lib/rec.sh" source "$PROJECT_DIR/lib/argument.sh" @@ -57,8 +56,13 @@ while [[ $# -gt 0 ]]; do done record "$docker_image" "$record_file" +record_exit_code=$? -# Check if we have refine -if [ $refine -eq 1 ]; then +# Only proceed to refine if record was successful +if [ $record_exit_code -eq 0 ] && [ $refine -eq 1 ]; then refine "$docker_image" "$record_file" + refine_exit_code=$? + exit $refine_exit_code +else + exit $record_exit_code fi diff --git a/src/refine.sh b/src/refine.sh index 31386a9..a1a2415 100644 --- a/src/refine.sh +++ b/src/refine.sh @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -e +# ! We are handling exit codes so we cannot use set -e here source "$PROJECT_DIR/lib/rec.sh" source "$PROJECT_DIR/lib/argument.sh" @@ -42,4 +42,5 @@ while [[ $# -gt 0 ]]; do esac done -refine "$docker_image" "$record_file" \ No newline at end of file +refine "$docker_image" "$record_file" +exit $? diff --git a/src/test.sh b/src/test.sh index 2ccd8de..ec732c5 100644 --- a/src/test.sh +++ b/src/test.sh @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -e +# ! We are handling exit codes so we cannot use set -e here source "$PROJECT_DIR/lib/rec.sh" source "$PROJECT_DIR/lib/argument.sh" @@ -56,4 +56,5 @@ while [[ $# -gt 0 ]]; do done test "$docker_image" "$record_file" "$show_diff" "$delay" +exit $? diff --git a/tests/.gitkeep b/tests/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/ui/.clt/patterns b/ui/.clt/patterns new file mode 100644 index 0000000..ca6665b --- /dev/null +++ b/ui/.clt/patterns @@ -0,0 +1,10 @@ +COMMITDATE [a-z0-9]{7}@[0-9]{6} +DATE [0-9]{4}\-[0-9]{2}\-[0-9]{2} +DATETIME [0-9]{4}\-[0-9]{2}\-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2} +IPADDR [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ +NUMBER [0-9]+ +PATH [A-Za-z0-9\/\.\-\_]+ +SEMVER [0-9]+\.[0-9]+\.[0-9]+ +TIME [0-9]{2}:[0-9]{2}:[0-9]{2} +YEAR [0-9]{4} +FILEPERMS ([d|-])([r|-][w|-][x|-])([r|-][w|-][x|-])([r|-][w|-][x|-])\. 
\ No newline at end of file diff --git a/ui/.env.example b/ui/.env.example new file mode 100644 index 0000000..13d919d --- /dev/null +++ b/ui/.env.example @@ -0,0 +1,36 @@ +# GitHub OAuth Configuration +GITHUB_CLIENT_ID=your_github_client_id +GITHUB_CLIENT_SECRET=your_github_client_secret +GITHUB_CALLBACK_URL=http://localhost:3000/auth/github/callback + +# Authentication Options +# Comma-separated list of GitHub usernames that are allowed to access the UI +ALLOWED_GITHUB_USERS=username1,username2,username3 + +# Set to 'true' to skip authentication (for development only) +# SKIP_AUTH=true + +# Session configuration +SESSION_SECRET=change_this_to_a_random_string + +VITE_API_URL=http://localhost:9150 +FRONTEND_PORT=9151 +BACKEND_PORT=9150 + +# Repository Configuration +REPO_URL=https://github.com/your-org/your-repo.git + +# Interactive Command Configuration +# Command for Ask AI feature - configured via environment only +# This command will receive user input via stdin +# You can proxy some CLI tool here that can run +# an interactive session in the background and report to stdout +ASK_AI_COMMAND=docker run --rm -i ubuntu:latest bash -c "echo \"Input received:\"; cat; echo \"\nSleeping for 2 seconds...\"; sleep 2; echo \"Done!\"" + +# Timeout for Ask AI commands in milliseconds (default: 30 seconds) +ASK_AI_TIMEOUT=30000 + +# Directory for persistent session logs (optional) +# If not set, sessions will only be stored in memory and localStorage +# When set, all session logs will be saved to individual files +# ASK_AI_LOG=/path/to/logs/directory diff --git a/ui/.gitignore b/ui/.gitignore new file mode 100644 index 0000000..bf66950 --- /dev/null +++ b/ui/.gitignore @@ -0,0 +1,27 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? +tests/** +.env +workdir/** diff --git a/ui/.vscode/extensions.json b/ui/.vscode/extensions.json new file mode 100644 index 0000000..bdef820 --- /dev/null +++ b/ui/.vscode/extensions.json @@ -0,0 +1,3 @@ +{ + "recommendations": ["svelte.svelte-vscode"] +} diff --git a/ui/README.md b/ui/README.md new file mode 100644 index 0000000..c3a1081 --- /dev/null +++ b/ui/README.md @@ -0,0 +1,287 @@ +# CLT UI - Command Line Tool Test Interface + +A comprehensive Svelte-based web application for managing and testing Command Line Tool (CLT) test files with advanced Git integration and Docker support. 
+ +## πŸ—οΈ Architecture Overview + +### Technology Stack +- **Frontend**: Svelte 5.20.2 with TypeScript 5.7.2 +- **Build Tool**: Vite 6.2.0 with hot-reload development +- **Styling**: TailwindCSS 4.1.3 with PostCSS processing +- **Backend**: Express.js 4.18.3 with Node.js ES modules +- **Authentication**: Passport.js with GitHub OAuth2 strategy +- **Git Operations**: simple-git 3.27.0 for repository management +- **File Management**: Native Node.js fs/promises with security validation +- **WASM Module**: Custom Rust-based pattern matching engine (wasm_diff) +- **Session Management**: express-session 1.18.1 with secure cookies + +## πŸ“ Project Structure + +``` +ui/ +β”œβ”€β”€ src/ # Frontend source code +β”‚ β”œβ”€β”€ components/ # Svelte components +β”‚ β”‚ β”œβ”€β”€ Header.svelte # Navigation, Docker settings, git status +β”‚ β”‚ β”œβ”€β”€ FileExplorer.svelte # File tree with drag-and-drop +β”‚ β”‚ β”œβ”€β”€ Editor.svelte # Main .rec file editor with WASM +β”‚ β”‚ └── PullRequestModal.svelte # GitHub PR creation +β”‚ β”œβ”€β”€ stores/ # Svelte state management +β”‚ β”‚ β”œβ”€β”€ filesStore.ts # File operations & test execution +β”‚ β”‚ β”œβ”€β”€ authStore.ts # GitHub authentication state +β”‚ β”‚ β”œβ”€β”€ branchStore.ts # Git branch operations +β”‚ β”‚ └── githubStore.ts # Pull request management +β”‚ β”œβ”€β”€ App.svelte # Root component with auth flow +β”‚ β”œβ”€β”€ main.ts # Application entry point +β”‚ └── config.js # API configuration +β”œβ”€β”€ pkg/ # WASM module (Rust-compiled) +β”‚ β”œβ”€β”€ wasm_diff.js # WASM JavaScript bindings +β”‚ β”œβ”€β”€ wasm_diff_bg.wasm # Compiled WASM binary +β”‚ └── *.d.ts # TypeScript definitions +β”œβ”€β”€ public/ # Static assets +β”‚ └── auth/login.html # Login page +β”œβ”€β”€ config/ # Server configuration +β”‚ └── auth.js # Authentication settings +β”œβ”€β”€ server.js # Express backend server +β”œβ”€β”€ auth.js # Passport.js authentication +β”œβ”€β”€ dev.js # Development server runner +└── package.json # Dependencies and scripts +``` + +## πŸš€ Quick Start + +### Prerequisites + +- Node.js (v18 or higher recommended) +- npm or yarn +- GitHub OAuth application (for authentication) +- Docker (for test execution) +- Git CLI tools + +### Installation + +```bash +# Navigate to the ui directory +cd ui + +# Install dependencies +npm install + +# Create .env file from the example +cp .env.example .env + +# Edit the .env file with your GitHub OAuth credentials and allowed usernames + +# Start development servers (frontend + backend) +npm run dev +``` + +The application will be available at http://localhost:5173/ (or another port if 5173 is in use). + +## WebAssembly Pattern Matching + +The UI uses a WebAssembly module (`wasm-diff`) that provides real-time comparison between expected and actual output with support for pattern matching, mirroring the functionality of the CLI version. + +### How Pattern Matching Works + +1. The UI reads patterns from `.clt/patterns` file in the UI directory or falls back to the project's `.clt/patterns` file +2. Patterns are loaded into the wasm-diff module which uses them for regex-based variable matching +3. When you type in the expected output, the UI automatically compares it with the actual output in real-time +4. Differences are highlighted with intelligent pattern recognition for variables like timestamps, IP addresses, etc. 
+ +### Pattern File Format + +The pattern file follows the same format as the CLI version: + +``` +PATTERN_NAME REGEX_PATTERN +``` + +For example: +``` +SEMVER [0-9]+\.[0-9]+\.[0-9]+ +YEAR [0-9]{4} +IPADDR [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ +``` + +In your test files, you can use these patterns with the syntax `%{PATTERN_NAME}` which will be replaced with the corresponding regex pattern during comparison. + +## Authentication Configuration + +The application uses GitHub OAuth for authentication. You need to configure the following: + +1. Create a GitHub OAuth application at https://github.com/settings/developers +2. Set the callback URL to `http://localhost:3000/auth/github/callback` (or your custom domain) +3. Configure the `.env` file with your OAuth credentials: + +``` +GITHUB_CLIENT_ID=your_client_id +GITHUB_CLIENT_SECRET=your_client_secret +ALLOWED_GITHUB_USERS=username1,username2,username3 +``` + +### Server Configuration + +You can configure the server to listen on different ports and hosts: + +``` +HOST=localhost # Set to '0.0.0.0' to listen on all interfaces +FRONTEND_PORT=5173 # Default frontend port (Vite) +BACKEND_PORT=3000 # Default backend port (Express) +``` + +### Development Mode + +For development without authentication, you can set: + +``` +SKIP_AUTH=true +``` + +This will bypass the authentication check and allow you to access the application without logging in. + +## Usage + +1. Log in with your GitHub account (if authentication is enabled) +2. The file explorer on the left shows available .rec files +3. Click on a file to open it in the editor +4. Add commands and expected outputs +5. Configure the Docker image at the top for validation +6. Save files as needed +7. Run tests to see real-time diff comparison with pattern recognition + +## Project Structure + +- `src/components/` - Svelte components +- `src/stores/` - Svelte stores for state management +- `src/lib/` - Utility functions +- `config/` - Application configuration +- `pkg/` - WebAssembly module for diff comparison (compiled from wasm-diff) +- `.clt/patterns` - Pattern definitions for variable matching in tests + +## Development + +### Building the wasm-diff Module + +The wasm-diff module is a WebAssembly component written in Rust that handles pattern-based diff comparison. To update it: + +```bash +cd wasm-diff +wasm-pack build --target web +cp -r pkg/* ../ui/pkg/ +``` + +### Building for Production + +```bash +# Build the UI +npm run build + +# Start the production server +node server.js +``` + +This will create a production-ready build in the `dist` directory and start the server. + +### Running Tests + +```bash +npm run test +``` + +## πŸ”§ Backend Architecture (server.js) + +### Core Features + +1. **User Repository Management** + - Per-user Git repository cloning + - Secure directory isolation + - Token-based authentication for Git operations + +2. **File Operations API** + ```javascript + GET /api/get-file-tree # Hierarchical file listing + GET /api/get-file # File content retrieval + POST /api/save-file # File content saving + POST /api/move-file # File/directory movement + DELETE /api/delete-file # File/directory deletion + ``` + +3. **Test Execution Engine** + ```javascript + POST /api/run-test # Execute CLT tests with Docker + ``` + - Docker container orchestration + - .rec/.recb file processing + - Output comparison and status reporting + - Duration tracking and performance metrics + +4. 
**Git Integration** + ```javascript + GET /api/git-status # Repository status + GET /api/current-branch # Branch information + POST /api/reset-to-branch # Branch reset operations + POST /api/create-pr # Pull request creation + ``` + +5. **Authentication System** + ```javascript + GET /auth/github # GitHub OAuth initiation + GET /auth/github/callback # OAuth callback handler + GET /api/current-user # User session validation + GET /logout # Session termination + ``` + +### Security Model + +1. **Path Validation**: All file operations validate paths within user directories +2. **Authentication**: GitHub OAuth with configurable user allowlist +3. **Session Security**: Secure cookie configuration with SameSite protection +4. **CORS Configuration**: Development-friendly CORS with production security + +## πŸ“Š Performance Optimizations + +### Frontend Optimizations +- **Debounced Auto-save**: Configurable delay to prevent excessive API calls +- **Optimistic Updates**: Immediate UI feedback for file operations +- **Batch Operations**: Efficient multi-file operations +- **WASM Acceleration**: High-performance pattern matching + +### Backend Optimizations +- **Streaming File Operations**: Efficient handling of large files +- **Git Operation Caching**: Reduced redundant Git operations +- **Process Isolation**: Secure and efficient Docker container management + +## πŸ” Technical Details + +### Real-time Comparison + +The UI performs real-time comparison between expected and actual output as you type. This works by: + +1. Loading patterns from the patterns file into the WebAssembly module +2. Converting variable patterns like `%{IPADDR}` to their regex equivalents +3. Applying regex-based pattern matching to normalize variables +4. Generating a diff that highlights only meaningful differences + +The comparison is intelligent enough to ignore differences in variable values that match defined patterns, making test development much faster and less error-prone. + +## 🚦 Deployment Considerations + +### Production Build +- **Asset Optimization**: Minified and compressed static assets +- **Code Splitting**: Optimized bundle loading +- **Security Headers**: Production-ready security configuration + +### Environment Requirements +- **Node.js**: ES modules support required +- **Docker**: Container runtime for test execution +- **Git**: Repository operations and CLI tools +- **GitHub CLI**: Required for pull request creation + +## ⚠️ Limitations + +- Performance may be affected when dealing with very large output files +- Authentication is currently limited to GitHub OAuth +- Wasm-diff module must be compiled and placed in the pkg directory +- GitHub CLI must be installed for pull request creation functionality + +This architecture provides a robust, scalable foundation for CLT test management with modern web technologies and comprehensive Git integration. \ No newline at end of file diff --git a/ui/auth.js b/ui/auth.js new file mode 100644 index 0000000..a9ea127 --- /dev/null +++ b/ui/auth.js @@ -0,0 +1,218 @@ +import passport from 'passport'; +import { Strategy as GitHubStrategy } from 'passport-github2'; +import { getAuthConfig } from './config/auth.js'; + +// Configure Passport with GitHub strategy +export function setupPassport() { + // Get fresh auth config after environment variables are loaded + const authConfig = getAuthConfig(); + + console.log('Setting up Passport with GitHub strategy'); + console.log('Env config', process.env); + console.log('Auth config:', { + clientID: authConfig.github.clientID ? 
'Configured' : 'Not configured', + callbackURL: authConfig.github.callbackURL, + skipAuth: authConfig.skipAuth + }); + + // Serialize user to the session + passport.serializeUser((user, done) => { + console.log('Serializing user:', user.username); + done(null, user); + }); + + // Deserialize user from the session + passport.deserializeUser((user, done) => { + console.log('Deserializing user:', user?.username || 'unknown'); + done(null, user); + }); + + // Create a custom GitHub strategy that doesn't require email scope + const githubStrategy = new GitHubStrategy( + authConfig.github, + (accessToken, refreshToken, profile, done) => { + // Debug logging + console.log('GitHub OAuth callback executed'); + console.log('Profile:', profile.username); + console.log('Allowed users:', authConfig.allowedUsers); + + // Check if the user is in the allowed list + const username = profile.username; + if ( + authConfig.allowedUsers.length === 0 || + authConfig.allowedUsers.includes(username) + ) { + // Store just the necessary user info + const user = { + id: profile.id, + username: profile.username, + displayName: profile.displayName || profile.username, + avatarUrl: profile.photos?.[0]?.value || '', + token: accessToken, + }; + console.log('User authenticated successfully:', username); + return done(null, user); + } else { + // User not in the allowed list + console.log('User not in allowed list:', username); + return done(null, false, { + message: 'You are not authorized to access this application.', + }); + } + } + ); + + // Override the strategy's userProfile method to skip the email fetch if scope doesn't include it + // This patching prevents the 'Failed to fetch user emails' error + const originalUserProfile = githubStrategy._userProfile; + githubStrategy._userProfile = function(accessToken, done) { + originalUserProfile.call(this, accessToken, (err, profile) => { + if (err) { return done(err); } + // Skip the email fetch by providing a complete profile + return done(null, profile); + }); + }; + + passport.use(githubStrategy); + + return passport; +} + +// Middleware to check if the user is authenticated +export function isAuthenticated(req, res, next) { + // Get fresh auth config + const authConfig = getAuthConfig(); + + // Debug logging + console.log(`[Auth Check] Path: ${req.path}`); + console.log(`[Auth Check] Session ID: ${req.sessionID}`); + console.log(`[Auth Check] Authenticated: ${req.isAuthenticated()}`); + console.log(`[Auth Check] Skip Auth: ${authConfig.skipAuth}`); + + // Skip authentication if SKIP_AUTH is true + if (authConfig.skipAuth) { + console.log('[Auth Check] Skipping auth check - SKIP_AUTH enabled'); + return next(); + } + + // Check if the user is authenticated + if (req.isAuthenticated()) { + console.log('[Auth Check] User is authenticated, proceeding'); + return next(); + } + + // Handle API requests differently from page requests + if (req.path.startsWith('/api/')) { + console.log('[Auth Check] API request but not authenticated, returning 401'); + return res.status(401).json({ error: 'Unauthorized', message: 'You must be logged in to access this resource' }); + } + + // For server-rendered pages (not SPA routes handled by client) + // we'll redirect to login + if (req.path === '/login' || req.path.startsWith('/auth/') || req.path.startsWith('/public/')) { + console.log('[Auth Check] Public path, allowing access'); + return next(); + } + + // For SPA routes, we'll just serve the index.html and let the client handle auth + // The client-side code will show the login 
button when not authenticated + console.log('[Auth Check] Non-API request, serving index.html and letting client handle auth'); + next(); +} + +// Function to add auth routes to express app +export function addAuthRoutes(app) { + // Get fresh auth config + const authConfig = getAuthConfig(); + + // GitHub authentication routes + app.get('/auth/github', (req, res, next) => { + console.log('GitHub auth route accessed'); + passport.authenticate('github', { scope: authConfig.github.scope })(req, res, next); + }); + + app.get( + '/auth/github/callback', + (req, res, next) => { + console.log('GitHub callback received', req.query); + passport.authenticate('github', { + // Redirect to the frontend URL after successful login + successRedirect: process.env.GITHUB_SUCCESS_URL || `http://${process.env.HOST || 'localhost'}:${process.env.FRONTEND_PORT || 5173}`, + // Redirect to the frontend login page on failure + failureRedirect: (process.env.GITHUB_FAILURE_URL || `http://${process.env.HOST || 'localhost'}:${process.env.FRONTEND_PORT || 5173}`) + + '?error=Authentication%20failed.%20You%20might%20not%20be%20authorized%20to%20access%20this%20application.', + })(req, res, next); + } + ); + + // Login page + app.get('/login', (req, res) => { + // Log to debug + console.log("Serving login page"); + res.sendFile('auth/login.html', { root: './public' }); + }); + + // Logout route + app.get('/logout', (req, res, next) => { + // Get the frontend URL for redirect + const frontendUrl = process.env.FRONTEND_URL || `http://${process.env.HOST || 'localhost'}:${process.env.FRONTEND_PORT || 5173}`; + + // Destroy the session completely + req.session.destroy((err) => { + if (err) { + console.error('Session destroy error:', err); + return next(err); + } + + // Clear the authentication cookies + res.clearCookie('connect.sid'); + + // Respond with a success status for AJAX calls + res.status(200).json({ success: true, message: 'Logged out successfully' }); + }); + }); + + // Route to get current user info (for client-side auth state) + app.get('/api/current-user', (req, res) => { + if (authConfig.skipAuth) { + console.log('Auth skipped, returning dev-mode user'); + return res.json({ + isAuthenticated: true, + skipAuth: true, + user: { username: 'dev-mode' } + }); + } + + // For debugging + console.log('Session ID:', req.sessionID); + console.log('Session:', req.session); + console.log('Session Cookie:', req.headers.cookie); + console.log('Authenticated:', req.isAuthenticated()); + console.log('User:', req.user); + + if (req.isAuthenticated() && req.user) { + console.log('User is authenticated, returning user info'); + return res.json({ + isAuthenticated: true, + user: req.user + }); + } + + console.log('User not authenticated'); + return res.status(401).json({ + isAuthenticated: false, + message: 'Authentication required. Please log in again.' 
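+    // Clients treat this 401 as "show the login state" rather than a hard redirect,
+    // matching how isAuthenticated() above lets the SPA handle auth for page routes.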
+ }); + }); + + // Debug route + app.get('/api/auth-debug', (req, res) => { + return res.json({ + session: req.session, + isAuthenticated: req.isAuthenticated(), + user: req.user, + skipAuth: authConfig.skipAuth, + allowedUsers: authConfig.allowedUsers + }); + }); +} diff --git a/ui/config/auth.js b/ui/config/auth.js new file mode 100644 index 0000000..223b7ad --- /dev/null +++ b/ui/config/auth.js @@ -0,0 +1,19 @@ +// Authentication configuration +export function getAuthConfig() { + return { + // GitHub OAuth configuration + github: { + clientID: process.env.GITHUB_CLIENT_ID || '', + clientSecret: process.env.GITHUB_CLIENT_SECRET || '', + callbackURL: process.env.GITHUB_CALLBACK_URL || `http://${process.env.HOST || 'localhost'}:${process.env.BACKEND_PORT || 3000}/auth/github/callback`, + scope: ['repo'], // Remove email scope, we'll just use the basic profile info + }, + // List of allowed GitHub usernames + allowedUsers: (process.env.ALLOWED_GITHUB_USERS || '').split(',').filter(Boolean), + // Skip authentication if this is set to 'true' + skipAuth: process.env.SKIP_AUTH === 'true', + }; +} + +// For backward compatibility, also export default +export default getAuthConfig(); diff --git a/ui/dev.js b/ui/dev.js new file mode 100644 index 0000000..e19569b --- /dev/null +++ b/ui/dev.js @@ -0,0 +1,108 @@ +// dev.js - Development server runner +import { spawn } from 'child_process'; +import * as path from 'path'; +import { fileURLToPath } from 'url'; +import { config } from 'dotenv'; + +// Load environment variables from .env file +config(); + +// Define port and host constants +const FRONTEND_PORT = process.env.FRONTEND_PORT || 5173; +const BACKEND_PORT = process.env.BACKEND_PORT || 3000; +const HOST = process.env.HOST || 'localhost'; + +// Important: Set process.env.BACKEND_PORT and FRONTEND_PORT for child processes +process.env.BACKEND_PORT = BACKEND_PORT; +process.env.FRONTEND_PORT = FRONTEND_PORT; +process.env.HOST = HOST; + +// Log important environment variables for debugging (without secrets) +console.log('Environment Configuration:'); +console.log('- HOST:', HOST); +console.log('- FRONTEND_PORT:', FRONTEND_PORT); +console.log('- BACKEND_PORT:', BACKEND_PORT); +console.log('- GITHUB_CALLBACK_URL:', process.env.GITHUB_CALLBACK_URL); +console.log('- SKIP_AUTH:', process.env.SKIP_AUTH); +console.log('- ALLOWED_GITHUB_USERS:', process.env.ALLOWED_GITHUB_USERS ? 
'Configured' : 'Not configured');
+
+// Warn early if the GitHub OAuth callback URL has not been configured
+if (!process.env.GITHUB_CALLBACK_URL) {
+  console.error(`⚠️ Warning: GITHUB_CALLBACK_URL not set.`);
+}
+
+// Set frontend URL if not already set
+if (!process.env.FRONTEND_URL) {
+  console.log(`⚠️ Setting frontend URL to ${HOST}:${FRONTEND_PORT}`);
+  process.env.FRONTEND_URL = `http://${HOST}:${FRONTEND_PORT}`;
+}
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+// Colors for console output
+const colors = {
+  reset: '\x1b[0m',
+  bright: '\x1b[1m',
+  red: '\x1b[31m',
+  green: '\x1b[32m',
+  yellow: '\x1b[33m',
+  blue: '\x1b[34m',
+  magenta: '\x1b[35m',
+  cyan: '\x1b[36m'
+};
+
+// Run a command and prefix its output
+function runCommand(command, args, name, color) {
+  const prefix = `${color}[${name}]${colors.reset} `;
+
+  console.log(`${colors.bright}${color}Starting ${name}...${colors.reset}`);
+
+  const proc = spawn(command, args, {
+    stdio: ['inherit', 'pipe', 'pipe'],
+    env: process.env // Pass environment variables to the child process
+  });
+
+  proc.stdout.on('data', (data) => {
+    const lines = data.toString().trim().split('\n');
+    lines.forEach(line => {
+      if (line.trim()) console.log(prefix + line);
+    });
+  });
+
+  proc.stderr.on('data', (data) => {
+    const lines = data.toString().trim().split('\n');
+    lines.forEach(line => {
+      if (line.trim()) console.error(`${prefix}${colors.red}${line}${colors.reset}`);
+    });
+  });
+
+  proc.on('close', (code) => {
+    if (code !== 0) {
+      console.error(`${prefix}${colors.red}Process exited with code ${code}${colors.reset}`);
+    } else {
+      console.log(`${prefix}${colors.green}Process completed successfully${colors.reset}`);
+    }
+  });
+
+  return proc;
+}
+
+// Run Vite development server with the specified port and host
+const viteServer = runCommand('npx', ['vite', '--port', FRONTEND_PORT, '--host', HOST], 'Frontend', colors.cyan);
+
+// Run Express API server
+const apiServer = runCommand('node', ['--experimental-modules', 'server.js'], 'Backend', colors.magenta);
+
+// Handle process termination
+process.on('SIGINT', () => {
+  console.log(`\n${colors.yellow}Shutting down servers...${colors.reset}`);
+  viteServer.kill();
+  apiServer.kill();
+  process.exit(0);
+});
+
+console.log(`\n${colors.bright}${colors.green}Development servers started:${colors.reset}`);
+console.log(`${colors.cyan}Frontend: ${colors.reset}http://${HOST}:${FRONTEND_PORT}`);
+console.log(`${colors.magenta}Backend API: ${colors.reset}http://${HOST}:${BACKEND_PORT}/api`);
+console.log(`\n${colors.bright}${colors.yellow}Press Ctrl+C to stop${colors.reset}\n`);
diff --git a/ui/doc/API.md b/ui/doc/API.md
new file mode 100644
index 0000000..8475fa4
--- /dev/null
+++ b/ui/doc/API.md
@@ -0,0 +1,235 @@
+# CLT UI Backend API Reference
+
+## Core Endpoints
+
+### File Management
+
+#### GET `/api/get-file-tree`
+**Purpose**: Get file tree for file explorer
+**Auth**: Required
+**Response**: Recursive file tree with .rec/.recb files only
+```json
+{
+  "fileTree": [
+    {
+      "name": "test.rec",
+      "path": "test.rec",
+      "isDirectory": false,
+      "isSymlink": false
+    }
+  ]
+}
+```
+
+#### GET `/api/get-file?path=`
+**Purpose**: Get file content for editing
+**Auth**: Required
+**Response**: JSON object wrapping the file content
+```json
+{
+  "content": "––– input –––\necho hello\n––– output –––\nhello"
+}
+```
+
+#### POST `/api/save-file`
+**Purpose**: Save file changes
+**Auth**: Required
+**Body**: `{ "path": "test.rec", "content": "file content" }`
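+
+Taken together with `/api/run-test` (documented next), a typical save-then-execute round trip might look like the following minimal sketch. It assumes the cookie-based session auth described below and an `API_URL` base such as the one exported from `config.js`:
+
+```javascript
+// Save the current buffer, then execute the test with a chosen Docker image.
+async function saveAndRun(path, content, dockerImage) {
+  await fetch(`${API_URL}/api/save-file`, {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    credentials: 'include', // the session cookie carries authentication
+    body: JSON.stringify({ path, content })
+  });
+
+  const res = await fetch(`${API_URL}/api/run-test`, {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    credentials: 'include',
+    body: JSON.stringify({ filePath: path, dockerImage })
+  });
+  return res.json(); // test results with command statuses and outputs
+}
+```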
"content": "file content" }` + +### Test Execution + +#### POST `/api/run-test` +**Purpose**: Execute CLT test with optional Docker image +**Auth**: Required +**Body**: `{ "filePath": "test.rec", "dockerImage": "ubuntu:latest" }` +**Response**: Test results with command statuses and outputs + +### Ask AI Interactive Sessions + +#### POST `/api/interactive/start` +**Purpose**: Start new interactive command session +**Auth**: Required +**Body**: `{ "input": "user command" }` +**Limitation**: One session per user +**Response**: `{ "sessionId": "unique-id", "status": "started" }` + +#### GET `/api/interactive/status/:sessionId` +**Purpose**: Poll session status and get live logs +**Auth**: Required +**Response**: +```json +{ + "sessionId": "unique-id", + "running": true, + "completed": false, + "logs": ["output line 1", "output line 2"], + "output": "final output when completed", + "exitCode": 0 +} +``` + +#### POST `/api/interactive/cancel/:sessionId` +**Purpose**: Cancel running session +**Auth**: Required +**Response**: `{ "status": "cancelled" }` + +### Git Operations + +#### GET `/api/git-status` +**Purpose**: Get git status for PR button state +**Auth**: Required +**Response**: Modified files and change detection + +#### POST `/api/commit-changes` +**Purpose**: Commit changes and optionally create PR +**Auth**: Required +**Body**: `{ "title": "commit message", "description": "PR body", "createPr": true }` + +#### GET `/api/current-branch` +**Purpose**: Get current git branch info +**Auth**: Required + +#### POST `/api/reset-to-branch` +**Purpose**: Reset current branch to specified branch (hard reset) +**Auth**: Required +**Body**: `{ "branch": "main" }` +**Response**: Success status and branch information +**Safety**: Checks for unstaged changes, stashes if needed + +#### POST `/api/checkout-and-pull` +**Purpose**: Safely checkout and pull specified branch +**Auth**: Required +**Body**: `{ "branch": "feature-branch" }` +**Response**: Success status and current branch +**Features**: +- Creates local tracking branch if doesn't exist +- Pulls latest changes from remote +- Handles both existing and new branches + +#### GET `/api/git-status` +**Purpose**: Check for unstaged changes before git operations +**Auth**: Required +**Response**: Git status with unstaged changes detection +```json +{ + "hasUnstagedChanges": true, + "currentBranch": "main", + "isClean": false, + "files": { + "modified": ["file1.rec"], + "not_added": ["file2.rec"], + "deleted": [], + "conflicted": [], + "staged": ["file3.rec"] + } +} +``` + +## Authentication & Session Management + +### Authentication Middleware +```javascript +function isAuthenticated(req, res, next) { + if (authConfig.skipAuth || req.isAuthenticated()) { + return next(); + } + return res.status(401).json({ error: 'Authentication required' }); +} +``` + +### User Repository Setup +- Each user gets isolated directory: `workdir/{username}` +- Repository cloned with user's GitHub token +- Git operations use authenticated URLs + +### Session Storage +- **Active sessions**: `global.interactiveSessions[username]` +- **User tokens**: `global.userTokens[username]` +- **Auto cleanup**: Sessions cleaned after 5 minutes + +## Environment Configuration + +### Required Variables +```bash +# Server +BACKEND_PORT=9150 +HOST=localhost + +# Ask AI +ASK_AI_COMMAND="docker run --rm -i ubuntu:latest bash -c 'echo Input:; cat; sleep 2'" +ASK_AI_TIMEOUT=30000 + +# GitHub OAuth +GITHUB_CLIENT_ID=your_client_id +GITHUB_CLIENT_SECRET=your_secret +SKIP_AUTH=true # Development 
mode +``` + +### Security Features +- CORS configured for frontend URL +- Session-based authentication +- File access restricted to user directories +- Docker command isolation +- Timeout protection for long-running commands + +## Error Handling Patterns + +### Standard Error Response +```json +{ + "error": "Descriptive error message", + "details": "Additional context if available" +} +``` + +### Common Status Codes +- `200` - Success +- `400` - Bad request (missing parameters) +- `401` - Authentication required +- `403` - Access denied (file outside user directory) +- `404` - Resource not found +- `409` - Conflict (session already running) +- `500` - Server error + +## Performance Considerations + +### Session Management +- One active session per user enforced +- Background process cleanup after completion +- Memory-based storage (no database required) + +### File Operations +- Path validation for security +- Recursive directory creation +- Symlink support with target resolution + +### Git Operations +- Token-based authentication for private repos +- Remote URL rewriting for user tokens +- Branch tracking and status monitoring + +## Development Tips + +### Testing API Endpoints +```bash +# Test with curl +curl -X POST http://localhost:9150/api/interactive/start \ + -H "Content-Type: application/json" \ + -d '{"input":"echo hello"}' \ + --cookie-jar cookies.txt + +# Poll status +curl http://localhost:9150/api/interactive/status/session-id \ + --cookie cookies.txt +``` + +### Debugging Sessions +- Check `global.interactiveSessions` in Node.js console +- Monitor process spawning and cleanup +- Verify localStorage persistence on frontend + +### Common Issues +- **Session conflicts**: Only one session per user allowed +- **File access**: Paths must be within user's test directory +- **Git authentication**: Requires valid GitHub token +- **Docker availability**: Ask AI commands need Docker installed \ No newline at end of file diff --git a/ui/doc/ASK_AI.md b/ui/doc/ASK_AI.md new file mode 100644 index 0000000..e700eb8 --- /dev/null +++ b/ui/doc/ASK_AI.md @@ -0,0 +1,135 @@ +# Ask AI Feature + +## Overview + +The Ask AI feature provides an interactive session interface that allows users to run commands and see live output. This feature supports session persistence and only allows one request per logged-in user at a time. + +## How It Works + +1. **User Interface**: Click the "Ask AI" button in the header to open the interactive session modal +2. **Input**: Enter your command or question in the text area +3. **Execution**: The command is sent to a configurable backend process +4. **Live Output**: See real-time logs as the command executes +5. **History**: View the output from the last completed command +6. 
**Session Management**: Only one session per user is allowed at a time + +## Configuration + +The interactive session is configured entirely through environment variables: + +### Required Environment Variables + +```bash +# Command to execute - receives user input via stdin +ASK_AI_COMMAND="docker run --rm -i ubuntu:latest bash -c \"echo 'Input received:'; cat; echo '\nSleeping for 2 seconds...'; sleep 2; echo 'Done!'\"" + +# Timeout for commands in milliseconds (default: 30000 = 30 seconds) +ASK_AI_TIMEOUT=30000 +``` + +### Example Configurations + +#### Simple Echo Command +```bash +ASK_AI_COMMAND="docker run --rm -i ubuntu:latest bash -c \"echo 'You said:'; cat\"" +``` + +#### Python Script Runner +```bash +ASK_AI_COMMAND="docker run --rm -i python:3.9-slim python3 -c \"import sys; exec(sys.stdin.read())\"" +``` + +#### Custom AI Integration +```bash +ASK_AI_COMMAND="docker run --rm -i your-ai-image:latest /app/process-input.sh" +``` + +## Features + +- **Live Output Streaming**: Real-time display of command output +- **Session Management**: One active session per user +- **Persistent History**: Session history stored in localStorage and restored when reopening +- **Command Restoration**: Last command is automatically restored in the input field +- **Timeout Protection**: Commands automatically timeout after configured duration +- **History Management**: View last command, output, and execution time with clear history option +- **Error Handling**: Displays clear error messages for failed commands +- **Cancellation**: Users can cancel running commands +- **Polling-Based**: Uses efficient polling instead of WebSockets + +## Session Persistence + +The Ask AI feature includes persistent session history using browser localStorage: + +### What Gets Stored +- **Last Command**: The command that was executed +- **Output**: Complete output from the last command execution +- **Timestamp**: When the command was executed +- **Automatic Restoration**: History is automatically loaded when reopening the modal + +### Storage Management +- **Automatic Save**: Session data is saved automatically when a command completes +- **Manual Clear**: Users can clear history using the trash icon in the history section +- **Browser Storage**: Data is stored locally in the browser and persists across sessions +- **No Server Storage**: Session history is only stored locally for privacy + +### User Experience +- **Seamless Continuation**: Close and reopen the modal without losing your last session +- **Command Restoration**: Last command is automatically filled in the input field +- **Timestamp Display**: See when your last command was executed +- **Easy Cleanup**: One-click history clearing when needed + +## Security Considerations + +- Commands are executed in isolated Docker containers +- User input is passed via stdin only +- Sessions are automatically cleaned up after completion +- Timeout protection prevents long-running processes +- User authentication is required + +## Testing + +Run the test script to verify the feature works: + +```bash +./test-interactive.sh +``` + +## API Endpoints + +### Start Session +- **POST** `/api/interactive/start` +- **Body**: `{ "input": "user command or question" }` +- **Response**: `{ "sessionId": "unique-session-id", "status": "started" }` + +### Check Status +- **GET** `/api/interactive/status/:sessionId` +- **Response**: Session status with logs and completion info + +### Cancel Session +- **POST** `/api/interactive/cancel/:sessionId` +- **Response**: `{ "status": "cancelled" 
}` + +## Implementation Details + +- Backend uses Node.js child_process to spawn commands +- Frontend polls every 1 second for updates +- Sessions are stored in memory with automatic cleanup +- Docker is used for command isolation and security +- Supports any command that can read from stdin + +## Troubleshooting + +### "Failed to start command" Error +- Check that Docker is installed and running +- Verify the ASK_AI_COMMAND environment variable is set correctly +- Ensure the command can read from stdin + +### Command Timeout +- Increase ASK_AI_TIMEOUT value +- Optimize your command for faster execution +- Check Docker container startup time + +### No Output Displayed +- Verify your command writes to stdout +- Check that the command doesn't buffer output +- Ensure proper error handling in your command \ No newline at end of file diff --git a/ui/doc/COMPONENTS.md b/ui/doc/COMPONENTS.md new file mode 100644 index 0000000..5455163 --- /dev/null +++ b/ui/doc/COMPONENTS.md @@ -0,0 +1,267 @@ +# CLT UI Components Guide + +## Core Components Overview + +### App.svelte +**Purpose**: Root application component with authentication management +**Key Features**: +- Authentication state management and periodic checks +- Loading states and error handling +- Auth-required vs skip-auth modes +- Branch info fetching after authentication + +**Critical Code**: +```javascript +// Periodic auth check every 60 seconds +const authCheckInterval = setInterval(async () => { + if ($authStore.isAuthenticated) { + const result = await fetch(`${API_URL}/api/health`); + // Refresh auth state if needed + } +}, 60000); +``` + +### Header.svelte +**Purpose**: Navigation bar with main action buttons +**Key Features**: +- Ask AI button (opens interactive session) +- Create PR button (enabled when git changes detected) +- User profile and logout +- Docker image configuration input + +**Critical Code**: +```javascript +// Git status checking every 10 seconds +const interval = setInterval(checkGitStatus, 10000); + +// Ask AI modal opening +function openInteractiveSession() { + interactiveSession?.openSession(); +} +``` + +### FileExplorer.svelte - Enhanced File Tree Management +**Purpose**: File tree navigation with Git safety and URL parameter support + +**Key Features (2024 Updates)**: +- **URL Parameter Processing**: Auto-open files, branch switching, failed test highlighting +- **Git Safety**: Unstaged changes detection before git operations +- **State Preservation**: Smart file tree merging with user interaction preservation +- **Failed Test Highlighting**: Complete directory path highlighting for failed tests +- **Background Polling**: 10-second file tree updates without workflow disruption +- **Drag & Drop**: Move files between directories with visual feedback +- **VSCode-style Creation**: Inline file/folder creation + +**Critical Patterns**: + +#### URL Parameter Processing with Git Safety +```typescript +// Always check for unstaged changes first +const urlParams = parseUrlParams(); +const hasGitAffectingParams = urlParams.branch || urlParams.filePath; +if (hasGitAffectingParams) { + const canProceed = await checkUnstagedChanges(); + if (!canProceed) { + // Clear URL parameters and stop processing + const url = new URL(window.location.href); + url.search = ''; + window.history.replaceState({}, '', url.toString()); + return; + } +} +``` + +#### State Preservation Pattern +```typescript +// Before any file tree update +preserveExpandedState(); +await filesStore.refreshFileTree(); + +// Reactive state restoration +$: { + 
if ($filesStore.fileTree) {
+    // If we have preserved state, restore it
+    if (preservedExpandedFolders.size > 0) {
+      expandedFolders = new Set(preservedExpandedFolders);
+      preservedExpandedFolders.clear();
+    }
+    fileTree = $filesStore.fileTree;
+  }
+}
+```
+
+#### Failed Test Integration (Reuses Git Status Pattern)
+```typescript
+function isDirWithFailedTests(dirPath: string): boolean {
+  for (const failedTest of failedTestPaths) {
+    if (failedTest.startsWith(dirPath + '/')) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Integrates seamlessly into existing getFileGitStatus()
+function getFileGitStatus(filePath: string): string | null {
+  if (failedTestPaths.has(filePath)) return 'F';
+  if (isDirWithFailedTests(filePath)) return 'F';
+  // Continue with existing git status logic...
+}
+```
+
+**Critical Code**:
+```javascript
+// File tree API call
+const response = await fetch(`${API_URL}/api/get-file-tree`);
+// Only shows .rec and .recb files
+if (entry.name.endsWith('.rec') || entry.name.endsWith('.recb'))
+```
+
+### Editor.svelte
+**Purpose**: Main editing interface for CLT test files
+**Key Features**:
+- Command/output pair editing
+- Real-time WASM diff highlighting
+- Auto-save functionality
+- Test execution with live results
+- Block references and comments support
+
+**Critical Code**:
+```javascript
+// WASM diff highlighting
+async function highlightDifferences(actual: string, expected: string) {
+  const diffResult = JSON.parse(patternMatcher.diff_text(expected, actual));
+  // Renders git-style diff with highlighting
+}
+
+// Auto-save on changes
+filesStore.updateCommand(i, newValue);
+```
+
+### CodeMirrorInput.svelte & SimpleCodeMirror.svelte
+**Purpose**: Syntax-highlighted command input components
+**Key Features**:
+- Shell/bash syntax highlighting with CodeMirror 6
+- Automatic light/dark theme switching based on system preference
+- Real-time theme reconfiguration
+- Pre-built professional themes optimized for shell commands
+
+**Theme Architecture**:
+```javascript
+// Pre-built theme imports
+import { oneDark } from '@codemirror/theme-one-dark';
+import { bbedit } from '@uiw/codemirror-theme-bbedit';
+
+// Dynamic theme detection
+function detectThemePreference() {
+  return window.matchMedia('(prefers-color-scheme: dark)').matches;
+}
+
+// Simple theme switching
+function getTheme() {
+  return isDarkMode ? oneDark : bbedit;
+}
+
+// Real-time theme switching
+mediaQuery.addEventListener('change', (e) => {
+  isDarkMode = e.matches;
+  editorView.dispatch({
+    effects: themeCompartment.reconfigure(getTheme())
+  });
+});
+```
+
+**Current Themes**:
+- **Light Mode**: BBEdit Theme - Professional, clean appearance
+- **Dark Mode**: One Dark Theme - Popular, well-tested dark theme
+
+**Customization**:
+- See [THEMING.md](./THEMING.md) for a comprehensive theming guide
+- Uses the [@uiw/codemirror-themes](https://www.npmjs.com/package/@uiw/codemirror-themes) package
+- Easy theme switching with pre-built options
+- Automatic accessibility compliance
+
+### InteractiveSession.svelte (Ask AI)
+**Purpose**: Interactive command execution with session persistence
+**Key Features**:
+- Real-time command output streaming
+- Session persistence across modal close/open
+- Background polling continuation
+- localStorage-based state management
+
+**Critical Code**:
+```javascript
+// Session persistence
+function saveActiveSession(sessionId: string, command: string) {
+  localStorage.setItem('askAI_activeSession', JSON.stringify({
+    sessionId, command, timestamp: new Date().toISOString()
+  }));
+}
+
+// Background polling (continues when modal closed)
+pollingInterval = setInterval(async () => {
+  const res = await fetch(`/api/interactive/status/${sessionId}`);
+  const data = await res.json();
+  logs = data.logs;
+}, 1000);
+```
+
+## State Management (Stores)
+
+### filesStore.ts
+**Purpose**: File content and test execution state
+**Key State**:
+- `currentFile` - Currently edited file with commands
+- `running` - Test execution status
+- `saving` - File save status
+- `dockerImage` - Docker image for test execution
+
+### authStore.ts
+**Purpose**: User authentication and GitHub integration
+**Key State**:
+- `isAuthenticated` - Auth status
+- `user` - User profile data
+- `skipAuth` - Development mode flag
+- `token` - GitHub access token
+
+### githubStore.ts
+**Purpose**: Pull request creation modal
+**Key State**:
+- `showModal` - Modal visibility
+- `success` - PR creation success
+- `prUrl` - Created PR URL
+
+### branchStore.ts
+**Purpose**: Git branch information
+**Key State**:
+- `currentBranch` - Current git branch
+- `defaultBranch` - Repository default branch
+
+## Component Communication Patterns
+
+### Parent-Child Props
+```svelte
+<!-- Parent keeps a reference to the child component instance -->
+<InteractiveSession bind:this={interactiveSession} />
+
+<!-- Child exposes methods for the parent to call -->
+export function openSession() { ... }
+```
+
+### Store Subscriptions
+```javascript
+// Reactive store subscriptions
+$: commands = $filesStore.currentFile ? $filesStore.currentFile.commands : [];
+```
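+
+The stores themselves are thin wrappers around Svelte's `writable`. A minimal, hypothetical sketch of the `filesStore` shape (illustrative only; the real store tracks more state such as `running`, `saving`, and `dockerImage`):
+
+```javascript
+import { writable } from 'svelte/store';
+
+function createFilesStore() {
+  // currentFile holds the open .rec file and its command/output pairs
+  const { subscribe, update } = writable({ currentFile: null });
+
+  return {
+    subscribe,
+    // Mutate one command in place; subscribers re-render reactively
+    updateCommand: (index, text) =>
+      update(state => {
+        if (state.currentFile) state.currentFile.commands[index].text = text;
+        return state;
+      })
+  };
+}
+
+export const filesStore = createFilesStore();
+```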
+
+### Event Handling
+```javascript
+// File operations
+filesStore.addCommand(index, text, type);
+filesStore.updateCommand(index, newValue);
+filesStore.deleteCommand(index);
+```
+
+## Performance Optimizations
+
+### Batch Operations
+- Use `batch_edit` for multiple file changes
+- Minimize individual API calls
+
+### Efficient Polling
+- Ask AI polls every 1 second only when active
+- Git status checks every 10 seconds
+- Auth checks every 60 seconds
+
+### WASM Loading
+- Diff engine loads asynchronously
+- Graceful fallback to plain text
+- Pattern caching for performance
+
+## Common Development Patterns
+
+### Error Handling
+```javascript
+try {
+  const result = await apiCall();
+  // Handle success
+} catch (error) {
+  console.error('Operation failed:', error);
+  // Show user-friendly error
+}
+```
+
+### Async UI Updates
+```javascript
+// Use setTimeout to avoid reactive update cycles
+setTimeout(() => {
+  filesStore.updateCommand(i, newValue);
+}, 0);
+```
+
+### localStorage Management
+```javascript
+// Always wrap in try-catch
+try {
+  localStorage.setItem(key, JSON.stringify(data));
+} catch (err) {
+  console.warn('localStorage failed:', err);
+}
+```
\ No newline at end of file
diff --git a/ui/doc/DEVELOPMENT.md b/ui/doc/DEVELOPMENT.md
new file mode 100644
index 0000000..195826c
--- /dev/null
+++ b/ui/doc/DEVELOPMENT.md
@@ -0,0 +1,182 @@
+# CLT UI Development Guide
+
+## Architecture Overview
+
+CLT UI is a Svelte-based web interface for managing and editing CLT test files with real-time execution capabilities.
+
+### Tech Stack
+- **Frontend**: Svelte 5 + TypeScript + Vite
+- **Backend**: Node.js + Express
+- **Authentication**: GitHub OAuth (optional, can skip with SKIP_AUTH=true)
+- **Storage**: localStorage for session persistence
+- **Testing**: WASM-based diff engine for output comparison
+
+## Core Components
+
+### Main Layout
+- **App.svelte** - Root component with auth state management
+- **Header.svelte** - Navigation with Ask AI and Create PR buttons
+- **FileExplorer.svelte** - File tree navigation and management
+- **Editor.svelte** - Main editing interface with command/output pairs
+
+### Key Features
+- **Ask AI** (`InteractiveSession.svelte`) - Interactive command execution with session persistence
+- **Pull Request Modal** (`PullRequestModal.svelte`) - GitHub integration for PR creation
+- **Real-time Testing** - Execute CLT tests with live output comparison
+
+## Critical Development Notes
+
+### State Management
+- **filesStore** - Current file, commands, test results, running state
+- **authStore** - User authentication and GitHub integration
+- **githubStore** - PR creation modal state
+- **branchStore** - Git branch information
+
+### Session Persistence (Ask AI)
+- **Active sessions**: `askAI_activeSession` in localStorage
+- **Completed history**: `askAI_sessionHistory` in localStorage
+- **Background polling**: Continues even when the modal is closed
+- **One session per user**: Backend enforces this limitation
+
+### Authentication Flow
+1. GitHub OAuth (if enabled) or skip-auth mode
+2. User repo cloning to `workdir/{username}`
+3. Session management with tokens stored globally
+4. Repository operations use the user's GitHub token
+
+## Development Guidelines
+
+### Adding New URL Parameters
+1. Add to `parseUrlParams()` return type and function
+2. Handle in `onMount()` after the unstaged-changes check
+3. Add to `handleUrlChange()` for dynamic updates
+4. Test with unstaged changes scenarios
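+
+Both of these checklists lean on the same guard. A minimal sketch of what a `checkUnstagedChanges()` helper can look like, assuming the `/api/git-status` response shape documented in API.md and a plain `confirm()` dialog (the actual implementation lives in FileExplorer.svelte):
+
+```javascript
+// Returns true when it is safe to proceed with a git-affecting operation.
+async function checkUnstagedChanges() {
+  const res = await fetch(`${API_URL}/api/git-status`, { credentials: 'include' });
+  const status = await res.json();
+  if (!status.hasUnstagedChanges) return true;
+
+  // Let the user decide; their working directory may be modified.
+  return confirm(
+    'You have unstaged changes in your repository.\n\n' +
+    'Proceeding may modify your working directory. Continue?'
+  );
+}
+```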
+
+### Adding New Git Operations
+1. Always call `checkUnstagedChanges()` first
+2. Use existing patterns from `resetToBranch` and `checkoutAndPull`
+3. Preserve expanded state with `preserveExpandedState()`
+4. Handle errors gracefully with user feedback
+
+### File Tree Modifications
+1. Use `mergeFileTreePreservingState()` for updates
+2. Call `preserveExpandedState()` before refreshes
+3. Test that user interactions are maintained
+4. Verify polling doesn't disrupt workflow
+
+### Performance Best Practices
+- Use parallel tool execution for discovery operations
+- Batch edits for multiple file changes
+- Smart merging reduces DOM updates
+- Preserve user state during background operations
+
+## Environment Configuration
+```bash
+# Backend
+BACKEND_PORT=9150
+FRONTEND_PORT=9151
+
+# Ask AI Feature
+ASK_AI_COMMAND="docker run --rm -i ubuntu:latest bash -c \"echo 'Input:'; cat; sleep 2; echo 'Done'\""
+ASK_AI_TIMEOUT=30000
+
+# GitHub OAuth (optional)
+GITHUB_CLIENT_ID=your_client_id
+GITHUB_CLIENT_SECRET=your_secret
+SKIP_AUTH=true # for development
+```
+
+## Development Workflow
+
+### Setup
+```bash
+cd ui
+npm install
+npm run dev    # Frontend on :5173
+node server.js # Backend on :9150
+```
+
+### File Structure
+```
+ui/
+β”œβ”€β”€ src/
+β”‚   β”œβ”€β”€ components/  # Svelte components
+β”‚   β”œβ”€β”€ stores/      # State management
+β”‚   └── config.js    # API configuration
+β”œβ”€β”€ server.js        # Express backend
+β”œβ”€β”€ doc/             # Development docs
+└── dist/            # Build output
+```
+
+### Key API Endpoints
+- `/api/get-file-tree` - File explorer data
+- `/api/get-file` - File content
+- `/api/save-file` - Save file changes
+- `/api/run-test` - Execute CLT tests
+- `/api/interactive/*` - Ask AI session management
+- `/api/commit-changes` - Git operations
+
+## Testing & Building
+
+### Development
+```bash
+npm run dev     # Development server
+npm run build   # Production build
+npm run preview # Preview build
+```
+
+### Testing Ask AI
+```bash
+./test-session-persistence.sh # Test session persistence
+./test-interactive.sh         # Test basic functionality
+```
+
+## Common Issues & Solutions
+
+### Session Persistence Not Working
+- Check localStorage keys: `askAI_activeSession`, `askAI_sessionHistory`
+- Verify background polling continues after modal close
+- Ensure `loadSessionState()` is called on component mount
+
+### Authentication Issues
+- Set `SKIP_AUTH=true` for development
+- Check GitHub OAuth configuration
+- Verify session secrets and cookies
+
+### File Operations Failing
+- Check user repo exists in `workdir/{username}`
+- Verify file paths are within the test directory
+- Ensure proper authentication for git operations
+
+## Performance Notes
+
+- **Batch edits**: Use `batch_edit` for multiple file changes
+- **Polling efficiency**: Ask AI polls every 1 second
+- **WASM loading**: Diff engine loads asynchronously
+- **Memory cleanup**: Sessions auto-cleanup after 5 minutes
+
+## Security Considerations
+
+- Commands execute in isolated Docker containers
+- User input passed via stdin only
+- File operations restricted to user's test directory
+- GitHub tokens stored securely and cleaned up
+- Session isolation per authenticated user
\ No newline at end of file
diff --git a/ui/doc/README.md b/ui/doc/README.md
new file mode 100644
index 0000000..4fa55dc
--- /dev/null
+++ b/ui/doc/README.md
@@ -0,0 +1,165 @@
+# CLT UI Documentation Index
+
+## Development Documentation
+
+### πŸ“š Core Guides
+- **[DEVELOPMENT.md](./DEVELOPMENT.md)** - Architecture overview with Git safety, URL parameters, and state preservation
+- **[COMPONENTS.md](./COMPONENTS.md)** - Detailed component guide with code patterns
+- **[API.md](./API.md)** - Backend API reference including the Git safety endpoints
+
+### 🎯 Feature Documentation
+- **[URL_PARAMETERS.md](./URL_PARAMETERS.md)** - Comprehensive URL parameter system with Git safety
+- **[ASK_AI.md](./ASK_AI.md)** - Ask AI interactive session feature with session persistence
+- **[THEMING.md](./THEMING.md)** - CodeMirror theming guide for command syntax highlighting
+
+## Quick Reference
+
+### Development Setup
+```bash
+cd ui
+npm install
+npm run dev    # Frontend :5173
+node server.js # Backend :9150
+```
+
+### New URL Parameter System
+```bash
+# Auto-open file with custom Docker image
+?test_path=core/file.rec&docker_image=custom:latest
+
+# Switch branch and highlight failed tests
+?branch=feature-branch&failed_tests[]=test1.rec&failed_tests[]=test2.rec
+
+# Complete workflow URL
+?test_path=integration/auth.rec&branch=auth-fixes&docker_image=test:latest&failed_tests[]=integration/auth.rec
+```
+
+### Git Safety Features
+- **Unstaged Changes Detection**: Automatic check before git operations
+- **User Confirmation**: Clear dialog explaining consequences
+- **Complete Cancellation**: Option to ignore URL parameters if conflicts exist
+- **Applied Everywhere**: URL processing AND manual branch operations
+
+### Key Environment Variables
+```bash
+ASK_AI_COMMAND="docker run --rm -i ubuntu:latest bash -c 'echo Input; cat'"
+ASK_AI_TIMEOUT=30000
+SKIP_AUTH=true # Development mode
+```
+
+### Enhanced Architecture Points
+
+#### State Preservation System
+- **Smart Merging**: `mergeFileTreePreservingState()` preserves user interactions
+- **Background Polling**: 10-second updates without workflow disruption
+- **Expanded Folders**: Maintained across all operations
+- **Selected Files**: Current selection preserved during updates
+
+#### Failed Test 
Highlighting +- **File Level**: Direct red "F" indicator for failed test files +- **Directory Level**: Parent directories marked with "F" indicator +- **Complete Path**: Entire directory chain highlighted +- **Git Integration**: Reuses existing git status infrastructure + +#### URL Parameter Processing +- **Git Safety**: Unstaged changes check before processing +- **Parameter Types**: `test_path`, `docker_image`, `branch`, `failed_tests[]` +- **Error Handling**: Non-existent files show errors instead of auto-creation +- **State Management**: Failed test highlighting preserved during operations + +### Common Development Tasks + +#### Adding New Component +1. Create in `src/components/` +2. Import in parent component +3. Add to stores if state needed +4. Update documentation + +#### Adding API Endpoint +1. Add route in `server.js` +2. Use `isAuthenticated` middleware +3. Validate user directory access +4. Update API.md documentation + +#### Debugging Ask AI +1. Check localStorage keys in browser +2. Monitor `global.interactiveSessions` in Node.js +3. Verify Docker command execution +4. Test session persistence across modal close/open + +### Performance Guidelines +- Use `batch_edit` for multiple file changes +- Minimize API polling frequency +- Load WASM diff engine asynchronously +- Clean up intervals and sessions properly + +### Security Notes +- All commands run in Docker containers +- File access restricted to user directories +- GitHub tokens handled securely +- Session isolation per user enforced + +## File Structure +``` +ui/ +β”œβ”€β”€ src/ +β”‚ β”œβ”€β”€ components/ # Svelte components +β”‚ β”‚ β”œβ”€β”€ App.svelte # Root component +β”‚ β”‚ β”œβ”€β”€ Header.svelte # Navigation +β”‚ β”‚ β”œβ”€β”€ FileExplorer.svelte +β”‚ β”‚ β”œβ”€β”€ Editor.svelte # Main editor +β”‚ β”‚ └── InteractiveSession.svelte # Ask AI +β”‚ β”œβ”€β”€ stores/ # State management +β”‚ └── config.js # API configuration +β”œβ”€β”€ server.js # Express backend +β”œβ”€β”€ doc/ # This documentation +└── dist/ # Build output +``` + +## Getting Help + +### Common Issues +1. **Session not persisting** β†’ Check localStorage and background polling +2. **Auth failing** β†’ Verify GitHub OAuth or use SKIP_AUTH=true +3. **File operations failing** β†’ Check user directory permissions +4. **Ask AI not working** β†’ Verify Docker is installed and ASK_AI_COMMAND + +### Debugging Tools +- Browser DevTools β†’ localStorage, network, console +- Node.js console β†’ `global.interactiveSessions`, `global.userTokens` +- Test scripts β†’ `./test-session-persistence.sh`, `./test-interactive.sh` + +### Key Logs to Monitor +- Session creation/cleanup +- File operation security checks +- Git authentication status +- Docker command execution \ No newline at end of file diff --git a/ui/doc/THEMING.md b/ui/doc/THEMING.md new file mode 100644 index 0000000..4346ab0 --- /dev/null +++ b/ui/doc/THEMING.md @@ -0,0 +1,313 @@ +# CodeMirror Theming Guide for CLT UI + +This document provides comprehensive information about the theming system for command syntax highlighting in the CLT UI, including how to configure, customize, and extend themes. + +## Overview + +The CLT UI uses CodeMirror 6 for syntax highlighting in command input fields. The theming system automatically switches between light and dark themes based on the user's system preference (`prefers-color-scheme`). 
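+
+Concretely, the theme is mounted inside a CodeMirror `Compartment`, which is what makes live reconfiguration possible. A minimal sketch of the wiring, assuming the `getTheme()` helper shown later in this guide:
+
+```javascript
+import { EditorView } from '@codemirror/view';
+import { EditorState, Compartment } from '@codemirror/state';
+import { StreamLanguage } from '@codemirror/language';
+import { shell } from '@codemirror/legacy-modes/mode/shell';
+
+const themeCompartment = new Compartment();
+
+const editorView = new EditorView({
+  state: EditorState.create({
+    doc: 'echo "hello"',
+    extensions: [
+      StreamLanguage.define(shell),    // shell/bash syntax highlighting
+      themeCompartment.of(getTheme())  // swappable later via reconfigure()
+    ]
+  }),
+  parent: document.body
+});
+```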
+ +**Current Implementation**: Uses pre-built themes from the [@uiw/codemirror-themes](https://www.npmjs.com/package/@uiw/codemirror-themes) package for optimal shell syntax highlighting. + +## Architecture + +### Theme Detection +- **Automatic Detection**: Uses `window.matchMedia('(prefers-color-scheme: dark)')` to detect user preference +- **Real-time Switching**: Listens for theme changes and updates the editor dynamically +- **Fallback**: Defaults to light theme if detection fails + +### Current Theme Selection +- **Light Mode**: **BBEdit Theme** - Clean, professional light theme optimized for code readability +- **Dark Mode**: **One Dark Theme** - Popular dark theme with excellent contrast + +### Theme Components +1. **Base Theme**: Pre-built themes from @uiw packages +2. **Dynamic Reconfiguration**: Uses CodeMirror's Compartment API for live theme switching +3. **Shell Syntax Support**: Themes are optimized for shell/bash syntax highlighting + +## File Structure + +``` +ui/src/components/ +β”œβ”€β”€ CodeMirrorInput.svelte # Main command input with syntax highlighting +β”œβ”€β”€ SimpleCodeMirror.svelte # Simplified version for blocks/comments +``` + +## Current Theme Configuration + +### Implementation +```javascript +// Theme imports +import { oneDark } from '@codemirror/theme-one-dark'; +import { bbedit } from '@uiw/codemirror-theme-bbedit'; + +// Dynamic theme selection +function getTheme() { + return isDarkMode ? oneDark : bbedit; +} + +// Real-time theme switching +mediaQuery.addEventListener('change', (e) => { + isDarkMode = e.matches; + editorView.dispatch({ + effects: themeCompartment.reconfigure(getTheme()) + }); +}); +``` + +### Why These Themes? + +#### BBEdit Theme (Light Mode) +- βœ… **Professional appearance** - Clean, minimal design +- βœ… **Excellent contrast** - High readability for shell commands +- βœ… **Optimized syntax colors** - Well-balanced color palette +- βœ… **Shell-friendly** - Good highlighting for commands, strings, operators + +#### One Dark Theme (Dark Mode) +- βœ… **Popular choice** - Widely used and tested +- βœ… **Eye-friendly** - Reduced strain in low-light environments +- βœ… **Comprehensive highlighting** - Full syntax support +- βœ… **Consistent experience** - Matches many developer tools + +## Available Pre-built Themes + +All themes from [@uiw/codemirror-themes](https://www.npmjs.com/package/@uiw/codemirror-themes) are available: + +### Recommended Light Themes +| Theme | Package | Best For | +|-------|---------|----------| +| **BBEdit** ⭐ | `@uiw/codemirror-theme-bbedit` | **Current choice** - Professional, clean | +| GitHub Light | `@uiw/codemirror-theme-github` | GitHub-style interface | +| XCode Light | `@uiw/codemirror-theme-xcode` | Apple ecosystem integration | +| Eclipse | `@uiw/codemirror-theme-eclipse` | IDE-style appearance | +| Material Light | `@uiw/codemirror-theme-material` | Material Design aesthetic | + +### Recommended Dark Themes +| Theme | Package | Best For | +|-------|---------|----------| +| **One Dark** ⭐ | `@codemirror/theme-one-dark` | **Current choice** - Popular, well-tested | +| Atom One | `@uiw/codemirror-theme-atom-one` | Atom editor style | +| Dracula | `@uiw/codemirror-theme-dracula` | High contrast, vibrant | +| Tokyo Night | `@uiw/codemirror-theme-tokyo-night` | Modern, stylish | +| Nord | `@uiw/codemirror-theme-nord` | Cool, arctic-inspired | + +## How to Change Themes + +### 1. 
Using Different Pre-built Themes + +**Step 1**: Install the desired theme package +```bash +cd ui +npm install @uiw/codemirror-theme-[theme-name] +``` + +**Step 2**: Update imports in both components +```javascript +// In CodeMirrorInput.svelte and SimpleCodeMirror.svelte +import { newLightTheme } from '@uiw/codemirror-theme-[light-theme]'; +import { newDarkTheme } from '@uiw/codemirror-theme-[dark-theme]'; +``` + +**Step 3**: Update the getTheme() function +```javascript +function getTheme() { + return isDarkMode ? newDarkTheme : newLightTheme; +} +``` + +### 2. Popular Theme Combinations + +```javascript +// GitHub-style combination +import { githubLight } from '@uiw/codemirror-theme-github'; +import { githubDark } from '@uiw/codemirror-theme-github'; + +// VS Code-style combination +import { vscodeDark } from '@uiw/codemirror-theme-vscode'; +import { bbedit } from '@uiw/codemirror-theme-bbedit'; // Light alternative + +// Material Design combination +import { materialLight } from '@uiw/codemirror-theme-material'; +import { materialDark } from '@uiw/codemirror-theme-material'; +``` + +### 3. Creating Custom Themes + +For advanced customization, use the `createTheme` function: + +```javascript +import { createTheme } from '@uiw/codemirror-themes'; +import { tags as t } from '@lezer/highlight'; + +const customLightTheme = createTheme({ + theme: 'light', + settings: { + background: '#ffffff', + foreground: '#333333', + caret: '#5d00ff', + selection: '#036dd626', + gutterBackground: '#f5f5f5', + gutterForeground: '#999999', + }, + styles: [ + { tag: t.comment, color: '#6a737d' }, + { tag: t.keyword, color: '#d73a49' }, + { tag: t.string, color: '#032f62' }, + { tag: t.operator, color: '#005cc5' }, + { tag: t.variableName, color: '#6f42c1' }, + ], +}); +``` + +## Shell Syntax Highlighting Details + +The CLT UI uses the shell mode from `@codemirror/legacy-modes/mode/shell` which provides: + +### Recognized Elements +- **Commands**: Built-in shell commands (`ls`, `grep`, `awk`, etc.) +- **Keywords**: Shell keywords (`if`, `then`, `else`, `fi`, etc.) +- **Strings**: Single and double-quoted strings +- **Comments**: Lines starting with `#` +- **Variables**: `$VAR`, `${VAR}`, `$1`, `$@`, etc. +- **Operators**: `|`, `>`, `>>`, `<`, `&&`, `||`, `;` +- **Numbers**: Numeric literals +- **Flags**: Command-line options (`-v`, `--verbose`) + +### Theme Color Mapping +Pre-built themes automatically handle these elements with appropriate colors: +- Commands and keywords get primary accent colors +- Strings use secondary colors for distinction +- Comments are typically muted/gray +- Operators use bright colors for visibility +- Variables get special highlighting + +## Testing Theme Changes + +### 1. Development Testing +```bash +cd ui +npm run dev +``` + +### 2. Build Testing +```bash +cd ui +npm run build +``` + +### 3. Visual Testing Checklist +- [ ] Light mode syntax highlighting is readable +- [ ] Dark mode syntax highlighting is readable +- [ ] Theme switches properly when system preference changes +- [ ] All shell elements are properly colored +- [ ] No CSS conflicts with main UI theme +- [ ] Sufficient contrast for accessibility + +## Troubleshooting + +### Common Issues + +1. **Theme not switching** + - Check browser support for `prefers-color-scheme` + - Verify media query listener is attached + - Check console for JavaScript errors + +2. 
**Theme not loading** + - Ensure theme package is installed: `npm install @uiw/codemirror-theme-[name]` + - Check import path matches package exports + - Verify build process completes without errors + +3. **Colors not as expected** + - Different themes have different color philosophies + - Test with various shell commands to see full palette + - Consider switching to a different pre-built theme + +### Debug Mode + +To debug theme issues, add logging to the `getTheme()` function: + +```javascript +function getTheme() { + console.log('Theme switching to:', isDarkMode ? 'dark' : 'light'); + const theme = isDarkMode ? oneDark : bbedit; + console.log('Using theme:', theme); + return theme; +} +``` + +## Best Practices + +### 1. Theme Selection +- **Choose popular themes** - Better tested and maintained +- **Test with real content** - Use actual shell commands for evaluation +- **Consider user base** - Match your audience's preferences +- **Maintain consistency** - Use themes from the same family when possible + +### 2. Accessibility +- **Verify contrast ratios** - WCAG 2.1 AA: 4.5:1 for normal text +- **Test with color blindness simulators** +- **Ensure themes work in both light and dark modes** + +### 3. Performance +- **Use pre-built themes** - Faster than custom themes +- **Minimize theme complexity** - Simpler themes load faster +- **Avoid frequent theme reconfiguration** + +### 4. Maintenance +- **Keep packages updated** - Themes receive bug fixes and improvements +- **Document theme choices** - Record why specific themes were chosen +- **Test after updates** - Verify themes still work after package updates + +## Package Management + +### Installing Themes +```bash +# Install specific theme +npm install @uiw/codemirror-theme-[name] + +# Install multiple themes +npm install @uiw/codemirror-theme-github @uiw/codemirror-theme-dracula + +# Install the full theme collection +npm install @uiw/codemirror-themes +``` + +### Keeping Themes Updated +```bash +# Update specific theme +npm update @uiw/codemirror-theme-bbedit + +# Update all theme packages +npm update @uiw/codemirror-theme-* +``` + +## Future Improvements + +### Planned Features +- [ ] User theme selection in UI settings +- [ ] Theme preview functionality +- [ ] High contrast accessibility themes +- [ ] Custom theme import/export + +### Extension Points +- Theme picker component +- User preference persistence +- Custom theme builder interface +- Integration with system accent colors + +## Contributing + +When contributing theme improvements: + +1. **Use pre-built themes when possible** - Avoid custom themes unless necessary +2. **Test in both light and dark modes** +3. **Ensure accessibility compliance** +4. **Document theme choices and rationale** +5. 
**Update this guide with new features**
+
+## References
+
+- [CodeMirror 6 Theming Guide](https://codemirror.net/docs/guide/#theming)
+- [@uiw/codemirror-themes Package](https://www.npmjs.com/package/@uiw/codemirror-themes)
+- [Theme Gallery](https://uiwjs.github.io/react-codemirror/#/theme/doc)
+- [Shell Mode Documentation](https://codemirror.net/5/mode/shell/)
+- [WCAG Color Contrast Guidelines](https://www.w3.org/WAI/WCAG21/Understanding/contrast-minimum.html)
\ No newline at end of file
diff --git a/ui/doc/THEMING_QUICK_REF.md b/ui/doc/THEMING_QUICK_REF.md
new file mode 100644
index 0000000..699f184
--- /dev/null
+++ b/ui/doc/THEMING_QUICK_REF.md
@@ -0,0 +1,101 @@
+# Quick Theme Customization Reference
+
+## Current Implementation
+
+**Light Mode**: BBEdit Theme (`@uiw/codemirror-theme-bbedit`)
+**Dark Mode**: One Dark Theme (`@codemirror/theme-one-dark`)
+
+Uses pre-built themes from the [@uiw/codemirror-themes](https://www.npmjs.com/package/@uiw/codemirror-themes) package.
+
+## Quick Theme Changes
+
+### 1. Switch to Different Pre-built Themes
+
+```bash
+# Install new theme package
+cd ui
+npm install @uiw/codemirror-theme-[theme-name]
+```
+
+```javascript
+// Update imports in CodeMirrorInput.svelte and SimpleCodeMirror.svelte
+import { newTheme } from '@uiw/codemirror-theme-[theme-name]';
+
+// Update getTheme() function
+function getTheme() {
+  return isDarkMode ? darkTheme : lightTheme;
+}
+```
+
+### 2. Popular Theme Combinations
+
+```javascript
+// GitHub Style
+import { githubLight, githubDark } from '@uiw/codemirror-theme-github';
+
+// Material Design
+import { materialLight, materialDark } from '@uiw/codemirror-theme-material';
+
+// VS Code Style
+import { vscodeDark } from '@uiw/codemirror-theme-vscode';
+import { bbedit } from '@uiw/codemirror-theme-bbedit'; // Light alternative
+```
+
+## Available Themes
+
+### Light Themes
+- `@uiw/codemirror-theme-bbedit` ⭐ **Current**
+- `@uiw/codemirror-theme-github`
+- `@uiw/codemirror-theme-xcode`
+- `@uiw/codemirror-theme-eclipse`
+- `@uiw/codemirror-theme-material`
+
+### Dark Themes
+- `@codemirror/theme-one-dark` ⭐ **Current**
+- `@uiw/codemirror-theme-atom-one`
+- `@uiw/codemirror-theme-dracula`
+- `@uiw/codemirror-theme-tokyo-night`
+- `@uiw/codemirror-theme-nord`
+
+## Testing Changes
+
+```bash
+cd ui
+npm run dev   # Test in development
+npm run build # Verify build works
+```
+
+## Files to Modify
+
+- `ui/src/components/CodeMirrorInput.svelte` - Main command input
+- `ui/src/components/SimpleCodeMirror.svelte` - Block/comment input
+
+## Theme Gallery
+
+Visit the [Theme Gallery](https://uiwjs.github.io/react-codemirror/#/theme/doc) to preview all available themes.
+
+## Custom Themes
+
+For advanced customization, use the `createTheme` function:
+
+```javascript
+import { createTheme } from '@uiw/codemirror-themes';
+import { tags as t } from '@lezer/highlight';
+
+const customTheme = createTheme({
+  theme: 'light', // or 'dark'
+  settings: {
+    background: '#ffffff',
+    foreground: '#333333',
+    // ... more settings
+  },
+  styles: [
+    { tag: t.keyword, color: '#d73a49' },
+    { tag: t.string, color: '#032f62' },
+    // ... more styles
+  ],
+});
+```
+
+## Full Documentation
+
+See [THEMING.md](./THEMING.md) for the complete guide with examples, troubleshooting, and best practices. 
\ No newline at end of file diff --git a/ui/doc/URL_PARAMETERS.md b/ui/doc/URL_PARAMETERS.md new file mode 100644 index 0000000..beabde4 --- /dev/null +++ b/ui/doc/URL_PARAMETERS.md @@ -0,0 +1,224 @@ +# CLT UI - URL Parameter System + +## Overview + +CLT UI supports a comprehensive URL parameter system for deep linking, automated workflows, and CI/CD integration. The system includes Git safety features and state preservation. + +## Supported Parameters + +### `test_path` +**Purpose**: Auto-open specific test file +**Type**: String +**Example**: `?test_path=core/show-threads.rec` + +```bash +# Open specific test file +http://localhost:9151/?test_path=buddy/test-buddy.rec + +# Open nested test file +http://localhost:9151/?test_path=integration/auth/login.rec +``` + +### `docker_image` +**Purpose**: Set Docker image for test execution +**Type**: String +**Example**: `?docker_image=manticore:dev` + +```bash +# Set custom Docker image +http://localhost:9151/?docker_image=ghcr.io/manticoresoftware/manticoresearch:test-kit-latest + +# Use local image +http://localhost:9151/?docker_image=my-local-image:latest +``` + +### `branch` +**Purpose**: Auto-checkout and pull specified branch +**Type**: String +**Example**: `?branch=feature-branch` +**Safety**: Checks for unstaged changes before proceeding + +```bash +# Switch to feature branch +http://localhost:9151/?branch=feature-auth-fixes + +# Switch to main branch +http://localhost:9151/?branch=main +``` + +### `failed_tests[]` +**Purpose**: Highlight failed tests with red "F" indicator +**Type**: Array of strings +**Example**: `?failed_tests[]=test1.rec&failed_tests[]=test2.rec` + +```bash +# Highlight single failed test +http://localhost:9151/?failed_tests[]=core/test-error.rec + +# Highlight multiple failed tests +http://localhost:9151/?failed_tests[]=core/test1.rec&failed_tests[]=api/test2.rec&failed_tests[]=integration/test3.rec +``` + +## Combined Examples + +### Development Workflow +```bash +# Open specific test with custom Docker image +http://localhost:9151/?test_path=buddy/test-buddy.rec&docker_image=manticore:dev +``` + +### CI/CD Integration +```bash +# Switch branch and highlight failed tests from CI +http://localhost:9151/?branch=feature-fixes&failed_tests[]=core/test1.rec&failed_tests[]=api/test2.rec +``` + +### Complete Workflow URL +```bash +# Full workflow: branch + file + image + failed tests +http://localhost:9151/?test_path=integration/auth.rec&branch=auth-fixes&docker_image=test:latest&failed_tests[]=integration/auth.rec&failed_tests[]=core/auth-helper.rec +``` + +## Git Safety Features + +### Unstaged Changes Detection + +When URL parameters include git-affecting operations (`branch` or `test_path`), the system automatically checks for unstaged changes: + +**Detection Triggers:** +- `branch` parameter (any branch switching) +- `test_path` parameter (file operations) + +**User Dialog:** +``` +You have unstaged changes in your repository. + +Proceeding will potentially modify your working directory when switching branches or pulling changes. + +Do you want to continue? 
+ +β€’ Click "OK" to continue (your changes may be affected) +β€’ Click "Cancel" to ignore URL parameters and keep current state +``` + +**Behavior:** +- **User clicks "OK"**: Proceeds with all URL parameters +- **User clicks "Cancel"**: Clears ALL URL parameters, maintains current state +- **Safe operations continue**: `failed_tests[]` highlighting still works (doesn't affect git state) + +### Error Handling + +**Non-existent Files:** +```bash +# If file doesn't exist +http://localhost:9151/?test_path=non-existent-file.rec +# Result: Shows error dialog, clears URL parameters +``` + +**Invalid Branch:** +```bash +# If branch doesn't exist +http://localhost:9151/?branch=non-existent-branch +# Result: Shows error message, maintains current branch +``` + +## Implementation Details + +### URL Parsing +```typescript +function parseUrlParams(): { + filePath?: string; + dockerImage?: string; + branch?: string; + failedTests?: string[]; +} { + const params = new URLSearchParams(window.location.search); + + return { + filePath: params.get('test_path') || undefined, + dockerImage: params.get('docker_image') || undefined, + branch: params.get('branch') || undefined, + failedTests: params.getAll('failed_tests[]') + }; +} +``` + +### Safety Check Integration +```typescript +// Check for git-affecting parameters +const hasGitAffectingParams = urlParams.branch || urlParams.filePath; +if (hasGitAffectingParams) { + const canProceed = await checkUnstagedChanges(); + if (!canProceed) { + // Clear URL parameters and stop processing + const url = new URL(window.location.href); + url.search = ''; + window.history.replaceState({}, '', url.toString()); + return; + } +} +``` + +### Failed Test Highlighting +```typescript +function isDirWithFailedTests(dirPath: string): boolean { + for (const failedTest of failedTestPaths) { + if (failedTest.startsWith(dirPath + '/')) { + return true; + } + } + return false; +} +``` + +## State Preservation + +### File Tree State +- **Expanded folders**: Maintained across URL parameter processing +- **Selected files**: Current selection preserved +- **Background polling**: Continues without disruption + +### URL Updates +- **Dynamic changes**: URL parameters can be updated without page reload +- **History management**: Proper browser history integration +- **State consistency**: UI state always matches URL parameters + +## Best Practices + +### For CI/CD Integration +1. **Always URL-encode file paths**: Use `encodeURIComponent()` for file paths with special characters +2. **Check git state**: Ensure clean working directory before using branch parameters +3. **Batch failed tests**: Include all failed tests in single URL for complete overview +4. **Use absolute URLs**: Include full domain for reliable linking + +### For Development +1. **Test with unstaged changes**: Verify safety dialogs work correctly +2. **Verify state preservation**: Ensure user interactions aren't disrupted +3. **Check error handling**: Test with non-existent files and branches +4. 
**Validate highlighting**: Confirm failed test indicators appear correctly + +### URL Encoding Examples +```bash +# File with spaces or special characters +test_path=core%2Fshow-threads.rec # core/show-threads.rec + +# Multiple failed tests +failed_tests[]=test%20with%20spaces.rec&failed_tests[]=core%2Fnested%2Ftest.rec +``` + +## Security Considerations + +### Path Validation +- All file paths are validated against user's allowed directory +- Path traversal attacks (`../`) are prevented +- Only `.rec` and `.recb` files are accessible + +### Git Operations +- All git operations require authentication +- Users can only access their own repository clone +- Branch operations are limited to user's permissions + +### Parameter Sanitization +- All URL parameters are properly decoded and validated +- Malicious input is rejected with appropriate error messages +- Git commands are executed with proper escaping \ No newline at end of file diff --git a/ui/gitRoutes.js b/ui/gitRoutes.js new file mode 100644 index 0000000..e9c8f6d --- /dev/null +++ b/ui/gitRoutes.js @@ -0,0 +1,776 @@ +import path from 'path'; +import fs from 'fs/promises'; +import simpleGit from 'simple-git'; +import { + getUserRepoPath, + getUserTestPath, + ensureGitRemoteWithToken, + slugify +} from './routes.js'; +import { getDefaultBranch } from './helpers.js'; + +// Setup Git routes +export function setupGitRoutes(app, isAuthenticated, dependencies) { + const { + WORKDIR, + ROOT_DIR, + REPO_URL, + getAuthConfig + } = dependencies; + + // API endpoint to get git status information + app.get('/api/git-status', isAuthenticated, async (req, res) => { + try { + // Check if user is authenticated with GitHub + if (!req.user || !req.user.username) { + return res.status(401).json({ error: 'GitHub authentication required' }); + } + + // Get the user's repo path + const userRepoPath = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const repoExists = await fs.access(userRepoPath).then(() => true).catch(() => false); + + if (!repoExists) { + return res.status(404).json({ error: 'Repository not found' }); + } + + try { + // Initialize simple-git with the user's repo path + const git = simpleGit({ baseDir: userRepoPath }); + + // Get current branch + const branchSummary = await git.branch(); + const currentBranch = branchSummary.current; + console.log(`Current branch: ${currentBranch}`); + + // Check if the branch is a PR branch + const isPrBranch = currentBranch.startsWith('clt-ui-'); + console.log(`Is PR branch: ${isPrBranch}`); + + // Get status information + const status = await git.status(); + console.log('Git status:', status); + + // Parse the status to get modified files + const modifiedFiles = []; + const modifiedDirs = new Set(); + + const testDir = getUserTestPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + // Get the relative path to the test directory from the repo root + const relativeTestPath = path.relative(userRepoPath, testDir); + console.log(`Relative test path: ${relativeTestPath}`); + + // Process modified, created, and deleted files + const allChangedFiles = [ + ...status.modified, + ...status.created, + ...status.deleted, + ...status.not_added + ]; + + for (const filePath of allChangedFiles) { + console.log(`Checking file: ${filePath}`); + + // Check if this file is in the test directory + if (filePath.startsWith(relativeTestPath)) { + // Determine the status code + let statusCode = ''; + if (status.modified.includes(filePath)) statusCode = 'M'; + else if (status.created.includes(filePath)) statusCode = 'A'; + 
else if (status.deleted.includes(filePath)) statusCode = 'D'; + else if (status.not_added.includes(filePath)) statusCode = '??'; + + modifiedFiles.push({ + path: filePath, + status: statusCode + }); + + // Add all parent directories to modifiedDirs set + // Start with the file's directory + let dirPath = path.dirname(filePath); + while (dirPath && dirPath !== '.' && dirPath !== '/') { + modifiedDirs.add(dirPath); + dirPath = path.dirname(dirPath); + } + } + } + + // Check if there are any changes to commit in the test directory + const hasChanges = modifiedFiles.length > 0; + + return res.json({ + success: true, + currentBranch, + isPrBranch, + hasChanges, + modifiedFiles, + modifiedDirs: Array.from(modifiedDirs), + testPath: relativeTestPath + }); + } catch (gitError) { + console.error('Git operation error:', gitError); + return res.status(500).json({ + error: 'Git operation failed', + details: gitError.message + }); + } + } catch (error) { + console.error('Error getting git status:', error); + res.status(500).json({ error: `Failed to get git status: ${error.message}` }); + } + }); + + // API endpoint to get current branch information + app.get('/api/current-branch', isAuthenticated, async (req, res) => { + try { + // Check if user is authenticated with GitHub + if (!req.user || !req.user.username) { + return res.status(401).json({ error: 'GitHub authentication required' }); + } + + // Get the user's repo path + const userRepoPath = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const repoExists = await fs.access(userRepoPath).then(() => true).catch(() => false); + + if (!repoExists) { + return res.status(404).json({ error: 'Repository not found' }); + } + + try { + // Initialize simple-git with the user's repo path + const git = simpleGit({ baseDir: userRepoPath }); + + // Get branch information + const branchSummary = await git.branch(); + const currentBranch = branchSummary.current; + console.log(`Current branch: ${currentBranch}`); + + // Get remote repository URL + const remoteUrl = await git.remote(['get-url', 'origin']); + const cleanRemoteUrl = remoteUrl.replace(/https:\/\/[^@]+@/, 'https://'); + console.log(`Remote repository URL: ${cleanRemoteUrl}`); + + // Get default branch (cached); note: use userRepoPath here, there is no + // userRepo variable in this endpoint's scope + const defaultBranch = await getDefaultBranch(git, userRepoPath); + console.log(`Default branch: ${defaultBranch}`); + + return res.json({ + success: true, + currentBranch, + defaultBranch, + repository: cleanRemoteUrl + }); + } catch (gitError) { + console.error('Git operation error:', gitError); + return res.status(500).json({ + error: 'Git operation failed', + details: gitError.message + }); + } + } catch (error) { + console.error('Error getting current branch:', error); + res.status(500).json({ error: `Failed to get current branch: ${error.message}` }); + } + }); + + // API endpoint to reset and sync to a specific branch + app.post('/api/reset-to-branch', isAuthenticated, async (req, res) => { + try { + const { branch } = req.body; + + if (!branch) { + return res.status(400).json({ error: 'Branch name is required' }); + } + + // Check if user is authenticated with GitHub + if (!req.user || !req.user.username) { + return res.status(401).json({ error: 'GitHub authentication required' }); + } + + // Get the user's repo path + const userRepoPath = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const repoExists = await fs.access(userRepoPath).then(() => true).catch(() => false); + + if (!repoExists) { + return res.status(404).json({ error: 'Repository not found' }); + } + + try {
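+ // Flow of the steps below: stash any local changes so they are not lost, + // fetch all remotes, then hard-reset the local branch to its origin + // counterpart so the working tree exactly matches the remote state.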
+ // Initialize simple-git with the user's repo path + const git = simpleGit(userRepoPath); + await ensureGitRemoteWithToken(git, req.user.token, REPO_URL); + + // Get current status to check for changes + const status = await git.status(); + let stashMessage = ''; + + // Stash changes if needed + if (!status.isClean()) { + const timestamp = new Date().toISOString(); + stashMessage = `Auto-stashed by CLT-UI for ${req.user.username} at ${timestamp}`; + await git.stash(['push', '-m', stashMessage]); + console.log(`Stashed current changes: ${stashMessage}`); + } + + // Fetch latest from remote + await git.fetch(['--all']); + console.log('Fetched latest updates from remote'); + + // Get the list of branches to check if the requested branch exists + const branches = await git.branch(); + const branchExists = branches.all.includes(branch); + + if (branchExists) { + // Local branch exists, checkout and reset to remote + await git.checkout(branch); + console.log(`Switched to branch: ${branch}`); + + // Reset to origin's version of the branch + await git.reset(['--hard', `origin/${branch}`]); + console.log(`Reset to origin/${branch}`); + } else { + // Local branch doesn't exist, create tracking branch + await git.checkout(['-b', branch, `origin/${branch}`]); + console.log(`Created and checked out branch ${branch} tracking origin/${branch}`); + } + + return res.json({ + success: true, + branch, + repository: status.tracking, + stashed: status.isClean() ? null : stashMessage, + message: `Successfully reset to branch: ${branch}` + }); + } catch (gitError) { + console.error('Git operation error:', gitError); + return res.status(500).json({ + error: 'Git operation failed', + details: gitError.message || 'Unknown error during git operation' + }); + } + } catch (error) { + console.error('Error resetting to branch:', error); + res.status(500).json({ error: `Failed to reset to branch: ${error.message}` }); + } + }); + + // Checkout and pull branch endpoint + app.post('/api/checkout-and-pull', isAuthenticated, async (req, res) => { + try { + const { branch } = req.body; + + if (!branch) { + return res.status(400).json({ error: 'Branch name is required' }); + } + + // Check if user is authenticated with GitHub + if (!req.user || !req.user.username) { + return res.status(401).json({ error: 'GitHub authentication required' }); + } + + // Get the user's repo path + const userRepoPath = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const repoExists = await fs.access(userRepoPath).then(() => true).catch(() => false); + + if (!repoExists) { + return res.status(404).json({ error: 'Repository not found' }); + } + + try { + // Initialize simple-git with the user's repo path + const git = simpleGit(userRepoPath); + await ensureGitRemoteWithToken(git, req.user.token, REPO_URL); + + // Fetch latest from remote + await git.fetch(['--all']); + console.log('Fetched latest updates from remote'); + + // Get the list of branches to check if the requested branch exists + const branches = await git.branch(); + const localBranchExists = branches.all.includes(branch); + const remoteBranchExists = branches.all.includes(`remotes/origin/${branch}`); + + if (localBranchExists) { + // Local branch exists, checkout and pull + await git.checkout(branch); + console.log(`Switched to existing branch: ${branch}`); + + try { + await git.pull('origin', branch); + console.log(`Pulled latest changes for branch: ${branch}`); + } catch (pullError) { + console.warn(`Warning: Could not pull latest changes for ${branch}:`, 
pullError.message); + // Continue anyway - the checkout was successful + } + } else if (remoteBranchExists) { + // Remote branch exists, create local tracking branch + await git.checkout(['-b', branch, `origin/${branch}`]); + console.log(`Created and checked out branch ${branch} tracking origin/${branch}`); + } else { + return res.status(400).json({ + error: `Branch '${branch}' not found locally or on remote` + }); + } + + // Get current status to confirm + const status = await git.status(); + const currentBranch = status.current; + + return res.json({ + success: true, + currentBranch: currentBranch, + message: `Successfully checked out and pulled branch: ${currentBranch}` + }); + } catch (gitError) { + console.error('Git operation error:', gitError); + return res.status(500).json({ + error: 'Git operation failed', + details: gitError.message || 'Unknown error during git operation' + }); + } + } catch (error) { + console.error('Error in checkout-and-pull:', error); + res.status(500).json({ error: `Failed to checkout and pull branch: ${error.message}` }); + } + }); + + app.post('/api/create-pr', isAuthenticated, async (req, res) => { + const { title, description } = req.body; + if (!title) return res.status(400).json({ error: 'PR title is required' }); + // Validate the description up front: `gh pr create` is invoked with --body below, + // and failing here avoids committing and pushing before rejecting the request + if (!description || !description.trim()) return res.status(400).json({ error: 'PR description is required' }); + + const username = req.user.username; + const userRepo = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + + // slugify title for branch name + const branchName = `clt-ui-${slugify(title)}`; + + const git = simpleGit({ baseDir: userRepo }); + await ensureGitRemoteWithToken(git, req.user.token, REPO_URL); + + try { + // fetch all + await git.fetch(['--all']); + + // list local + remote branches + const local = await git.branchLocal(); + const remote = await git.branch(['-r']); + const existsLocally = local.all.includes(branchName); + const existsRemote = remote.all.includes(`origin/${branchName}`); + const branchExists = existsLocally || existsRemote; + + const { spawn } = await import('child_process'); + + // Helper to run gh commands safely with spawn (no shell injection) + const execGhCommand = (args) => new Promise((resolve, reject) => { + console.log('Running gh command with args:', args); + const gh = spawn('gh', args, { + cwd: userRepo, + env: { ...process.env, GH_TOKEN: req.user.token }, + stdio: ['pipe', 'pipe', 'pipe'] + }); + + let stdout = ''; + let stderr = ''; + + gh.stdout.on('data', (data) => { + stdout += data.toString(); + }); + + gh.stderr.on('data', (data) => { + stderr += data.toString(); + }); + + gh.on('close', (code) => { + if (code === 0) { + resolve(stdout.trim()); + } else { + console.error(`gh command failed with code ${code}: ${stderr}`); + reject(new Error(stderr || `gh command failed with code ${code}`)); + } + }); + + gh.on('error', (error) => { + console.error('gh command error:', error); + reject(error); + }); + }); + + if (branchExists) { + // check if there's an OPEN PR for that head using safe spawn + const prArgs = ['pr', 'list', '--state', 'open', '--head', branchName, '--json', 'url']; + const prList = await execGhCommand(prArgs).catch(() => ''); + + if (prList && prList.trim() && prList.trim() !== '[]') {
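+ // `gh pr list --json url` prints a JSON array such as [{"url":"https://github.com/..."}], + // or the literal string "[]" when no open PR matches the head branch; the check + // above and the JSON.parse below both rely on that output shape.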
// Parse the output to confirm there are actual open PRs + let actualPrs = []; + try { + actualPrs = JSON.parse(prList); + } catch (e) { + console.log('Failed to parse PR list:', e); + actualPrs = []; + } + + if (Array.isArray(actualPrs) && actualPrs.length > 0) { + // Branch AND PR both exist → just commit & push to existing PR + console.log('Found existing PR, committing to existing branch'); + await git.checkout(branchName); + await git.add('.'); + const commit = await git.commit(title); + await git.push('origin', branchName); + + return res.json({ + success: true, + branch: branchName, + commit: commit.commit, + pr: actualPrs[0]?.url, + message: 'Committed and pushed to existing PR branch.' + }); + } + } + + // If we get here, the branch exists but has NO PR → check out the existing branch and create one + console.log('Branch exists but no PR found, checking out existing branch to create PR'); + await git.checkout(branchName); + await git.add('.'); + await git.commit(title); + await git.push('origin', branchName); + + // Now create PR from existing branch - continue to PR creation logic below + console.log('Committed to existing branch, now creating PR'); + } else { + // branch does not exist → create it, commit & push, open PR + console.log('Branch does not exist, creating new branch'); + await git.checkoutLocalBranch(branchName); + await git.add('.'); + await git.commit(title); + await git.push('origin', branchName, ['--set-upstream']); + } + + // At this point, we have a branch with committed changes, now create the PR + // Get the commit info for response + const commitInfo = await git.log({ maxCount: 1 }); + const latestCommit = commitInfo.latest; + + // Determine base branch using cached helper + const baseBranch = await getDefaultBranch(git, userRepo); + + // Build gh pr create arguments safely (no shell injection possible); + // description was validated at the top of the handler + const prArgs = [ + 'pr', 'create', + '--title', title, + '--head', branchName, + '--base', baseBranch, + '--body', description + ]; + + console.log('Creating PR with args:', prArgs); + const prOutput = await execGhCommand(prArgs); + console.log('PR creation output:', prOutput); + + const prUrlMatch = prOutput.match(/https:\/\/github\.com\/[^\s]+/); + const prUrl = prUrlMatch?.[0] || null; + + if (!prUrl) { + // PR creation failed - don't return success + console.error('PR creation failed - no URL found in output:', prOutput); + return res.status(500).json({ + error: 'Pull request creation failed. No PR URL returned by GitHub CLI. Check your permissions and try again.' + }); + } + + return res.json({ + success: true, + branch: branchName, + commit: latestCommit?.hash || 'unknown', + pr: prUrl, + message: 'Pull request created successfully.'
+ }); + } + catch (err) { + console.error('create-pr error:', err); + return res.status(500).json({ error: err.toString() }); + } + }); + + // API endpoint to check PR status for current branch + app.get('/api/pr-status', isAuthenticated, async (req, res) => { + const username = req.user.username; + const userRepo = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + + const git = simpleGit({ baseDir: userRepo }); + await ensureGitRemoteWithToken(git, req.user.token, REPO_URL); + + try { + // Get current branch + const status = await git.status(); + const currentBranch = status.current; + + // Get default branch (cached) + const defaultBranch = await getDefaultBranch(git, userRepo); + console.log(`Default branch: ${defaultBranch}`); + + // Check if this is a PR branch (not default branch and starts with clt-ui-) + const isPrBranch = currentBranch && + currentBranch !== defaultBranch && + currentBranch.startsWith('clt-ui-'); + + const { exec } = await import('child_process'); + const execPromise = (cmd) => new Promise((resolve, reject) => { + exec(cmd, { cwd: userRepo, env: { ...process.env, GH_TOKEN: req.user.token } }, + (err, stdout, stderr) => { + if (err) { + reject(stderr || err); + } else { + resolve(stdout.trim()); + } + } + ); + }); + + let existingPr = null; + let recentCommits = []; + + // Check for existing PR for current branch using GitHub CLI + // Only check for PR if we're on a potential PR branch + if (isPrBranch) { + try { + console.log('🔍 Checking for existing PR on branch:', currentBranch); + + // Method 1: Use gh pr list to find PRs for this specific branch + const prListCmd = `gh pr list --state open --head ${currentBranch} --json url,title,number`; + console.log('Running command:', prListCmd); + + const prListOutput = await execPromise(prListCmd); + console.log('gh pr list result:', prListOutput); + + if (prListOutput && prListOutput.trim() && prListOutput.trim() !== '[]') { + const prs = JSON.parse(prListOutput); + console.log('Parsed PRs:', prs); + + if (Array.isArray(prs) && prs.length > 0) { + existingPr = { + url: prs[0].url, + title: prs[0].title, + number: prs[0].number + }; + console.log('✅ Found existing PR:', existingPr); + } + } else { + console.log('❌ No PR found for branch:', currentBranch); + } + } catch (error) { + console.log('❌ Error checking for PR:', error.message); + + // Fallback: try gh pr view (if we're currently on the PR branch) + try { + console.log('Trying fallback: gh pr view'); + const prViewOutput = await execPromise('gh pr view --json url,title,number'); + console.log('gh pr view result:', prViewOutput); + + if (prViewOutput && prViewOutput.trim()) { + const prData = JSON.parse(prViewOutput); + existingPr = { + url: prData.url, + title: prData.title, + number: prData.number + }; + console.log('✅ Found existing PR via gh pr view:', existingPr); + } + } catch (viewError) { + console.log('❌ gh pr view also failed:', viewError.message); + } + } + } else { + console.log('ℹ️ Not on a PR branch, skipping PR detection'); + } + + // Get recent commits for current branch (last 5) + try { + const logOutput = await git.log({ maxCount: 5 }); + recentCommits = logOutput.all.map(commit => ({ + hash: commit.hash.substring(0, 8), + message: commit.message, + author: commit.author_name, + date: commit.date, + authorEmail: commit.author_email + })); + } catch (error) { + console.log('Error getting commit history:', error.message); + } + + res.json({ + currentBranch, + isPrBranch, + existingPr, + recentCommits, + hasChanges: !status.isClean(),
+ timestamp: Date.now() + }); + + } catch (error) { + console.error('Error checking PR status:', error); + res.status(500).json({ error: `Failed to check PR status: ${error.message}` }); + } + }); + + // API endpoint to commit changes to existing PR branch + app.post('/api/commit-changes', isAuthenticated, async (req, res) => { + const { message } = req.body; + if (!message) return res.status(400).json({ error: 'Commit message is required' }); + + const username = req.user.username; + const userRepo = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + + const git = simpleGit({ baseDir: userRepo }); + await ensureGitRemoteWithToken(git, req.user.token, REPO_URL); + + try { + // Get current branch and verify it's a PR branch + const status = await git.status(); + const currentBranch = status.current; + + if (!currentBranch?.startsWith('clt-ui-')) { + return res.status(400).json({ + error: 'Can only commit to PR branches (branches starting with clt-ui-)' + }); + } + + // Check if there are changes to commit + if (status.isClean()) { + return res.status(400).json({ error: 'No changes to commit' }); + } + + // Commit and push changes + await git.add('.'); + const commit = await git.commit(message); + await git.push('origin', currentBranch); + + // Get PR URL if exists + const { exec } = await import('child_process'); + const execPromise = (cmd) => new Promise((resolve, reject) => { + exec(cmd, { cwd: userRepo, env: { ...process.env, GH_TOKEN: req.user.token } }, + (err, stdout, stderr) => { + if (err) { + reject(stderr || err); + } else { + resolve(stdout.trim()); + } + } + ); + }); + + let prUrl = null; + try { + const prList = await execPromise( + `gh pr list --state open --head ${currentBranch} --json url` + ); + if (prList) { + const prs = JSON.parse(prList); + if (prs.length > 0) { + prUrl = prs[0].url; + } + } + } catch (error) { + console.log('Could not get PR URL:', error.message); + } + + res.json({ + success: true, + branch: currentBranch, + commit: commit.commit, + commitHash: commit.commit.substring(0, 8), + pr: prUrl, + message: 'Changes committed and pushed to PR successfully' + }); + + } catch (error) { + console.error('Error committing changes:', error); + res.status(500).json({ error: `Failed to commit changes: ${error.message}` }); + } + }); + + // Checkout a single file to discard changes + app.post('/api/checkout-file', isAuthenticated, async (req, res) => { + try { + // Check if user is authenticated with GitHub + if (!req.user || !req.user.username) { + return res.status(401).json({ error: 'GitHub authentication required' }); + } + + const { filePath } = req.body; + if (!filePath) { + return res.status(400).json({ error: 'File path is required' }); + } + + // Get the user's repo path + const userRepoPath = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const repoExists = await fs.access(userRepoPath).then(() => true).catch(() => false); + + if (!repoExists) { + return res.status(404).json({ error: 'Repository not found' }); + } + + try { + // Initialize simple-git with the user's repo path + const git = simpleGit(userRepoPath); + + // Check if file exists and has changes + const status = await git.status(); + const fileHasChanges = status.modified.includes(filePath) || + status.not_added.includes(filePath) || + status.deleted.includes(filePath); + + if (!fileHasChanges) { + return res.status(400).json({ error: 'File has no changes to discard' }); + } + + // Discard the changes: untracked files have no committed version to + // restore, so remove them; tracked files are checked out from HEAD + if (status.not_added.includes(filePath)) { + await fs.rm(path.join(userRepoPath, filePath), { force: true }); + } else { + await git.checkout(['HEAD', '--', filePath]); + }
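+ // Note: `git checkout HEAD -- <path>` only resets files known to HEAD, which is + // why the untracked (not_added) case above is handled with a plain file removal.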
+ console.log(`Successfully discarded changes to file: ${filePath}`); + + return res.json({ + success: true, + message: `Successfully discarded changes to ${filePath}`, + filePath: filePath + }); + + } catch (gitError) { + console.error('Git checkout error:', gitError); + return res.status(500).json({ + error: 'Failed to checkout file', + details: gitError.message + }); + } + + } catch (error) { + console.error('Error in checkout-file endpoint:', error); + res.status(500).json({ error: 'Failed to checkout file' }); + } + }); +} \ No newline at end of file diff --git a/ui/helpers.js b/ui/helpers.js new file mode 100644 index 0000000..bb75a97 --- /dev/null +++ b/ui/helpers.js @@ -0,0 +1,212 @@ +import path from 'path'; +import fs from 'fs/promises'; +import { writeFileSync, appendFileSync, existsSync, mkdirSync, readdirSync, statSync, readFileSync } from 'fs'; + +// Cache for default branch detection per repository +const defaultBranchCache = new Map(); + +/** + * Get the default branch for a repository with caching + * @param {Object} git - SimpleGit instance + * @param {string} repoPath - Repository path for cache key + * @returns {Promise<string>} Default branch name + */ +export async function getDefaultBranch(git, repoPath) { + // Check cache first + if (defaultBranchCache.has(repoPath)) { + return defaultBranchCache.get(repoPath); + } + + let defaultBranch; + try { + // Try to get the default branch from the HEAD reference + defaultBranch = await git.revparse(['--abbrev-ref', 'origin/HEAD']); + defaultBranch = defaultBranch.replace('origin/', ''); + } catch (headError) { + console.warn('Could not determine default branch from HEAD, using fallback:', headError.message); + + try { + // Fallback: Check if main or master exists + const branches = await git.branch(['-r']); + if (branches.all.includes('origin/main')) { + defaultBranch = 'main'; + } else if (branches.all.includes('origin/master')) { + defaultBranch = 'master'; + } else { + // Final fallback + defaultBranch = 'main'; + } + } catch (branchError) { + console.warn('Could not determine default branch from remote branches, defaulting to main:', branchError.message); + defaultBranch = 'main'; + } + } + + // Cache the result + defaultBranchCache.set(repoPath, defaultBranch); + console.log(`Default branch cached for ${repoPath}: ${defaultBranch}`); + + return defaultBranch; +} + +/** + * Clear the default branch cache (useful for testing or if repo changes) + * @param {string} repoPath - Optional specific repo path to clear, or clear all if not provided + */ +export function clearDefaultBranchCache(repoPath = null) { + if (repoPath) { + defaultBranchCache.delete(repoPath); + } else { + defaultBranchCache.clear(); + } +} + +// Helper function to save session data persistently +export function saveSessionToPersistentStorage(session, username) { + const logDir = process.env.ASK_AI_LOG; + if (!logDir) { + return; // No persistent storage configured + } + + try { + const userLogDir = path.join(logDir, username); + + // Create user directory if it doesn't exist (synchronously, so the + // writeFileSync below cannot race ahead of an in-flight async mkdir) + if (!existsSync(userLogDir)) { + mkdirSync(userLogDir, { recursive: true }); + }
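+ // Log files are named `<sessionName>_<ISO timestamp>.log`; the `:` and `.` in + // the timestamp are replaced with `-` below so the name is filesystem-safe.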
+ // Create log file name with timestamp + const timestamp = session.startTime ? session.startTime.toISOString().replace(/[:.]/g, '-') : new Date().toISOString().replace(/[:.]/g, '-'); + const logFileName = `${session.name || 'session'}_${timestamp}.log`; + const logFilePath = path.join(userLogDir, logFileName); + + // Prepare session metadata + const metadata = { + sessionId: session.id, + sessionName: session.name, + startTime: session.startTime, + endTime: session.endTime, + completed: session.completed, + cancelled: session.cancelled, + failed: session.failed, + exitCode: session.exitCode, + error: session.error, + cost: session.cost, + active: session.active || false + }; + + // Save session data + const sessionData = { + metadata, + logs: session.logs, + output: session.output + }; + + writeFileSync(logFilePath, JSON.stringify(sessionData, null, 2)); + console.log(`Session ${session.id} saved to ${logFilePath}`); + } catch (error) { + console.error('Failed to save session to persistent storage:', error); + } +} + +// Utility function to extract cost from logs (check last 100 lines or find first match) +export function extractCostFromLogs(logs) { + if (!logs || logs.length === 0) return null; + + const costRegex = /cost:\s*\$(\d+\.?\d*)/gi; + + // Check last 100 lines first for most recent cost + const linesToCheck = logs.slice(-100); + for (let i = linesToCheck.length - 1; i >= 0; i--) { + const matches = [...linesToCheck[i].matchAll(costRegex)]; + if (matches.length > 0) { + return parseFloat(matches[matches.length - 1][1]); + } + } + + // If no cost found in last 100 lines, check all logs for first occurrence + for (let i = 0; i < logs.length; i++) { + const matches = [...logs[i].matchAll(costRegex)]; + if (matches.length > 0) { + return parseFloat(matches[0][1]); + } + } + + return null; +} + +// Utility function to sanitize session names for file system compatibility +export function sanitizeSessionName(name) { + if (!name || !name.trim()) return ''; + + return name.trim() + .toLowerCase() + .replace(/[^a-z0-9\s-]/g, '') // Remove special characters except spaces and hyphens + .replace(/\s+/g, '-') // Replace spaces with hyphens + .replace(/-+/g, '-') // Replace multiple hyphens with single hyphen + .replace(/^-|-$/g, ''); // Remove leading/trailing hyphens +} + +// Utility function to write logs synchronously to both console and file +export function writeLogEntry(session, logEntry, logType = 'INFO') { + // Add to session logs array + session.logs.push(logEntry); + + // Write to console (same as before) + console.log(`Session ${session.id} ${logType.toLowerCase()}:`, logEntry); + + // Write to log file immediately if configured + if (session.logFile) { + try { + appendFileSync(session.logFile, logEntry); + } catch (error) { + console.error(`Failed to write to log file ${session.logFile}:`, error); + } + } + + // Update cost in real-time + session.cost = extractCostFromLogs(session.logs); + + // Update session metadata in persistent storage incrementally + if (session.logFile) { + updateSessionMetadata(session); + } +}
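+// The .meta file written by the function below is a JSON sidecar to the .log file; +// the session-listing endpoints prefer it because it holds the most recently +// updated structured state while a session is still running.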
+// Utility function to update session metadata incrementally +export function updateSessionMetadata(session) { + if (!session.logFile) return; + + try { + const metadata = { + sessionId: session.id, + sessionName: session.name, + startTime: session.startTime, + endTime: session.endTime, + completed: session.completed, + cancelled: session.cancelled, + failed: session.failed, + exitCode: session.exitCode, + error: session.error, + cost: session.cost, + active: session.active || false, + lastUpdated: new Date().toISOString() + }; + + const sessionData = { + metadata, + logs: session.logs, + output: session.output || session.logs.join('') + }; + + // Write metadata to a separate .meta file for incremental updates + const metaFile = session.logFile.replace('.log', '.meta'); + writeFileSync(metaFile, JSON.stringify(sessionData, null, 2)); + } catch (error) { + console.error('Failed to update session metadata:', error); + } +} \ No newline at end of file diff --git a/ui/index.html b/ui/index.html new file mode 100644 index 0000000..5010f97 --- /dev/null +++ b/ui/index.html @@ -0,0 +1,13 @@ +<!DOCTYPE html> +<html lang="en"> + <head> + <meta charset="UTF-8" /> + <meta name="viewport" content="width=device-width, initial-scale=1.0" /> + <title>CLT Editor</title> + </head> + <body> + <div id="app"></div> + <script type="module" src="/src/main.ts"></script> + </body> +</html>
\ No newline at end of file diff --git a/ui/interactiveRoutes.js b/ui/interactiveRoutes.js new file mode 100644 index 0000000..f2d6214 --- /dev/null +++ b/ui/interactiveRoutes.js @@ -0,0 +1,617 @@ +import path from 'path'; +import fs from 'fs/promises'; +import { existsSync, readdirSync, statSync, readFileSync } from 'fs'; +import { getUserRepoPath } from './routes.js'; +import { + saveSessionToPersistentStorage, + extractCostFromLogs, + sanitizeSessionName, + writeLogEntry, + updateSessionMetadata +} from './helpers.js'; + +// Setup Interactive Session routes +export function setupInteractiveRoutes(app, isAuthenticated, dependencies) { + const { + WORKDIR, + ROOT_DIR, + getAuthConfig + } = dependencies; + + // Interactive session endpoints + // Start a new interactive command session + app.post('/api/interactive/start', isAuthenticated, async (req, res) => { + try { + const { input, sessionName } = req.body; + + if (!input || !input.trim()) { + return res.status(400).json({ error: 'Input is required' }); + } + + // Check if user is authenticated + if (!req.user || !req.user.username) { + return res.status(401).json({ error: 'Authentication required' }); + } + + const username = req.user.username; + + // Check if user already has a running session + if (global.interactiveSessions[username] && global.interactiveSessions[username].running) { + return res.status(409).json({ error: 'Another command is already running for this user' }); + } + + // Check if continuing an existing session or creating a new one + let sessionId; + let sanitizedSessionName = sessionName ? sanitizeSessionName(sessionName) : ''; + let isSessionContinuation = false; + + // If sessionName is provided, check if we're continuing an existing session + if (sanitizedSessionName) { + // First check if there was a recently completed session with the same name + const recentSession = global.interactiveSessions[username]; + + if (recentSession && !recentSession.running && recentSession.name === sanitizedSessionName) { + // Continue the recent session + sessionId = recentSession.id; + isSessionContinuation = true; + console.log(`Continuing recent session: ${sessionId}`); + } else { + // Look for existing session files with this name + const logDir = process.env.ASK_AI_LOG; + if (logDir) { + const userLogDir = path.join(logDir, username); + if (existsSync(userLogDir)) { + const existingFiles = readdirSync(userLogDir) + .filter(file => file.endsWith('.log') || file.endsWith('.meta')) + .filter(file => file.startsWith(sanitizedSessionName + '_')); + + if (existingFiles.length > 0) { + // Continue existing session - use existing session ID from metadata + const latestFile = existingFiles + .map(file => { + const filePath = path.join(userLogDir, file); + const stats = statSync(filePath); + return { file, stats, filePath }; + }) + .sort((a, b) => b.stats.mtime - a.stats.mtime)[0]; + + try { + const sessionData = JSON.parse(readFileSync(latestFile.filePath, 'utf8')); + if (sessionData.metadata && sessionData.metadata.sessionId) { + sessionId = sessionData.metadata.sessionId; + isSessionContinuation = true; + console.log(`Continuing existing session from file: ${sessionId}`); + } + } catch (error) { + console.warn('Failed to read existing session metadata, creating new session:', error); + } + } + } + } + } + } + + // Generate new session ID if not continuing existing session + if (!sessionId) { + sessionId = sanitizedSessionName + ?
`${username}-${sanitizedSessionName}-${Date.now()}` + : `${username}-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`; + console.log(`Generated new session ID: ${sessionId}`); + } + + // Get the interactive command from environment + const askAiCommand = process.env.ASK_AI_COMMAND || 'docker run --rm -i ubuntu:latest bash -c "echo \\"Input received:\\"; cat; echo \\"\\nSleeping for 2 seconds...\\"; sleep 2; echo \\"Done!\\""'; + const askAiTimeout = parseInt(process.env.ASK_AI_TIMEOUT || '30000'); + + console.log(`Starting interactive session ${sessionId} for user ${username}`); + console.log(`Command: ${askAiCommand}`); + console.log(`Input: ${input}`); + console.log(`Timeout: ${askAiTimeout}ms`); + + // Create log file only if ASK_AI_LOG is configured + const logDir = process.env.ASK_AI_LOG; + let logFile = null; + if (logDir) { + const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); + const fileName = `${sessionName || sessionId}_${timestamp}.log`; + const userLogDir = path.join(logDir, username); + logFile = path.join(userLogDir, fileName); + + // Ensure directory exists synchronously before creating log file + try { + if (!existsSync(userLogDir)) { + await fs.mkdir(userLogDir, { recursive: true }); + console.log(`Created log directory: ${userLogDir}`); + } + } catch (error) { + console.error('Failed to create log directory:', error); + logFile = null; // Disable logging if directory creation fails + } + } + + // Initialize session + const session = { + id: sessionId, + name: sanitizedSessionName || sessionId, + username, + running: true, + completed: false, + cancelled: false, + failed: false, + logs: [], + output: '', + cost: null, + startTime: new Date(), + endTime: null, + logFile, + process: null, + timeout: null, + exitCode: null, + active: true // Mark as active session + }; + + global.interactiveSessions[username] = session; + + // Log session start information + const sessionStartLog = `=== SESSION START === +Session ID: ${sessionId} +Session Name: ${session.name} +Username: ${username} +Start Time: ${session.startTime.toISOString()} +Command: ${askAiCommand} +Timeout: ${askAiTimeout}ms +Input Length: ${input.length} characters +Log File: ${logFile || 'Not configured'} +=== INPUT === +${input} +=== OUTPUT === +`; + + // Write initial session information + writeLogEntry(session, sessionStartLog, 'START'); + + // Import child_process + const { spawn } = await import('child_process'); + + // Get user repository path for WORKDIR_PATH environment variable + const userRepoPath = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + console.log(`WORKDIR_PATH: ${userRepoPath}`); + + // Start the process with shell to handle complex commands + const childProcess = spawn('sh', ['-c', askAiCommand], { + stdio: ['pipe', 'pipe', 'pipe'], + env: { + ...process.env, + WORKDIR_PATH: userRepoPath, + SESSION_NAME: sanitizedSessionName || sessionId + } + }); + + session.process = childProcess; + + // Set up timeout + session.timeout = setTimeout(() => { + if (session.running && childProcess) { + console.log(`Session ${sessionId} timed out after ${askAiTimeout}ms`); + childProcess.kill('SIGTERM'); + + const timeoutLog = ` +=== SESSION TIMEOUT === +Session ID: ${sessionId} +Timeout: ${askAiTimeout}ms +Time: ${new Date().toISOString()} +Process terminated due to timeout +=== TIMEOUT END === +`; + + writeLogEntry(session, timeoutLog, 'TIMEOUT'); + } + }, askAiTimeout); + + // Send input to the process + childProcess.stdin.write(input); + childProcess.stdin.end(); + + // 
Handle stdout + childProcess.stdout.on('data', (data) => { + const output = data.toString(); + writeLogEntry(session, output, 'STDOUT'); + }); + + // Handle stderr + childProcess.stderr.on('data', (data) => { + const output = data.toString(); + const logEntry = `STDERR: ${output}`; + writeLogEntry(session, logEntry, 'STDERR'); + }); + + // Handle process completion + childProcess.on('close', (code) => { + session.running = false; + session.completed = true; + session.failed = code !== 0; + session.exitCode = code; + session.output = session.logs.join(''); + session.endTime = new Date(); + session.cost = extractCostFromLogs(session.logs); + session.active = false; // Mark as inactive + + // Log session completion + const completionLog = ` +=== SESSION END === +Session ID: ${sessionId} +End Time: ${session.endTime.toISOString()} +Exit Code: ${code} +Duration: ${session.endTime.getTime() - session.startTime.getTime()}ms +Cost: ${session.cost ? '$' + session.cost.toFixed(5) : 'N/A'} +Status: ${code === 0 ? 'SUCCESS' : 'FAILED'} +Total Log Entries: ${session.logs.length} +=== SESSION COMPLETE === +`; + + writeLogEntry(session, completionLog, 'END'); + + // Save session data persistently (final save) + saveSessionToPersistentStorage(session, username); + + // Clear timeout + if (session.timeout) { + clearTimeout(session.timeout); + session.timeout = null; + } + + console.log(`Session ${sessionId} completed with exit code: ${code}`); + + // Clean up after 5 minutes (but keep session data for history) + setTimeout(() => { + if (global.interactiveSessions[username] && global.interactiveSessions[username].id === sessionId) { + // Don't delete the session, just clean up the process reference + global.interactiveSessions[username].process = null; + console.log(`Cleaned up process for session ${sessionId}`); + } + }, 5 * 60 * 1000); + }); + + // Handle process error + childProcess.on('error', (error) => { + session.running = false; + session.completed = true; + session.failed = true; + session.error = error.message; + session.endTime = new Date(); + session.active = false; // Mark as inactive + + // Log error with details + const errorLog = ` +=== SESSION ERROR === +Session ID: ${sessionId} +Error Time: ${session.endTime.toISOString()} +Error Message: ${error.message} +Duration: ${session.endTime.getTime() - session.startTime.getTime()}ms +=== ERROR DETAILS === +${error.stack || error.message} +=== SESSION TERMINATED === +`; + + writeLogEntry(session, errorLog, 'ERROR'); + + session.output = session.logs.join(''); + session.cost = extractCostFromLogs(session.logs); + + // Save session data persistently + saveSessionToPersistentStorage(session, username); + + // Clear timeout + if (session.timeout) { + clearTimeout(session.timeout); + session.timeout = null; + } + + console.error(`Session ${sessionId} error:`, error); + }); + + res.json({ sessionId, status: 'started' }); + } catch (error) { + console.error('Error starting interactive session:', error); + res.status(500).json({ error: 'Failed to start interactive session' }); + } + }); + + // Get status of an interactive session + app.get('/api/interactive/status/:sessionId', isAuthenticated, async (req, res) => { + try { + const { sessionId } = req.params; + + if (!req.user || !req.user.username) { + return res.status(401).json({ error: 'Authentication required' }); + } + + const username = req.user.username; + const session = global.interactiveSessions[username]; + + if (!session || session.id !== sessionId) { + return res.status(404).json({ error: 
'Session not found' }); + } + + res.json({ + sessionId, + name: session.name, + running: session.running, + completed: session.completed, + cancelled: session.cancelled, + failed: session.failed, + logs: session.logs, + output: session.output, + cost: session.cost, + exitCode: session.exitCode, + error: session.error, + startTime: session.startTime, + endTime: session.endTime + }); + } catch (error) { + console.error('Error getting session status:', error); + res.status(500).json({ error: 'Failed to get session status' }); + } + }); + + // Cancel an interactive session + app.post('/api/interactive/cancel/:sessionId', isAuthenticated, async (req, res) => { + try { + const { sessionId } = req.params; + + if (!req.user || !req.user.username) { + return res.status(401).json({ error: 'Authentication required' }); + } + + const username = req.user.username; + const session = global.interactiveSessions[username]; + + if (!session || session.id !== sessionId) { + return res.status(404).json({ error: 'Session not found' }); + } + + if (session.process && session.running) { + session.process.kill('SIGTERM'); + session.running = false; + session.completed = true; + session.cancelled = true; + session.logs.push('\nProcess cancelled by user'); + session.output = session.logs.join(''); + session.cost = extractCostFromLogs(session.logs); + session.endTime = new Date(); + session.active = false; // Mark as inactive + + // Save session data persistently (including cancelled sessions) + saveSessionToPersistentStorage(session, username); + + // Clear timeout + if (session.timeout) { + clearTimeout(session.timeout); + session.timeout = null; + } + + console.log(`Session ${sessionId} cancelled by user`); + } + + res.json({ status: 'cancelled' }); + } catch (error) { + console.error('Error cancelling session:', error); + res.status(500).json({ error: 'Failed to cancel session' }); + } + }); + + // List all sessions (only if ASK_AI_LOG is configured) + app.get('/api/interactive/sessions', isAuthenticated, async (req, res) => { + try { + const username = req.user.username; + const logDir = process.env.ASK_AI_LOG; + + if (!logDir) { + return res.json({ sessions: [], persistent: false }); + } + + const userLogDir = path.join(logDir, username); + + if (!existsSync(userLogDir)) { + return res.json({ sessions: [], persistent: true }); + } + + const allFiles = readdirSync(userLogDir) + .filter(file => file.endsWith('.log') || file.endsWith('.meta')); + + // Group files by session ID to prefer .meta over .log + const sessionMap = new Map(); + + allFiles.forEach(file => { + const sessionId = file.replace(/\.(log|meta)$/, ''); + if (!sessionMap.has(sessionId) || file.endsWith('.meta')) { + sessionMap.set(sessionId, file); + } + }); + + const logFiles = Array.from(sessionMap.values()) + .map(file => { + const filePath = path.join(userLogDir, file); + const stats = statSync(filePath); + + try { + // Read the JSON session data + const sessionData = JSON.parse(readFileSync(filePath, 'utf8')); + const metadata = sessionData.metadata || {}; + + return { + sessionId: metadata.sessionId || file.replace(/\.(log|meta)$/, ''), + sessionName: metadata.sessionName || 'Unknown Session', + startTime: metadata.startTime ? new Date(metadata.startTime) : stats.birthtime, + endTime: metadata.endTime ? 
new Date(metadata.endTime) : null, + completed: metadata.completed || false, + cancelled: metadata.cancelled || false, + failed: metadata.failed || false, + cost: metadata.cost || 0, + active: metadata.active || false, + size: stats.size, + logFile: filePath + }; + } catch (error) { + // Fallback for old format or corrupted files + console.warn(`Failed to parse session file ${filePath}:`, error); + const [sessionName, timestamp] = file.replace(/\.(log|meta)$/, '').split('_'); + + return { + sessionId: file.replace(/\.(log|meta)$/, ''), // Use filename as fallback + sessionName: sessionName || 'Unknown Session', + startTime: stats.birthtime, + endTime: null, + completed: false, + cancelled: false, + failed: false, + cost: 0, + active: false, + size: stats.size, + logFile: filePath + }; + } + }) + .sort((a, b) => new Date(b.startTime) - new Date(a.startTime)); + + // Remove any duplicate sessions before processing + // Group sessions by sessionName and keep only the most recent one + const sessionGroups = new Map(); + logFiles.forEach(session => { + const key = session.sessionName; + if (!sessionGroups.has(key) || + new Date(session.startTime) > new Date(sessionGroups.get(key).startTime)) { + sessionGroups.set(key, session); + } + }); + + // Convert back to array + const deduplicatedLogFiles = Array.from(sessionGroups.values()) + .sort((a, b) => new Date(b.startTime) - new Date(a.startTime)); + + // Check if there's a currently active session + const currentSession = global.interactiveSessions[username]; + if (currentSession && currentSession.running) { + // Mark the current session as active in the list + const activeSessionIndex = deduplicatedLogFiles.findIndex(s => { + // First try exact match with session ID + if (s.sessionId === currentSession.id) { + return true; + } + + // Then try matching by session name + if (s.sessionName === currentSession.name) { + return true; + } + + return false; + }); + + if (activeSessionIndex === -1) { + // Add current session if not in persistent storage yet + deduplicatedLogFiles.unshift({ + sessionId: currentSession.id, + sessionName: currentSession.name, + startTime: currentSession.startTime, + endTime: null, + completed: false, + cancelled: false, + failed: false, + cost: currentSession.cost || 0, + active: true, + size: 0, + logFile: null + }); + } else { + // Mark existing session as active and update with current session data + deduplicatedLogFiles[activeSessionIndex].active = true; + deduplicatedLogFiles[activeSessionIndex].sessionId = currentSession.id; // Ensure correct session ID + deduplicatedLogFiles[activeSessionIndex].cost = currentSession.cost || deduplicatedLogFiles[activeSessionIndex].cost; + } + } + + res.json({ sessions: deduplicatedLogFiles, persistent: true }); + } catch (error) { + console.error('Error listing sessions:', error); + res.status(500).json({ error: 'Failed to list sessions' }); + } + }); + + // Get logs for specific session (only if ASK_AI_LOG is configured) + app.get('/api/interactive/session/:sessionId/logs', isAuthenticated, async (req, res) => { + try { + const { sessionId } = req.params; + const username = req.user.username; + const logDir = process.env.ASK_AI_LOG; + + if (!logDir) { + return res.status(404).json({ error: 'Persistent logging not configured' }); + } + + const userLogDir = path.join(logDir, username); + + if (!existsSync(userLogDir)) { + return res.status(404).json({ error: 'Session logs not found' }); + } + + const logFiles = readdirSync(userLogDir) + .filter(file => file.includes(sessionId) && 
(file.endsWith('.log') || file.endsWith('.meta'))); + + if (logFiles.length === 0) { + return res.status(404).json({ error: 'Session logs not found' }); + } + + // Prefer .meta files for most up-to-date information + const metaFile = logFiles.find(file => file.endsWith('.meta')); + const logFile = logFiles.find(file => file.endsWith('.log')); + + const primaryFile = metaFile ? path.join(userLogDir, metaFile) : path.join(userLogDir, logFile); + + try { + // Try to read as JSON (new format from .meta or .log) + const sessionData = JSON.parse(readFileSync(primaryFile, 'utf8')); + const metadata = sessionData.metadata || {}; + const logs = sessionData.logs || []; + + res.json({ + sessionId: metadata.sessionId || sessionId, + sessionName: metadata.sessionName || 'Unknown Session', + logs: logs, + output: sessionData.output || logs.join(''), + cost: metadata.cost || extractCostFromLogs(logs), + startTime: metadata.startTime, + endTime: metadata.endTime, + completed: metadata.completed || false, + cancelled: metadata.cancelled || false, + failed: metadata.failed || false, + active: metadata.active || false, + logFile: metaFile || logFile + }); + } catch (parseError) { + // Fallback for old format (plain text logs) + console.warn(`Session ${sessionId} using legacy format, parsing as text`); + const logs = readFileSync(logFile, 'utf8'); + const logLines = logs.split('\n').filter(line => line.trim()); + const cost = extractCostFromLogs(logLines); + + res.json({ + sessionId, + sessionName: sessionId, + logs: logLines, + output: logs, + cost, + startTime: null, + endTime: null, + completed: false, + cancelled: false, + failed: false, + active: false, + logFile: logFiles[0] + }); + } + } catch (error) { + console.error('Error getting session logs:', error); + res.status(500).json({ error: 'Failed to get session logs' }); + } + }); +} \ No newline at end of file diff --git a/ui/package-lock.json b/ui/package-lock.json new file mode 100644 index 0000000..4976a95 --- /dev/null +++ b/ui/package-lock.json @@ -0,0 +1,3922 @@ +{ + "name": "ui", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "ui", + "version": "0.0.0", + "dependencies": { + "@codemirror/basic-setup": "^0.20.0", + "@codemirror/commands": "^6.8.1", + "@codemirror/lang-javascript": "^6.2.4", + "@codemirror/language": "^6.11.1", + "@codemirror/legacy-modes": "^6.5.1", + "@codemirror/state": "^6.5.2", + "@codemirror/theme-one-dark": "^6.1.2", + "@codemirror/view": "^6.37.2", + "@tailwindcss/postcss": "^4.1.3", + "@uiw/codemirror-theme-basic": "^4.23.13", + "@uiw/codemirror-theme-bbedit": "^4.23.13", + "@uiw/codemirror-theme-github": "^4.23.13", + "@uiw/codemirror-theme-white": "^4.23.13", + "@uiw/codemirror-themes": "^4.23.13", + "cmjs-shell": "github:milahu/codemirror-shell", + "dotenv": "^16.5.0", + "express": "^4.18.3", + "express-session": "^1.18.1", + "passport": "^0.7.0", + "passport-github2": "^0.1.12", + "prismjs": "^1.30.0", + "simple-git": "^3.27.0", + "svelte-codemirror-editor": "^1.4.1", + "svelte-file-tree": "^0.1.0", + "svelte-split-pane": "^0.1.2" + }, + "devDependencies": { + "@sveltejs/vite-plugin-svelte": "^5.0.3", + "@tsconfig/svelte": "^5.0.4", + "autoprefixer": "^10.4.21", + "concurrently": "^8.2.2", + "postcss": "^8.5.3", + "svelte": "^5.20.2", + "svelte-check": "^4.1.4", + "tailwindcss": "^4.1.3", + "typescript": "~5.7.2", + "vite": "^6.2.0" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": 
"https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.0.tgz", + "integrity": "sha512-VtPOkrdPHZsKc/clNqyi9WUA8TINkZ4cGk63UUE3u4pmB2k+ZMQRDuIOagv8UVd6j7k0T3+RRIb7beKTebNbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@codemirror/autocomplete": { + "version": "6.18.6", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.6.tgz", + "integrity": "sha512-PHHBXFomUs5DF+9tCOM/UoW6XQ4R44lLNNhRaW9PKPTU0D7lIjRg3ElxaJnTwsl/oHiR93WSXDBrekhoUGCPtg==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.17.0", + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@codemirror/basic-setup": { + "version": "0.20.0", + "resolved": "https://registry.npmjs.org/@codemirror/basic-setup/-/basic-setup-0.20.0.tgz", + "integrity": "sha512-W/ERKMLErWkrVLyP5I8Yh8PXl4r+WFNkdYVSzkXYPQv2RMPSkWpr2BgggiSJ8AHF/q3GuApncDD8I4BZz65fyg==", + "deprecated": "In version 6.0, this package has been renamed to just 'codemirror'", + "license": "MIT", + "dependencies": { + "@codemirror/autocomplete": "^0.20.0", + "@codemirror/commands": "^0.20.0", + "@codemirror/language": "^0.20.0", + "@codemirror/lint": "^0.20.0", + "@codemirror/search": "^0.20.0", + "@codemirror/state": "^0.20.0", + "@codemirror/view": "^0.20.0" + } + }, + "node_modules/@codemirror/basic-setup/node_modules/@codemirror/autocomplete": { + "version": "0.20.3", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-0.20.3.tgz", + "integrity": "sha512-lYB+NPGP+LEzAudkWhLfMxhTrxtLILGl938w+RcFrGdrIc54A+UgmCoz+McE3IYRFp4xyQcL4uFJwo+93YdgHw==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^0.20.0", + "@codemirror/state": "^0.20.0", + "@codemirror/view": "^0.20.0", + "@lezer/common": "^0.16.0" + } + }, + "node_modules/@codemirror/basic-setup/node_modules/@codemirror/commands": { + "version": "0.20.0", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-0.20.0.tgz", + "integrity": "sha512-v9L5NNVA+A9R6zaFvaTbxs30kc69F6BkOoiEbeFw4m4I0exmDEKBILN6mK+GksJtvTzGBxvhAPlVFTdQW8GB7Q==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^0.20.0", + "@codemirror/state": "^0.20.0", + "@codemirror/view": "^0.20.0", + "@lezer/common": "^0.16.0" + } + }, + "node_modules/@codemirror/basic-setup/node_modules/@codemirror/language": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-0.20.2.tgz", + "integrity": "sha512-WB3Bnuusw0xhVvhBocieYKwJm04SOk5bPoOEYksVHKHcGHFOaYaw+eZVxR4gIqMMcGzOIUil0FsCmFk8yrhHpw==", + 
"license": "MIT", + "dependencies": { + "@codemirror/state": "^0.20.0", + "@codemirror/view": "^0.20.0", + "@lezer/common": "^0.16.0", + "@lezer/highlight": "^0.16.0", + "@lezer/lr": "^0.16.0", + "style-mod": "^4.0.0" + } + }, + "node_modules/@codemirror/basic-setup/node_modules/@codemirror/lint": { + "version": "0.20.3", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-0.20.3.tgz", + "integrity": "sha512-06xUScbbspZ8mKoODQCEx6hz1bjaq9m8W8DxdycWARMiiX1wMtfCh/MoHpaL7ws/KUMwlsFFfp2qhm32oaCvVA==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^0.20.0", + "@codemirror/view": "^0.20.2", + "crelt": "^1.0.5" + } + }, + "node_modules/@codemirror/basic-setup/node_modules/@codemirror/search": { + "version": "0.20.1", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-0.20.1.tgz", + "integrity": "sha512-ROe6gRboQU5E4z6GAkNa2kxhXqsGNbeLEisbvzbOeB7nuDYXUZ70vGIgmqPu0tB+1M3F9yWk6W8k2vrFpJaD4Q==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^0.20.0", + "@codemirror/view": "^0.20.0", + "crelt": "^1.0.5" + } + }, + "node_modules/@codemirror/basic-setup/node_modules/@codemirror/state": { + "version": "0.20.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-0.20.1.tgz", + "integrity": "sha512-ms0tlV5A02OK0pFvTtSUGMLkoarzh1F8mr6jy1cD7ucSC2X/VLHtQCxfhdSEGqTYlQF2hoZtmLv+amqhdgbwjQ==", + "license": "MIT" + }, + "node_modules/@codemirror/basic-setup/node_modules/@codemirror/view": { + "version": "0.20.7", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-0.20.7.tgz", + "integrity": "sha512-pqEPCb9QFTOtHgAH5XU/oVy9UR/Anj6r+tG5CRmkNVcqSKEPmBU05WtN/jxJCFZBXf6HumzWC9ydE4qstO3TxQ==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^0.20.0", + "style-mod": "^4.0.0", + "w3c-keyname": "^2.2.4" + } + }, + "node_modules/@codemirror/basic-setup/node_modules/@lezer/common": { + "version": "0.16.1", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-0.16.1.tgz", + "integrity": "sha512-qPmG7YTZ6lATyTOAWf8vXE+iRrt1NJd4cm2nJHK+v7X9TsOF6+HtuU/ctaZy2RCrluxDb89hI6KWQ5LfQGQWuA==", + "license": "MIT" + }, + "node_modules/@codemirror/basic-setup/node_modules/@lezer/highlight": { + "version": "0.16.0", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-0.16.0.tgz", + "integrity": "sha512-iE5f4flHlJ1g1clOStvXNLbORJoiW4Kytso6ubfYzHnaNo/eo5SKhxs4wv/rtvwZQeZrK3we8S9SyA7OGOoRKQ==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^0.16.0" + } + }, + "node_modules/@codemirror/basic-setup/node_modules/@lezer/lr": { + "version": "0.16.3", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-0.16.3.tgz", + "integrity": "sha512-pau7um4eAw94BEuuShUIeQDTf3k4Wt6oIUOYxMmkZgDHdqtIcxWND4LRxi8nI9KuT4I1bXQv67BCapkxt7Ywqw==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^0.16.0" + } + }, + "node_modules/@codemirror/commands": { + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.8.1.tgz", + "integrity": "sha512-KlGVYufHMQzxbdQONiLyGQDUW0itrLZwq3CcY7xpv9ZLRHqzkBSoteocBHtMCoY7/Ci4xhzSrToIeLg7FxHuaw==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.4.0", + "@codemirror/view": "^6.27.0", + "@lezer/common": "^1.1.0" + } + }, + "node_modules/@codemirror/lang-javascript": { + "version": "6.2.4", + "resolved": "https://registry.npmjs.org/@codemirror/lang-javascript/-/lang-javascript-6.2.4.tgz", + "integrity": 
"sha512-0WVmhp1QOqZ4Rt6GlVGwKJN3KW7Xh4H2q8ZZNGZaP6lRdxXJzmjm4FqvmOojVj6khWJHIb9sp7U/72W7xQgqAA==", + "license": "MIT", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/language": "^6.6.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.17.0", + "@lezer/common": "^1.0.0", + "@lezer/javascript": "^1.0.0" + } + }, + "node_modules/@codemirror/language": { + "version": "6.11.1", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.11.1.tgz", + "integrity": "sha512-5kS1U7emOGV84vxC+ruBty5sUgcD0te6dyupyRVG2zaSjhTDM73LhVKUtVwiqSe6QwmEoA4SCiU8AKPFyumAWQ==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.23.0", + "@lezer/common": "^1.1.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0", + "style-mod": "^4.0.0" + } + }, + "node_modules/@codemirror/legacy-modes": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@codemirror/legacy-modes/-/legacy-modes-6.5.1.tgz", + "integrity": "sha512-DJYQQ00N1/KdESpZV7jg9hafof/iBNp9h7TYo1SLMk86TWl9uDsVdho2dzd81K+v4retmK6mdC7WpuOQDytQqw==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0" + } + }, + "node_modules/@codemirror/lint": { + "version": "6.8.5", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.5.tgz", + "integrity": "sha512-s3n3KisH7dx3vsoeGMxsbRAgKe4O1vbrnKBClm99PU0fWxmxsx5rR2PfqQgIt+2MMJBHbiJ5rfIdLYfB9NNvsA==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.35.0", + "crelt": "^1.0.5" + } + }, + "node_modules/@codemirror/search": { + "version": "6.5.11", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.11.tgz", + "integrity": "sha512-KmWepDE6jUdL6n8cAAqIpRmLPBZ5ZKnicE8oGU/s3QrAVID+0VhLFrzUucVKHG5035/BSykhExDL/Xm7dHthiA==", + "license": "MIT", + "peer": true, + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "crelt": "^1.0.5" + } + }, + "node_modules/@codemirror/state": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.5.2.tgz", + "integrity": "sha512-FVqsPqtPWKVVL3dPSxy8wEF/ymIEuVzF1PK3VbUgrxXpJUSHQWWZz4JMToquRxnkw+36LTamCZG2iua2Ptq0fA==", + "license": "MIT", + "dependencies": { + "@marijn/find-cluster-break": "^1.0.0" + } + }, + "node_modules/@codemirror/theme-one-dark": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/@codemirror/theme-one-dark/-/theme-one-dark-6.1.2.tgz", + "integrity": "sha512-F+sH0X16j/qFLMAfbciKTxVOwkdAS336b7AXTKOZhy8BR3eH/RelsnLgLFINrpST63mmN2OuwUt0W2ndUgYwUA==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/highlight": "^1.0.0" + } + }, + "node_modules/@codemirror/view": { + "version": "6.37.2", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.37.2.tgz", + "integrity": "sha512-XD3LdgQpxQs5jhOOZ2HRVT+Rj59O4Suc7g2ULvZ+Yi8eCkickrkZ5JFuoDhs2ST1mNI5zSsNYgR3NGa4OUrbnw==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.5.0", + "crelt": "^1.0.6", + "style-mod": "^4.1.0", + "w3c-keyname": "^2.2.4" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.2.tgz", + "integrity": "sha512-wCIboOL2yXZym2cgm6mlA742s9QeJ8DjGVaL39dLN4rRwrOgOyYSnOaFPhKZGLb2ngj4EyfAFjsNJwPXZvseag==", + "cpu": [ + "ppc64" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.2.tgz", + "integrity": "sha512-NQhH7jFstVY5x8CKbcfa166GoV0EFkaPkCKBQkdPJFvo5u+nGXLEH/ooniLb3QI8Fk58YAx7nsPLozUWfCBOJA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.2.tgz", + "integrity": "sha512-5ZAX5xOmTligeBaeNEPnPaeEuah53Id2tX4c2CVP3JaROTH+j4fnfHCkr1PjXMd78hMst+TlkfKcW/DlTq0i4w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.2.tgz", + "integrity": "sha512-Ffcx+nnma8Sge4jzddPHCZVRvIfQ0kMsUsCMcJRHkGJ1cDmhe4SsrYIjLUKn1xpHZybmOqCWwB0zQvsjdEHtkg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.2.tgz", + "integrity": "sha512-MpM6LUVTXAzOvN4KbjzU/q5smzryuoNjlriAIx+06RpecwCkL9JpenNzpKd2YMzLJFOdPqBpuub6eVRP5IgiSA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.2.tgz", + "integrity": "sha512-5eRPrTX7wFyuWe8FqEFPG2cU0+butQQVNcT4sVipqjLYQjjh8a8+vUTfgBKM88ObB85ahsnTwF7PSIt6PG+QkA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.2.tgz", + "integrity": "sha512-mLwm4vXKiQ2UTSX4+ImyiPdiHjiZhIaE9QvC7sw0tZ6HoNMjYAqQpGyui5VRIi5sGd+uWq940gdCbY3VLvsO1w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.2.tgz", + "integrity": "sha512-6qyyn6TjayJSwGpm8J9QYYGQcRgc90nmfdUb0O7pp1s4lTY+9D0H9O02v5JqGApUyiHOtkz6+1hZNvNtEhbwRQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.2.tgz", + "integrity": "sha512-UHBRgJcmjJv5oeQF8EpTRZs/1knq6loLxTsjc3nxO9eXAPDLcWW55flrMVc97qFPbmZP31ta1AZVUKQzKTzb0g==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.2", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.2.tgz", + "integrity": "sha512-gq/sjLsOyMT19I8obBISvhoYiZIAaGF8JpeXu1u8yPv8BE5HlWYobmlsfijFIZ9hIVGYkbdFhEqC0NvM4kNO0g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.2.tgz", + "integrity": "sha512-bBYCv9obgW2cBP+2ZWfjYTU+f5cxRoGGQ5SeDbYdFCAZpYWrfjjfYwvUpP8MlKbP0nwZ5gyOU/0aUzZ5HWPuvQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.2.tgz", + "integrity": "sha512-SHNGiKtvnU2dBlM5D8CXRFdd+6etgZ9dXfaPCeJtz+37PIUlixvlIhI23L5khKXs3DIzAn9V8v+qb1TRKrgT5w==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.2.tgz", + "integrity": "sha512-hDDRlzE6rPeoj+5fsADqdUZl1OzqDYow4TB4Y/3PlKBD0ph1e6uPHzIQcv2Z65u2K0kpeByIyAjCmjn1hJgG0Q==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.2.tgz", + "integrity": "sha512-tsHu2RRSWzipmUi9UBDEzc0nLc4HtpZEI5Ba+Omms5456x5WaNuiG3u7xh5AO6sipnJ9r4cRWQB2tUjPyIkc6g==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.2.tgz", + "integrity": "sha512-k4LtpgV7NJQOml/10uPU0s4SAXGnowi5qBSjaLWMojNCUICNu7TshqHLAEbkBdAszL5TabfvQ48kK84hyFzjnw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.2.tgz", + "integrity": "sha512-GRa4IshOdvKY7M/rDpRR3gkiTNp34M0eLTaC1a08gNrh4u488aPhuZOCpkF6+2wl3zAN7L7XIpOFBhnaE3/Q8Q==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.2.tgz", + "integrity": "sha512-QInHERlqpTTZ4FRB0fROQWXcYRD64lAoiegezDunLpalZMjcUcld3YzZmVJ2H/Cp0wJRZ8Xtjtj0cEHhYc/uUg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.2.tgz", + "integrity": "sha512-talAIBoY5M8vHc6EeI2WW9d/CkiO9MQJ0IOWX8hrLhxGbro/vBXJvaQXefW2cP0z0nQVTdQ/eNyGFV1GSKrxfw==", + "cpu": [ + "arm64" + ], 
+ "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.2.tgz", + "integrity": "sha512-voZT9Z+tpOxrvfKFyfDYPc4DO4rk06qamv1a/fkuzHpiVBMOhpjK+vBmWM8J1eiB3OLSMFYNaOaBNLXGChf5tg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.2.tgz", + "integrity": "sha512-dcXYOC6NXOqcykeDlwId9kB6OkPUxOEqU+rkrYVqJbK2hagWOMrsTGsMr8+rW02M+d5Op5NNlgMmjzecaRf7Tg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.2.tgz", + "integrity": "sha512-t/TkWwahkH0Tsgoq1Ju7QfgGhArkGLkF1uYz8nQS/PPFlXbP5YgRpqQR3ARRiC2iXoLTWFxc6DJMSK10dVXluw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.2.tgz", + "integrity": "sha512-cfZH1co2+imVdWCjd+D1gf9NjkchVhhdpgb1q5y6Hcv9TP6Zi9ZG/beI3ig8TvwT9lH9dlxLq5MQBBgwuj4xvA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.2.tgz", + "integrity": "sha512-7Loyjh+D/Nx/sOTzV8vfbB3GJuHdOQyrOryFdZvPHLf42Tk9ivBU5Aedi7iyX+x6rbn2Mh68T4qq1SDqJBQO5Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.2.tgz", + "integrity": "sha512-WRJgsz9un0nqZJ4MfhabxaD9Ft8KioqU3JMinOTvobbX6MOSUigSBlogP8QB3uxpJDsFS6yN+3FDBdqE5lg9kg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.2.tgz", + "integrity": "sha512-kM3HKb16VIXZyIeVrM1ygYmZBKybX8N4p754bw390wGO3Tf2j4L2/WYL+4suWujpgf6GBYs3jv7TyUivdd05JA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": 
"3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@kwsites/file-exists": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@kwsites/file-exists/-/file-exists-1.1.1.tgz", + "integrity": "sha512-m9/5YGR18lIwxSFDwfE3oA7bWuq9kdau6ugN4H2rJeyhFQZcG9AgSHkQtSD15a8WvTgfz9aikZMrKPHvbpqFiw==", + "license": "MIT", + "dependencies": { + "debug": "^4.1.1" + } + }, + "node_modules/@kwsites/promise-deferred": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@kwsites/promise-deferred/-/promise-deferred-1.1.1.tgz", + "integrity": "sha512-GaHYm+c0O9MjZRu0ongGBRbinu8gVAMd2UZjji6jVmqKtZluZnptXGWhz1E8j8D2HJ3f/yMxKAUC0b+57wncIw==", + "license": "MIT" + }, + "node_modules/@lezer/common": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.3.tgz", + "integrity": "sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA==", + "license": "MIT" + }, + "node_modules/@lezer/highlight": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.1.tgz", + "integrity": "sha512-Z5duk4RN/3zuVO7Jq0pGLJ3qynpxUVsh7IbUbGj88+uV2ApSAn6kWg2au3iJb+0Zi7kKtqffIESgNcRXWZWmSA==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@lezer/javascript": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/@lezer/javascript/-/javascript-1.5.1.tgz", + "integrity": "sha512-ATOImjeVJuvgm3JQ/bpo2Tmv55HSScE2MTPnKRMRIPx2cLhHGyX2VnqpHhtIV1tVzIjZDbcWQm+NCTF40ggZVw==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.1.3", + "@lezer/lr": "^1.3.0" + } + }, + "node_modules/@lezer/lr": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz", + "integrity": "sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@marijn/find-cluster-break": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@marijn/find-cluster-break/-/find-cluster-break-1.0.2.tgz", + "integrity": 
"sha512-l0h88YhZFyKdXIFNfSWpyjStDjGHwZ/U7iobcK1cQQD8sejsONdQtTVU+1wVN1PBw40PiiHB1vA5S7VTfQiP9g==", + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.40.0.tgz", + "integrity": "sha512-+Fbls/diZ0RDerhE8kyC6hjADCXA1K4yVNlH0EYfd2XjyH0UGgzaQ8MlT0pCXAThfxv3QUAczHaL+qSv1E4/Cg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.40.0.tgz", + "integrity": "sha512-PPA6aEEsTPRz+/4xxAmaoWDqh67N7wFbgFUJGMnanCFs0TV99M0M8QhhaSCks+n6EbQoFvLQgYOGXxlMGQe/6w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.40.0.tgz", + "integrity": "sha512-GwYOcOakYHdfnjjKwqpTGgn5a6cUX7+Ra2HeNj/GdXvO2VJOOXCiYYlRFU4CubFM67EhbmzLOmACKEfvp3J1kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.40.0.tgz", + "integrity": "sha512-CoLEGJ+2eheqD9KBSxmma6ld01czS52Iw0e2qMZNpPDlf7Z9mj8xmMemxEucinev4LgHalDPczMyxzbq+Q+EtA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.40.0.tgz", + "integrity": "sha512-r7yGiS4HN/kibvESzmrOB/PxKMhPTlz+FcGvoUIKYoTyGd5toHp48g1uZy1o1xQvybwwpqpe010JrcGG2s5nkg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.40.0.tgz", + "integrity": "sha512-mVDxzlf0oLzV3oZOr0SMJ0lSDd3xC4CmnWJ8Val8isp9jRGl5Dq//LLDSPFrasS7pSm6m5xAcKaw3sHXhBjoRw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.40.0.tgz", + "integrity": "sha512-y/qUMOpJxBMy8xCXD++jeu8t7kzjlOCkoxxajL58G62PJGBZVl/Gwpm7JK9+YvlB701rcQTzjUZ1JgUoPTnoQA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.40.0.tgz", + "integrity": "sha512-GoCsPibtVdJFPv/BOIvBKO/XmwZLwaNWdyD8TKlXuqp0veo2sHE+A/vpMQ5iSArRUz/uaoj4h5S6Pn0+PdhRjg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.40.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.40.0.tgz", + "integrity": "sha512-L5ZLphTjjAD9leJzSLI7rr8fNqJMlGDKlazW2tX4IUF9P7R5TMQPElpH82Q7eNIDQnQlAyiNVfRPfP2vM5Avvg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.40.0.tgz", + "integrity": "sha512-ATZvCRGCDtv1Y4gpDIXsS+wfFeFuLwVxyUBSLawjgXK2tRE6fnsQEkE4csQQYWlBlsFztRzCnBvWVfcae/1qxQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loongarch64-gnu": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.40.0.tgz", + "integrity": "sha512-wG9e2XtIhd++QugU5MD9i7OnpaVb08ji3P1y/hNbxrQ3sYEelKJOq1UJ5dXczeo6Hj2rfDEL5GdtkMSVLa/AOg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.40.0.tgz", + "integrity": "sha512-vgXfWmj0f3jAUvC7TZSU/m/cOE558ILWDzS7jBhiCAFpY2WEBn5jqgbqvmzlMjtp8KlLcBlXVD2mkTSEQE6Ixw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.40.0.tgz", + "integrity": "sha512-uJkYTugqtPZBS3Z136arevt/FsKTF/J9dEMTX/cwR7lsAW4bShzI2R0pJVw+hcBTWF4dxVckYh72Hk3/hWNKvA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.40.0.tgz", + "integrity": "sha512-rKmSj6EXQRnhSkE22+WvrqOqRtk733x3p5sWpZilhmjnkHkpeCgWsFFo0dGnUGeA+OZjRl3+VYq+HyCOEuwcxQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.40.0.tgz", + "integrity": "sha512-SpnYlAfKPOoVsQqmTFJ0usx0z84bzGOS9anAC0AZ3rdSo3snecihbhFTlJZ8XMwzqAcodjFU4+/SM311dqE5Sw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.40.0.tgz", + "integrity": "sha512-RcDGMtqF9EFN8i2RYN2W+64CdHruJ5rPqrlYw+cgM3uOVPSsnAQps7cpjXe9be/yDp8UC7VLoCoKC8J3Kn2FkQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.40.0.tgz", + "integrity": "sha512-HZvjpiUmSNx5zFgwtQAV1GaGazT2RWvqeDi0hV+AtC8unqqDSsaFjPxfsO6qPtKRRg25SisACWnJ37Yio8ttaw==", + "cpu": [ + "x64" + 
], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.40.0.tgz", + "integrity": "sha512-UtZQQI5k/b8d7d3i9AZmA/t+Q4tk3hOC0tMOMSq2GlMYOfxbesxG4mJSeDp0EHs30N9bsfwUvs3zF4v/RzOeTQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.40.0.tgz", + "integrity": "sha512-+m03kvI2f5syIqHXCZLPVYplP8pQch9JHyXKZ3AGMKlg8dCyr2PKHjwRLiW53LTrN/Nc3EqHOKxUxzoSPdKddA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.40.0.tgz", + "integrity": "sha512-lpPE1cLfP5oPzVjKMx10pgBmKELQnFJXHgvtHCtuJWOv8MxqdEIMNtgHgBFf7Ea2/7EuVwa9fodWUfXAlXZLZQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@sveltejs/acorn-typescript": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@sveltejs/acorn-typescript/-/acorn-typescript-1.0.5.tgz", + "integrity": "sha512-IwQk4yfwLdibDlrXVE04jTZYlLnwsTT2PIOQQGNLWfjavGifnk1JD1LcZjZaBTRcxZu2FfPfNLOE04DSu9lqtQ==", + "license": "MIT", + "peerDependencies": { + "acorn": "^8.9.0" + } + }, + "node_modules/@sveltejs/vite-plugin-svelte": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/@sveltejs/vite-plugin-svelte/-/vite-plugin-svelte-5.0.3.tgz", + "integrity": "sha512-MCFS6CrQDu1yGwspm4qtli0e63vaPCehf6V7pIMP15AsWgMKrqDGCPFF/0kn4SP0ii4aySu4Pa62+fIRGFMjgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sveltejs/vite-plugin-svelte-inspector": "^4.0.1", + "debug": "^4.4.0", + "deepmerge": "^4.3.1", + "kleur": "^4.1.5", + "magic-string": "^0.30.15", + "vitefu": "^1.0.4" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22" + }, + "peerDependencies": { + "svelte": "^5.0.0", + "vite": "^6.0.0" + } + }, + "node_modules/@sveltejs/vite-plugin-svelte-inspector": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@sveltejs/vite-plugin-svelte-inspector/-/vite-plugin-svelte-inspector-4.0.1.tgz", + "integrity": "sha512-J/Nmb2Q2y7mck2hyCX4ckVHcR5tu2J+MtBEQqpDrrgELZ2uvraQcK/ioCV61AqkdXFgriksOKIceDcQmqnGhVw==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.3.7" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22" + }, + "peerDependencies": { + "@sveltejs/vite-plugin-svelte": "^5.0.0", + "svelte": "^5.0.0", + "vite": "^6.0.0" + } + }, + "node_modules/@tailwindcss/node": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.3.tgz", + "integrity": "sha512-H/6r6IPFJkCfBJZ2dKZiPJ7Ueb2wbL592+9bQEl2r73qbX6yGnmQVIfiUvDRB2YI0a3PWDrzUwkvQx1XW1bNkA==", + "license": "MIT", + "dependencies": { + "enhanced-resolve": "^5.18.1", + "jiti": "^2.4.2", + "lightningcss": "1.29.2", + "tailwindcss": "4.1.3" + } + }, + "node_modules/@tailwindcss/oxide": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.3.tgz", + "integrity": 
"sha512-t16lpHCU7LBxDe/8dCj9ntyNpXaSTAgxWm1u2XQP5NiIu4KGSyrDJJRlK9hJ4U9yJxx0UKCVI67MJWFNll5mOQ==", + "license": "MIT", + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.1.3", + "@tailwindcss/oxide-darwin-arm64": "4.1.3", + "@tailwindcss/oxide-darwin-x64": "4.1.3", + "@tailwindcss/oxide-freebsd-x64": "4.1.3", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.3", + "@tailwindcss/oxide-linux-arm64-gnu": "4.1.3", + "@tailwindcss/oxide-linux-arm64-musl": "4.1.3", + "@tailwindcss/oxide-linux-x64-gnu": "4.1.3", + "@tailwindcss/oxide-linux-x64-musl": "4.1.3", + "@tailwindcss/oxide-win32-arm64-msvc": "4.1.3", + "@tailwindcss/oxide-win32-x64-msvc": "4.1.3" + } + }, + "node_modules/@tailwindcss/oxide-android-arm64": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.3.tgz", + "integrity": "sha512-cxklKjtNLwFl3mDYw4XpEfBY+G8ssSg9ADL4Wm6//5woi3XGqlxFsnV5Zb6v07dxw1NvEX2uoqsxO/zWQsgR+g==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-arm64": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.3.tgz", + "integrity": "sha512-mqkf2tLR5VCrjBvuRDwzKNShRu99gCAVMkVsaEOFvv6cCjlEKXRecPu9DEnxp6STk5z+Vlbh1M5zY3nQCXMXhw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-x64": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.3.tgz", + "integrity": "sha512-7sGraGaWzXvCLyxrc7d+CCpUN3fYnkkcso3rCzwUmo/LteAl2ZGCDlGvDD8Y/1D3ngxT8KgDj1DSwOnNewKhmg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-freebsd-x64": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.3.tgz", + "integrity": "sha512-E2+PbcbzIReaAYZe997wb9rId246yDkCwAakllAWSGqe6VTg9hHle67hfH6ExjpV2LSK/siRzBUs5wVff3RW9w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.3.tgz", + "integrity": "sha512-GvfbJ8wjSSjbLFFE3UYz4Eh8i4L6GiEYqCtA8j2Zd2oXriPuom/Ah/64pg/szWycQpzRnbDiJozoxFU2oJZyfg==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.3.tgz", + "integrity": "sha512-35UkuCWQTeG9BHcBQXndDOrpsnt3Pj9NVIB4CgNiKmpG8GnCNXeMczkUpOoqcOhO6Cc/mM2W7kaQ/MTEENDDXg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-musl": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.3.tgz", + "integrity": 
"sha512-dm18aQiML5QCj9DQo7wMbt1Z2tl3Giht54uVR87a84X8qRtuXxUqnKQkRDK5B4bCOmcZ580lF9YcoMkbDYTXHQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-gnu": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.3.tgz", + "integrity": "sha512-LMdTmGe/NPtGOaOfV2HuO7w07jI3cflPrVq5CXl+2O93DCewADK0uW1ORNAcfu2YxDUS035eY2W38TxrsqngxA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-musl": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.3.tgz", + "integrity": "sha512-aalNWwIi54bbFEizwl1/XpmdDrOaCjRFQRgtbv9slWjmNPuJJTIKPHf5/XXDARc9CneW9FkSTqTbyvNecYAEGw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.3.tgz", + "integrity": "sha512-PEj7XR4OGTGoboTIAdXicKuWl4EQIjKHKuR+bFy9oYN7CFZo0eu74+70O4XuERX4yjqVZGAkCdglBODlgqcCXg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-win32-x64-msvc": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.3.tgz", + "integrity": "sha512-T8gfxECWDBENotpw3HR9SmNiHC9AOJdxs+woasRZ8Q/J4VHN0OMs7F+4yVNZ9EVN26Wv6mZbK0jv7eHYuLJLwA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/postcss": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.3.tgz", + "integrity": "sha512-6s5nJODm98F++QT49qn8xJKHQRamhYHfMi3X7/ltxiSQ9dyRsaFSfFkfaMsanWzf+TMYQtbk8mt5f6cCVXJwfg==", + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "@tailwindcss/node": "4.1.3", + "@tailwindcss/oxide": "4.1.3", + "postcss": "^8.4.41", + "tailwindcss": "4.1.3" + } + }, + "node_modules/@tsconfig/svelte": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/svelte/-/svelte-5.0.4.tgz", + "integrity": "sha512-BV9NplVgLmSi4mwKzD8BD/NQ8erOY/nUE/GpgWe2ckx+wIQF5RyRirn/QsSSCPeulVpc3RA/iJt6DpfTIZps0Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.7.tgz", + "integrity": "sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==", + "license": "MIT" + }, + "node_modules/@uiw/codemirror-theme-basic": { + "version": "4.23.13", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-theme-basic/-/codemirror-theme-basic-4.23.13.tgz", + "integrity": "sha512-7GYiryQZSZFpKuskc0d//Pn81z0OtnHhEQp3tZ5Jn2GQuMEHdZt9RomLLzqJTqOXOXVm37cKuYbd47XWcgejmw==", + "license": "MIT", + "dependencies": { + "@uiw/codemirror-themes": "4.23.13" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + } + }, + "node_modules/@uiw/codemirror-theme-bbedit": { + "version": "4.23.13", + "resolved": 
"https://registry.npmjs.org/@uiw/codemirror-theme-bbedit/-/codemirror-theme-bbedit-4.23.13.tgz", + "integrity": "sha512-7Yc3iQFEecbaQisy5YC+9aov/PVGSKZAEGRR86C+keC/3yxAe6jcg9a/rVuafegm70pKAhO+beGZGjot0k3DVQ==", + "license": "MIT", + "dependencies": { + "@uiw/codemirror-themes": "4.23.13" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + } + }, + "node_modules/@uiw/codemirror-theme-github": { + "version": "4.23.13", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-theme-github/-/codemirror-theme-github-4.23.13.tgz", + "integrity": "sha512-e75jgCl6Zf7sC63ntyl4r2GU6ekqKbyfIB4g6EnneQlIzJQiDTM+mU0/pDR5hLTyGQdxQpcDs9EmDqfEyejGSQ==", + "license": "MIT", + "dependencies": { + "@uiw/codemirror-themes": "4.23.13" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + } + }, + "node_modules/@uiw/codemirror-theme-white": { + "version": "4.23.13", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-theme-white/-/codemirror-theme-white-4.23.13.tgz", + "integrity": "sha512-objVXKwMYwWmLyUxTxzvmz3IkpLUNrScO2X/5dcNPrkClj7Kc/Tkhc61sreq4DGVYN7jFu9bCOwhDdG6+bht1A==", + "license": "MIT", + "dependencies": { + "@uiw/codemirror-themes": "4.23.13" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + } + }, + "node_modules/@uiw/codemirror-themes": { + "version": "4.23.13", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-themes/-/codemirror-themes-4.23.13.tgz", + "integrity": "sha512-thk4X8VNl15XPoDiOXdkeMAIIHQOoc5lPfmgOvrhPXHzt4zvH5efLWBw3zgpwuOWF+Uk6sYrS0eumtsSO/kgcA==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + }, + "peerDependencies": { + "@codemirror/language": ">=6.0.0", + "@codemirror/state": ">=6.0.0", + "@codemirror/view": ">=6.0.0" + } + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.14.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", + "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/aria-query": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", + "integrity": 
"sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", + "license": "MIT" + }, + "node_modules/autoprefixer": { + "version": "10.4.21", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.21.tgz", + "integrity": "sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.24.4", + "caniuse-lite": "^1.0.30001702", + "fraction.js": "^4.3.7", + "normalize-range": "^0.1.2", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/axobject-query": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", + "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/base64url": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/base64url/-/base64url-3.0.1.tgz", + "integrity": "sha512-ir1UPr3dkwexU7FdV8qBBbNDRUhMmIekYMFZfi+C/sLNnRESKPl23nB9b2pltqfOQNnGzsDdId90AEtG5tCx4A==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/body-parser": { + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.13.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/browserslist": { + "version": "4.24.4", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.4.tgz", + "integrity": "sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==", + "dev": true, + "funding": [ + { + "type": "opencollective", 
+ "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "caniuse-lite": "^1.0.30001688", + "electron-to-chromium": "^1.5.73", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.1" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001713", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001713.tgz", + "integrity": "sha512-wCIWIg+A4Xr7NfhTuHdX+/FKh3+Op3LBbSp2N5Pfx6T/LhdQy3GTyoTg48BReaW/MyMNZAkTadsBtai3ldWK0Q==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": 
"https://paulmillr.com/funding/" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/cmjs-shell": { + "version": "0.3.10", + "resolved": "git+ssh://git@github.com/milahu/codemirror-shell.git#ae8e8efc45d6d8b416ac09e9a430c6474b0ff86e", + "license": "MIT" + }, + "node_modules/codemirror": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-6.0.1.tgz", + "integrity": "sha512-J8j+nZ+CdWmIeFIGXEFbFPtpiYacFMDR8GlHK3IyHQJMCaVRfGx9NT+Hxivv1ckLWPvNdZqndbr/7lVhrf/Svg==", + "license": "MIT", + "peer": true, + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/commands": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/search": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/concurrently": { + "version": "8.2.2", + "resolved": "https://registry.npmjs.org/concurrently/-/concurrently-8.2.2.tgz", + "integrity": "sha512-1dP4gpXFhei8IOtlXRE/T/4H88ElHgTiUzh71YUmtjTEHMSRS2Z/fgOxHSxxusGHogsRfxNq1vyAwxSC+EVyDg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.2", + "date-fns": "^2.30.0", + "lodash": "^4.17.21", + "rxjs": "^7.8.1", + "shell-quote": "^1.8.1", + "spawn-command": "0.0.2", + "supports-color": "^8.1.1", + "tree-kill": "^1.2.2", + "yargs": "^17.7.2" + }, + "bin": { + "conc": "dist/bin/concurrently.js", + "concurrently": "dist/bin/concurrently.js" + }, + "engines": { + "node": "^14.13.0 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/open-cli-tools/concurrently?sponsor=1" + } + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": 
"sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", + "license": "MIT" + }, + "node_modules/crelt": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz", + "integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==", + "license": "MIT" + }, + "node_modules/date-fns": { + "version": "2.30.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz", + "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.21.0" + }, + "engines": { + "node": ">=0.11" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/date-fns" + } + }, + "node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detect-libc": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", + "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/dotenv": { + "version": "16.5.0", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.5.0.tgz", + "integrity": "sha512-m/C+AwOAr9/W1UOIZUo232ejMNnJAJtYQjUbHoNTBNTJSvqzzDh7vnrei3o3r3m9blf6ZoDkvcw0VmozNRFJxg==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + 
"resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.136", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.136.tgz", + "integrity": "sha512-kL4+wUTD7RSA5FHx5YwWtjDnEEkIIikFgWHR4P6fqjw1PPLlqYkxeOb++wAauAssat0YClCy8Y3C5SxgSkjibQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.18.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.1.tgz", + "integrity": "sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.25.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.2.tgz", + "integrity": "sha512-16854zccKPnC+toMywC+uKNeYSv+/eXkevRAfwRD/G9Cleq66m8XFIrigkbvauLLlCfDL45Q2cWegSg53gGBnQ==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.2", + "@esbuild/android-arm": "0.25.2", + "@esbuild/android-arm64": "0.25.2", + "@esbuild/android-x64": "0.25.2", + "@esbuild/darwin-arm64": "0.25.2", + 
"@esbuild/darwin-x64": "0.25.2", + "@esbuild/freebsd-arm64": "0.25.2", + "@esbuild/freebsd-x64": "0.25.2", + "@esbuild/linux-arm": "0.25.2", + "@esbuild/linux-arm64": "0.25.2", + "@esbuild/linux-ia32": "0.25.2", + "@esbuild/linux-loong64": "0.25.2", + "@esbuild/linux-mips64el": "0.25.2", + "@esbuild/linux-ppc64": "0.25.2", + "@esbuild/linux-riscv64": "0.25.2", + "@esbuild/linux-s390x": "0.25.2", + "@esbuild/linux-x64": "0.25.2", + "@esbuild/netbsd-arm64": "0.25.2", + "@esbuild/netbsd-x64": "0.25.2", + "@esbuild/openbsd-arm64": "0.25.2", + "@esbuild/openbsd-x64": "0.25.2", + "@esbuild/sunos-x64": "0.25.2", + "@esbuild/win32-arm64": "0.25.2", + "@esbuild/win32-ia32": "0.25.2", + "@esbuild/win32-x64": "0.25.2" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/esm-env": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/esm-env/-/esm-env-1.2.2.tgz", + "integrity": "sha512-Epxrv+Nr/CaL4ZcFGPJIYLWFom+YeV1DqMLHJoEd9SYRxNbaFruBwfEX/kkHUJf55j2+TUbmDcmuilbP1TmXHA==", + "license": "MIT" + }, + "node_modules/esrap": { + "version": "1.4.6", + "resolved": "https://registry.npmjs.org/esrap/-/esrap-1.4.6.tgz", + "integrity": "sha512-F/D2mADJ9SHY3IwksD4DAXjTt7qt7GWUf3/8RhCNWmC/67tyb55dpimHmy7EplakFaflV0R/PC+fdSPqrRHAQw==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", + "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.3", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.7.1", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.3.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.3", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.12", + "proxy-addr": "~2.0.7", + "qs": "6.13.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.19.0", + "serve-static": "1.16.2", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/express-session": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/express-session/-/express-session-1.18.1.tgz", + "integrity": 
"sha512-a5mtTqEaZvBCL9A9aqkrtfz+3SMDhOVUnjafjo+s7A9Txkq+SVX2DLvSp1Zrv4uCXa3lMSK3viWnh9Gg07PBUA==", + "license": "MIT", + "dependencies": { + "cookie": "0.7.2", + "cookie-signature": "1.0.7", + "debug": "2.6.9", + "depd": "~2.0.0", + "on-headers": "~1.0.2", + "parseurl": "~1.3.3", + "safe-buffer": "5.2.1", + "uid-safe": "~2.1.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/express-session/node_modules/cookie": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express-session/node_modules/cookie-signature": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz", + "integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA==", + "license": "MIT" + }, + "node_modules/express-session/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express-session/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/fdir": { + "version": "6.4.3", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.3.tgz", + "integrity": "sha512-PMXmW2y1hDDfTSRc9gaXIuCCRpuoz3Kaz8cUelp3smouvfT632ozg2vrT6lJsHKKOF59YLbOGfAWGUcKEfRMQw==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/finalhandler": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", 
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fraction.js": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", + "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": 
"sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-reference": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.3.tgz", + "integrity": "sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==", + 
"license": "MIT", + "dependencies": { + "@types/estree": "^1.0.6" + } + }, + "node_modules/jiti": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.4.2.tgz", + "integrity": "sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==", + "license": "MIT", + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/kleur": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", + "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/lightningcss": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.29.2.tgz", + "integrity": "sha512-6b6gd/RUXKaw5keVdSEtqFVdzWnU5jMxTUjA2bVcMNPLwSQ08Sv/UodBVtETLCn7k4S1Ibxwh7k68IwLZPgKaA==", + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-darwin-arm64": "1.29.2", + "lightningcss-darwin-x64": "1.29.2", + "lightningcss-freebsd-x64": "1.29.2", + "lightningcss-linux-arm-gnueabihf": "1.29.2", + "lightningcss-linux-arm64-gnu": "1.29.2", + "lightningcss-linux-arm64-musl": "1.29.2", + "lightningcss-linux-x64-gnu": "1.29.2", + "lightningcss-linux-x64-musl": "1.29.2", + "lightningcss-win32-arm64-msvc": "1.29.2", + "lightningcss-win32-x64-msvc": "1.29.2" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.29.2.tgz", + "integrity": "sha512-cK/eMabSViKn/PG8U/a7aCorpeKLMlK0bQeNHmdb7qUnBkNPnL+oV5DjJUo0kqWsJUapZsM4jCfYItbqBDvlcA==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.29.2.tgz", + "integrity": "sha512-j5qYxamyQw4kDXX5hnnCKMf3mLlHvG44f24Qyi2965/Ycz829MYqjrVg2H8BidybHBp9kom4D7DR5VqCKDXS0w==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.29.2.tgz", + "integrity": "sha512-wDk7M2tM78Ii8ek9YjnY8MjV5f5JN2qNVO+/0BAGZRvXKtQrBC4/cn4ssQIpKIPP44YXw6gFdpUF+Ps+RGsCwg==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.29.2.tgz", + "integrity": "sha512-IRUrOrAF2Z+KExdExe3Rz7NSTuuJ2HvCGlMKoquK5pjvo2JY4Rybr+NrKnq0U0hZnx5AnGsuFHjGnNT14w26sg==", + "cpu": [ + "arm" + ], + "license": "MPL-2.0", + "optional": true, 
+ "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.29.2.tgz", + "integrity": "sha512-KKCpOlmhdjvUTX/mBuaKemp0oeDIBBLFiU5Fnqxh1/DZ4JPZi4evEH7TKoSBFOSOV3J7iEmmBaw/8dpiUvRKlQ==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.29.2.tgz", + "integrity": "sha512-Q64eM1bPlOOUgxFmoPUefqzY1yV3ctFPE6d/Vt7WzLW4rKTv7MyYNky+FWxRpLkNASTnKQUaiMJ87zNODIrrKQ==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.29.2.tgz", + "integrity": "sha512-0v6idDCPG6epLXtBH/RPkHvYx74CVziHo6TMYga8O2EiQApnUPZsbR9nFNrg2cgBzk1AYqEd95TlrsL7nYABQg==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.29.2.tgz", + "integrity": "sha512-rMpz2yawkgGT8RULc5S4WiZopVMOFWjiItBT7aSfDX4NQav6M44rhn5hjtkKzB+wMTRlLLqxkeYEtQ3dd9696w==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.29.2.tgz", + "integrity": "sha512-nL7zRW6evGQqYVu/bKGK+zShyz8OVzsCotFgc7judbt6wnB2KbiKKJwBE4SGoDBQ1O94RjW4asrCjQL4i8Fhbw==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.29.2.tgz", + "integrity": "sha512-EdIUW3B2vLuHmv7urfzMI/h2fmlnOQBk1xlsDxkN1tCWKjNFjfLhGxYk8C8mzpSfr+A6jFFIi8fU6LbQGsRWjA==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/locate-character": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-character/-/locate-character-3.0.0.tgz", + "integrity": 
"sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==", + "license": "MIT" + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/magic-string": { + "version": "0.30.17", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz", + "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mri": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", + "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": 
"MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/node-releases": { + "version": "2.0.19", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", + "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/oauth": { + "version": "0.10.2", + "resolved": "https://registry.npmjs.org/oauth/-/oauth-0.10.2.tgz", + "integrity": "sha512-JtFnB+8nxDEXgNyniwz573xxbKSOu3R8D40xQKqcjwJ2CDkYqUDI53o6IuzDJBx60Z8VKCm271+t8iFjakrl8Q==", + "license": "MIT" + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/passport": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/passport/-/passport-0.7.0.tgz", + "integrity": "sha512-cPLl+qZpSc+ireUvt+IzqbED1cHHkDoVYMo30jbJIdOOjQ1MQYZBPiNvmi8UM6lJuOpTPXJGZQk0DtC4y61MYQ==", + "license": "MIT", + "dependencies": { + "passport-strategy": "1.x.x", + "pause": "0.0.1", + "utils-merge": "^1.0.1" + }, + "engines": { + "node": ">= 0.4.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/jaredhanson" + } + }, + 
"node_modules/passport-github2": { + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/passport-github2/-/passport-github2-0.1.12.tgz", + "integrity": "sha512-3nPUCc7ttF/3HSP/k9sAXjz3SkGv5Nki84I05kSQPo01Jqq1NzJACgMblCK0fGcv9pKCG/KXU3AJRDGLqHLoIw==", + "dependencies": { + "passport-oauth2": "1.x.x" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/passport-oauth2": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/passport-oauth2/-/passport-oauth2-1.8.0.tgz", + "integrity": "sha512-cjsQbOrXIDE4P8nNb3FQRCCmJJ/utnFKEz2NX209f7KOHPoX18gF7gBzBbLLsj2/je4KrgiwLLGjf0lm9rtTBA==", + "license": "MIT", + "dependencies": { + "base64url": "3.x.x", + "oauth": "0.10.x", + "passport-strategy": "1.x.x", + "uid2": "0.0.x", + "utils-merge": "1.x.x" + }, + "engines": { + "node": ">= 0.4.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/jaredhanson" + } + }, + "node_modules/passport-strategy": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/passport-strategy/-/passport-strategy-1.0.0.tgz", + "integrity": "sha512-CB97UUvDKJde2V0KDWWB3lyf6PC3FaZP7YxZ2G8OAtn9p4HI9j9JLP9qjOGZFvyl8uwNT8qM+hGnz/n16NI7oA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "license": "MIT" + }, + "node_modules/pause": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/pause/-/pause-0.0.1.tgz", + "integrity": "sha512-KG8UEiEVkR3wGEb4m5yZkVCzigAD+cVEJck2CzYZO37ZGJfctvVptVO192MwrtPhzONn6go8ylnOdMhKqi4nfg==" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/postcss": { + "version": "8.5.3", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz", + "integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.8", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/prismjs": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", 
+ "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/random-bytes": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/random-bytes/-/random-bytes-1.0.0.tgz", + "integrity": "sha512-iv7LhNVO047HzYR3InF6pUcUsPQiHTM1Qal51DcGSuZFBil1aBBWG5eHPNek7bvILMaYJ/8RU1e8w1AMdHmLQQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", + "dev": true, + "license": "MIT" + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/rollup": { + "version": "4.40.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.40.0.tgz", + "integrity": "sha512-Noe455xmA96nnqH5piFtLobsGbCij7Tu+tb3c1vYjNbTkfzGqXqQXG3wJaYXkRZuQ0vEYN4bhwg7QnIrqB5B+w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.7" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.40.0", + "@rollup/rollup-android-arm64": "4.40.0", + "@rollup/rollup-darwin-arm64": "4.40.0", + "@rollup/rollup-darwin-x64": "4.40.0", + "@rollup/rollup-freebsd-arm64": "4.40.0", + "@rollup/rollup-freebsd-x64": "4.40.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.40.0", + "@rollup/rollup-linux-arm-musleabihf": "4.40.0", + "@rollup/rollup-linux-arm64-gnu": "4.40.0", + 
"@rollup/rollup-linux-arm64-musl": "4.40.0", + "@rollup/rollup-linux-loongarch64-gnu": "4.40.0", + "@rollup/rollup-linux-powerpc64le-gnu": "4.40.0", + "@rollup/rollup-linux-riscv64-gnu": "4.40.0", + "@rollup/rollup-linux-riscv64-musl": "4.40.0", + "@rollup/rollup-linux-s390x-gnu": "4.40.0", + "@rollup/rollup-linux-x64-gnu": "4.40.0", + "@rollup/rollup-linux-x64-musl": "4.40.0", + "@rollup/rollup-win32-arm64-msvc": "4.40.0", + "@rollup/rollup-win32-ia32-msvc": "4.40.0", + "@rollup/rollup-win32-x64-msvc": "4.40.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/rxjs": { + "version": "7.8.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/sade": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz", + "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==", + "dev": true, + "license": "MIT", + "dependencies": { + "mri": "^1.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + 
"node_modules/serve-static": { + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "license": "MIT", + "dependencies": { + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.19.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/shell-quote": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.2.tgz", + "integrity": "sha512-AzqKpGKjrj7EM6rKVQEPpB288oCfnrEIuyoT9cyF4nmGa7V8Zk6f7RRqYisX8X9m+Q7bd632aZW4ky7EhbQztA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/simple-git": { + "version": "3.27.0", + "resolved": "https://registry.npmjs.org/simple-git/-/simple-git-3.27.0.tgz", + "integrity": "sha512-ivHoFS9Yi9GY49ogc6/YAi3Fl9ROnF4VyubNylgCkA+RVqLaKWnDSzXOVzya8csELIaWaYNutsEuAhZrtOjozA==", + "license": "MIT", + "dependencies": { + 
"@kwsites/file-exists": "^1.1.1", + "@kwsites/promise-deferred": "^1.1.1", + "debug": "^4.3.5" + }, + "funding": { + "type": "github", + "url": "https://github.com/steveukx/git-js?sponsor=1" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/spawn-command": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/spawn-command/-/spawn-command-0.0.2.tgz", + "integrity": "sha512-zC8zGoGkmc8J9ndvml8Xksr1Amk9qBujgbF0JAIWO7kXr43w0h/0GJNM/Vustixu+YE8N/MTrQ7N31FvHUACxQ==", + "dev": true + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/style-mod": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz", + "integrity": "sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw==", + "license": "MIT" + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/svelte": { + "version": "5.26.2", + "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.26.2.tgz", + "integrity": "sha512-e2TEcGK2YKVwDWYy5OsptVclYgDvfY1E/8IzPiOq63uG/GDo/j5VUYTC9EinQNraoZalbMWN+5f5TYC1QlAqOw==", + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.3.0", + "@jridgewell/sourcemap-codec": "^1.5.0", + "@sveltejs/acorn-typescript": "^1.0.5", + "@types/estree": "^1.0.5", + "acorn": "^8.12.1", + "aria-query": "^5.3.1", + "axobject-query": "^4.1.0", + "clsx": "^2.1.1", + "esm-env": "^1.2.1", + "esrap": "^1.4.6", + "is-reference": "^3.0.3", + "locate-character": "^3.0.0", + "magic-string": "^0.30.11", + "zimmerframe": "^1.1.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/svelte-check": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/svelte-check/-/svelte-check-4.1.6.tgz", + "integrity": 
"sha512-P7w/6tdSfk3zEVvfsgrp3h3DFC75jCdZjTQvgGJtjPORs1n7/v2VMPIoty3PWv7jnfEm3x0G/p9wH4pecTb0Wg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.25", + "chokidar": "^4.0.1", + "fdir": "^6.2.0", + "picocolors": "^1.0.0", + "sade": "^1.7.4" + }, + "bin": { + "svelte-check": "bin/svelte-check" + }, + "engines": { + "node": ">= 18.0.0" + }, + "peerDependencies": { + "svelte": "^4.0.0 || ^5.0.0-next.0", + "typescript": ">=5.0.0" + } + }, + "node_modules/svelte-codemirror-editor": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/svelte-codemirror-editor/-/svelte-codemirror-editor-1.4.1.tgz", + "integrity": "sha512-Pv350iro0Y/AZTT/y2OLaonheQqAwl50Hdfipa2Jv1Z04TSP5kPUyxQnRjqxeRW7DXOX9s5Nd11tHdBl9iYSzw==", + "license": "MIT", + "peerDependencies": { + "codemirror": "^6.0.0", + "svelte": "^3.0.0 || ^4.0.0 || ^5.0.0" + } + }, + "node_modules/svelte-file-tree": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/svelte-file-tree/-/svelte-file-tree-0.1.0.tgz", + "integrity": "sha512-jR0zTXQ2dgA/IXqJiGAUGy6RsNbZLOpT44Q3R8oSxrJ0kmrecjuUSr3USEsUPz1oI3lAug4lcno4cJCQa+8Iqw==", + "dependencies": { + "esm-env": "^1.2.2", + "svelte-signals": "^0.0.2" + }, + "peerDependencies": { + "svelte": "^5.20.0" + } + }, + "node_modules/svelte-signals": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/svelte-signals/-/svelte-signals-0.0.2.tgz", + "integrity": "sha512-pnTUvgc6cZHGorNn9vozsf0waeVM1RsCm+R4qtbAM0YzLtwPRsZH9p/OBNrXdCbZOs3VoEW4ZTmaN+RAp+uUPA==", + "peerDependencies": { + "svelte": "^5.0.0" + } + }, + "node_modules/svelte-split-pane": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/svelte-split-pane/-/svelte-split-pane-0.1.2.tgz", + "integrity": "sha512-JluEydC9v2DetyhlwCF3CdqAkET8XPHP7WeWbl4lVLOg55avDOhoS5U6BRhvWd104HOqhUcCSz+7Nveyjmzjeg==", + "license": "MIT" + }, + "node_modules/tailwindcss": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.3.tgz", + "integrity": "sha512-2Q+rw9vy1WFXu5cIxlvsabCwhU2qUwodGq03ODhLJ0jW4ek5BUtoCsnLB0qG+m8AHgEsSJcJGDSDe06FXlP74g==", + "license": "MIT" + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/tree-kill": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz", + "integrity": "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==", + "dev": true, + "license": "MIT", + "bin": { + "tree-kill": "cli.js" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD" + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": 
"sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typescript": { + "version": "5.7.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.3.tgz", + "integrity": "sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uid-safe": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/uid-safe/-/uid-safe-2.1.5.tgz", + "integrity": "sha512-KPHm4VL5dDXKz01UuEd88Df+KzynaohSL9fBh096KWAxSKZQDI2uBrVqtvRM4rwrIrRRKsdLNML/lnaaVSRioA==", + "license": "MIT", + "dependencies": { + "random-bytes": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/uid2": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/uid2/-/uid2-0.0.4.tgz", + "integrity": "sha512-IevTus0SbGwQzYh3+fRsAMTVVPOoIVufzacXcHPmdlle1jUpq7BRL+mw3dgeLanvGZdwwbWhRV6XrcFNdBmjWA==", + "license": "MIT" + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vite": { + "version": "6.2.6", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.2.6.tgz", + "integrity": "sha512-9xpjNl3kR4rVDZgPNdTL0/c6ao4km69a/2ihNQbcANz8RuCOK3hQBmLSJf3bRKVQjVMda+YvizNE8AwvogcPbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.25.0", + "postcss": "^8.5.3", + "rollup": "^4.30.1" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 
|| ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vitefu": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/vitefu/-/vitefu-1.0.6.tgz", + "integrity": "sha512-+Rex1GlappUyNN6UfwbVZne/9cYC4+R2XDk9xkNXBKMw6HQagdX9PgZ8V2v1WUSK1wfBLp7qbI1+XSNIlB1xmA==", + "dev": true, + "license": "MIT", + "workspaces": [ + "tests/deps/*", + "tests/projects/*" + ], + "peerDependencies": { + "vite": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0" + }, + "peerDependenciesMeta": { + "vite": { + "optional": true + } + } + }, + "node_modules/w3c-keyname": { + "version": "2.2.8", + "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", + "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==", + "license": "MIT" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/zimmerframe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/zimmerframe/-/zimmerframe-1.1.2.tgz", + "integrity": "sha512-rAbqEGa8ovJy4pyBxZM70hg4pE6gDgaQ0Sl9M3enG3I0d6H4XSAM3GeNGLKnsBpuijUow064sf7ww1nutC5/3w==", + "license": "MIT" + } + } +} diff --git a/ui/package.json b/ui/package.json new file mode 100644 index 0000000..6040dbf --- /dev/null +++ b/ui/package.json @@ -0,0 +1,53 @@ +{ + "name": "ui", + "private": true, 
+ "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "node --experimental-modules dev.js", + "build": "vite build", + "preview": "vite preview", + "check": "svelte-check --tsconfig ./tsconfig.app.json && tsc -p tsconfig.node.json", + "server": "node --experimental-modules server.js", + "start": "npm run build && npm run server" + }, + "devDependencies": { + "@sveltejs/vite-plugin-svelte": "^5.0.3", + "@tsconfig/svelte": "^5.0.4", + "autoprefixer": "^10.4.21", + "concurrently": "^8.2.2", + "postcss": "^8.5.3", + "svelte": "^5.20.2", + "svelte-check": "^4.1.4", + "tailwindcss": "^4.1.3", + "typescript": "~5.7.2", + "vite": "^6.2.0" + }, + "dependencies": { + "@codemirror/basic-setup": "^0.20.0", + "@codemirror/commands": "^6.8.1", + "@codemirror/lang-javascript": "^6.2.4", + "@codemirror/language": "^6.11.1", + "@codemirror/legacy-modes": "^6.5.1", + "@codemirror/state": "^6.5.2", + "@codemirror/theme-one-dark": "^6.1.2", + "@codemirror/view": "^6.37.2", + "@tailwindcss/postcss": "^4.1.3", + "@uiw/codemirror-theme-basic": "^4.23.13", + "@uiw/codemirror-theme-bbedit": "^4.23.13", + "@uiw/codemirror-theme-github": "^4.23.13", + "@uiw/codemirror-theme-white": "^4.23.13", + "@uiw/codemirror-themes": "^4.23.13", + "cmjs-shell": "github:milahu/codemirror-shell", + "dotenv": "^16.5.0", + "express": "^4.18.3", + "express-session": "^1.18.1", + "passport": "^0.7.0", + "passport-github2": "^0.1.12", + "prismjs": "^1.30.0", + "simple-git": "^3.27.0", + "svelte-codemirror-editor": "^1.4.1", + "svelte-file-tree": "^0.1.0", + "svelte-split-pane": "^0.1.2" + } +} diff --git a/ui/postcss.config.js b/ui/postcss.config.js new file mode 100644 index 0000000..44d2119 --- /dev/null +++ b/ui/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + '@tailwindcss/postcss': {}, + autoprefixer: {}, + }, +}; \ No newline at end of file diff --git a/ui/public/auth/login.html b/ui/public/auth/login.html new file mode 100644 index 0000000..8fb2679 --- /dev/null +++ b/ui/public/auth/login.html @@ -0,0 +1,105 @@ + + + + + + CLT UI - Login + + + + + + + + \ No newline at end of file diff --git a/ui/public/vite.svg b/ui/public/vite.svg new file mode 100644 index 0000000..e7b8dfb --- /dev/null +++ b/ui/public/vite.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/routes.js b/ui/routes.js new file mode 100644 index 0000000..1f3ea8a --- /dev/null +++ b/ui/routes.js @@ -0,0 +1,684 @@ +import path from 'path'; +import fs from 'fs/promises'; +import { createReadStream } from 'fs'; +import simpleGit from 'simple-git'; +import { + parseRecFileFromMapWasm, + generateRecFileToMapWasm, + validateTestFromMapWasm +} from './wasmNodeWrapper.js'; + +// Helper functions that were in server.js +export function getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig) { + // Get fresh auth config + const authConfig = getAuthConfig(); + + // If auth is skipped, use a default user + if (authConfig.skipAuth) { + return path.join(WORKDIR, 'dev-mode'); + } + + // If user is authenticated, use their username + if (req.isAuthenticated() && req.user && req.user.username) { + return path.join(WORKDIR, req.user.username); + } + + // Fallback to the root directory if no user is available + return ROOT_DIR; +} + +export function getUserTestPath(req, WORKDIR, ROOT_DIR, getAuthConfig) { + const userRepo = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + return path.join(userRepo, 'test', 'clt-tests'); +} + +// Helper function to merge patterns like CLT does (system + project patterns) +export async function 
getMergedPatterns(userRepoPath, __dirname) { + const patterns = {}; + + // First, load system patterns from global .clt/patterns (like CLT binary directory) + const systemPatternsPath = path.join(__dirname, '.clt', 'patterns'); + try { + const systemContent = await fs.readFile(systemPatternsPath, 'utf8'); + parsePatternContent(systemContent, patterns); + console.log(`πŸ“‹ Loaded ${Object.keys(patterns).length} system patterns from: ${systemPatternsPath}`); + } catch (error) { + console.log(`ℹ️ No system patterns file found: ${systemPatternsPath}`); + } + + // Then, load project patterns from user repo (these override system patterns) + const projectPatternsPath = path.join(userRepoPath, '.clt', 'patterns'); + try { + const projectContent = await fs.readFile(projectPatternsPath, 'utf8'); + parsePatternContent(projectContent, patterns); // This will override system patterns + console.log(`πŸ“‹ Merged with ${Object.keys(patterns).length} total patterns after loading project patterns from: ${projectPatternsPath}`); + } catch (error) { + console.log(`ℹ️ No project patterns file found: ${projectPatternsPath}`); + } + + return patterns; +} + +// Helper function to parse pattern file content +function parsePatternContent(content, patterns) { + for (const line of content.split('\n')) { + const trimmedLine = line.trim(); + if (trimmedLine && !trimmedLine.startsWith('#')) { + const parts = trimmedLine.split(' '); + if (parts.length >= 2) { + const name = parts[0]; + const regex = parts.slice(1).join(' '); + patterns[name] = regex; + } + } + } +} + +// Helper function to create file content map for WASM processing +export async function createFileContentMap(mainFilePath, baseDir, req = null) { + const fileMap = {}; + + try { + // Read the main file + const mainContent = await fs.readFile(mainFilePath, 'utf8'); + const relativePath = path.relative(baseDir, mainFilePath); + fileMap[relativePath] = mainContent; + + // Find and read all .recb block files in the same directory and subdirectories + const mainDir = path.dirname(mainFilePath); + await findAndReadBlockFiles(mainDir, baseDir, fileMap); + + // Parse the main file content to find block references and resolve them + await resolveBlockReferences(mainContent, mainFilePath, baseDir, fileMap); + + console.log(`πŸ“ Created file content map with ${Object.keys(fileMap).length} files`); + console.log(`πŸ“ File map keys: ${Object.keys(fileMap).join(', ')}`); + return fileMap; + } catch (error) { + console.error('Error creating file content map:', error); + throw error; + } +} + +// Parse content and resolve block references +async function resolveBlockReferences(content, mainFilePath, baseDir, fileMap) { + // Find all block references in the content (e.g., "––– block: ../base/start-searchd –––") + const blockRegex = /–––\s*block:\s*([^–]+)\s*–––/g; + let match; + + while ((match = blockRegex.exec(content)) !== null) { + const blockPath = match[1].trim(); + console.log(`πŸ” Found block reference: "${blockPath}"`); + + // Resolve the block file path relative to the main file's directory + const mainFileDir = path.dirname(mainFilePath); + let resolvedBlockPath; + + // Handle different block path formats + if (blockPath.includes('/')) { + // Path with directory (e.g., "../base/start-searchd" or "auth/login") + const blockFileName = blockPath.endsWith('.recb') ? 
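+        // Block references may omit the .recb extension; it is appended here when missing.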
blockPath : `${blockPath}.recb`; + resolvedBlockPath = path.resolve(mainFileDir, blockFileName); + } else { + // Simple block name (e.g., "login-sequence") + const blockFileName = blockPath.endsWith('.recb') ? blockPath : `${blockPath}.recb`; + resolvedBlockPath = path.join(mainFileDir, blockFileName); + } + + console.log(`πŸ“‚ Resolved block path: ${resolvedBlockPath}`); + + // Create the key that WASM expects (with .recb extension) + const wasmExpectedKey = blockPath.endsWith('.recb') ? blockPath : `${blockPath}.recb`; + + // Check if we already have this block in the map + if (!fileMap[wasmExpectedKey]) { + try { + console.log(`πŸ“„ Reading referenced block file: ${resolvedBlockPath}`); + + // Check if file exists first + await fs.access(resolvedBlockPath); + + const blockContent = await fs.readFile(resolvedBlockPath, 'utf8'); + + // Store with the key that WASM expects (with .recb extension) + fileMap[wasmExpectedKey] = blockContent; + console.log(`βœ… Added referenced block file: ${wasmExpectedKey} (${blockContent.length} chars)`); + + // Recursively resolve block references in this block file + await resolveBlockReferences(blockContent, resolvedBlockPath, baseDir, fileMap); + } catch (error) { + console.error(`❌ Could not read referenced block file ${resolvedBlockPath}:`, error.message); + + // Add error content to prevent WASM from failing + fileMap[wasmExpectedKey] = `––– input –––\necho "Error: Block file not found: ${blockPath}"\n––– output –––\nError: Block file not found`; + console.log(`⚠️ Added error placeholder for block: ${wasmExpectedKey}`); + } + } else { + console.log(`ℹ️ Block ${wasmExpectedKey} already in file map`); + } + } +} + +// Recursively find and read .recb files +async function findAndReadBlockFiles(dir, baseDir, fileMap) { + try { + const entries = await fs.readdir(dir, { withFileTypes: true }); + + for (const entry of entries) { + if (entry.name.startsWith('.')) continue; // Skip hidden files + + const fullPath = path.join(dir, entry.name); + + if (entry.isDirectory()) { + // Recursively search subdirectories + await findAndReadBlockFiles(fullPath, baseDir, fileMap); + } else if (entry.name.endsWith('.recb')) { + // Read block file + try { + const content = await fs.readFile(fullPath, 'utf8'); + const relativePath = path.relative(baseDir, fullPath); + fileMap[relativePath] = content; + console.log(`πŸ“„ Added block file to map: ${relativePath}`); + } catch (error) { + console.warn(`⚠️ Could not read block file ${fullPath}:`, error.message); + } + } + } + } catch (error) { + console.warn(`⚠️ Could not read directory ${dir}:`, error.message); + } +} + +// Convert WASM TestStructure to legacy command format for UI compatibility +export function convertTestStructureToLegacyCommands(testStructure, parentBlock = null, blockSource = null) { + const commands = []; + + if (!testStructure || !testStructure.steps) { + return commands; + } + + for (const step of testStructure.steps) { + switch (step.step_type) { + case 'input': + commands.push({ + command: step.content || '', + type: 'command', + status: 'pending', + parentBlock, + blockSource, + isBlockCommand: !!parentBlock + }); + break; + + case 'output': + // Output steps are handled as expectedOutput in the previous input command + if (commands.length > 0 && commands[commands.length - 1].type === 'command') { + commands[commands.length - 1].expectedOutput = step.content || ''; + } + break; + + case 'block': + const blockPath = step.args && step.args.length > 0 ? 
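+        // A block step carries its path as the first args entry; 'unknown-block' is a defensive default.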
step.args[0] : 'unknown-block'; + + // Add the block reference + commands.push({ + command: blockPath, + type: 'block', + status: 'pending', + parentBlock, + blockSource, + isBlockCommand: false + }); + + // Add nested commands from the block + if (step.steps && step.steps.length > 0) { + const nestedCommands = convertTestStructureToLegacyCommands( + { steps: step.steps }, + { command: blockPath }, + blockPath + ); + commands.push(...nestedCommands); + } + break; + + case 'comment': + commands.push({ + command: step.content || '', + type: 'comment', + status: 'pending', + parentBlock, + blockSource, + isBlockCommand: !!parentBlock + }); + break; + } + } + + return commands; +} + +// Helper function to get a file tree +export async function buildFileTree(dir, basePath = '', followSymlinks = true) { + const entries = await fs.readdir(dir, { withFileTypes: true }); + const tree = []; + + for (const entry of entries) { + // Skip hidden files/directories that start with a dot + if (entry.name.startsWith('.')) continue; + + const relativePath = path.join(basePath, entry.name); + const fullPath = path.join(dir, entry.name); + + let isDirectory = entry.isDirectory(); + let targetPath = fullPath; + + // Handle symlinks + if (entry.isSymbolicLink() && followSymlinks) { + try { + // Get the symlink target + const linkTarget = await fs.readlink(fullPath); + + // Resolve to absolute path if needed + const resolvedTarget = path.isAbsolute(linkTarget) + ? linkTarget + : path.resolve(path.dirname(fullPath), linkTarget); + + // Attempt to get stats of the target + const targetStats = await fs.stat(resolvedTarget); + isDirectory = targetStats.isDirectory(); + targetPath = resolvedTarget; + + console.log(`Symlink ${fullPath} -> ${resolvedTarget} (is directory: ${isDirectory})`); + } catch (error) { + console.error(`Error processing symlink ${fullPath}:`, error); + continue; // Skip this entry if we can't resolve the symlink + } + } + + if (isDirectory) { + // For directories (or symlinks to directories), recursively build the tree + let children = []; + try { + children = await buildFileTree(targetPath, relativePath, followSymlinks); + } catch (error) { + console.error(`Error reading directory ${targetPath}:`, error); + } + + tree.push({ + name: entry.name, + path: relativePath, + isDirectory: true, + isSymlink: entry.isSymbolicLink(), + targetPath: entry.isSymbolicLink() ? targetPath : undefined, + children + }); + } else { + // For files, check if they match our extensions + if (entry.name.endsWith('.rec') || entry.name.endsWith('.recb')) { + tree.push({ + name: entry.name, + path: relativePath, + isDirectory: false, + isSymlink: entry.isSymbolicLink(), + targetPath: entry.isSymbolicLink() ? targetPath : undefined + }); + } + } + } + + return tree; +} + +// Convert the title of PR into the branch +export function slugify(str) { + return str + .toLowerCase() + .trim() + .replace(/[\s\_]+/g, '-') + .replace(/[^\w\-]+/g, '') + .replace(/\-+/g, '-'); +} + +// Helper function to extract duration from rep file content +export function extractDuration(content) { + const durationMatch = content.match(/––– duration: (\d+)ms/); + return durationMatch ? 
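+  // Timing markers in .rep content look like "––– duration: 123ms"; return the milliseconds, or null when absent.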
parseInt(durationMatch[1], 10) : null;
+}
+
+// Ensure the origin remote carries the user's authentication token before any network operation
+export async function ensureGitRemoteWithToken(gitInstance, token, REPO_URL) {
+  if (!token) return;
+
+  try {
+    // Use the REPO_URL variable directly for consistent base URL
+    const tokenUrl = REPO_URL.replace('https://', `https://x-access-token:${token}@`);
+
+    // Remove existing origin and add new one with token
+    await gitInstance.removeRemote('origin');
+    await gitInstance.addRemote('origin', tokenUrl);
+    console.log('Git remote configured with authentication token');
+  } catch (error) {
+    console.warn('Error configuring git remote with token:', error.message);
+  }
+}
+
+// Setup routes function
+export function setupRoutes(app, isAuthenticated, dependencies) {
+  const {
+    WORKDIR,
+    ROOT_DIR,
+    REPO_URL,
+    __dirname,
+    getAuthConfig,
+    ensureUserRepo
+  } = dependencies;
+
+  // API health check endpoint - can be used to verify authentication
+  app.get('/api/health', isAuthenticated, (req, res) => {
+    return res.json({
+      status: 'ok',
+      authenticated: req.isAuthenticated(),
+      user: req.user ? req.user.username : null
+    });
+  });
+
+  // API endpoint to get the file tree
+  app.get('/api/get-file-tree', isAuthenticated, async (req, res) => {
+    try {
+      // Get fresh auth config
+      const authConfig = getAuthConfig();
+
+      // Ensure user repo exists
+      const username = req.user?.username || (authConfig.skipAuth ? 'dev-mode' : null);
+      if (username) {
+        await ensureUserRepo(username);
+      }
+
+      // Get the user's test directory
+      const testDir = getUserTestPath(req, WORKDIR, ROOT_DIR, getAuthConfig);
+      const testDirExists = await fs.access(testDir).then(() => true).catch(() => false);
+
+      if (!testDirExists) {
+        return res.status(404).json({ error: 'Test directory not found' });
+      }
+
+      // Build the file tree with the user's test directory as the base
+      const fileTree = await buildFileTree(testDir);
+
+      // Return the file tree directly without wrapping it in a virtual root node
+      res.json({ fileTree });
+    } catch (error) {
+      console.error('Error getting file tree:', error);
+      res.status(500).json({ error: 'Failed to get file tree' });
+    }
+  });
+
+  // API endpoint to get file content
+  app.get('/api/get-file', isAuthenticated, async (req, res) => {
+    try {
+      const { path: filePath } = req.query;
+
+      if (!filePath) {
+        return res.status(400).json({ error: 'File path is required' });
+      }
+
+      // Use the user's test directory as the base
+      const testDir = getUserTestPath(req, WORKDIR, ROOT_DIR, getAuthConfig);
+      const absolutePath = path.join(testDir, filePath);
+
+      // Basic security check to ensure the path is within the test directory
+      if (!absolutePath.startsWith(testDir)) {
+        return res.status(403).json({ error: 'Access denied' });
+      }
+
+      // For .rec and .recb files, use WASM parsing to return structured content
+      if (filePath.endsWith('.rec') || filePath.endsWith('.recb')) {
+        try {
+          console.log(`πŸ“– Loading structured test file via WASM: ${absolutePath}`);
+
+          // Get raw content first
+          const rawContent = await fs.readFile(absolutePath, 'utf8');
+
+          // Parse .rec file using WASM with file content map (NO file I/O in WASM)
+          console.log(`πŸ“– Parsing .rec file with WASM using content map: ${absolutePath}`);
+
+          // Create file content map for WASM
+          const fileMap = await createFileContentMap(absolutePath, testDir, req);
+          const relativeFilePath = path.relative(testDir, absolutePath);
+
+          // Call WASM with the path and content map (the catch below falls back to raw content)
+          const testStructure
= await parseRecFileFromMapWasm(relativeFilePath, fileMap); + + // Convert WASM structure to UI commands format + const uiCommands = convertTestStructureToLegacyCommands(testStructure); + + res.json({ + content: rawContent, + structuredData: testStructure, // Keep for future use + commands: uiCommands, // For current UI compatibility + wasmparsed: true + }); + } catch (error) { + console.error('WASM parsing failed:', error); + // Return raw content if WASM fails + const rawContent = await fs.readFile(absolutePath, 'utf8'); + res.json({ + content: rawContent, + structuredData: null, + commands: [], // Empty commands array + wasmparsed: false, + error: error.message + }); + } + } else { + // For non-test files, return raw content + const content = await fs.readFile(absolutePath, 'utf8'); + res.json({ content }); + } + } catch (error) { + console.error('Error reading file:', error); + res.status(404).json({ error: 'File not found or could not be read' }); + } + }); + + // API endpoint to save file content + app.post('/api/save-file', isAuthenticated, async (req, res) => { + try { + const { path: filePath, content, structuredData } = req.body; + + if (!filePath) { + return res.status(400).json({ error: 'File path is required' }); + } + + // Use the user's test directory as the base + const testDir = getUserTestPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const absolutePath = path.join(testDir, filePath); + + // Basic security check to ensure the path is within the test directory + if (!absolutePath.startsWith(testDir)) { + return res.status(403).json({ error: 'Access denied' }); + } + + // Ensure directory exists + const directory = path.dirname(absolutePath); + await fs.mkdir(directory, { recursive: true }); + + // For .rec and .recb files, try WASM generation if structured data is provided + if ((filePath.endsWith('.rec') || filePath.endsWith('.recb')) && structuredData) { + console.log(`πŸ’Ύ Saving structured test file via WASM: ${absolutePath}`); + + try { + // Create file content map for any referenced block files (same as reading) + const existingFileMap = await createFileContentMap(absolutePath, testDir, req).catch(() => ({})); + + // Use WASM to generate file content map from structured data + const relativeFilePath = path.relative(testDir, absolutePath); + const generatedFileContentMap = await generateRecFileToMapWasm(relativeFilePath, structuredData); + + // Get the generated content for the main file + const generatedContent = generatedFileContentMap[relativeFilePath]; + + if (generatedContent && generatedContent.length > 0) { + // Write the generated content to disk + await fs.writeFile(absolutePath, generatedContent, 'utf8'); + console.log('βœ… File saved via WASM generation'); + res.json({ + success: true, + method: 'wasm', + generatedContent: generatedContent + }); + return; + } else { + console.warn('WASM generation returned empty content, falling back to manual content'); + } + } catch (error) { + console.error('WASM generation failed:', error); + console.warn('Falling back to manual content'); + } + } + + // Fallback: save the manual content directly + if (content !== undefined) { + await fs.writeFile(absolutePath, content, 'utf8'); + console.log('βœ… File saved via manual content'); + res.json({ + success: true, + method: 'manual' + }); + } else { + return res.status(400).json({ error: 'File path and content (or structuredData) are required' }); + } + } catch (error) { + console.error('Error saving file:', error); + res.status(500).json({ error: 'Failed to save file' }); + 
} + }); + + // API endpoint to move or rename a file + app.post('/api/move-file', isAuthenticated, async (req, res) => { + try { + const { sourcePath, targetPath } = req.body; + + if (!sourcePath || !targetPath) { + return res.status(400).json({ error: 'Source and target paths are required' }); + } + + // Use the user's test directory as the base + const testDir = getUserTestPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const absoluteSourcePath = path.join(testDir, sourcePath); + const absoluteTargetPath = path.join(testDir, targetPath); + + // Basic security check to ensure both paths are within the test directory + if (!absoluteSourcePath.startsWith(testDir) || !absoluteTargetPath.startsWith(testDir)) { + return res.status(403).json({ error: 'Access denied' }); + } + + // Ensure target directory exists + const targetDir = path.dirname(absoluteTargetPath); + await fs.mkdir(targetDir, { recursive: true }); + + // Move/rename the file + await fs.rename(absoluteSourcePath, absoluteTargetPath); + + res.json({ success: true }); + } catch (error) { + console.error('Error moving file:', error); + res.status(500).json({ error: 'Failed to move file' }); + } + }); + + // API endpoint to delete a file + app.delete('/api/delete-file', isAuthenticated, async (req, res) => { + try { + const { path: filePath } = req.body; + + if (!filePath) { + return res.status(400).json({ error: 'File path is required' }); + } + + // Use the user's test directory as the base + const testDir = getUserTestPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const absolutePath = path.join(testDir, filePath); + + // Basic security check to ensure the path is within the test directory + if (!absolutePath.startsWith(testDir)) { + return res.status(403).json({ error: 'Access denied' }); + } + + // Check if it's a file or directory + const stats = await fs.stat(absolutePath); + + if (stats.isDirectory()) { + // For directories, use recursive removal + await fs.rm(absolutePath, { recursive: true }); + } else { + // For individual files + await fs.unlink(absolutePath); + } + + res.json({ success: true }); + } catch (error) { + console.error('Error deleting file:', error); + res.status(500).json({ error: 'Failed to delete file' }); + } + }); + + // API endpoint to create directory + app.post('/api/create-directory', isAuthenticated, async (req, res) => { + try { + const { path: dirPath } = req.body; + + if (!dirPath) { + return res.status(400).json({ error: 'Directory path is required' }); + } + + // Use the user's test directory as the base + const testDir = getUserTestPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const absolutePath = path.join(testDir, dirPath); + + // Basic security check to ensure the path is within the test directory + if (!absolutePath.startsWith(testDir)) { + return res.status(403).json({ error: 'Access denied' }); + } + + // Create directory recursively + await fs.mkdir(absolutePath, { recursive: true }); + + res.json({ success: true, path: dirPath }); + } catch (error) { + console.error('Error creating directory:', error); + res.status(500).json({ error: 'Failed to create directory' }); + } + }); + + // API endpoint to get patterns file + app.get('/api/get-patterns', isAuthenticated, async (req, res) => { + try { + console.log('🎯 Getting merged patterns for user repository'); + + // Get the user's repository path for pattern context + const authConfig = getAuthConfig(); + const username = req.user?.username || (authConfig.skipAuth ? 
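+      // ('dev-mode' is the synthetic username used for the shared workdir when authentication is skipped)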
'dev-mode' : null);
+      let userRepoPath = null;
+
+      if (username) {
+        userRepoPath = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig);
+        console.log(`Using user repo path for patterns: ${userRepoPath}`);
+      }
+
+      // Get merged patterns (system + project)
+      try {
+        const patterns = await getMergedPatterns(userRepoPath || __dirname, __dirname);
+        console.log(`βœ… Found ${Object.keys(patterns).length} merged patterns`);
+        return res.json({ patterns });
+      } catch (patternError) {
+        console.error('Pattern merging failed:', patternError);
+        // Return empty patterns instead of failing
+        return res.json({ patterns: {} });
+      }
+    } catch (error) {
+      console.error('Error getting patterns:', error);
+      res.status(500).json({ error: 'Failed to get patterns' });
+    }
+  });
+}
\ No newline at end of file
diff --git a/ui/server.js b/ui/server.js
new file mode 100644
index 0000000..7068d14
--- /dev/null
+++ b/ui/server.js
@@ -0,0 +1,203 @@
+import express from 'express';
+import path from 'path';
+import { fileURLToPath } from 'url';
+import fs from 'fs/promises';
+import { createReadStream } from 'fs';
+import session from 'express-session';
+import dotenv from 'dotenv';
+import simpleGit from 'simple-git';
+
+import {
+  getPatternsWasm,
+  parseRecFileFromMapWasm,
+  generateRecFileToMapWasm,
+  validateTestFromMapWasm
+} from './wasmNodeWrapper.js';
+
+// Import refactored modules
+import { setupRoutes, getUserRepoPath, getUserTestPath } from './routes.js';
+import { setupGitAndTestRoutes } from './testAndGitRoutes.js';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+// Load environment variables from .env file FIRST
+dotenv.config();
+
+// Import auth modules AFTER environment variables are loaded.
+// Static `import` declarations are hoisted and would run before dotenv.config(),
+// so these two modules are loaded dynamically instead.
+const { setupPassport, isAuthenticated, addAuthRoutes } = await import('./auth.js');
+const { getAuthConfig } = await import('./config/auth.js');
+
+const app = express();
+const PORT = process.env.BACKEND_PORT || process.env.PORT || 3000;
+const HOST = process.env.HOST || 'localhost';
+
+// Root directory of the project (the current directory where server.js is running)
+const ROOT_DIR = process.cwd();
+
+// Initialize session middleware
+app.use(session({
+  secret: process.env.SESSION_SECRET || 'clt-ui-secret-key',
+  resave: false,
+  saveUninitialized: false,
+  cookie: {
+    secure: process.env.NODE_ENV === 'production',
+    maxAge: 24 * 60 * 60 * 1000, // 24 hours
+    httpOnly: true,
+    sameSite: 'lax' // Sent on top-level navigations, withheld on cross-site subrequests
+  }
+}));
+
+// Initialize passport and authentication
+const passport = setupPassport();
+app.use(passport.initialize());
+app.use(passport.session());
+
+// Add helper to save tokens when users authenticate
+app.use((req, res, next) => {
+  if (req.user && req.user.username && req.user.token) {
+    // Store token for repository operations
+    if (!global.userTokens) global.userTokens = {};
+    global.userTokens[req.user.username] = req.user.token;
+  }
+  next();
+});
+
+// Enable CORS for development
+app.use((req, res, next) => {
+  // Reflect the caller's origin, so the frontend URL (or any other origin) can send credentialed requests
+  const frontendUrl = process.env.FRONTEND_URL || 'http://localhost:5173';
+  const origin = req.headers.origin;
+
+  // If the request carries an Origin header, echo it back with credentials enabled
+  if (origin) {
+    res.header('Access-Control-Allow-Origin', origin);
+    res.header('Access-Control-Allow-Credentials', 'true');
+    res.header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept');
+    res.header('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS');
+  } else {
+    // For requests without an Origin header (like API tools), allow any origin but no credentials
+    res.header('Access-Control-Allow-Origin', '*');
+    res.header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept');
+    res.header('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS');
+  }
+
+  if (req.method === 'OPTIONS') {
+    return res.sendStatus(200);
+  }
+  next();
+});
+
+// Parse JSON bodies
+app.use(express.json());
+
+// Add authentication routes
+addAuthRoutes(app);
+
+// Create a workdir folder for the user repos if it doesn't exist
+const WORKDIR = path.join(ROOT_DIR, 'workdir');
+const REPO_URL = process.env.REPO_URL;
+
+// Ensure workdir exists
+try {
+  await fs.mkdir(WORKDIR, { recursive: true });
+  console.log(`Ensured workdir exists at ${WORKDIR}`);
+} catch (error) {
+  console.error(`Error creating workdir: ${error}`);
+}
+
+// Set up or fetch the user's repository on login
+async function ensureUserRepo(username) {
+  if (!username) return null;
+
+  try {
+    const userDir = path.join(WORKDIR, username);
+    const userRepoExists = await fs.access(userDir).then(() => true).catch(() => false);
+
+    if (!userRepoExists) {
+      console.log(`Setting up repository for user ${username}`);
+      await fs.mkdir(userDir, { recursive: true });
+
+      // Get the user's token if available from session
+      const userToken = global.userTokens && global.userTokens[username];
+
+      // Clone the repository using simple-git
+      const git = simpleGit({ baseDir: WORKDIR });
+
+      // Use authentication if we have a token
+      if (userToken) {
+        // Create authenticated URL
+        let cloneUrl = REPO_URL;
+        if (REPO_URL.startsWith('https://')) {
+          cloneUrl = REPO_URL.replace('https://', `https://x-access-token:${userToken}@`);
+        }
+        console.log(`Cloning repository for user ${username} with authentication`);
+        await git.clone(cloneUrl, userDir);
+
+        // Initialize a new git instance in the user's repository directory
+        const userGit = simpleGit(userDir);
+
+        // Set local repository configuration for the specific repository
+        await userGit.addConfig('user.name', username, false, 'local');
+        await userGit.addConfig('user.email', `${username}@users.noreply.github.com`, false, 'local');
+        console.log(`Set local git config for ${username}`);
+        console.log(`Cloned repository for user ${username}`);
+      } else {
+        console.log('Missing user token, skipping git clone');
+      }
+    }
+
+    // Verify the repo is valid and the CLT tests folder exists
+    const testDir = path.join(userDir, 'test', 'clt-tests');
+    const testDirExists = await fs.access(testDir).then(() => true).catch(() => false);
+
+    if (!testDirExists) {
+      console.error(`CLT tests directory not found for user ${username}.
Expected at: ${testDir}`); + return null; + } + + return { userDir, testDir }; + } catch (error) { + console.error(`Error setting up user repository: ${error}`); + return null; + } +} + +// Store user tokens for repository operations +global.userTokens = {}; + +// Store for interactive sessions +global.interactiveSessions = {}; + +// Make the function available globally for the auth system +global.ensureUserRepo = ensureUserRepo; + +// Setup routes from refactored modules +const dependencies = { + WORKDIR, + ROOT_DIR, + REPO_URL, + __dirname, + getAuthConfig, + ensureUserRepo +}; + +setupRoutes(app, isAuthenticated, dependencies); +setupGitAndTestRoutes(app, isAuthenticated, dependencies); + +// Serve static files from the dist directory +app.use(express.static(path.join(__dirname, 'dist'))); + +// Serve public content (for login page and other public resources) +app.use(express.static(path.join(__dirname, 'public'))); + +app.get('*', isAuthenticated, (req, res) => { + res.sendFile(path.join(__dirname, 'dist', 'index.html')); +}); + +// Start server +app.listen(PORT, HOST === 'localhost' ? HOST : '0.0.0.0', () => { + console.log(`Server is running on ${HOST}:${PORT}`); +}); \ No newline at end of file diff --git a/ui/src/App.svelte b/ui/src/App.svelte new file mode 100644 index 0000000..fcd2362 --- /dev/null +++ b/ui/src/App.svelte @@ -0,0 +1,194 @@ + + +
+ {#if isLoading} +
+
+

Loading...

+
+ {:else if $authStore.isAuthenticated || $authStore.skipAuth} +
+ +
+ + +
+ + + + {:else} + + {/if} +
+ + diff --git a/ui/src/app.css b/ui/src/app.css new file mode 100644 index 0000000..180bd92 --- /dev/null +++ b/ui/src/app.css @@ -0,0 +1,848 @@ +/* Base styles */ +:root { + --color-bg-primary: #ffffff; + --color-bg-secondary: #f7f9fc; + --color-bg-tertiary: #edf2f7; + --color-bg-accent: #3b82f6; + --color-bg-hover: #f0f5ff; + --color-bg-selected: #ebf4ff; + + --color-text-primary: #1a202c; + --color-text-secondary: #4a5568; + --color-text-tertiary: #718096; + --color-text-accent: #2b6cb0; + --color-text-inverted: #ffffff; + + --color-border-light: #e2e8f0; + --color-border-medium: #cbd5e0; + --color-border-dark: #a0aec0; + + --spacing-xs: 4px; + --spacing-sm: 8px; + --spacing-md: 16px; + --spacing-lg: 24px; + --spacing-xl: 32px; + + --radius-sm: 4px; + --radius-md: 6px; + --radius-lg: 12px; + --radius-circle: 50%; + + --shadow-sm: 0 1px 3px rgba(0,0,0,0.1); + --shadow-md: 0 4px 6px rgba(0,0,0,0.05), 0 2px 4px rgba(0,0,0,0.05); + --shadow-lg: 0 10px 15px rgba(0,0,0,0.05); + + --font-sans: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; + --font-mono: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; + + --transition-fast: 150ms ease-in-out; + --transition-normal: 250ms ease-in-out; +} + +/* Dark mode colors */ +@media (prefers-color-scheme: dark) { + :root { + --color-bg-primary: #1a202c; + --color-bg-secondary: #2d3748; + --color-bg-tertiary: #4a5568; + --color-bg-accent: #4299e1; + --color-bg-hover: #2a4365; + --color-bg-selected: #2c5282; + + --color-text-primary: #f7fafc; + --color-text-secondary: #e2e8f0; + --color-text-tertiary: #cbd5e0; + --color-text-accent: #90cdf4; + } +} + +/* Reset */ +* { + box-sizing: border-box; + margin: 0; + padding: 0; +} + +body { + font-family: var(--font-sans); + font-size: 14px; + line-height: 1.5; + color: var(--color-text-primary); + background-color: var(--color-bg-secondary); + height: 100vh; + margin: 0; +} + +/* Main layout */ +.app-container { + display: flex; + flex-direction: column; + height: 100vh; + overflow: hidden; +} + +/* Header */ +.header { + background-color: var(--color-bg-primary); + border-bottom: 1px solid var(--color-border-light); + box-shadow: var(--shadow-sm); + padding: var(--spacing-md) var(--spacing-lg); + display: flex; + align-items: center; + justify-content: space-between; +} + +.app-title { + display: flex; + align-items: center; + gap: var(--spacing-sm); + font-size: 18px; + font-weight: 600; + min-width: 250px; + color: var(--color-text-primary); +} + +.app-title svg { + width: 22px; + height: 22px; + color: var(--color-text-accent); +} + +.docker-image-container { + background-color: var(--color-bg-secondary); + border: 1px solid var(--color-border-light); + border-radius: var(--radius-md); + padding: var(--spacing-xs) var(--spacing-md); + display: flex; + align-items: center; + gap: var(--spacing-sm); +} + +.docker-image-container label { + font-size: 13px; + font-weight: 500; + color: var(--color-text-secondary); + display: flex; + align-items: center; +} + +.docker-image-container input { + padding: var(--spacing-xs) var(--spacing-sm); + border: 1px solid var(--color-border-medium); + border-radius: var(--radius-sm); + font-size: 13px; + width: 350px; + background-color: var(--color-bg-primary); + color: var(--color-text-primary); +} + +.docker-image-container input:focus { + outline: none; + border-color: var(--color-bg-accent); + box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.25); +} + +/* Main content */ +.main-content { + display: 
flex; + flex: 1; + overflow: hidden; +} + +/* File Explorer */ +.file-explorer { + width: 250px; + flex-shrink: 0; + background-color: var(--color-bg-primary); + border-right: 1px solid var(--color-border-light); + display: flex; + flex-direction: column; +} + +.file-explorer-header { + padding: var(--spacing-md); + border-bottom: 1px solid var(--color-border-light); + font-weight: 500; +} + +.file-tree { + flex: 1; + overflow-y: auto; + padding: var(--spacing-sm); +} + +.file-node { + position: relative; + margin-bottom: var(--spacing-xs); +} + +.tree-item { + display: flex; + align-items: center; + padding: var(--spacing-sm); + border-radius: var(--radius-sm); + cursor: pointer; + transition: background-color var(--transition-fast); +} + +.tree-item:hover { + background-color: var(--color-bg-hover); +} + +.tree-item.selected { + background-color: var(--color-bg-selected); + color: var(--color-text-accent); +} + +.tree-item-icon { + margin-right: var(--spacing-sm); + display: flex; + align-items: center; + justify-content: center; + width: 18px; + height: 18px; + color: var(--color-text-tertiary); +} + +.tree-item-folder { + color: #f0b429; +} + +.tree-item-folder-open { + color: #3182ce; +} + +.tree-item-name { + flex: 1; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.tree-item-arrow { + width: 16px; + height: 16px; + transition: transform var(--transition-fast); + color: var(--color-text-tertiary); +} + +.tree-item-arrow.expanded { + transform: rotate(90deg); +} + +.tree-children { + margin-left: var(--spacing-lg); + border-left: 1px solid var(--color-border-light); + padding-left: var(--spacing-sm); +} + +.tree-children .file-node { + position: relative; +} + +.tree-children .file-node::before { + content: ''; + position: absolute; + left: -10px; + top: 12px; + width: 8px; + height: 1px; + background-color: var(--color-border-light); +} + +.file-explorer-footer { + padding: var(--spacing-sm); + border-top: 1px solid var(--color-border-light); +} + +.new-file-form { + display: flex; +} + +.new-file-input { + flex: 1; + padding: var(--spacing-sm); + border: 1px solid var(--color-border-medium); + border-radius: var(--radius-sm); + font-size: 12px; +} + +.new-file-button { + display: flex; + align-items: center; + justify-content: center; + width: 32px; + height: 32px; + background-color: var(--color-bg-accent); + color: white; + border: none; + border-radius: var(--radius-sm); + cursor: pointer; +} + +/* Editor */ +.editor { + flex: 1; + display: flex; + flex-direction: column; + background-color: var(--color-bg-secondary); + overflow: hidden; +} + +.editor-header { + display: flex; + align-items: center; + justify-content: space-between; + padding: var(--spacing-md); + background-color: var(--color-bg-primary); + border-bottom: 1px solid var(--color-border-light); +} + +.file-path { + font-family: var(--font-mono); + font-size: 13px; + padding: var(--spacing-xs) var(--spacing-sm); + background-color: var(--color-bg-secondary); + border-radius: var(--radius-sm); +} + +.file-modified-indicator { + margin-left: var(--spacing-xs); + color: var(--color-text-accent); +} + +.save-button { + background-color: #38a169; + color: white; + border: none; + border-radius: var(--radius-sm); + padding: var(--spacing-sm) var(--spacing-lg); + font-size: 13px; + font-weight: 500; + cursor: pointer; + transition: background-color var(--transition-fast); +} + +.save-button:hover { + background-color: #2f855a; +} + +.save-button:disabled { + background-color: #a0aec0; + cursor: 
not-allowed; +} + +.editor-content { + flex: 1; + overflow-y: auto; + padding: var(--spacing-lg); +} + +.editor-empty { + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + height: 100%; + color: var(--color-text-tertiary); +} + +.editor-empty svg { + width: 64px; + height: 64px; + margin-bottom: var(--spacing-lg); + color: var(--color-border-medium); +} + +.command-list { + display: flex; + flex-direction: column; + gap: var(--spacing-xs); +} + +.command-card { + background-color: var(--color-bg-primary); + border-radius: var(--radius-md); + border: 1px solid var(--color-border-light); + box-shadow: var(--shadow-sm); + overflow: hidden; + transition: box-shadow var(--transition-fast); +} + +.command-card:hover { + box-shadow: var(--shadow-md); +} + +.command-header { + display: flex; + align-items: center; + justify-content: space-between; + padding: var(--spacing-xs) var(--spacing-md); + background-color: var(--color-bg-secondary); + border-bottom: 1px solid var(--color-border-light); +} + +.command-title { + display: flex; + align-items: center; + gap: var(--spacing-sm); + font-weight: 500; + color: var(--color-text-secondary); +} + +.command-number { + display: flex; + align-items: center; + justify-content: center; + width: 20px; + height: 20px; + background-color: var(--color-bg-accent); + color: white; + border-radius: var(--radius-circle); + font-size: 11px; +} + +/* Command Actions - Clean Icon-Only Design */ +.command-actions { + display: flex; + align-items: center; + gap: 2px; + opacity: 0.6; + transition: opacity var(--transition-fast); +} + +.command-card:hover .command-actions { + opacity: 1; +} + +.action-button { + display: flex; + align-items: center; + justify-content: center; + width: 24px; + height: 24px; + padding: 0; + background-color: transparent; + border: none; + border-radius: 3px; + cursor: pointer; + transition: all var(--transition-fast); + color: var(--color-text-tertiary); +} + +.action-button:hover:not(:disabled) { + background-color: var(--color-bg-hover); + color: var(--color-text-primary); + transform: scale(1.1); +} + +.action-button:disabled { + opacity: 0.3; + cursor: not-allowed; +} + +.action-button.add-command:hover:not(:disabled) { + color: #059669; + background-color: rgba(5, 150, 105, 0.1); +} + +.action-button.add-block:hover:not(:disabled) { + color: #7c3aed; + background-color: rgba(124, 58, 237, 0.1); +} + +.action-button.add-comment:hover:not(:disabled) { + color: #0ea5e9; + background-color: rgba(14, 165, 233, 0.1); +} + +.action-button.move-up:hover:not(:disabled), +.action-button.move-down:hover:not(:disabled) { + color: #6b7280; + background-color: rgba(107, 114, 128, 0.1); +} + +.action-button.duplicate:hover:not(:disabled) { + color: #f59e0b; + background-color: rgba(245, 158, 11, 0.1); +} + +.action-button.delete:hover:not(:disabled) { + color: #e53e3e; + background-color: rgba(229, 62, 62, 0.1); +} + +/* Action Separator */ +.action-separator { + width: 1px; + height: 14px; + background-color: var(--color-border-light); + margin: 0 4px; + opacity: 0.5; +} + +/* Legacy styles - remove */ +.delete-button, +.dropdown-container, +.dropdown-menu, +.dropdown-item { + display: none; +} + +.command-body { + padding: var(--spacing-xs) var(--spacing-md); +} + +.command-input { + width: 100%; + padding: var(--spacing-sm); + border: 1px solid var(--color-border-light); + border-radius: var(--radius-sm); + font-family: var(--font-mono); + font-size: 13px; + resize: none; + background-color: 
var(--color-bg-secondary); + color: var(--color-text-primary); + margin-bottom: 0; +} + +.command-input:focus { + outline: none; + border-color: var(--color-bg-accent); + box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.25); +} + +.output-grid { + display: grid; + grid-template-columns: 1fr 1fr; + gap: var(--spacing-sm); + margin-top: var(--spacing-xs); +} + +.output-column { + display: flex; + flex-direction: column; +} + +.output-header { + display: flex; + align-items: center; + gap: var(--spacing-xs); + margin-bottom: var(--spacing-sm); + font-weight: 500; + font-size: 12px; + color: var(--color-text-secondary); +} + +.output-indicator { + width: 8px; + height: 8px; + border-radius: var(--radius-circle); +} + +.expected-indicator { + background-color: #38a169; +} + +.actual-indicator { + background-color: #805ad5; +} + +/* Output fields with synchronized expansion */ +.expected-output, +.actual-output { + width: 100%; + padding: var(--spacing-sm); + border: 1px solid var(--color-border-light); + border-radius: var(--radius-sm); + font-family: var(--font-mono); + font-size: 13px; + height: 100px !important; /* Default height */ + max-height: 100px !important; + transition: all 0.2s ease; +} + +.expected-output { + resize: none; + background-color: #f0fff4; + color: var(--color-text-primary); + word-wrap: normal; +} + +.expected-output:focus { + outline: none; + border-color: #38a169; + box-shadow: 0 0 0 2px rgba(56, 161, 105, 0.25); +} + +.actual-output { + background-color: #f8f4ff; + color: var(--color-text-secondary); + cursor: pointer; +} + +.expected-output.expanded, +.actual-output.expanded { + height: 400px !important; + max-height: 400px !important; +} + +/* Adjust output grid for expanded items */ +.output-grid.has-expanded-outputs { + align-items: stretch; +} + +.output-grid.has-expanded-outputs .output-column { + display: flex; + flex-direction: column; +} + +.output-grid.has-expanded-outputs .expected-output, +.output-grid.has-expanded-outputs .actual-output { + flex: 1; +} + +.no-commands { + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + padding: var(--spacing-xl) 0; + background-color: var(--color-bg-primary); + border: 2px dashed var(--color-border-light); + border-radius: var(--radius-md); + text-align: center; +} + +.no-commands svg { + width: 32px; + height: 32px; + color: var(--color-text-tertiary); + margin-bottom: var(--spacing-md); +} + +.no-commands h3 { + font-size: 16px; + font-weight: 500; + margin-bottom: var(--spacing-sm); + color: var(--color-text-primary); +} + +.no-commands p { + color: var(--color-text-tertiary); + margin-bottom: var(--spacing-lg); + max-width: 300px; +} + +.add-first-command-button { + display: flex; + align-items: center; + gap: var(--spacing-sm); + padding: var(--spacing-sm) var(--spacing-lg); + background-color: var(--color-bg-accent); + color: white; + border: none; + border-radius: var(--radius-sm); + font-size: 13px; + font-weight: 500; + cursor: pointer; + transition: background-color var(--transition-fast); +} + +.add-first-command-button svg { + margin: 0; + color: white; +} + +.add-first-command-button:hover { + background-color: #2c5282; +} + +/* Status indicators */ +.pending-status { + background-color: #f3f4f6; /* Light gray background */ + color: #6b7280; /* Medium gray text */ + border: 1px solid #e5e7eb; +} + +.matched-status { + background-color: #dcfce7; + color: #166534; +} + +.failed-status { + background-color: #fee2e2; + color: #b91c1c; +} + +.passed-status { + 
background-color: #dcfce7; + color: #166534; +} + +/* WASM Diff Highlighting Styles - Added at Global Level */ +.wasm-diff { + font-family: var(--font-mono) !important; + white-space: pre-wrap !important; + line-height: 1.5 !important; + display: block !important; +} + +.wasm-diff * { + margin: 0 !important; + padding: 0 !important; +} + +.highlight-diff { + background-color: #fecaca !important; /* light red background */ + color: #991b1b !important; /* dark red text */ + padding: 1px 0 !important; + font-weight: bold !important; + border-bottom: 1px dashed #dc2626 !important; + display: inline !important; +} + +.highlight-line { + background-color: #fef2f2 !important; /* very light red */ + display: block !important; + width: 100% !important; + border-left: 3px solid #ef4444 !important; + padding-left: 4px !important; + margin-left: -7px !important; + margin-top: 0 !important; + margin-bottom: 0 !important; +} + +.diff-added-line { + background-color: #ecfdf5 !important; /* green-50 */ + display: block !important; + width: 100% !important; + border-left: 3px solid #10b981 !important; + padding-left: 4px !important; + margin-left: -7px !important; + margin-top: 0 !important; + margin-bottom: 0 !important; + line-height: 1.5 !important; +} + +.diff-removed-line { + background-color: #fee2e2 !important; /* light red background */ + display: block !important; + width: 100% !important; + border-left: 3px solid #dc2626 !important; + padding-left: 4px !important; + margin-left: -7px !important; + margin-top: 0 !important; + margin-bottom: 0 !important; + color: #b91c1c !important; + line-height: 1.5 !important; +} + +/* Command card with failed status */ +.command-card.failed-command { + border: 2px solid #dc2626 !important; + box-shadow: 0 0 8px rgba(220, 38, 38, 0.3) !important; +} + +/* Dark mode specific adjustments */ +@media (prefers-color-scheme: dark) { + .expected-output { + background-color: rgba(56, 161, 105, 0.1); + } + + .actual-output { + background-color: rgba(128, 90, 213, 0.1); + } + + .delete-button:hover { + background-color: rgba(229, 62, 62, 0.2); + } + + .add-command-button:hover { + background-color: rgba(66, 153, 225, 0.1); + } + + .matched-status, + .passed-status { + background-color: rgba(22, 101, 52, 0.2); + color: #86efac; + } + + .failed-status { + background-color: rgba(185, 28, 28, 0.2); + color: #fca5a5; + } + + .pending-status { + background-color: rgba(75, 85, 99, 0.2); + color: #d1d5db; /* Light gray text in dark mode */ + border: 1px solid rgba(107, 114, 128, 0.3); + } + + .command-card.failed-command { + border: 2px solid #ef4444 !important; + box-shadow: 0 0 8px rgba(239, 68, 68, 0.3) !important; + } + + .highlight-diff { + background-color: rgba(239, 68, 68, 0.25) !important; + color: #fca5a5 !important; + border-bottom: 1px dashed #ef4444 !important; + } + + .highlight-line { + background-color: rgba(239, 68, 68, 0.1) !important; + border-left: 3px solid #ef4444 !important; + color: #fca5a5 !important; + } + + .diff-added-line { + background-color: rgba(16, 185, 129, 0.1) !important; + border-left: 3px solid #10b981 !important; + color: #d1fae5 !important; + } + + .diff-removed-line { + background-color: rgba(220, 38, 38, 0.1) !important; + border-left: 3px solid #dc2626 !important; + color: #fecaca !important; + } +} + +/* Output fields with synchronized expansion */ +.expected-output, +.actual-output { + width: 100%; + padding: var(--spacing-sm); + border: 1px solid var(--color-border-light); + border-radius: var(--radius-sm); + font-family: 
var(--font-mono); + font-size: 13px; + min-height: 100px !important; /* Minimum height, not fixed height */ + height: 100px !important; + max-height: 100px !important; + overflow-y: auto; + transition: all 0.2s ease; +} + +.expected-output.expanded, +.actual-output.expanded { + min-height: 100px !important; + height: 400px !important; + max-height: 400px !important; +} + +/* Adjust output grid for expanded items */ +.output-grid.has-expanded-outputs { + align-items: stretch; +} + +.output-grid.has-expanded-outputs .output-column { + display: flex; + flex-direction: column; +} + +.output-grid.has-expanded-outputs .expected-output, +.output-grid.has-expanded-outputs .actual-output { + flex: 1; +} diff --git a/ui/src/assets/svelte.svg b/ui/src/assets/svelte.svg new file mode 100644 index 0000000..c5e0848 --- /dev/null +++ b/ui/src/assets/svelte.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/src/components/CodeMirrorInput.svelte b/ui/src/components/CodeMirrorInput.svelte new file mode 100644 index 0000000..ca8b8a2 --- /dev/null +++ b/ui/src/components/CodeMirrorInput.svelte @@ -0,0 +1,209 @@ + + +
+
+ {#if !value && placeholder} +
{placeholder}
+ {/if} +
+ + \ No newline at end of file diff --git a/ui/src/components/Editor.svelte b/ui/src/components/Editor.svelte new file mode 100644 index 0000000..bc70a91 --- /dev/null +++ b/ui/src/components/Editor.svelte @@ -0,0 +1,1435 @@ + + +
+ +
+
+ {#if $filesStore.currentFile} + {$filesStore.currentFile.path} + {#if $filesStore.currentFile.status} + + {@html getStatusIcon($filesStore.currentFile.status)} + {$filesStore.currentFile.status.charAt(0).toUpperCase() + $filesStore.currentFile.status.slice(1)} + + {/if} + {#if $filesStore.currentFile.dirty} + + {#if $filesStore.saving} + Saving... + {/if} + {:else if $filesStore.currentFile.lastSaved} + + Saved at {formatTime($filesStore.currentFile.lastSaved)} + + {/if} + {:else} + No file selected + {/if} +
+ +
+ {#if $filesStore.running} + + + + + + Running test... + + {/if} +
+ +
+
+ + {#if isCurrentFileModified} + + {/if} + +
+
+
+ + +
+ {#if !$filesStore.currentFile} +
+ + + + + + + +

Select a file from the sidebar or create a new one

+
+ {:else if testStructure} + +
+ {#if testStructure.description} +
+

Description

+

{testStructure.description}

+
+ {/if} + + +
+ {#each commands as command, i (command.stepIndex || i)} + {@const displayNumber = command.isNested ? + (commands.slice(0, i).filter(c => c.isNested && c.nestingLevel === command.nestingLevel && JSON.stringify(c.stepPath.slice(0, -1)) === JSON.stringify(command.stepPath.slice(0, -1))).length + 1) : + (commands.slice(0, i).filter(c => !c.isNested).length + 1) + } + handleUpdateCommand(e.detail.index, e.detail.newValue)} + on:updateExpectedOutput={(e) => handleUpdateExpectedOutput(e.detail.index, e.detail.newValue)} + on:toggleExpansion={(e) => handleToggleExpansion(e.detail)} + on:addCommand={(e) => addCommand(e.detail.index, e.detail.type)} + on:deleteCommand={(e) => deleteCommand(e.detail.index)} + /> + {/each} + + + {#if commands.length === 0} +
+ + + + +

No Commands Yet

+

Add your first item to start building your test

+
+ + + + + +
+
+ {/if} +
+
+ {:else} + +
+ + + + + + + +

This file is not in the new structured format. Please reload or re-parse the file.

+
+ {/if} +
+
+ + diff --git a/ui/src/components/EditorLogic.js b/ui/src/components/EditorLogic.js new file mode 100644 index 0000000..e9e9444 --- /dev/null +++ b/ui/src/components/EditorLogic.js @@ -0,0 +1,315 @@ +// EditorLogic.js - WASM, patterns, and data conversion logic extracted from Editor.svelte + +import { writable } from 'svelte/store'; +import { filesStore } from '../stores/filesStore'; +import { API_URL } from '../config.js'; + +// WASM and Pattern Management - using Svelte stores for reactivity +let wasmLoaded = false; +let patternMatcher = null; +let patterns = {}; + +// Create reactive stores for WASM state +export const wasmLoadedStore = writable(false); +export const patternMatcherStore = writable(null); + +// Fetch patterns from server +export async function fetchPatterns() { + try { + const response = await fetch(`${API_URL}/api/get-patterns`, { + credentials: 'include' + }); + + if (response.ok) { + const data = await response.json(); + patterns = data.patterns || {}; + console.log('Loaded patterns:', patterns); + return patterns; + } else { + console.warn('Could not load patterns:', await response.text()); + return {}; + } + } catch (err) { + console.error('Error fetching patterns:', err); + return {}; + } +} + +// Initialize WASM module +export async function initWasm() { + try { + console.log('Initializing WASM diff module...'); + + // Use dynamic import to avoid build-time issues + const module = await import('../../pkg/wasm.js'); + + // Initialize the WASM module properly for web target + await module.default(); + + // Default patterns if API fails + const defaultPatterns = { + "NUMBER": "[0-9]+", + "DATE": "[0-9]{4}\\-[0-9]{2}\\-[0-9]{2}", + "DATETIME": "[0-9]{4}\\-[0-9]{2}\\-[0-9]{2}\\s[0-9]{2}:[0-9]{2}:[0-9]{2}", + "IPADDR": "[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+", + "PATH": "[A-Za-z0-9\\/\\.\\-\\_]+", + "SEMVER": "[0-9]+\\.[0-9]+\\.[0-9]+", + "TIME": "[0-9]{2}:[0-9]{2}:[0-9]{2}", + "YEAR": "[0-9]{4}" + }; + + // Fetch patterns first + let patternsData; + try { + patternsData = await fetchPatterns(); + // If patterns is empty, use default patterns + if (Object.keys(patternsData).length === 0) { + console.log('No patterns returned from API, using defaults'); + patternsData = defaultPatterns; + } + } catch (err) { + console.error('Failed to fetch patterns, using defaults:', err); + patternsData = defaultPatterns; + } + + // Initialize pattern matcher with fetched or default patterns + patternMatcher = new module.PatternMatcher(JSON.stringify(patternsData)); + window.patternMatcher = patternMatcher; + wasmLoaded = true; + + // Update the reactive stores + wasmLoadedStore.set(true); + patternMatcherStore.set(patternMatcher); + + console.log('WASM diff module initialized successfully with patterns:', patternsData); + } catch (err) { + console.error('Failed to initialize WASM diff module:', err); + } +} + +// Export getters for WASM state +export function getWasmLoaded() { + return wasmLoaded; +} + +export function getPatternMatcher() { + return patternMatcher; +} + +// Convert structured data to legacy command format for UI compatibility +export function convertStructuredToCommands(testStructure) { + if (!testStructure || !testStructure.steps) return []; + + const commands = []; + let globalStepIndex = 0; // Track global step index across all levels + + // Process steps, including nested steps when blocks are expanded + function processSteps(steps, level = 0, parentBlockPath = []) { + let i = 0; + while (i < steps.length) { + const step = steps[i]; + const currentPath = level === 0 ? 
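+      // Top-level steps are addressed by a bare index; nested steps by the parent block's path plus their own index.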
[i] : [...parentBlockPath, i]; + const currentGlobalIndex = globalStepIndex; // Capture current global index + + if (step.type === 'input') { + // Create command from input step + const command = { + command: step.content || '', + expectedOutput: '', + actualOutput: step.actualOutput || '', + status: step.status || 'pending', + error: step.error || false, + type: 'command', + initializing: false, + duration: step.duration, + // Add metadata to track back to structured format + stepIndex: currentGlobalIndex, // Use global step index + stepPath: currentPath, + isInputOutputPair: false, + isNested: level > 0, + nestingLevel: level + }; + + globalStepIndex++; // Increment global index + + // Look for following output step + if (i + 1 < steps.length && steps[i + 1].type === 'output') { + const outputStep = steps[i + 1]; + command.expectedOutput = outputStep.content || ''; + if (outputStep.actualOutput) { + command.actualOutput = outputStep.actualOutput; + } + + // For input-output pairs, combine error status from both steps + const inputHasError = step.error || false; + const outputHasError = outputStep.error || false; + const combinedError = inputHasError || outputHasError; + + // Use the most severe status (failed > success) + if (combinedError || outputStep.status === 'failed' || step.status === 'failed') { + command.status = 'failed'; + command.error = true; + } else { + command.status = outputStep.status || step.status || 'success'; + command.error = false; + } + + command.isInputOutputPair = true; + globalStepIndex++; // Increment for the output step too + i++; // Skip the output step since we processed it + } + + commands.push(command); + } else if (step.type === 'block') { + const blockCommand = { + command: step.args[0] || '', + status: step.status || 'pending', + error: step.error || false, + type: 'block', + initializing: false, + isExpanded: step.isExpanded || false, + duration: step.duration, + // Add metadata to track back to structured format + stepIndex: currentGlobalIndex, // Use global step index + stepPath: currentPath, + isInputOutputPair: false, + isNested: level > 0, + nestingLevel: level, + // Store nested steps for expansion + nestedSteps: step.steps + }; + + globalStepIndex++; // Increment global index for block + + commands.push(blockCommand); + + // If block is expanded, process its nested steps + if (step.isExpanded && step.steps && step.steps.length > 0) { + processSteps(step.steps, level + 1, currentPath); + } + } else if (step.type === 'comment') { + commands.push({ + command: step.content || '', + status: step.status || 'pending', + error: step.error || false, + type: 'comment', + initializing: false, + duration: step.duration, + // Add metadata to track back to structured format + stepIndex: currentGlobalIndex, // Use global step index + stepPath: currentPath, + isInputOutputPair: false, + isNested: level > 0, + nestingLevel: level + }); + + globalStepIndex++; // Increment global index for comment + } else if (step.type === 'output') { + // Increment global index for standalone output steps + globalStepIndex++; + } + // Skip standalone output steps (they should be handled with input steps) + + i++; + } + } + + // Process all steps, including nested ones when expanded + processSteps(testStructure.steps, 0); + return commands; +} + +// Update structured format when commands are modified +export function updateStructuredCommand(testStructure, commandIndex, commands, newValue) { + if (!testStructure) { + // Fallback to legacy method + 
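+    // (no structured test data is available, so update the flat command list in the store directly)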
filesStore.updateCommand(commandIndex, newValue); + return; + } + + // Find the corresponding step in structured format + const command = commands[commandIndex]; + if (!command) { + console.error('Could not find command at index:', commandIndex); + return; + } + + // Create updated structure + const updatedStructure = { ...testStructure }; + + // Navigate to the correct location using stepPath + let targetSteps = updatedStructure.steps; + const stepPath = command.stepPath; + + // Navigate to the parent container + for (let i = 0; i < stepPath.length - 1; i++) { + targetSteps = targetSteps[stepPath[i]].steps; + } + + // Get the final index and update the step + const finalIndex = stepPath[stepPath.length - 1]; + + if (finalIndex >= 0 && finalIndex < targetSteps.length) { + const step = { ...targetSteps[finalIndex] }; + + if (step.type === 'input') { + step.content = newValue; + } else if (step.type === 'block') { + step.args = [newValue]; + } else if (step.type === 'comment') { + step.content = newValue; + } + + targetSteps[finalIndex] = step; + + // Update the store with new structure + filesStore.updateTestStructure(updatedStructure); + } +} + +// Update expected output in structured format +export function updateStructuredExpectedOutput(testStructure, commandIndex, commands, newValue) { + if (!testStructure) { + // Fallback to legacy method + filesStore.updateExpectedOutput(commandIndex, newValue); + return; + } + + // Find the corresponding step in structured format + const command = commands[commandIndex]; + if (!command || !command.isInputOutputPair) { + console.error('Could not find input/output pair for command index:', commandIndex); + return; + } + + // Update the command directly in the commands array to avoid re-rendering all components + // IMPORTANT: Preserve isOutputExpanded state during updates + commands[commandIndex] = { ...command, expectedOutput: newValue, isOutputExpanded: command.isOutputExpanded }; + + // Also update the structured format for persistence + const updatedStructure = { ...testStructure }; + + // Navigate to the correct location using stepPath + let targetSteps = updatedStructure.steps; + const stepPath = command.stepPath; + + // Navigate to the parent container + for (let i = 0; i < stepPath.length - 1; i++) { + targetSteps = targetSteps[stepPath[i]].steps; + } + + // Get the final index and update the output step (should be at finalIndex + 1) + const finalIndex = stepPath[stepPath.length - 1]; + + if (finalIndex + 1 >= 0 && finalIndex + 1 < targetSteps.length) { + const outputStep = { ...targetSteps[finalIndex + 1] }; + + if (outputStep.type === 'output') { + outputStep.content = newValue; + targetSteps[finalIndex + 1] = outputStep; + + // Update the store with new structure + filesStore.updateTestStructure(updatedStructure); + } + } +} \ No newline at end of file diff --git a/ui/src/components/FileExplorer.svelte b/ui/src/components/FileExplorer.svelte new file mode 100644 index 0000000..c2f9971 --- /dev/null +++ b/ui/src/components/FileExplorer.svelte @@ -0,0 +1,1937 @@ + + +
+
+
+ Files + {#if selectedFolder} + β†’ {selectedFolder.split('/').pop()} + {:else if $filesStore.currentFile} + β†’ {$filesStore.currentFile.path.split('/').slice(0, -1).pop() || 'root'} + {:else} + β†’ root + {/if} +
+
+ + + + + + + + {#if selectedFolder || $filesStore.currentFile} + + + + + {/if} + + {#if showRecycleBin} +
+ + + + + + +
+ {/if} +
+
+ +
+ {#if fileTree && fileTree.length > 0} + + {#if shouldShowInputAt('root')} +
+
+
+ {#if newItemType === 'folder'} + + + + {:else} + + + + {/if} +
+ +
+
+ {/if} + + + {#each fileTree as node} +
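+ <!-- Note: the tree below is unrolled by hand for three levels (node, childNode, grandChildNode) rather than rendered through a recursive component. -->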
+
handleDragStart(e, node)} + on:dragover={(e) => handleDragOver(e, node)} + on:dragleave={handleDragLeave} + on:drop={(e) => handleDrop(e, node)} + on:dragend={handleDragEnd} + on:click={(e) => handleNodeClick(e, node)} + on:keydown={(e) => e.key === 'Enter' && handleNodeClick(e, node)} + > +
+ {#if node.isDirectory} + {#if expandedFolders.has(node.path)} + + + + + {:else} + + + + {/if} + {:else} + + + + {/if} + + {#if node.isSymlink} + + + + + + + {/if} +
+ {node.name} + + + {#if !node.isDirectory} + {@const gitStatus = getFileGitStatus(node.path)} + {#if gitStatus} + + {getGitStatusDisplay(gitStatus)} + + {/if} + {:else if isDirModified(node.path)} + M + {/if} + + {#if node.isDirectory} +
+ + + +
+ {/if} +
+ + + {#if node.isDirectory && node.children && expandedFolders.has(node.path)} +
+ + {#if shouldShowInputAt(node.path)} +
+
+
+ {#if newItemType === 'folder'} + + + + {:else} + + + + {/if} +
+ +
+
+ {/if} + + {#each node.children as childNode} + +
+
handleDragStart(e, childNode)} + on:dragover={(e) => handleDragOver(e, childNode)} + on:dragleave={handleDragLeave} + on:drop={(e) => handleDrop(e, childNode)} + on:dragend={handleDragEnd} + on:click={(e) => handleNodeClick(e, childNode)} + on:keydown={(e) => e.key === 'Enter' && handleNodeClick(e, childNode)} + > +
+ {#if childNode.isDirectory} + {#if expandedFolders.has(childNode.path)} + + + + + {:else} + + + + {/if} + {:else} + + + + {/if} +
+ {childNode.name} + + + {#if !childNode.isDirectory} + {@const gitStatus = getFileGitStatus(childNode.path)} + {#if gitStatus} + + {getGitStatusDisplay(gitStatus)} + + {/if} + {:else if isDirModified(childNode.path)} + M + {/if} + + {#if childNode.isDirectory} +
+ + + +
+ {/if} +
+ + + {#if childNode.isDirectory && childNode.children && expandedFolders.has(childNode.path)} +
+ + {#if shouldShowInputAt(childNode.path)} +
+
+
+ {#if newItemType === 'folder'} + + + + {:else} + + + + {/if} +
+ +
+
+ {/if} + + {#each childNode.children as grandChildNode} +
+
handleDragStart(e, grandChildNode)} + on:dragover={(e) => handleDragOver(e, grandChildNode)} + on:dragleave={handleDragLeave} + on:drop={(e) => handleDrop(e, grandChildNode)} + on:dragend={handleDragEnd} + on:click={(e) => handleNodeClick(e, grandChildNode)} + on:keydown={(e) => e.key === 'Enter' && handleNodeClick(e, grandChildNode)} + > +
+ {#if grandChildNode.isDirectory} + {#if expandedFolders.has(grandChildNode.path)} + + + + + {:else} + + + + {/if} + {:else} + + + + {/if} +
+ {grandChildNode.name} + + + {#if !grandChildNode.isDirectory} + {@const gitStatus = getFileGitStatus(grandChildNode.path)} + {#if gitStatus} + + {getGitStatusDisplay(gitStatus)} + + {/if} + {:else if isDirModified(grandChildNode.path)} + M + {/if} + + {#if grandChildNode.isDirectory} +
+ + + +
+ {/if} +
+ + +
+ {/each} +
+ {/if} +
+ {/each} +
+ {/if} +
+ {/each} + {:else} +
+

No files found

+

Default directory: tests

+
+ {/if} +
+ + +
+ + diff --git a/ui/src/components/Header.svelte b/ui/src/components/Header.svelte new file mode 100644 index 0000000..48e4b6f --- /dev/null +++ b/ui/src/components/Header.svelte @@ -0,0 +1,415 @@ + + +
+
+ + + + CLT Editor +
+ +
+ + + {#if $filesStore.running} + + + + + + + {/if} +
+ + +
+ + + + + \ No newline at end of file diff --git a/ui/src/components/InteractiveSession.svelte b/ui/src/components/InteractiveSession.svelte new file mode 100644 index 0000000..8cea9e2 --- /dev/null +++ b/ui/src/components/InteractiveSession.svelte @@ -0,0 +1,1435 @@ + + +{#if isOpen} + +{/if} + + +{#if showNewSessionModal} + +{/if} + + diff --git a/ui/src/components/OutputCodeMirror.svelte b/ui/src/components/OutputCodeMirror.svelte new file mode 100644 index 0000000..60451c0 --- /dev/null +++ b/ui/src/components/OutputCodeMirror.svelte @@ -0,0 +1,251 @@ + + +
+
+ {#if !value && placeholder} +
{placeholder}
+ {/if} +
+ + \ No newline at end of file diff --git a/ui/src/components/PullRequestModal.svelte b/ui/src/components/PullRequestModal.svelte new file mode 100644 index 0000000..ddf6acd --- /dev/null +++ b/ui/src/components/PullRequestModal.svelte @@ -0,0 +1,1168 @@ + + +{#if github.showModal} + +{/if} + + \ No newline at end of file diff --git a/ui/src/components/SimpleCodeMirror.svelte b/ui/src/components/SimpleCodeMirror.svelte new file mode 100644 index 0000000..640b2c8 --- /dev/null +++ b/ui/src/components/SimpleCodeMirror.svelte @@ -0,0 +1,192 @@ + + +
+
+ {#if !value && placeholder} +
{placeholder}
+ {/if} +
+ + diff --git a/ui/src/components/Step.svelte b/ui/src/components/Step.svelte new file mode 100644 index 0000000..b79bb4b --- /dev/null +++ b/ui/src/components/Step.svelte @@ -0,0 +1,1341 @@ + + +
+ +
+
+ {displayNumber} + {#if command.type === 'block'} + Block Reference + + + {:else if command.type === 'comment'} + Comment + {:else if command.isBlockCommand} + Block Command + {:else} + Command + {/if} + {#if command.initializing} + + {@html getStatusIcon('pending')} + Pending + + {:else if command.status === 'matched'} + + {@html getStatusIcon('matched')} + Matched + + {:else if command.status === 'success'} + + {@html getStatusIcon('success')} + Success + + {:else if command.status === 'failed'} + + {@html getStatusIcon('failed')} + Failed + + {:else if command.type === 'block'} + + {@html getStatusIcon(command.status)} + {command.status.charAt(0).toUpperCase() + command.status.slice(1)} + + {:else if command.status} + + {@html getStatusIcon(command.status)} + {command.status.charAt(0).toUpperCase() + command.status.slice(1)} + + {/if} + {#if command.blockSource && command.isBlockCommand} + From: {command.blockSource.split('/').pop()} + {/if} + {#if command.duration} + {formatDuration(command.duration)} + {/if} +
+
+ + + + + + + + + +
+ + + +
+
+ + +
+ {#if command.type === 'block'} + + + {:else if command.type === 'comment'} + + + {:else} + + + + + {#if !command.initializing && command.status !== 'pending'} +
+
+
+ + + {#if command.actualOutput && getActualOutputContent(command.actualOutput)} + + {/if} +
+
+
{ isUserEditing = true; }} + on:paste={() => { isUserEditing = true; }} + use:initOutputScroll={true} + > + +
+
+
+
+
+ + +
+
+
+ {#if command.actualOutput} + {#await highlightDifferences(getActualOutputContent(command.actualOutput), command.expectedOutput || '', wasmLoaded, patternMatcher)} +
{getActualOutputContent(command.actualOutput)}
+ {:then diffHtml} +
{@html diffHtml}
+ {/await} + {:else} + Empty output. + {/if} +
+
+
+
+ {/if} + {/if} +
+
+ + \ No newline at end of file diff --git a/ui/src/components/StepLogic.js b/ui/src/components/StepLogic.js new file mode 100644 index 0000000..810e669 --- /dev/null +++ b/ui/src/components/StepLogic.js @@ -0,0 +1,288 @@ +// StepLogic.js - Scroll synchronization, WASM diff highlighting, and utility functions + +// Scroll synchronization logic +export class ScrollSyncManager { + constructor() { + this.isScrollSyncing = false; + this.isVisible = true; + this.scrollTimeout = null; + } + + setVisible(visible) { + this.isVisible = visible; + } + + // Improved synchronized scroll function + syncScroll(fromExpected, expectedOutputEl, actualOutputEl) { + if (this.isScrollSyncing || !this.isVisible) return; + + this.isScrollSyncing = true; + + // Use requestAnimationFrame for smooth syncing + requestAnimationFrame(() => { + if (fromExpected && expectedOutputEl && actualOutputEl) { + const maxScroll = expectedOutputEl.scrollHeight - expectedOutputEl.clientHeight; + if (maxScroll > 0) { + const scrollPercentage = expectedOutputEl.scrollTop / maxScroll; + const targetMaxScroll = actualOutputEl.scrollHeight - actualOutputEl.clientHeight; + actualOutputEl.scrollTop = scrollPercentage * Math.max(0, targetMaxScroll); + } + } else if (!fromExpected && actualOutputEl && expectedOutputEl) { + const maxScroll = actualOutputEl.scrollHeight - actualOutputEl.clientHeight; + if (maxScroll > 0) { + const scrollPercentage = actualOutputEl.scrollTop / maxScroll; + const targetMaxScroll = expectedOutputEl.scrollHeight - expectedOutputEl.clientHeight; + expectedOutputEl.scrollTop = scrollPercentage * Math.max(0, targetMaxScroll); + } + } + + // Reset sync flag immediately after sync + this.isScrollSyncing = false; + }); + } + + // Output scroll action with comprehensive event handling + initOutputScroll(node, isExpected, getExpectedOutputEl, getActualOutputEl) { + // Create handlers that get fresh element references each time + const handleScroll = () => { + if (this.isScrollSyncing) return; + + // Cancel previous timeout + if (this.scrollTimeout) { + cancelAnimationFrame(this.scrollTimeout); + } + + // Use requestAnimationFrame for smooth syncing + this.scrollTimeout = requestAnimationFrame(() => { + const expectedOutputEl = getExpectedOutputEl(); + const actualOutputEl = getActualOutputEl(); + this.syncScroll(isExpected, expectedOutputEl, actualOutputEl); + this.scrollTimeout = null; + }); + }; + + // Also handle wheel events for immediate sync during fast scrolling + const handleWheel = (e) => { + if (!this.isScrollSyncing) { + // Small delay to let the scroll happen first + setTimeout(() => { + if (!this.isScrollSyncing) { + const expectedOutputEl = getExpectedOutputEl(); + const actualOutputEl = getActualOutputEl(); + this.syncScroll(isExpected, expectedOutputEl, actualOutputEl); + } + }, 0); + } + }; + + // Handle keyboard navigation that might cause scrolling + const handleKeydown = (e) => { + if (['ArrowUp', 'ArrowDown', 'PageUp', 'PageDown', 'Home', 'End'].includes(e.key)) { + setTimeout(() => { + if (!this.isScrollSyncing) { + const expectedOutputEl = getExpectedOutputEl(); + const actualOutputEl = getActualOutputEl(); + this.syncScroll(isExpected, expectedOutputEl, actualOutputEl); + } + }, 0); + } + }; + + node.addEventListener('scroll', handleScroll, { passive: true }); + node.addEventListener('wheel', handleWheel, { passive: true }); + node.addEventListener('keydown', handleKeydown, { passive: true }); + + return { + destroy: () => { + node.removeEventListener('scroll', handleScroll); + 
node.removeEventListener('wheel', handleWheel); + node.removeEventListener('keydown', handleKeydown); + if (this.scrollTimeout) { + cancelAnimationFrame(this.scrollTimeout); + } + } + }; + } + + cleanup() { + if (this.scrollTimeout) { + cancelAnimationFrame(this.scrollTimeout); + } + } +} + +// Utility functions +export function getStatusIcon(status) { + // Create different status indicators for different item types + if (status === 'matched' || status === 'success') { + return ` + + `; + } + if (status === 'failed') { + return ` + + `; + } + if (status === 'block' || status === 'pending') { + // Use a different icon for blocks - file icon is more appropriate for blocks, clock for pending + const isBlock = status === 'block'; + const isPending = status === 'pending'; + + if (isBlock) { + return ` + + `; + } else { + return ` + + `; + } + } + return ''; +} + +// Escape HTML special characters +export function escapeHtml(text) { + return text + .replace(/&/g, "&amp;") + .replace(/</g, "&lt;") + .replace(/>/g, "&gt;") + .replace(/"/g, "&quot;") + .replace(/'/g, "&#039;"); +} + +// Highlight differences using the WASM module +export async function highlightDifferences(actual, expected, wasmLoaded, patternMatcher) { + try { + if (!wasmLoaded || !patternMatcher) { + console.log('WASM module not loaded yet, showing plain text'); + return escapeHtml(actual); // Return plain text if WASM isn't ready + } + + // Return simple escaped text if inputs are identical + if (actual === expected) { + // Style as matched - no need for diff + if (actual && actual.trim() !== '') { + // Split by newlines to render properly + const lines = actual.split('\n'); + let resultHtml = ''; + + lines.forEach((line, index) => { + resultHtml += `${escapeHtml(line)}`; + if (index < lines.length - 1) { + resultHtml += '
'; + } + }); + + return resultHtml; + } + return escapeHtml(actual); + } + + // Get the diff result from the WASM module (returns a JSON string) + let diffResult; + try { + diffResult = JSON.parse(patternMatcher.diff_text(expected, actual)); + } catch (diffErr) { + console.error('Error during diff processing:', diffErr); + return escapeHtml(actual); + } + + if (!diffResult.has_diff) { + // No differences found; return with success styling + if (actual && actual.trim() !== '' && expected && expected.trim() !== '') { + // Split by newlines to render properly + const lines = actual.split('\n'); + let resultHtml = ''; + + lines.forEach((line, index) => { + resultHtml += `${escapeHtml(line)}`; + if (index < lines.length - 1) { + resultHtml += '
'; + } + }); + + return resultHtml; + } + return escapeHtml(actual); // Simply escape if no meaningful content + } + + let resultHtml = ''; + + // Iterate over each diff line. (Assumes diffResult.diff_lines is in sequential order.) + for (let i = 0; i < diffResult.diff_lines.length; i++) { + const diffLine = diffResult.diff_lines[i]; + if (diffLine.line_type === "same") { + resultHtml += `${escapeHtml(diffLine.content)}`; + // Add a newline between content lines unless it's the last line + if (i < diffResult.diff_lines.length - 1) { + resultHtml += '
'; + } + } else if (diffLine.line_type === "added") { + // Render added lines with a plus sign. + resultHtml += `+ ${escapeHtml(diffLine.content)}`; + } else if (diffLine.line_type === "removed") { + // Render removed lines with a minus sign. + resultHtml += `βˆ’ ${escapeHtml(diffLine.content)}`; + } else if (diffLine.line_type === "changed") { + // For changed lines, show a "~" marker. + if (diffLine.highlight_ranges && diffLine.highlight_ranges.length > 0) { + let lineHtml = '~ '; + let lastPos = 0; + for (const range of diffLine.highlight_ranges) { + // Append unchanged text + lineHtml += escapeHtml(diffLine.content.substring(lastPos, range.start)); + // Append highlighted text + lineHtml += `${escapeHtml(diffLine.content.substring(range.start, range.end))}`; + lastPos = range.end; + } + // Append any remainder of the text. + lineHtml += escapeHtml(diffLine.content.substring(lastPos)); + lineHtml += ''; + resultHtml += lineHtml; + } else { + resultHtml += `~ ${escapeHtml(diffLine.content)}`; + } + } + } + return resultHtml; + } catch (err) { + console.error('Error highlighting differences:', err); + return escapeHtml(actual); // On error, return plain escaped text. + } +} + +export function formatDuration(ms) { + if (ms === null) return ''; + return `${ms}ms`; +} + +export function parseActualOutputContent(actualOutput) { + if (!actualOutput) return ''; + + // Handle the case when there's a duration section in the output. + const durationMatch = actualOutput.match(/–––\s*duration/); + if (durationMatch) { + // Return everything before the duration marker. + return actualOutput.substring(0, durationMatch.index).trim(); + } + + // If no duration marker found, return the whole output. + return actualOutput.trim(); +} + +// Get actual output content without duration +export function getActualOutputContent(actualOutput) { + if (!actualOutput) return ''; + + // Handle the case when there's a duration section in the output. + const durationMatch = actualOutput.match(/–––\s*duration/); + if (durationMatch) { + // Return everything before the duration marker. + return actualOutput.substring(0, durationMatch.index).trim(); + } + + // If no duration marker found, return the whole output. 
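+ // (Illustrative, assumed .rep layout: an output such as
+ // "OK\n––– duration –––\n15ms" trims to "OK"; output without the marker
+ // is returned whole, trimmed.)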
+ return actualOutput.trim(); +} \ No newline at end of file diff --git a/ui/src/config.js b/ui/src/config.js new file mode 100644 index 0000000..e2105ee --- /dev/null +++ b/ui/src/config.js @@ -0,0 +1,9 @@ +// Backend API URL configuration +const API_URL = import.meta.env.VITE_API_URL || 'http://localhost:3000'; + +// Auth related constants +const AUTH_GITHUB_URL = `${API_URL}/auth/github`; +const AUTH_LOGOUT_URL = `${API_URL}/logout`; +const AUTH_CURRENT_USER_URL = `${API_URL}/api/current-user`; + +export { API_URL, AUTH_GITHUB_URL, AUTH_LOGOUT_URL, AUTH_CURRENT_USER_URL }; diff --git a/ui/src/lib/Counter.svelte b/ui/src/lib/Counter.svelte new file mode 100644 index 0000000..37d75ce --- /dev/null +++ b/ui/src/lib/Counter.svelte @@ -0,0 +1,10 @@ + + + diff --git a/ui/src/main.ts b/ui/src/main.ts new file mode 100644 index 0000000..2f3cc5c --- /dev/null +++ b/ui/src/main.ts @@ -0,0 +1,9 @@ +import { mount } from 'svelte' +import './app.css' +import App from './App.svelte' + +const app = mount(App, { + target: document.getElementById('app')!, +}) + +export default app \ No newline at end of file diff --git a/ui/src/stores/authStore.ts b/ui/src/stores/authStore.ts new file mode 100644 index 0000000..5e1452d --- /dev/null +++ b/ui/src/stores/authStore.ts @@ -0,0 +1,146 @@ +import { writable } from 'svelte/store'; +import { AUTH_CURRENT_USER_URL, AUTH_LOGOUT_URL } from '../config.js'; + +// Define types +type User = { + id?: string; + username: string; + displayName?: string; + email?: string; + avatarUrl?: string; + token?: string; +}; + +type AuthState = { + isAuthenticated: boolean; + isLoading: boolean; + user: User | null; + skipAuth: boolean; + error: string | null; +}; + +// Create the initial state +const initialState: AuthState = { + isAuthenticated: false, + isLoading: true, + user: null, + skipAuth: false, + error: null +}; + +// Create the store +export const authStore = writable(initialState); + +// Function to fetch the current authentication state +export async function fetchAuthState() { + try { + authStore.update(state => ({ ...state, isLoading: true, error: null })); + + const response = await fetch(AUTH_CURRENT_USER_URL, { + credentials: 'include', // Important for cookies/session + headers: { + 'Accept': 'application/json' + } + }); + + if (!response.ok) { + // Clear any stored state if the server says we're not authenticated + sessionStorage.removeItem('auth_state'); + localStorage.removeItem('auth_state'); + + // For 401/403, this is expected when not authenticated - don't show error + // Only show error for repeated authentication failures or other issues + authStore.update(state => ({ + ...state, + isAuthenticated: false, + user: null, + isLoading: false, + error: null // Don't show error for normal 401/403 responses + })); + return; + } + + const data = await response.json(); + + // Update auth store with the fresh data + authStore.update(state => ({ + ...state, + isAuthenticated: data.isAuthenticated, + user: data.user || null, + skipAuth: data.skipAuth || false, + isLoading: false + })); + + // If authenticated, store the auth state in sessionStorage (not localStorage) + if (data.isAuthenticated && data.user) { + // Store only until the window is closed - using sessionStorage + sessionStorage.setItem('auth_state', JSON.stringify({ + isAuthenticated: data.isAuthenticated, + user: data.user, + skipAuth: data.skipAuth || false + })); + } else { + // If not authenticated, clear any previously stored state + sessionStorage.removeItem('auth_state'); + } + + return data; + 
} catch (error) { + console.error('Auth error:', error); + + // Never show authentication errors during normal auth state checking + // Errors should only be shown when user actively tries to authenticate + authStore.update(state => ({ + ...state, + isAuthenticated: false, + user: null, + isLoading: false, + error: null // Never show error during normal auth checking + })); + + // Clear sessionStorage on any error + sessionStorage.removeItem('auth_state'); + } +} + +// Function to logout +export async function logout() { + try { + // First update the local store to prevent UI flicker + authStore.set({ + ...initialState, + isLoading: true + }); + + // Send the logout request to the server + const response = await fetch(AUTH_LOGOUT_URL, { + method: 'GET', + credentials: 'include' + }); + + if (!response.ok) { + console.error('Logout response not OK:', response.status); + } + + // Clear the auth store completely + authStore.set({ + ...initialState, + isLoading: false + }); + + // Clear any auth-related localStorage/sessionStorage + localStorage.removeItem('auth_state'); + sessionStorage.removeItem('auth_state'); + + // Force a hard reload of the page to clear any cached state + window.location.href = window.location.origin + window.location.pathname; + } catch (error) { + console.error('Logout failed:', error); + // Even if the server request fails, reset the local state + authStore.set({ + ...initialState, + isLoading: false, + error: 'Logout failed. Please try again.' + }); + } +} diff --git a/ui/src/stores/branchStore.ts b/ui/src/stores/branchStore.ts new file mode 100644 index 0000000..f1486d4 --- /dev/null +++ b/ui/src/stores/branchStore.ts @@ -0,0 +1,146 @@ +import { writable } from 'svelte/store'; +import { API_URL } from '../config.js'; +import { filesStore } from './filesStore'; + +interface BranchState { + currentBranch: string; + defaultBranch: string; + isResetting: boolean; + isLoading: boolean; + error: string | null; + success: boolean; + message: string | null; +} + +const initialState: BranchState = { + currentBranch: 'unknown', // Will be updated from the server + defaultBranch: 'master', // Default branch name + isResetting: false, + isLoading: false, + error: null, + success: false, + message: null +}; + +function createBranchStore() { + const { subscribe, set, update } = writable(initialState); + + return { + subscribe, + fetchCurrentBranch: async () => { + update(state => ({ ...state, isLoading: true, error: null })); + + try { + const response = await fetch(`${API_URL}/api/current-branch`, { + credentials: 'include' + }); + + const data = await response.json(); + + if (!response.ok) { + throw new Error(data.error || 'Failed to get current branch'); + } + + update(state => ({ + ...state, + isLoading: false, + currentBranch: data.currentBranch, + defaultBranch: data.defaultBranch || 'master' + })); + + return data; + } catch (error) { + update(state => ({ + ...state, + isLoading: false, + error: error.message || 'An error occurred while getting current branch' + })); + console.error('Error fetching current branch:', error); + } + }, + resetToBranch: async (branch: string) => { + update(state => ({ ...state, isResetting: true, error: null, success: false, message: null })); + + try { + const response = await fetch(`${API_URL}/api/reset-to-branch`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'include', + body: JSON.stringify({ branch }) + }); + + const data = await response.json(); + + if (!response.ok) { + throw new 
Error(data.error || 'Failed to reset to branch'); + } + + update(state => ({ + ...state, + isResetting: false, + success: true, + currentBranch: branch, + message: data.message + })); + + // Refresh the file tree after successful reset + await filesStore.refreshFileTree(); + + return data; + } catch (error) { + update(state => ({ + ...state, + isResetting: false, + error: error.message || 'An error occurred while resetting to branch' + })); + throw error; + } + }, + checkoutAndPull: async (branch: string) => { + update(state => ({ ...state, isResetting: true, error: null, success: false, message: null })); + + try { + const response = await fetch(`${API_URL}/api/checkout-and-pull`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'include', + body: JSON.stringify({ branch }) + }); + + const data = await response.json(); + + if (!response.ok) { + throw new Error(data.error || 'Failed to checkout and pull branch'); + } + + update(state => ({ + ...state, + isResetting: false, + success: true, + currentBranch: branch, + message: data.message + })); + + // Refresh the file tree after successful checkout + await filesStore.refreshFileTree(); + + return data; + } catch (error) { + update(state => ({ + ...state, + isResetting: false, + error: error.message || 'An error occurred while checking out branch' + })); + throw error; + } + }, + setCurrentBranch: (branch: string) => update(state => ({ ...state, currentBranch: branch })), + reset: () => set(initialState) + }; +} + +export const branchStore = createBranchStore(); \ No newline at end of file diff --git a/ui/src/stores/filesStore.ts b/ui/src/stores/filesStore.ts new file mode 100644 index 0000000..ba81572 --- /dev/null +++ b/ui/src/stores/filesStore.ts @@ -0,0 +1,1243 @@ +import { writable } from 'svelte/store'; +import { API_URL } from '../config.js'; + +interface FileNode { + name: string; + path: string; + isDirectory: boolean; + isSymlink?: boolean; + targetPath?: string; + children?: FileNode[]; +} + +// New WASM structured format interfaces +interface TestStep { + type: string; // "input" | "output" | "block" | "comment" + args: string[]; // For blocks: [blockPath], for outputs: [checker] + content: string | null; // Command/output content + steps: TestStep[] | null; // For blocks: nested steps + + // Runtime properties (added by UI) + status?: 'success' | 'failed'; + actualOutput?: string; + duration?: number; + isExpanded?: boolean; // For block expansion +} + +interface TestStructure { + description: string | null; + steps: TestStep[]; +} + +interface RecordingFile { + path: string; + // Structured format (WASM-based) + testStructure?: TestStructure; + dirty: boolean; + lastSaved?: Date; + status?: 'success' | 'failed'; +} + +interface FilesState { + fileTree: FileNode[]; + currentFile: RecordingFile | null; + configDirectory: string; + dockerImage: string; + saving: boolean; + running: boolean; +} + +const defaultState: FilesState = { + fileTree: [], + currentFile: null, + configDirectory: '', + dockerImage: 'ghcr.io/manticoresoftware/manticoresearch:test-kit-latest', + saving: false, + running: false +}; + +// Helper function to parse commands from the content of a .rec file +const parseRecFileContent = (content: string): RecordingCommand[] => { + const commands: RecordingCommand[] = []; + const lines = content.split('\n'); + let currentSection = ''; + let currentCommand = ''; + let currentOutput = ''; + let commandType: 'command' | 'block' | 'comment' = 'command'; + + let i = 0; + while (i < 
lines.length) { + const line = lines[i].trim(); + + // Detect section markers + if (line.startsWith('––– ') || line.startsWith('--- ')) { + // Process completed section before starting a new one + if (currentSection === 'input' && currentCommand) { + // We have a command but no output section yet + currentSection = ''; // Reset section + } else if (currentSection === 'output') { + // We've completed an input/output pair + commands.push({ + command: currentCommand.trim(), + expectedOutput: currentOutput, // Don't trim output to preserve whitespace + type: 'command', + status: 'pending', + }); + + // Reset for next command + currentCommand = ''; + currentOutput = ''; + currentSection = ''; + } else if (currentSection === 'comment' && currentCommand) { + // We've completed a comment section + commands.push({ + command: currentCommand.trim(), + type: 'comment', + status: 'pending', + }); + + // Reset for next command + currentCommand = ''; + currentSection = ''; + } else if (currentSection === 'block' && currentCommand) { + // We've completed a block reference + commands.push({ + command: currentCommand.trim(), + type: 'block', + status: 'pending', + }); + + // Reset for next command + currentCommand = ''; + currentSection = ''; + } + + // Parse the marker to determine what section follows + if (line.includes('input')) { + currentSection = 'input'; + commandType = 'command'; + } else if (line.includes('output')) { + currentSection = 'output'; + } else if (line.includes('comment')) { + currentSection = 'comment'; + commandType = 'comment'; + } else if (line.includes('block:')) { + currentSection = 'block'; + commandType = 'block'; + // Extract path from block marker: "--- block: path/to/file ---" + const pathMatch = line.match(/block:\s*([^\s]+)/); + if (pathMatch && pathMatch[1]) { + currentCommand = pathMatch[1].trim(); + } + } + + i++; + continue; + } + + // Process content based on current section + if (currentSection === 'input') { + if (currentCommand) currentCommand += '\n'; + currentCommand += lines[i]; + } else if (currentSection === 'output') { + if (currentOutput) currentOutput += '\n'; + currentOutput += lines[i]; + } else if (currentSection === 'comment') { + if (currentCommand) currentCommand += '\n'; + currentCommand += lines[i]; + } else if (currentSection === 'block' && !currentCommand) { + // Only set the command if we haven't extracted it from the marker + currentCommand = lines[i]; + } + + i++; + } + + // Handle the last section if it wasn't closed properly + if (currentSection === 'input' && currentCommand) { + commands.push({ + command: currentCommand.trim(), + type: 'command', + status: 'pending', + }); + } else if (currentSection === 'output' && currentCommand) { + commands.push({ + command: currentCommand.trim(), + expectedOutput: currentOutput, // Don't trim output to preserve whitespace + type: 'command', + status: 'pending', + }); + } else if (currentSection === 'comment' && currentCommand) { + commands.push({ + command: currentCommand.trim(), + type: 'comment', + status: 'pending', + }); + } else if (currentSection === 'block' && currentCommand) { + commands.push({ + command: currentCommand.trim(), + type: 'block', + status: 'pending', + }); + } + + return commands; +}; + +// Helper function to merge file trees while preserving user interaction state +const mergeFileTreePreservingState = (oldTree: FileNode[], newTree: FileNode[]): FileNode[] => { + if (!oldTree || oldTree.length === 0) { + // No existing state to preserve, return new tree as-is + return newTree; + } 
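+ // (Worked example, hypothetical paths: merging an old tree for "tests" with a
+ // refreshed tree that adds "tests/new.rec" walks the refreshed tree, recursing
+ // into directories present in both, so additions and removals from the server
+ // are reflected while known directory nodes are rebuilt consistently.)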
+ + // Create a map of old tree nodes for quick lookup + const oldNodeMap = new Map(); + const buildNodeMap = (nodes: FileNode[]) => { + nodes.forEach(node => { + oldNodeMap.set(node.path, node); + if (node.children) { + buildNodeMap(node.children); + } + }); + }; + buildNodeMap(oldTree); + + // Recursively merge new tree with old state + const mergeNodes = (newNodes: FileNode[]): FileNode[] => { + return newNodes.map(newNode => { + const oldNode = oldNodeMap.get(newNode.path); + + if (oldNode && newNode.isDirectory && oldNode.isDirectory) { + // Directory exists in both - preserve structure and merge children + return { + ...newNode, + children: newNode.children ? mergeNodes(newNode.children) : undefined + }; + } else { + // New node or different type - use new node as-is + return { + ...newNode, + children: newNode.children ? mergeNodes(newNode.children) : undefined + }; + } + }); + }; + + return mergeNodes(newTree); +}; + +// Helper function to determine if a file is loaded correctly +const checkFileLoaded = async (path: string) => { + try { + // We'll make the API call to get the file content + const response = await fetch(`${API_URL}/api/get-file?path=${encodeURIComponent(path)}`, { + credentials: 'include' + }); + + if (response.ok) { + const data = await response.json(); + + // Check if we have structured data from backend (WASM-parsed) + if (data.structuredData && data.wasmparsed) { + console.log('✅ Using WASM-parsed structured data from backend'); + return { + success: true, + testStructure: data.structuredData, + method: 'wasm' + }; + } else if (data.structuredData) { + console.log('✅ Using structured data from backend'); + return { + success: true, + testStructure: data.structuredData, + method: 'structured' + }; + } else { + // Fallback to manual parsing for backward compatibility + console.log('⚠️ Using manual parsing fallback'); + const fileContent = data.content; + const commands = parseRecFileContent(fileContent); + return { success: true, commands, method: 'manual' }; + } } else { + return { success: false, error: `Failed to load file: ${response.statusText}` }; + } + } catch (error) { + console.error('Error loading file:', error); + return { success: false, error: `Failed to load file: ${error}` }; + } +}; + +function createFilesStore() { + const { subscribe, set, update } = writable(defaultState); + let runModule: any; // Reference to the module itself for self-referencing + + // Helper function to get patterns using WASM + const getWasmPatterns = async (): Promise<Record<string, string>> => { + try { + const patternsArray = await getPatterns(); + const patterns: Record<string, string> = {}; + + patternsArray.forEach(pattern => { + patterns[pattern.name] = pattern.pattern; + }); + + return patterns; + } catch (error) { + console.warn('Failed to load patterns via WASM:', error); + return {}; + } + }; + + const saveFileToBackend = async (file: RecordingFile) => { + // Convert UI commands to structured format for WASM backend + const convertUIToStructured = (commands: RecordingCommand[]) => { + const testSteps = commands.map(cmd => { + if (cmd.type === 'block') { + return { + Block: { + path: cmd.command, + source_file: cmd.blockSource || null + } + }; + } else if (cmd.type === 'comment') { + return { + Comment: cmd.command + }; + } else { + return { + Command: { + input: cmd.command, + expected_output: cmd.expectedOutput || '', + actual_output: cmd.actualOutput || null + } + }; + } + }); + + return { + steps: testSteps, + metadata: { + created_at: new Date().toISOString(), + version: "1.0" + } + }; + }; + 
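+ // (Example of the step shapes produced above, hypothetical values: a command maps to
+ // { Command: { input: "echo hi", expected_output: "hi", actual_output: null } },
+ // a block reference to { Block: { path: "some/block", source_file: null } },
+ // and a comment to { Comment: "setup notes" }.)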
// Format the file content according to the .rec format (manual fallback) + let content = ''; + + // Check if file has commands (legacy format) or testStructure (new format) + if (!file.commands && !file.testStructure) { + throw new Error('File has neither commands nor testStructure'); + } + + // For structured format files, we don't need to generate manual content + if (file.testStructure) { + try { + const response = await fetch(`${API_URL}/api/save-file`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'include', + body: JSON.stringify({ + path: file.path, + structuredData: file.testStructure // Send structured data, let backend handle WASM + }) + }); + + if (!response.ok) { + throw new Error(`Failed to save file: ${response.statusText}`); + } + + const result = await response.json(); + console.log('✅ File saved with structured format'); + return result; + } catch (error) { + console.error('Error saving structured file:', error); + throw error; + } + } + + // Handle legacy format with commands + file.commands!.forEach((cmd, index) => { + // Add newline before section if not the first command + if (index > 0) { + content += '\n'; + } + + // Handle different command types + if (cmd.type === 'block') { + // Format as block reference - no extra newline after + content += `––– block: ${cmd.command} –––`; + } else if (cmd.type === 'comment') { + // Format as comment - no extra newline after + content += `––– comment –––\n${cmd.command}`; + } else { + // Default - regular command (input/output format) + content += '––– input –––\n'; + content += cmd.command; + + // Don't add extra newline if command already ends with one + if (!cmd.command.endsWith('\n')) { + content += '\n'; + } + + // Add output section marker - no extra newline for empty outputs + content += '––– output –––'; + + // Use the expected output if provided, otherwise use actual output if available + // Make sure to maintain all whitespace and newlines exactly as in the expected output + const outputToSave = cmd.expectedOutput || cmd.actualOutput || ''; + + // Only add a newline before the output if there's actual content + if (outputToSave && outputToSave.trim() !== '') { + content += '\n' + outputToSave; + } + } + }); + + try { + // Prepare structured data for WASM backend + const structuredData = convertUIToStructured(file.commands!); + + const response = await fetch(`${API_URL}/api/save-file`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'include', // Add credentials for cookie passing + body: JSON.stringify({ + path: file.path, + content, // Manual format as fallback + structuredData // Structured format for WASM + }) + }); + + if (!response.ok) { + throw new Error(`Failed to save file: ${response.statusText}`); + } + + const result = await response.json(); + console.log('✅ File saved via WASM backend'); + return result; + } catch (error) { + console.error('Error saving file:', error); + throw error; + } + }; + + // Fallback manual save function + const saveFileToBackendManual = async (file: RecordingFile) => { + // Format the file content according to the .rec format + let content = ''; + + file.commands.forEach((cmd, index) => { + // Add newline before section if not the first command + if (index > 0) { + content += '\n'; + } + + // Handle different command types + if (cmd.type === 'block') { + // Format as block reference - no extra newline after + content += `––– block: ${cmd.command} –––`; + } else if (cmd.type === 
'comment') { + // Format as comment - no extra newline after + content += `––– comment –––\n${cmd.command}`; + } else { + // Default - regular command (input/output format) + content += '––– input –––\n'; + content += cmd.command; + + // Don't add extra newline if command already ends with one + if (!cmd.command.endsWith('\n')) { + content += '\n'; + } + + // Add output section marker - no extra newline for empty outputs + content += '––– output –––'; + + // Use the expected output if provided, otherwise use actual output if available + // Make sure to maintain all whitespace and newlines exactly as in the expected output + const outputToSave = cmd.expectedOutput || cmd.actualOutput || ''; + + // Only add a newline before the output if there's actual content + if (outputToSave && outputToSave.trim() !== '') { + content += '\n' + outputToSave; + } + } + }); + + try { + const response = await fetch(`${API_URL}/api/save-file`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'include', // Add credentials for cookie passing + body: JSON.stringify({ + path: file.path, + content + }) + }); + + if (!response.ok) { + throw new Error(`Failed to save file: ${response.statusText}`); + } + + return await response.json(); + } catch (error) { + console.error('Error saving file:', error); + throw error; + } + }; + + const runTest = async (filePath: string, dockerImage: string) => { + try { + const response = await fetch(`${API_URL}/api/run-test`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'include', // Add credentials for cookie passing + body: JSON.stringify({ + filePath, + dockerImage + }) + }); + + if (!response.ok) { + throw new Error(`Failed to run test: ${response.statusText}`); + } + + const result = await response.json(); + + console.log('Run test completed successfully'); + + // Process enhanced testStructure (preferred method) + if (result.testStructure) { + console.log('πŸ” Processing enriched testStructure with actual outputs'); + + update(state => { + if (!state.currentFile?.testStructure) return state; + + // Recursive function to process nested steps + function processStepsRecursively(steps) { + return steps.map(step => { + const processedStep = { + ...step, + actualOutput: step.actualOutput || '', // From .rep file + status: step.status || 'success', // Backend already set status + error: step.error || false // Backend already set error flag + }; + + // Process nested steps if they exist + if (step.steps && step.steps.length > 0) { + processedStep.steps = processStepsRecursively(step.steps); + } + + return processedStep; + }); + } + + // Update the testStructure directly with enriched data (backend already enhanced) + const enrichedSteps = processStepsRecursively(result.testStructure.steps); + + console.log(`πŸ“‹ Updated ${enrichedSteps.length} steps with enriched data`); + + return { + ...state, + running: false, + currentFile: { + ...state.currentFile, + testStructure: { + ...state.currentFile.testStructure, + steps: enrichedSteps + }, + status: result.success ? 'success' : 'failed' + } + }; + }); + } + else { + // No validation results - just update running status + update(state => ({ + ...state, + running: false, + currentFile: state.currentFile ? { + ...state.currentFile, + status: result.success ? 
'success' : 'failed' + } : null + })); + } + + return result; + } catch (error) { + console.error('Error running test:', error); + throw error; + } + }; + + // Debounce function for autosave + let saveTimeout: number | null = null; + const debouncedSave = (file: RecordingFile, shouldRunAfterSave: boolean = false) => { + // Cancel any pending saves + if (saveTimeout) { + clearTimeout(saveTimeout); + saveTimeout = null; + } + + // Always mark as dirty when there are changes + update(state => ({ + ...state, + currentFile: state.currentFile ? { + ...state.currentFile, + dirty: true + } : null + })); + + // Check if auto-save is enabled + const storedValue = localStorage.getItem('autoSaveEnabled'); + const autoSaveEnabled = storedValue === null ? true : storedValue === 'true'; + + // If auto-save is disabled and this isn't an explicit save request, don't proceed with save + if (!autoSaveEnabled && !shouldRunAfterSave) { + return; + } + + // Use shorter debounce (500ms) for better responsiveness + const debounceTime = 500; + + saveTimeout = setTimeout(async () => { + update(state => ({ ...state, saving: true })); + + try { + await saveFileToBackend(file); + + update(state => ({ + ...state, + saving: false, + currentFile: state.currentFile ? { + ...state.currentFile, + // Maintain the command structure - don't reset change flags! + dirty: false, + lastSaved: new Date() + } : null + })); + + // Run the test only if explicitly requested + if (shouldRunAfterSave) { + await runCurrentTest(); + } + } catch (error) { + update(state => ({ ...state, saving: false })); + console.error('Failed to save file:', error); + } + }, debounceTime); + }; + + const runCurrentTest = async () => { + const state = getState(); + if (!state.currentFile || state.running) return; + + update(state => ({ ...state, running: true })); + + try { + const result = await runTest(state.currentFile.path, state.dockerImage); + + update(state => { + if (!state.currentFile) return state; + + // Handle structured format with enhanced testStructure + if (state.currentFile.testStructure && result.testStructure) { + console.log('βœ… Test completed for structured format file'); + console.log('Result:', result); + + // Use enhanced testStructure from backend (already has actualOutput, status, error) + function processStepsRecursively(steps) { + return steps.map(step => { + const processedStep = { + ...step, + actualOutput: step.actualOutput || '', + status: step.status || 'success', + error: step.error || false + }; + + // Process nested steps if they exist + if (step.steps && step.steps.length > 0) { + processedStep.steps = processStepsRecursively(step.steps); + } + + return processedStep; + }); + } + + const enhancedSteps = processStepsRecursively(result.testStructure.steps); + + return { + ...state, + running: false, + currentFile: { + ...state.currentFile, + testStructure: { + ...state.currentFile.testStructure, + steps: enhancedSteps + }, + status: result.success ? 'success' : 'failed' + } + }; + } else { + // Fallback - just update overall status + return { + ...state, + running: false, + currentFile: { + ...state.currentFile, + status: result.success ? 
'success' : 'failed' + } + }; + } + }); + } catch (error) { + console.error('Failed to run test:', error); + // Make sure we clear the running flag even if there's an error + update(state => ({ ...state, running: false })); + } + }; + + // Helper function to update all child paths when moving a directory + const updateChildPaths = (children: FileNode[], oldParentPath: string, newParentPath: string): FileNode[] => { + return children.map(child => { + // Calculate the new path by replacing the old parent path with the new one + const newPath = child.path.replace(oldParentPath, newParentPath); + + if (child.isDirectory && child.children) { + // Recursively update children + return { + ...child, + path: newPath, + children: updateChildPaths(child.children, oldParentPath, newParentPath) + }; + } else { + // For files, just update the path + return { + ...child, + path: newPath + }; + } + }); + }; + + // Helper to get current state + const getState = (): FilesState => { + let currentState: FilesState = defaultState; + subscribe(state => { currentState = state; })(); + return currentState; + }; + + // Helper function to find a node in the file tree + const findNodeInTree = (tree: FileNode[], path: string): FileNode | null => { + for (const node of tree) { + if (node.path === path) { + return node; + } + if (node.isDirectory && node.children) { + const found = findNodeInTree(node.children, path); + if (found) return found; + } + } + return null; + }; + + // Helper function to remove a node from the file tree + const removeNodeFromTree = (tree: FileNode[], path: string): FileNode[] => { + return tree.filter(node => { + if (node.path === path) { + return false; // Remove this node + } + if (node.isDirectory && node.children) { + node.children = removeNodeFromTree(node.children, path); + } + return true; + }); + }; + + // Helper function to optimistically add a node to the file tree + const addNodeToDirectory = (tree: FileNode[], dirPath: string, newNode: FileNode): FileNode[] => { + return tree.map(node => { + if (node.path === dirPath && node.isDirectory) { + // Found the directory, add the new node to its children + return { + ...node, + children: [...(node.children || []), newNode] + }; + } + if (node.isDirectory && node.children) { + // Recursively look in this directory's children + return { + ...node, + children: addNodeToDirectory(node.children, dirPath, newNode) + }; + } + return node; + }); + }; + + // Create store instance + const storeModule = { + subscribe, + setConfigDirectory: (directory: string) => update(state => ({ + ...state, + configDirectory: directory + })), + setDockerImage: (image: string) => update(state => ({ + ...state, + dockerImage: image + })), + setFileTree: (tree: FileNode[]) => update(state => ({ + ...state, + fileTree: tree + })), + refreshFileTree: async () => { + try { + const response = await fetch(`${API_URL}/api/get-file-tree`, { + credentials: 'include' + }); + + if (!response.ok) { + throw new Error(`Failed to fetch file tree: ${response.statusText}`); + } + + const data = await response.json(); + // Use the file tree returned directly from the API + if (data.fileTree) { + storeModule.setFileTree(data.fileTree); + return true; + } else { + storeModule.setFileTree([]); + return false; + } + } catch (error) { + console.error('Error refreshing file tree:', error); + return false; + } + }, + moveFile: async (sourcePath: string, targetPath: string) => { + try { + // Get current state + const state = getState(); + + // Find the node to move + const sourceNode = 
findNodeInTree(state.fileTree, sourcePath); + if (!sourceNode) { + throw new Error('Source file not found in file tree'); + } + + // Get target directory path (the parent folder) + const targetDirPath = targetPath.substring(0, targetPath.lastIndexOf('/')); + + // Make a copy of the node to move + const newNode = { + ...sourceNode, + path: targetPath, + name: targetPath.split('/').pop() || '' + }; + + // If it's a directory, we need to update all child paths + if (sourceNode.isDirectory && sourceNode.children) { + newNode.children = updateChildPaths(sourceNode.children, sourcePath, targetPath); + } + + // Update the file tree optimistically + update(state => { + // Remove from its original location + const newTree = removeNodeFromTree([...state.fileTree], sourcePath); + + // Add to the target directory + const updatedTree = addNodeToDirectory(newTree, targetDirPath, newNode); + + return { + ...state, + fileTree: updatedTree + }; + }); + + // Now do the actual server request + const response = await fetch(`${API_URL}/api/move-file`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'include', + body: JSON.stringify({ + sourcePath, + targetPath + }) + }); + + if (!response.ok) { + // If server operation fails, refresh the file tree to restore correct state + await storeModule.refreshFileTree(); + throw new Error(`Failed to move file: ${response.statusText}`); + } + + // Update currentFile path if it was the moved file + update(state => { + if (state.currentFile && state.currentFile.path === sourcePath) { + return { + ...state, + currentFile: { + ...state.currentFile, + path: targetPath + } + }; + } else if (state.currentFile && state.currentFile.path.startsWith(sourcePath + '/')) { + // If current file is inside moved directory, update its path too + const relativePath = state.currentFile.path.substring(sourcePath.length); + const newPath = targetPath + relativePath; + return { + ...state, + currentFile: { + ...state.currentFile, + path: newPath + } + }; + } + return state; + }); + + return true; + } catch (error) { + console.error('Error moving file:', error); + return false; + } + }, + deleteFile: async (path: string) => { + try { + // Update UI optimistically + update(state => { + // Remove from file tree + const newTree = removeNodeFromTree([...state.fileTree], path); + + // Update currentFile if it was the deleted file + if (state.currentFile && state.currentFile.path === path) { + return { + ...state, + fileTree: newTree, + currentFile: null + }; + } + + return { + ...state, + fileTree: newTree + }; + }); + + // Now do the actual server request + const response = await fetch(`${API_URL}/api/delete-file`, { + method: 'DELETE', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'include', + body: JSON.stringify({ path }) + }); + + if (!response.ok) { + // If server operation fails, refresh the file tree to restore correct state + await storeModule.refreshFileTree(); + throw new Error(`Failed to delete file: ${response.statusText}`); + } + + return true; + } catch (error) { + console.error('Error deleting file:', error); + return false; + } + }, + loadFile: async (path: string) => { + try { + // Load the file content + const result = await checkFileLoaded(path); + + if (result.success) { + // Handle new structured format (preferred) + if (result.testStructure) { + console.log('βœ… Using new structured format for file:', path); + + // Add runtime properties to all steps (status: pending, isExpanded: false) + const processSteps = 
(steps: TestStep[]): TestStep[] => { + return steps.map(step => ({ + ...step, + status: 'pending', + isExpanded: step.type === 'block' ? false : undefined, + steps: step.steps ? processSteps(step.steps) : null + })); + }; + + const processedTestStructure: TestStructure = { + ...result.testStructure, + steps: processSteps(result.testStructure.steps) + }; + + // Update store with the new structured data + update(state => ({ + ...state, + currentFile: { + path, + testStructure: processedTestStructure, + dirty: false, + status: 'pending' + } + })); + + return true; + } + } else { + console.error('Failed to load file:', result.error); + return false; + } + } catch (error) { + console.error('Error in loadFile:', error); + return false; + } + }, + saveOnly: async () => { + const state = getState(); + if (!state.currentFile) return; + + update(state => ({ ...state, saving: true })); + + try { + await saveFileToBackend(state.currentFile); + + update(state => ({ + ...state, + saving: false, + currentFile: state.currentFile ? { + ...state.currentFile, // Keep existing commands with their flags + dirty: false, + lastSaved: new Date() + } : null + })); + } catch (error) { + update(state => ({ ...state, saving: false })); + console.error('Failed to save file:', error); + } + }, + saveAndRun: async () => { + const state = getState(); + if (!state.currentFile) return; + + update(state => ({ ...state, saving: true })); + + try { + await saveFileToBackend(state.currentFile); + + update(state => ({ + ...state, + saving: false, + currentFile: state.currentFile ? { + ...state.currentFile, // Keep existing commands with their flags + dirty: false, + lastSaved: new Date() + } : null + })); + + // Run test after saving + await runCurrentTest(); + } catch (error) { + update(state => ({ ...state, saving: false })); + console.error('Failed to save file:', error); + } + }, + forceSave: async () => { + // Keep for backward compatibility + const state = getState(); + if (!state.currentFile) return; + + // Redirect to saveAndRun + await storeModule.saveAndRun(); + }, + createNewFile: (path: string) => { + const newFile = { + path, + testStructure: { + description: null, + steps: [] + }, + dirty: true, + status: 'pending' + }; + + // Extract the file name and directory path + const fileName = path.split('/').pop() || ''; + const dirPath = path.substring(0, path.lastIndexOf('/')); + + // Create the file node for the file tree + const newNode: FileNode = { + name: fileName, + path, + isDirectory: false + }; + + // Update the file tree optimistically + update(state => { + // Add the new file to the directory + const updatedTree = addNodeToDirectory([...state.fileTree], dirPath, newNode); + + return { + ...state, + fileTree: updatedTree, + currentFile: newFile + }; + }); + + // Now do the actual save operation + setTimeout(async () => { + try { + // Save the file + await debouncedSave(newFile, false); + } catch (err) { + console.error('Error saving new file:', err); + // If there's an error, refresh the file tree to restore correct state + setTimeout(async () => { + await storeModule.refreshFileTree(); + }, 100); + } + }, 0); + }, + runTest: runCurrentTest, + + // Add validation function + validateTest: async (filePath: string) => { + try { + const response = await fetch(`${API_URL}/api/validate-test`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'include', + body: JSON.stringify({ filePath }) + }); + + if (!response.ok) { + throw new Error(`Validation failed: 
${response.statusText}`); + } + + const validationResult = await response.json(); + console.log('✅ Test validation completed via WASM backend'); + return validationResult; + } catch (error) { + console.error('Error validating test:', error); + throw error; + } + }, + + // Update test structure (for new structured format) + updateTestStructure: (newStructure: TestStructure) => { + update(state => { + if (!state.currentFile) return state; + + return { + ...state, + currentFile: { + ...state.currentFile, + testStructure: newStructure, + dirty: true + } + }; + }); + }, + + // Clear current file selection + clearCurrentFile: () => { + update(state => ({ + ...state, + currentFile: null + })); + } + }; + + // Set the self-reference to allow calling other methods + runModule = storeModule; + + return storeModule; +} + +export const filesStore = createFilesStore(); +export type { RecordingFile }; + +// Helper functions for file tree manipulation +export function findNodeInTree(tree: FileNode[], path: string): FileNode | null { + for (const node of tree) { + if (node.path === path) { + return node; + } + if (node.isDirectory && node.children) { + const found = findNodeInTree(node.children, path); + if (found) return found; + } + } + return null; +} + +export function addNodeToDirectory(tree: FileNode[], dirPath: string, newNode: FileNode): FileNode[] { + return tree.map(node => { + if (node.path === dirPath && node.isDirectory) { + // Found the directory, add the new node to its children + return { + ...node, + children: [...(node.children || []), newNode] + }; + } + if (node.isDirectory && node.children) { + // Recursively look in this directory's children + return { + ...node, + children: addNodeToDirectory(node.children, dirPath, newNode) + }; + } + return node; + }); +} + +export function removeNodeFromTree(tree: FileNode[], path: string): FileNode[] { + return tree.filter(node => { + if (node.path === path) { + return false; // Remove this node + } + if (node.isDirectory && node.children) { + node.children = removeNodeFromTree(node.children, path); + } + return true; + }); +} + +export function updateChildPaths(children: FileNode[], oldParentPath: string, newParentPath: string): FileNode[] { + return children.map(child => { + // Calculate the new path by replacing the old parent path with the new one + const newPath = child.path.replace(oldParentPath, newParentPath); + + if (child.isDirectory && child.children) { + // Recursively update children + return { + ...child, + path: newPath, + children: updateChildPaths(child.children, oldParentPath, newParentPath) + }; + } else { + // For files, just update the path + return { + ...child, + path: newPath + }; + } + }); +} + +// Export types for use in components +export type { TestStep, TestStructure, RecordingCommand, FileNode }; diff --git a/ui/src/stores/gitStatusStore.ts b/ui/src/stores/gitStatusStore.ts new file mode 100644 index 0000000..f9e6fe2 --- /dev/null +++ b/ui/src/stores/gitStatusStore.ts @@ -0,0 +1,312 @@ +import { writable } from 'svelte/store'; +import { API_URL } from '../config.js'; + +export interface GitFileStatus { + path: string; + status: 'M' | 'A' | 'D' | 'R' | 'C' | 'U' | '??' 
| string; // M=Modified, A=Added, D=Deleted, R=Renamed, C=Copied, U=Unmerged, ??=Untracked +} + +export interface GitStatusState { + currentBranch: string; + isPrBranch: boolean; + hasChanges: boolean; + modifiedFiles: GitFileStatus[]; + modifiedDirs: string[]; + testPath: string; + isLoading: boolean; + error: string | null; + lastUpdated: number | null; + isPaused: boolean; // Flag to pause updates when modal is open +} + +const initialState: GitStatusState = { + currentBranch: 'master', + isPrBranch: false, + hasChanges: false, + modifiedFiles: [], + modifiedDirs: [], + testPath: 'test/clt-tests', + isLoading: false, + error: null, + lastUpdated: null, + isPaused: false +}; + +function createGitStatusStore() { + const { subscribe, set, update } = writable(initialState); + + let pollInterval: number | null = null; + let isPollingActive = false; + + return { + subscribe, + + // Fetch git status once + fetchGitStatus: async () => { + update(state => ({ ...state, isLoading: true, error: null })); + + try { + console.log('Fetching git status from:', `${API_URL}/api/git-status`); + + const response = await fetch(`${API_URL}/api/git-status`, { + credentials: 'include' + }); + + console.log('Git status response:', response.status, response.statusText); + + if (!response.ok) { + if (response.status === 401) { + throw new Error('GitHub authentication required'); + } + if (response.status === 404) { + throw new Error('Repository not found'); + } + + // Try to get error details from response + let errorMessage = `HTTP ${response.status}: ${response.statusText}`; + try { + const errorData = await response.json(); + if (errorData.error) { + errorMessage = errorData.error; + } + } catch (e) { + // Ignore JSON parsing error, use default message + } + + throw new Error(errorMessage); + } + + const data = await response.json(); + console.log('Git status data:', data); + + // Check if response contains error + if (data.error) { + throw new Error(data.error); + } + + let hasChanges = false; + let modifiedFiles: GitFileStatus[] = []; + let modifiedDirs: string[] = []; + let currentBranch = 'main'; + let isPrBranch = false; + + // Handle different response formats + if (data.success !== undefined) { + // Format from existing endpoint that returns { success: true, hasChanges, modifiedFiles, ... } + hasChanges = data.hasChanges || false; + modifiedFiles = data.modifiedFiles || []; + modifiedDirs = data.modifiedDirs || []; + currentBranch = data.currentBranch || 'main'; + isPrBranch = data.isPrBranch || false; + } else if (data.files) { + // Format from /api/git-status endpoint that returns { hasUnstagedChanges, files: { modified, not_added, ... } } + hasChanges = data.hasUnstagedChanges || !data.isClean; + + // Convert backend file arrays to GitFileStatus format + if (data.files.modified) { + modifiedFiles.push(...data.files.modified.map(path => ({ path, status: 'M' as const }))); + } + if (data.files.not_added) { + modifiedFiles.push(...data.files.not_added.map(path => ({ path, status: '??' 
as const }))); + } + if (data.files.deleted) { + modifiedFiles.push(...data.files.deleted.map(path => ({ path, status: 'D' as const }))); + } + if (data.files.conflicted) { + modifiedFiles.push(...data.files.conflicted.map(path => ({ path, status: 'U' as const }))); + } + if (data.files.staged) { + modifiedFiles.push(...data.files.staged.map(path => ({ path, status: 'A' as const }))); + } + + // Extract unique directories from modified files + modifiedDirs = [...new Set( + modifiedFiles + .map(file => file.path.split('/').slice(0, -1).join('/')) + .filter(dir => dir.length > 0) + )]; + + currentBranch = data.currentBranch || 'main'; + isPrBranch = currentBranch?.startsWith('clt-ui-') || false; + } + + update(state => ({ + ...state, + isLoading: false, + currentBranch, + isPrBranch, + hasChanges, + modifiedFiles, + modifiedDirs, + testPath: 'test/clt-tests', + lastUpdated: Date.now(), + error: null + })); + + } catch (error) { + console.error('Error fetching git status:', error); + update(state => ({ + ...state, + isLoading: false, + error: error.message + })); + } + }, + + // Start periodic polling (only allow one active polling instance) + startPolling: (intervalMs: number = 5000) => { + // If already polling, don't start another instance + if (isPollingActive) { + console.log('Git status polling already active, ignoring duplicate start request'); + return; + } + + // Clear any existing interval + if (pollInterval) { + clearInterval(pollInterval); + pollInterval = null; + } + + isPollingActive = true; + console.log(`Starting git status polling with ${intervalMs}ms interval`); + + // Fetch immediately + gitStatusStore.fetchGitStatus(); + + // Then poll periodically + pollInterval = setInterval(() => { + // Check if polling is paused before fetching + let currentState: GitStatusState; + const unsubscribe = subscribe(state => { + currentState = state; + }); + unsubscribe(); + + if (!currentState!.isPaused) { + gitStatusStore.fetchGitStatus(); + } + }, intervalMs); + }, + + // Stop polling + stopPolling: () => { + if (pollInterval) { + clearInterval(pollInterval); + pollInterval = null; + } + isPollingActive = false; + console.log('Git status polling stopped'); + }, + + // Force restart polling (useful when multiple components try to start) + restartPolling: (intervalMs: number = 5000) => { + gitStatusStore.stopPolling(); + isPollingActive = false; // Ensure flag is reset + gitStatusStore.startPolling(intervalMs); + }, + + // Check if polling is currently active + isPolling: () => isPollingActive, + + // Pause/resume polling (for when modal is open) + pausePolling: () => { + update(state => ({ ...state, isPaused: true })); + }, + + resumePolling: () => { + update(state => ({ ...state, isPaused: false })); + }, + + // Get status for a specific file path + getFileStatus: (filePath: string): string | null => { + let currentState: GitStatusState; + const unsubscribe = subscribe(state => { + currentState = state; + }); + unsubscribe(); + + const fileStatus = currentState!.modifiedFiles.find(file => file.path === filePath); + return fileStatus ? 
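+      // A subscribe-then-unsubscribe round trip reads the store synchronously.
+      // svelte/store's get() helper does the same job, e.g. (sketch only):
+      //   import { get } from 'svelte/store';
+      //   const status = get(gitStatusStore).modifiedFiles
+      //     .find(f => f.path === filePath)?.status ?? null;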
fileStatus.status : null;
+    },
+
+    // Check if a directory has changes
+    isDirModified: (dirPath: string): boolean => {
+      let currentState: GitStatusState;
+      const unsubscribe = subscribe(state => {
+        currentState = state;
+      });
+      unsubscribe();
+
+      return currentState!.modifiedDirs.includes(dirPath);
+    },
+
+    // Check for unstaged changes and prompt the user if they exist
+    checkUnstagedChanges: async (): Promise<boolean> => {
+      // First ensure we have fresh git status
+      await gitStatusStore.fetchGitStatus();
+
+      let currentState: GitStatusState;
+      const unsubscribe = subscribe(state => {
+        currentState = state;
+      });
+      unsubscribe();
+
+      // If no changes, proceed
+      if (!currentState!.hasChanges) {
+        return true;
+      }
+
+      // If there are changes, prompt the user
+      const fileCount = currentState!.modifiedFiles.length;
+      const fileList = currentState!.modifiedFiles
+        .slice(0, 5) // Show first 5 files
+        .map(file => `  ${file.status} ${file.path}`)
+        .join('\n');
+
+      const moreFiles = fileCount > 5 ? `\n  ... and ${fileCount - 5} more files` : '';
+
+      const message = `You have ${fileCount} unstaged change${fileCount > 1 ? 's' : ''} in your working directory:\n\n${fileList}${moreFiles}\n\nProceeding will potentially affect these changes. Do you want to continue?`;
+
+      return confirm(message);
+    },
+
+    // Checkout a single file to discard changes
+    checkoutFile: async (filePath: string): Promise<boolean> => {
+      try {
+        const response = await fetch(`${API_URL}/api/checkout-file`, {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+          },
+          credentials: 'include',
+          body: JSON.stringify({ filePath })
+        });
+
+        const data = await response.json();
+
+        if (!response.ok) {
+          console.error('Checkout file failed:', data.error);
+          alert(`Failed to checkout file: ${data.error}`);
+          return false;
+        }
+
+        console.log('File checked out successfully:', data.message);
+
+        // Refresh git status after checkout
+        await gitStatusStore.fetchGitStatus();
+
+        return true;
+      } catch (error) {
+        console.error('Error checking out file:', error);
+        alert(`Error checking out file: ${error.message}`);
+        return false;
+      }
+    }
+  };
+}
+
+export const gitStatusStore = createGitStatusStore();
+
+// Export functions for direct import
+export const checkUnstagedChanges = gitStatusStore.checkUnstagedChanges;
+export const checkoutFile = gitStatusStore.checkoutFile;
\ No newline at end of file
diff --git a/ui/src/stores/githubStore.ts b/ui/src/stores/githubStore.ts
new file mode 100644
index 0000000..4e6f142
--- /dev/null
+++ b/ui/src/stores/githubStore.ts
@@ -0,0 +1,193 @@
+import { writable, get } from 'svelte/store';
+import { API_URL } from '../config.js';
+import { gitStatusStore } from './gitStatusStore.ts';
+
+interface PrStatus {
+  currentBranch: string;
+  isPrBranch: boolean;
+  existingPr: {
+    url: string;
+    title: string;
+    number: number;
+  } | null;
+  recentCommits: Array<{
+    hash: string;
+    message: string;
+    author: string;
+    date: string;
+    authorEmail: string;
+  }>;
+  hasChanges: boolean;
+  timestamp: number;
+}
+
+interface PrState {
+  isCreating: boolean;
+  isCommitting: boolean;
+  showModal: boolean;
+  error: string | null;
+  success: boolean;
+  prUrl: string | null;
+  repoUrl: string | null;
+  message: string | null;
+  prStatus: PrStatus | null;
+  isLoadingStatus: boolean;
+}
+
+const initialState: PrState = {
+  isCreating: false,
+  isCommitting: false,
+  showModal: false,
+  error: null,
+  success: false,
+  prUrl: null,
+  repoUrl: null,
+  message: null,
+  prStatus: null,
+  isLoadingStatus: false
+};
+
+function
createGithubStore() { + const { subscribe, set, update } = writable(initialState); + + const store = { + subscribe, + showModal: () => { + update(state => ({ ...state, showModal: true, error: null, success: false, prUrl: null, repoUrl: null, message: null })); + // Fetch PR status when modal opens + store.fetchPrStatus(); + }, + hideModal: () => update(state => ({ ...state, showModal: false })), + + // Fetch PR status for current branch + fetchPrStatus: async () => { + update(state => ({ ...state, isLoadingStatus: true, error: null })); + + try { + const response = await fetch(`${API_URL}/api/pr-status`, { + credentials: 'include' + }); + + if (!response.ok) { + throw new Error(`Failed to fetch PR status: ${response.statusText}`); + } + + const prStatus = await response.json(); + + update(state => ({ + ...state, + isLoadingStatus: false, + prStatus, + error: null + })); + + return prStatus; + } catch (error) { + update(state => ({ + ...state, + isLoadingStatus: false, + error: error.message || 'Failed to fetch PR status' + })); + throw error; + } + }, + + // Commit changes to existing PR branch + commitChanges: async (message: string) => { + update(state => ({ ...state, isCommitting: true, error: null, success: false })); + + try { + const response = await fetch(`${API_URL}/api/commit-changes`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'include', + body: JSON.stringify({ message }) + }); + + const data = await response.json(); + + if (!response.ok) { + throw new Error(data.error || 'Failed to commit changes'); + } + + update(state => ({ + ...state, + isCommitting: false, + success: true, + prUrl: data.pr || null, + message: data.message || 'Changes committed successfully' + })); + + // Don't auto-refresh here - let the modal handle it + + return data; + } catch (error) { + update(state => ({ + ...state, + isCommitting: false, + error: error.message || 'An error occurred while committing changes' + })); + throw error; + } + }, + createPullRequest: async (title: string, description: string) => { + update(state => ({ ...state, isCreating: true, error: null, success: false, prUrl: null, repoUrl: null, message: null })); + + try { + // Pre-flight check: ensure we have fresh git status + await gitStatusStore.fetchGitStatus(); + const gitStatus = get(gitStatusStore); + + if (!gitStatus.hasChanges) { + throw new Error('No changes detected. 
Please make some changes before creating a pull request.');
+        }
+
+        if (gitStatus.error) {
+          throw new Error(`Git status error: ${gitStatus.error}`);
+        }
+
+        const response = await fetch(`${API_URL}/api/create-pr`, {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json'
+          },
+          credentials: 'include',
+          body: JSON.stringify({ title, description })
+        });
+
+        const data = await response.json();
+
+        if (!response.ok) {
+          throw new Error(data.error || 'Failed to create pull request');
+        }
+
+        update(state => ({
+          ...state,
+          isCreating: false,
+          success: true,
+          prUrl: data.pr || null,
+          repoUrl: data.repository || data.repoUrl || null,
+          message: data.message || 'Pull request created successfully'
+        }));
+
+        // Don't auto-refresh here - let the modal handle it
+
+        return data;
+      } catch (error) {
+        update(state => ({
+          ...state,
+          isCreating: false,
+          error: error.message || 'An error occurred while creating the pull request'
+        }));
+        throw error;
+      }
+    },
+    reset: () => set(initialState)
+  };
+
+  return store;
+}
+
+export const githubStore = createGithubStore();
\ No newline at end of file
diff --git a/ui/src/vite-env.d.ts b/ui/src/vite-env.d.ts
new file mode 100644
index 0000000..4078e74
--- /dev/null
+++ b/ui/src/vite-env.d.ts
@@ -0,0 +1,2 @@
+/// <reference types="svelte" />
+/// <reference types="vite/client" />
diff --git a/ui/svelte.config.js b/ui/svelte.config.js
new file mode 100644
index 0000000..b0683fd
--- /dev/null
+++ b/ui/svelte.config.js
@@ -0,0 +1,7 @@
+import { vitePreprocess } from '@sveltejs/vite-plugin-svelte'
+
+export default {
+  // Consult https://svelte.dev/docs#compile-time-svelte-preprocess
+  // for more information about preprocessors
+  preprocess: vitePreprocess(),
+}
diff --git a/ui/tailwind.config.js b/ui/tailwind.config.js
new file mode 100644
index 0000000..998f0be
--- /dev/null
+++ b/ui/tailwind.config.js
@@ -0,0 +1,8 @@
+/** @type {import('tailwindcss').Config} */
+export default {
+  content: ['./src/**/*.{html,js,svelte,ts}'],
+  theme: {
+    extend: {},
+  },
+  plugins: [],
+};
\ No newline at end of file
diff --git a/ui/testAndGitRoutes.js b/ui/testAndGitRoutes.js
new file mode 100644
index 0000000..fa46307
--- /dev/null
+++ b/ui/testAndGitRoutes.js
@@ -0,0 +1,14 @@
+import { setupTestRoutes } from './testRoutes.js';
+import { setupGitRoutes } from './gitRoutes.js';
+import { setupInteractiveRoutes } from './interactiveRoutes.js';
+
+// Export the main setup function that combines all route modules
+export function setupGitAndTestRoutes(app, isAuthenticated, dependencies) {
+  // Setup all route modules
+  setupTestRoutes(app, isAuthenticated, dependencies);
+  setupGitRoutes(app, isAuthenticated, dependencies);
+  setupInteractiveRoutes(app, isAuthenticated, dependencies);
+}
+
+// Re-export the processTestResults function for backward compatibility
+export { processTestResults } from './testProcessor.js';
\ No newline at end of file
diff --git a/ui/testProcessor.js b/ui/testProcessor.js
new file mode 100644
index 0000000..13e94ff
--- /dev/null
+++ b/ui/testProcessor.js
@@ -0,0 +1,223 @@
+import fs from 'fs/promises';
+import {
+  convertTestStructureToLegacyCommands,
+  extractDuration
+} from './routes.js';
+
+// Function to process test results with WASM structured format
+export async function processTestResults(absolutePath, testStructure, stdout, stderr, exitCode, error) {
+  // Convert WASM TestStructure to the format expected by the existing logic
+  // This maintains UI compatibility while using the new structured format
+  const expandedCommands = convertTestStructureToLegacyCommands(testStructure);
+
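+  // For orientation, each legacy command is a flat object whose fields are the
+  // ones consumed below (exact shape comes from routes.js; values hypothetical):
+  //   { command: 'echo hi', type: 'command', status: 'pending',
+  //     expectedOutput: '...', actualOutput: '...', duration: 12,
+  //     isBlockCommand: false, parentBlock: null, blockSource: null }
+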
const repFilePath = absolutePath.replace(/\.rec$/, '.rep'); + let success = false; // Default to failure, will update based on command results + + // Create a mapping of block commands by their parent block for status propagation + const blockCommandMap = new Map(); + expandedCommands.forEach(cmd => { + if (cmd.isBlockCommand && cmd.parentBlock) { + // The key is the parent block's index/ID + const key = `${cmd.parentBlock.command}|${cmd.blockSource || ''}`; + + if (!blockCommandMap.has(key)) { + blockCommandMap.set(key, []); + } + blockCommandMap.get(key).push(cmd); + } + }); + + try { + // Try to read the .rep file for actual outputs and durations + let repContent = ''; + const repSections = []; + + try { + repContent = await fs.readFile(repFilePath, 'utf8'); + console.log(`Successfully read .rep file: ${repFilePath}`); + + // Check if rep file is empty + if (!repContent || repContent.trim() === '') { + console.warn(`The .rep file is empty: ${repFilePath}`); + throw new Error('Empty rep file'); + } + + // Parse sections + const sections = repContent.split('––– input –––').slice(1); + if (sections.length === 0) { + console.warn(`No input sections found in .rep file: ${repFilePath}`); + throw new Error('No sections found in rep file'); + } + + // Parse the .rep file sections + for (const section of sections) { + const parts = section.split('––– output –––'); + if (parts.length >= 2) { + repSections.push({ + command: parts[0].trim(), + output: parts[1].trim(), + full: section + }); + } + } + + console.log(`Parsed ${repSections.length} sections from rep file`); + if (repSections.length === 0) { + console.warn(`Failed to parse sections from .rep file: ${repFilePath}`); + throw new Error('Failed to parse sections from rep file'); + } + } catch (repError) { + // If the rep file doesn't exist or is invalid, continue without it + console.warn(`Could not process .rep file: ${repError.message}`); + + // Try to parse outputs from stdout instead + if (stdout && stdout.trim()) { + console.log('Attempting to parse command outputs from stdout'); + const sections = stdout.split('––– input –––').slice(1); + for (const section of sections) { + const parts = section.split('––– output –––'); + if (parts.length >= 2) { + repSections.push({ + command: parts[0].trim(), + output: parts[1].trim(), + full: section + }); + } + } + console.log(`Parsed ${repSections.length} sections from stdout`); + } + } + + // Process commands with outputs from rep file or stdout + let allCommandsPassed = true; + + // Debug logging for initial command statuses + console.log('Processing commands - total:', expandedCommands.length); + console.log('Block commands:', expandedCommands.filter(cmd => cmd.isBlockCommand).length); + console.log('Block references:', expandedCommands.filter(cmd => cmd.type === 'block' && !cmd.isBlockCommand).length); + + for (const cmd of expandedCommands) { + // Skip comments and mark blocks + if (cmd.type === 'comment') { + continue; + } else if (cmd.type === 'block' && !cmd.isBlockCommand) { + // Mark block with appropriate initial status based on exit code + cmd.status = exitCode === 0 ? 
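+        // For reference, a .rep transcript is a sequence of delimited sections,
+        // e.g. (illustrative):
+        //   ––– input –––
+        //   echo hello
+        //   ––– output –––
+        //   hello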
'matched' : 'pending'; + continue; + } + + // For regular commands, find the corresponding output + const commandText = cmd.command.trim(); + const matchingSection = repSections.find(s => s.command.trim() === commandText); + + if (matchingSection) { + // Extract duration + cmd.duration = extractDuration(matchingSection.full); + + // Get the output content + const output = matchingSection.output; + const nextDelimiterMatch = output.match(/–––\s.*?\s–––/); + const actualOutput = nextDelimiterMatch + ? output.substring(0, nextDelimiterMatch.index).trim() + : output; + + // Set actual output + cmd.actualOutput = actualOutput; + + // Set expected output if not already set + if (!cmd.expectedOutput) { + cmd.expectedOutput = actualOutput; + } + + // If the exitCode is 0, treat everything as matched, otherwise do normal comparison + if (exitCode === 0) { + cmd.status = 'matched'; + } else { + // Determine status based on comparison + if (cmd.expectedOutput === actualOutput) { + cmd.status = 'matched'; + } else { + cmd.status = 'failed'; + allCommandsPassed = false; + } + } + } else { + // Could not find matching output + console.warn(`No matching output found for command: ${commandText.substring(0, 50)}...`); + + // If the exitCode is 0, treat everything as matched, even if no output was found + if (exitCode === 0) { + cmd.status = 'matched'; + cmd.actualOutput = 'No matching output found, but test passed.'; + } else { + cmd.status = 'failed'; + allCommandsPassed = false; + + // Set actual output to an error message for UI display + cmd.actualOutput = 'Error: No matching output found for this command'; + } + } + } + + // Now propagate status to block declarations based on their contained commands only + for (const cmd of expandedCommands) { + if (cmd.isBlockCommand && cmd.parentBlock) { + // The key is the parent block's index/ID + const key = `${cmd.parentBlock.command}|${cmd.blockSource || ''}`; + const blockCommands = blockCommandMap.get(key) || []; + + if (blockCommands.length > 0) { + // If any block command failed, mark the block as failed + const anyFailed = blockCommands.some(bc => bc.status === 'failed'); + // If any command matched, consider the block matched + const anyMatched = blockCommands.some(bc => bc.status === 'matched'); + + // Set status based only on the block's commands, independent of test exit code + // If no failures and at least one passed, then the block passed + cmd.status = anyFailed ? 'failed' : (anyMatched ? 
'matched' : 'pending');
+
+        // Debug log for block status
+        console.log(`Block ${cmd.command} status: ${cmd.status} (anyFailed=${anyFailed}, anyMatched=${anyMatched}, command count=${blockCommands.length})`);
+        console.log('Block commands:', blockCommands.map(bc => ({ cmd: bc.command.substring(0, 30), status: bc.status })));
+
+        // Update overall success status only for real failures
+        if (anyFailed) allCommandsPassed = false;
+      }
+    }
+  }
+
+  // Determine overall success - a test is successful if exitCode is 0,
+  // regardless of individual command comparisons, which might only differ due to pattern variables
+  success = exitCode === 0;
+
+  } catch (processError) {
+    console.error('Error processing test results:', processError);
+    // Mark all commands as failed if there was an error
+    for (const cmd of expandedCommands) {
+      if (cmd.type !== 'comment' && !cmd.status) {
+        cmd.status = 'failed';
+      }
+    }
+  }
+
+  // Make sure non-block commands have a status set (blocks already handled above)
+  for (const cmd of expandedCommands) {
+    if (cmd.type !== 'comment' && cmd.type !== 'block' && !cmd.status) {
+      // For non-block commands without a status, set default status
+      cmd.status = 'pending';
+    }
+  }
+
+  const testReallyFailed = exitCode !== 0;
+  return {
+    commands: expandedCommands,
+    success,
+    exitCode,
+    exitCodeSuccess: exitCode === 0,
+    error: testReallyFailed ? error?.message : null,
+    stderr,
+    stdout,
+    message: success ? 'Test executed successfully' : 'Test executed with differences',
+    testReallyFailed
+  };
+}
\ No newline at end of file
diff --git a/ui/testRoutes.js b/ui/testRoutes.js
new file mode 100644
index 0000000..55e11ad
--- /dev/null
+++ b/ui/testRoutes.js
@@ -0,0 +1,253 @@
+import path from 'path';
+import fs from 'fs/promises';
+import {
+  parseRecFileFromMapWasm,
+  validateTestFromMapWasm,
+  validateTestWasm
+} from './wasmNodeWrapper.js';
+import {
+  getUserRepoPath,
+  getUserTestPath,
+  getMergedPatterns,
+  createFileContentMap
+} from './routes.js';
+import { processTestResults } from './testProcessor.js';
+
+// Setup Test routes
+export function setupTestRoutes(app, isAuthenticated, dependencies) {
+  const {
+    WORKDIR,
+    ROOT_DIR,
+    __dirname,
+    getAuthConfig
+  } = dependencies;
+
+  // API endpoint to run a test
+  app.post('/api/run-test', isAuthenticated, async (req, res) => {
+    try {
+      const { filePath, dockerImage } = req.body;
+
+      if (!filePath) {
+        return res.status(400).json({ error: 'File path is required' });
+      }
+
+      // Use the user's test directory as the base
+      const testDir = getUserTestPath(req, WORKDIR, ROOT_DIR, getAuthConfig);
+      const absolutePath = path.join(testDir, filePath);
+
+      // Basic security check to ensure the path is within the test directory
+      if (!absolutePath.startsWith(testDir)) {
+        return res.status(403).json({ error: 'Access denied' });
+      }
+
+      // Execute the clt test command to run the test (from the user's project directory)
+      const userRepoPath = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig);
+      const relativeFilePath = path.relative(userRepoPath, absolutePath);
+      const testCommand = `clt test -d -t ${relativeFilePath} ${dockerImage ? 
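+      // The assembled command ends up looking like (hypothetical image name):
+      //   clt test -d -t test/clt-tests/foo.rec ghcr.io/acme/app:latest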
dockerImage : ''}`; + console.log(`Executing test command: ${testCommand} in dir: ${userRepoPath}`); + + const { exec } = await import('child_process'); + + // Execute in the user's repository directory + const execOptions = { + cwd: userRepoPath, + env: { + ...process.env, + CLT_NO_COLOR: '1', + } + }; + + + exec(testCommand, execOptions, async (error, stdout, stderr) => { + // Log all output regardless of success/failure + console.log(`Test stdout: ${stdout}`); + if (stderr) { + console.log(`Test stderr: ${stderr}`); + } + + // Warnings like Docker platform mismatch shouldn't be treated as errors + // If the exit code is 0, the test actually passed + const exitCode = error ? error.code : 0; + const testReallyFailed = exitCode !== 0; + console.log(`Test exit code: ${exitCode}, Test failed: ${testReallyFailed}`); + console.log(testReallyFailed ? `Test completed with differences: ${error?.message}` : 'Test passed with no differences'); + + try { + // Parse the .rec file using WASM with content map + console.log(`πŸ“– Parsing .rec file with WASM: ${absolutePath}`); + const fileMap = await createFileContentMap(absolutePath, testDir, req); + const relativeFilePath = path.relative(testDir, absolutePath); + const testStructure = await parseRecFileFromMapWasm(relativeFilePath, fileMap); + + // Check if .rep file exists for validation + const repFilePath = absolutePath.replace('.rec', '.rep'); + const repRelativePath = relativeFilePath.replace('.rec', '.rep'); + let validationResults = null; + + try { + await fs.access(repFilePath); + // .rep file exists, add it to file map and validate + const repContent = await fs.readFile(repFilePath, 'utf8'); + fileMap[repRelativePath] = repContent; + + console.log(`πŸ” Running validation with WASM: ${repFilePath}`); + + // Get patterns for validation (proper merging like CLT) + try { + const userRepoPath = getUserRepoPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const patterns = await getMergedPatterns(userRepoPath, __dirname); + + console.log(`πŸ”₯ VALIDATION PATTERNS DUMP - COUNT: ${Object.keys(patterns).length}`); + console.log(`πŸ”₯ PATTERNS OBJECT:`, JSON.stringify(patterns, null, 2)); + console.log(`πŸ”₯ VERSION PATTERN:`, patterns.VERSION); + console.log(`πŸ”₯ PATTERN KEYS:`, Object.keys(patterns).join(', ')); + + // Run validation with patterns + console.log(`πŸ”₯ CALLING validateTestFromMapWasm WITH PATTERNS`); + validationResults = await validateTestFromMapWasm(relativeFilePath, fileMap, patterns); + console.log(`πŸ”₯ VALIDATION RESULT:`, JSON.stringify(validationResults, null, 2)); + + // ENRICH testStructure with actual outputs and error flags + try { + console.log(`πŸ“‹ Enriching testStructure with actual outputs from .rep file`); + + // Parse .rep file to get actual outputs + const repStructure = await parseRecFileFromMapWasm(repRelativePath, fileMap); + console.log(`πŸ“‹ Rep structure:`, JSON.stringify(repStructure, null, 2)); + + // Extract only OUTPUT type blocks from .rep file (flat/expanded) + const repOutputs = repStructure.steps.filter(step => step.type === 'output'); + console.log(`πŸ“‹ Found ${repOutputs.length} output blocks in .rep file`); + + // Track output index for sequential mapping + let outputIndex = 0; + let globalStepIndex = 0; // Track global step index for error mapping + + // Recursive function to traverse nested structure and assign outputs + function enrichStepsRecursively(steps) { + steps.forEach((step, index) => { + const currentStepIndex = globalStepIndex; + globalStepIndex++; // Increment for every step (input, 
output, block, comment) + + // Check for validation errors on ANY step type (not just input) + const hasError = validationResults.errors && + validationResults.errors.some(error => error.step === currentStepIndex); + step.error = hasError; + + // If this is an input step, set status based on error + if (step.type === 'input') { + step.status = hasError ? 'failed' : 'success'; + console.log(`πŸ“‹ Input step ${currentStepIndex}: ${hasError ? 'FAILED' : 'SUCCESS'}`); + } + + // If this is an output step, assign next .rep output and use already-set error status + if (step.type === 'output') { + if (repOutputs[outputIndex]) { + step.actualOutput = repOutputs[outputIndex].content || ''; + console.log(`πŸ“‹ Assigned rep output ${outputIndex + 1} to step ${currentStepIndex}: ${step.actualOutput ? 'SET' : 'EMPTY'}`); + } + outputIndex++; + + // Set status based on already-set error flag + step.status = step.error ? 'failed' : 'success'; + console.log(`πŸ“‹ Output step ${currentStepIndex}: ${step.error ? 'FAILED' : 'SUCCESS'}`); + } + + // If this is a block step, check for nested errors and process nested steps + if (step.type === 'block') { + if (step.steps && step.steps.length > 0) { + enrichStepsRecursively(step.steps); + // Block is failed if any nested step failed + const hasNestedError = step.steps.some(nestedStep => nestedStep.error); + step.error = hasNestedError; + step.status = hasNestedError ? 'failed' : 'success'; + } + } + + // For comment steps, just set success status + if (step.type === 'comment') { + step.error = false; + step.status = 'success'; + } + }); + } + + // Start recursive enrichment + enrichStepsRecursively(testStructure.steps); + + console.log(`βœ… TestStructure enriched with ${outputIndex} outputs processed`); + } catch (enrichError) { + console.error('Error enriching testStructure:', enrichError.message); + } + + } catch (patternError) { + console.warn('Could not load patterns for validation:', patternError.message); + // Fall back to validation without patterns + validationResults = await validateTestFromMapWasm(relativeFilePath, fileMap); + } + + console.log(`βœ… Validation completed: ${validationResults.success ? 
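+          // validationResults is assumed to carry at least
+          //   { success: boolean, errors: [{ step: number, ... }], summary?: string }
+          // (fields inferred from how they are read in this file).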
'PASSED' : 'FAILED'}`); + } catch (repError) { + console.log(`ℹ️ No .rep file found for validation: ${repFilePath}`); + } + + // Process test results for UI compatibility + const results = await processTestResults(absolutePath, testStructure, stdout, stderr, exitCode, error); + + // Return the results with validation + res.json({ + filePath, + dockerImage: dockerImage || 'default-image', + testStructure, // NEW: Structured format from WASM + validationResults, // NEW: Validation results if .rep exists + ...results + }); + } catch (readError) { + console.error('Error reading test files:', readError); + res.status(500).json({ + error: `Failed to read test files: ${readError.message}`, + stderr, + stdout + }); + } + }); + } catch (error) { + console.error('Error running test:', error); + res.status(500).json({ error: `Failed to run test: ${error.message}` }); + } + }); + + // API endpoint to validate a test file + app.post('/api/validate-test', isAuthenticated, async (req, res) => { + try { + const { filePath } = req.body; + + if (!filePath) { + return res.status(400).json({ error: 'File path is required' }); + } + + // Use the user's test directory as the base + const testDir = getUserTestPath(req, WORKDIR, ROOT_DIR, getAuthConfig); + const absolutePath = path.join(testDir, filePath); + + // Basic security check to ensure the path is within the test directory + if (!absolutePath.startsWith(testDir)) { + return res.status(403).json({ error: 'Access denied' }); + } + + // Validate using WASM + try { + console.log(`πŸ” Validating test file via WASM: ${absolutePath}`); + const validationResult = await validateTestWasm(absolutePath); + console.log('βœ… WASM validation completed'); + res.json(validationResult); + } catch (wasmError) { + console.warn('WASM validation failed, returning default valid result:', wasmError.message); + res.json({ valid: true, errors: [], method: 'fallback' }); + } + } catch (error) { + console.error('Error validating test:', error); + res.status(500).json({ error: 'Failed to validate test' }); + } + }); +} \ No newline at end of file diff --git a/ui/tsconfig.app.json b/ui/tsconfig.app.json new file mode 100644 index 0000000..55a2f9b --- /dev/null +++ b/ui/tsconfig.app.json @@ -0,0 +1,20 @@ +{ + "extends": "@tsconfig/svelte/tsconfig.json", + "compilerOptions": { + "target": "ESNext", + "useDefineForClassFields": true, + "module": "ESNext", + "resolveJsonModule": true, + /** + * Typecheck JS in `.svelte` and `.js` files by default. + * Disable checkJs if you'd like to use dynamic types in JS. + * Note that setting allowJs false does not prevent the use + * of JS in `.svelte` files. 
+ */ + "allowJs": true, + "checkJs": true, + "isolatedModules": true, + "moduleDetection": "force" + }, + "include": ["src/**/*.ts", "src/**/*.js", "src/**/*.svelte"] +} diff --git a/ui/tsconfig.json b/ui/tsconfig.json new file mode 100644 index 0000000..1ffef60 --- /dev/null +++ b/ui/tsconfig.json @@ -0,0 +1,7 @@ +{ + "files": [], + "references": [ + { "path": "./tsconfig.app.json" }, + { "path": "./tsconfig.node.json" } + ] +} diff --git a/ui/tsconfig.node.json b/ui/tsconfig.node.json new file mode 100644 index 0000000..db0becc --- /dev/null +++ b/ui/tsconfig.node.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo", + "target": "ES2022", + "lib": ["ES2023"], + "module": "ESNext", + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "isolatedModules": true, + "moduleDetection": "force", + "noEmit": true, + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedSideEffectImports": true + }, + "include": ["vite.config.ts"] +} diff --git a/ui/vite.config.ts b/ui/vite.config.ts new file mode 100644 index 0000000..49b3a61 --- /dev/null +++ b/ui/vite.config.ts @@ -0,0 +1,30 @@ +import { defineConfig, loadEnv } from 'vite'; +import { svelte } from '@sveltejs/vite-plugin-svelte'; + +// https://vite.dev/config/ +export default defineConfig(({ mode }) => { + // Load env variables for the current mode + const env = loadEnv(mode, process.cwd()); + + // Get backend port and host from environment variables or use defaults + const backendPort = env.VITE_BACKEND_PORT || env.BACKEND_PORT || 3000; + const backendHost = env.VITE_HOST || env.HOST || 'localhost'; + + return { + plugins: [svelte()], + server: { + allowedHosts: true, + fs: { + allow: ['..'] + }, + proxy: { + '/api': { + target: `http://${backendHost}:${backendPort}`, + changeOrigin: true, + secure: false, + rewrite: (path) => path + } + } + } + }; +}); diff --git a/ui/wasmNodeWrapper.js b/ui/wasmNodeWrapper.js new file mode 100644 index 0000000..37ff962 --- /dev/null +++ b/ui/wasmNodeWrapper.js @@ -0,0 +1,439 @@ +// Node.js WASM wrapper for backend integration +import path from 'path'; +import { fileURLToPath } from 'url'; +import { readFileSync } from 'fs'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +// Import WASM module with proper singleton pattern +let wasmModule = null; +let wasmInitPromise = null; + +async function initWasm() { + // If already initialized, return the cached module + if (wasmModule) { + return wasmModule; + } + + // If initialization is in progress, wait for it + if (wasmInitPromise) { + await wasmInitPromise; + return wasmModule; + } + + // Start initialization + wasmInitPromise = (async () => { + try { + console.log('πŸ”„ Initializing WASM module...'); + + // Import the WASM module + const wasmPath = path.join(__dirname, 'pkg', 'wasm.js'); + const wasmImport = await import(wasmPath); + + // Load WASM binary directly in Node.js (avoid fetch) + const wasmBinaryPath = path.join(__dirname, 'pkg', 'wasm_bg.wasm'); + const wasmBinary = readFileSync(wasmBinaryPath); + + // Initialize the WASM module with binary data + await wasmImport.default(wasmBinary); + + // Cache the initialized module + wasmModule = wasmImport; + + console.log('βœ… WASM module initialized successfully for backend'); + return wasmModule; + } catch (error) { + console.error('❌ Failed 
to initialize WASM module:', error); + wasmInitPromise = null; // Reset promise on failure to allow retry + throw error; + } + })(); + + await wasmInitPromise; + return wasmModule; +} + +// WASM-based file parsing +export async function parseRecFileWasm(filePath) { + const wasm = await initWasm(); + try { + const absoluteFilePath = path.resolve(filePath); + console.log(`πŸ”„ Parsing .rec file with WASM: ${absoluteFilePath}`); + const structuredJson = wasm.read_test_file_wasm(absoluteFilePath); + + // Check if we got valid JSON + if (!structuredJson || typeof structuredJson !== 'string') { + console.warn('WASM read_test_file_wasm returned:', typeof structuredJson, structuredJson); + // Return minimal valid structure + return { + steps: [], + metadata: { + created_at: new Date().toISOString(), + version: "1.0" + } + }; + } + + return JSON.parse(structuredJson); + } catch (error) { + console.error(`❌ WASM parsing failed for ${path.resolve(filePath)}:`, error); + // Return minimal valid structure instead of throwing + return { + steps: [], + metadata: { + created_at: new Date().toISOString(), + version: "1.0" + } + }; + } +} + +// WASM-based file generation +export async function generateRecFileWasm(filePath, testStructure) { + const wasm = await initWasm(); + try { + const absoluteFilePath = path.resolve(filePath); + console.log(`πŸ”„ Generating .rec content with WASM: ${absoluteFilePath}`); + const structuredJson = JSON.stringify(testStructure); + const recContent = wasm.write_test_file_wasm(absoluteFilePath, structuredJson); + + // Check if we got valid content + if (recContent === undefined || recContent === null) { + console.warn('WASM write_test_file_wasm returned undefined/null'); + return ''; // Return empty string instead of undefined + } + + return recContent; + } catch (error) { + console.error(`❌ WASM generation failed for ${path.resolve(filePath)}:`, error); + return ''; // Return empty string instead of throwing + } +} + +// WASM-based pattern retrieval with proper git directory context +export async function getPatternsWasm(userRepoPath = null) { + const wasm = await initWasm(); + try { + const absoluteRepoPath = userRepoPath ? 
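+    // The resolved patterns form a name -> regex map, e.g. (illustrative values):
+    //   { VERSION: '[0-9]+\\.[0-9]+\\.[0-9]+', IPADDR: '([0-9]{1,3}\\.){3}[0-9]{1,3}' }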
path.resolve(userRepoPath) : null; + console.log(`πŸ”„ Getting patterns with WASM from repo: ${absoluteRepoPath || 'default'}`); + + // Use the user's repository path as context for pattern discovery + const patternsJson = wasm.get_patterns_wasm(absoluteRepoPath); + + // Check if we got valid JSON + if (!patternsJson || typeof patternsJson !== 'string') { + console.log('No patterns found or invalid response, returning empty patterns'); + return {}; + } + + try { + const patternsArray = JSON.parse(patternsJson); + + // Check if it's actually an array + if (!Array.isArray(patternsArray)) { + console.log('Patterns result is not an array, trying as object'); + // If it's already an object with pattern names, return as-is + if (typeof patternsArray === 'object') { + return patternsArray; + } + return {}; + } + + // Convert array to object format expected by UI + const patterns = {}; + patternsArray.forEach(pattern => { + if (pattern && pattern.name && pattern.pattern) { + patterns[pattern.name] = pattern.pattern; + } + }); + + return patterns; + } catch (jsonError) { + console.warn('Failed to parse patterns JSON:', jsonError.message); + return {}; + } + } catch (error) { + console.warn('WASM pattern retrieval failed:', error.message); + return {}; // Return empty patterns instead of throwing + } +} + +// WASM-based test validation +export async function validateTestWasm(recFilePath) { + const wasm = await initWasm(); + try { + const absoluteRecFilePath = path.resolve(recFilePath); + console.log(`πŸ”„ Validating test with WASM: ${absoluteRecFilePath}`); + const validationJson = wasm.validate_test_wasm(absoluteRecFilePath); + + // Check if we got valid JSON + if (!validationJson || typeof validationJson !== 'string') { + console.warn('WASM validate_test_wasm returned:', typeof validationJson, validationJson); + return { valid: true, errors: [] }; // Return default valid result + } + + return JSON.parse(validationJson); + } catch (error) { + console.error(`❌ WASM validation failed for ${path.resolve(recFilePath)}:`, error); + return { valid: true, errors: [] }; // Return default valid result instead of throwing + } +} + +// WASM-based test structure replacement +export async function replaceTestStructureWasm(filePath, oldStructure, newStructure) { + const wasm = await initWasm(); + try { + const absoluteFilePath = path.resolve(filePath); + console.log(`πŸ”„ Replacing test structure with WASM: ${absoluteFilePath}`); + const oldJson = JSON.stringify(oldStructure); + const newJson = JSON.stringify(newStructure); + const result = wasm.replace_test_structure_wasm(absoluteFilePath, oldJson, newJson); + return result; + } catch (error) { + console.error(`❌ WASM structure replacement failed for ${path.resolve(filePath)}:`, error); + throw error; + } +} + +// WASM-based test structure appending +export async function appendTestStructureWasm(filePath, appendStructure) { + const wasm = await initWasm(); + try { + const absoluteFilePath = path.resolve(filePath); + console.log(`πŸ”„ Appending test structure with WASM: ${absoluteFilePath}`); + const appendJson = JSON.stringify(appendStructure); + const result = wasm.append_test_structure_wasm(absoluteFilePath, appendJson); + return result; + } catch (error) { + console.error(`❌ WASM structure appending failed for ${path.resolve(filePath)}:`, error); + throw error; + } +} + +// Convert UI RecordingCommand format to WASM TestStructure format +export function convertUIToWasmFormat(commands) { + const testSteps = commands.map(cmd => { + if (cmd.type === 'block') { 
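+      // e.g. a UI row { command: 'blocks/setup', type: 'block' } is converted to
+      //   { Block: { path: 'blocks/setup', source_file: null } }
+      // (hypothetical path; the shapes mirror the returns just below)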
+ return { + Block: { + path: cmd.command, + source_file: cmd.blockSource || null + } + }; + } else if (cmd.type === 'comment') { + return { + Comment: cmd.command + }; + } else { + return { + Command: { + input: cmd.command, + expected_output: cmd.expectedOutput || '', + actual_output: cmd.actualOutput || null + } + }; + } + }); + + return { + steps: testSteps, + metadata: { + created_at: new Date().toISOString(), + version: "1.0" + } + }; +} + +// Convert WASM TestStructure format to UI RecordingCommand format +export function convertWasmToUIFormat(testStructure) { + if (!testStructure.steps) { + throw new Error('Invalid test structure: missing steps'); + } + + const commands = []; + + for (const step of testStructure.steps) { + // Handle new structure format (with step.type) + if (step.type) { + switch (step.type) { + case 'statement': + // Statements don't become commands in UI + break; + case 'input': + commands.push({ + command: step.content, + type: 'command', + status: 'pending' + }); + break; + case 'expected_output': + // Expected output is handled separately + break; + case 'block': + commands.push({ + command: `@${step.args[0]}`, + type: 'block', + status: 'pending', + isBlockCommand: false + }); + break; + } + } + // Handle old structure format (with step.Block, step.Command, etc.) + else if (step.Block) { + commands.push({ + command: step.Block.path, + type: 'block', + status: 'pending', + blockSource: step.Block.source_file, + isBlockCommand: false + }); + } else if (step.Comment) { + commands.push({ + command: step.Comment, + type: 'comment', + status: 'pending' + }); + } else if (step.Command) { + commands.push({ + command: step.Command.input, + expectedOutput: step.Command.expected_output, + actualOutput: step.Command.actual_output, + type: 'command', + status: 'pending' + }); + } else { + throw new Error(`Unknown step type: ${JSON.stringify(step)}`); + } + } + + return commands; +} + +// ===== NEW WASM-COMPATIBLE FUNCTIONS (NO FILE SYSTEM OPERATIONS) ===== + +// WASM-based file parsing using file content map (WASM-compatible) +export async function parseRecFileFromMapWasm(filePath, fileMap) { + const wasm = await initWasm(); + + // Validate that the WASM function is available + if (!wasm.read_test_file_from_map_wasm) { + throw new Error('WASM function read_test_file_from_map_wasm is not available'); + } + + try { + console.log(`πŸ”„ Parsing .rec file from map with WASM: ${filePath}`); + const fileMapJson = JSON.stringify(fileMap); + const structuredJson = wasm.read_test_file_from_map_wasm(filePath, fileMapJson); + + // Check if we got valid JSON + if (!structuredJson || typeof structuredJson !== 'string') { + console.warn('WASM read_test_file_from_map_wasm returned:', typeof structuredJson, structuredJson); + return { + steps: [], + metadata: { + created_at: new Date().toISOString(), + file_path: filePath + } + }; + } + + const parsed = JSON.parse(structuredJson); + + // Check for errors in the parsed result + if (parsed.error) { + console.error('WASM parsing error:', parsed.error); + throw new Error(parsed.error); + } + + console.log(`βœ… Successfully parsed .rec file from map: ${filePath}`); + return parsed; + } catch (error) { + console.error(`❌ WASM file parsing from map failed for ${filePath}:`, error); + throw error; + } +} + +// WASM-based file generation to content map (WASM-compatible) +export async function generateRecFileToMapWasm(filePath, testStructure) { + const wasm = await initWasm(); + + // Validate that the WASM function is available + if 
(!wasm.write_test_file_to_map_wasm) { + throw new Error('WASM function write_test_file_to_map_wasm is not available'); + } + + try { + console.log(`πŸ”„ Generating .rec file to map with WASM: ${filePath}`); + const structureJson = JSON.stringify(testStructure); + const fileMapJson = wasm.write_test_file_to_map_wasm(filePath, structureJson); + + if (!fileMapJson || typeof fileMapJson !== 'string') { + throw new Error('WASM write_test_file_to_map_wasm returned invalid result'); + } + + const parsed = JSON.parse(fileMapJson); + + // Check for errors in the parsed result + if (parsed.error) { + console.error('WASM generation error:', parsed.error); + throw new Error(parsed.error); + } + + console.log(`βœ… Successfully generated .rec file to map: ${filePath}`); + return parsed; // Returns file map with path -> content + } catch (error) { + console.error(`❌ WASM file generation to map failed for ${filePath}:`, error); + throw error; + } +} + +// WASM-based test validation using file content map (WASM-compatible) +export async function validateTestFromMapWasm(recFilePath, fileMap, patterns = null) { + const wasm = await initWasm(); + + // Validate that the WASM function is available + if (!wasm.validate_test_from_map_wasm) { + throw new Error('WASM function validate_test_from_map_wasm is not available'); + } + + try { + console.log(`πŸ”„ Validating test from map with WASM: ${recFilePath}`); + console.log(`πŸ”₯ WASM WRAPPER PATTERNS DUMP:`, patterns ? JSON.stringify(patterns, null, 2) : 'NULL'); + console.log(`πŸ”₯ WASM WRAPPER PATTERNS COUNT:`, patterns ? Object.keys(patterns).length : 0); + + const fileMapJson = JSON.stringify(fileMap); + const patternsJson = patterns ? JSON.stringify(patterns) : null; + + console.log(`πŸ”₯ CALLING WASM WITH PATTERNS JSON:`, patternsJson); + + const validationJson = wasm.validate_test_from_map_wasm(recFilePath, fileMapJson, patternsJson); + + // Check if we got valid JSON + if (!validationJson || typeof validationJson !== 'string') { + console.warn('WASM validate_test_from_map_wasm returned:', typeof validationJson, validationJson); + return { success: true, errors: [], summary: 'No validation performed' }; + } + + const parsed = JSON.parse(validationJson); + + // Check for errors in the parsed result + if (parsed.error) { + console.error('WASM validation error:', parsed.error); + throw new Error(parsed.error); + } + + console.log(`βœ… Successfully validated test from map: ${recFilePath}`); + if (patterns && Object.keys(patterns).length > 0) { + console.log(`πŸ“‹ Used ${Object.keys(patterns).length} patterns for validation`); + } + return parsed; + } catch (error) { + console.error(`❌ WASM test validation from map failed for ${recFilePath}:`, error); + throw error; + } +} + +// Export the initialization function for explicit control +export { initWasm }; diff --git a/wasm/.gitignore b/wasm/.gitignore new file mode 100644 index 0000000..615e7ab --- /dev/null +++ b/wasm/.gitignore @@ -0,0 +1,3 @@ +/target +/pkg + diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock new file mode 100644 index 0000000..87f0ae7 --- /dev/null +++ b/wasm/Cargo.lock @@ -0,0 +1,277 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "bumpalo" +version = "3.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if", + "wasm-bindgen", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "parser" +version = "0.1.0" +dependencies = [ + "anyhow", + "regex", + "serde", + "serde_json", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rustversion" +version = "1.0.20" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "syn" +version = "2.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "wasm" +version = "0.1.0" +dependencies = [ + "console_error_panic_hook", + "js-sys", + "once_cell", + "parser", + "regex", + "serde", + "serde_json", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "serde", + "serde_json", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] diff --git a/wasm/Cargo.toml 
b/wasm/Cargo.toml new file mode 100644 index 0000000..197d8ab --- /dev/null +++ b/wasm/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "wasm" +version = "0.1.0" +edition = "2021" +authors = ["Manticore Software Ltd "] +license = "Apache-2.0" + +[lib] +crate-type = ["cdylib"] + +[dependencies] +wasm-bindgen = { version = "0.2", features = ["serde-serialize"] } +regex = "1.9.1" +once_cell = "1.8.0" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +parser = { path = "../parser" } +js-sys = "0.3" +web-sys = { version = "0.3", features = ["console"] } + +[features] +default = ["console_error_panic_hook"] +console_error_panic_hook = ["dep:console_error_panic_hook"] + +[dependencies.console_error_panic_hook] +version = "0.1.7" +optional = true + +[profile.release] +strip = true +opt-level = 3 +lto = true +codegen-units = 1 + +[package.metadata.wasm-pack.profile.release] +wasm-opt = false \ No newline at end of file diff --git a/wasm/README.md b/wasm/README.md new file mode 100644 index 0000000..76e9ba8 --- /dev/null +++ b/wasm/README.md @@ -0,0 +1,205 @@ +# wasm-diff + +This library provides a text-diffing solution with support for variable pattern matching. It is written in Rust and compiled to WebAssembly (WASM), enabling its use in both web and non-web JavaScript applications. The main functionality is exposed via the `PatternMatcher` struct which can compare two strings line-by-line, taking into account static parts and dynamic pattern segments. + +## Table of Contents + +- [Features](#features) +- [Installation](#installation) +- [Compilation to WASM](#compilation-to-wasm) +- [Usage in JavaScript](#usage-in-javascript) +- [API Overview](#api-overview) +- [How It Works](#how-it-works) +- [License](#license) + +## Features + +- **Pattern Replacement**: Replace variables matching `%{VAR_NAME}` syntax with configured patterns. +- **Line-by-Line Diff**: Compare two multi-line strings returning an object with detailed change information. +- **Highlighting**: Computes character-level diff ranges to highlight differences between lines. +- **WASM & JS Integration**: Easily compile to WASM and use the library from JavaScript. + +## Installation + +First, ensure you have the following tools installed: +- [Rust](https://www.rust-lang.org/tools/install) +- [wasm-pack](https://rustwasm.github.io/wasm-pack/installer/) (for easy compilation to WebAssembly) + +Clone this repository or include the source in your project. + +## Compilation to WASM + +There are two primary methods to compile the library to WebAssembly: + +### 1. Using `wasm-pack` + +The simplest way to compile and bundle your library is with `wasm-pack`. + +1. **Install wasm-pack** (if not installed): + + ```bash + cargo install wasm-pack + ``` + +2. **Build the package**: + + In the project directory, run: + + ```bash + RUSTFLAGS='-C target-feature=+bulk-memory' wasm-pack build --release --target web --out-dir ../ui/pkg --quiet + ``` + + This command compiles the Rust code to WASM and generates a `pkg/` directory with your WASM module and JavaScript bindings. + +### 2. Using `cargo` Directly + +If you prefer, you can compile using Cargo and then use a bundler (like webpack or rollup) along with [`wasm-bindgen`](https://github.com/rustwasm/wasm-bindgen). + +1. **Compile to WASM**: + + ```bash + cargo build --target wasm32-unknown-unknown --release + ``` + +2. 
+2. **Run wasm-bindgen**:
+
+   ```bash
+   wasm-bindgen target/wasm32-unknown-unknown/release/<crate_name>.wasm --out-dir pkg --target bundler
+   ```
+
+   Replace `<crate_name>` with the actual crate name (`wasm` for this package, per `Cargo.toml`).
+
+## Usage in JavaScript
+
+Once you have compiled your WASM module (using one of the methods above), you can use it in a JavaScript project.
+
+### Example with ES Modules
+
+Assuming you built the module with `wasm-pack` and have the `pkg` folder in your project:
+
+```html
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8" />
+    <title>WASM Pattern Matcher Demo</title>
+  </head>
+  <body>
+    <script type="module">
+      import init, { PatternMatcher } from "./pkg/wasm.js";
+
+      async function run() {
+        await init();
+
+        const matcher = new PatternMatcher(JSON.stringify({
+          "A": "[a-zA-Z]+",
+          "B": "\\d+"
+        }));
+
+        const diffJson = matcher.diff_text("Hello %{A}", "Hello World");
+        console.log(JSON.parse(diffJson));
+      }
+
+      run();
+    </script>
+  </body>
+</html>
+```
+
+### Example with Node.js
+
+To use the WASM module in a Node.js environment, ensure your Node.js version supports ES modules or configure proper bundling. With Node.js v14+ and ES modules:
+
+```javascript
+// index.js
+import init, { PatternMatcher } from "./pkg/wasm.js";
+
+async function run() {
+  await init();
+
+  // Create a new instance of PatternMatcher (with or without configuration)
+  const matcher = new PatternMatcher(JSON.stringify({
+    "A": "[a-zA-Z]+",
+    "B": "\\d+"
+  }));
+
+  const expectedText = "Hello %{A}\nLine two\nBye %{B}";
+  const actualText = "Hello Universe\nLine two updated\nBye 5678";
+
+  const diffJson = matcher.diff_text(expectedText, actualText);
+  console.log(JSON.parse(diffJson));
+}
+
+run();
+```
+
+Then simply run:
+
+```bash
+node index.js
+```
+
+## API Overview
+
+### `new(patterns_json: Option<String>)`
+
+- **Description**: Constructs a new `PatternMatcher` object.
+- **Parameters**:
+  - `patterns_json`: A JSON string representing a map of variable names to regex patterns. When provided, each occurrence of `%{VAR_NAME}` in the input text will be replaced by the corresponding pattern (wrapped with delimiters).
+- **Returns**: A new instance of `PatternMatcher`.
+
+### `diff_text(expected: &str, actual: &str) -> String`
+
+- **Description**: Compares two multi-line strings line by line.
+- **Parameters**:
+  - `expected`: The expected text (can include variables like `%{A}`).
+  - `actual`: The actual text to compare against.
+- **Returns**: A JSON string representing a `DiffResult` object that includes:
+  - `has_diff`: A boolean flag indicating whether any differences were found.
+  - `diff_lines`: An array where each element represents a diff result for a line with the type:
+    - `"same"`: Lines are identical.
+    - `"added"`: Lines added in the actual text.
+    - `"removed"`: Lines missing from the actual text.
+    - `"changed"`: Lines that differ, along with highlighted ranges marking the differences.
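To make the return value concrete, here is a sketch of what a parsed `diff_text` result looks like for a small input, reusing the `matcher` from the Node.js example above (field names match the Rust structs in `src/lib.rs`; the exact ranges depend on the inputs):

```javascript
// Illustrative sketch of the DiffResult shape after JSON.parse.
const result = JSON.parse(matcher.diff_text("Hello %{A}\nBye", "Hello World\nBye!"));
// {
//   "has_diff": true,
//   "diff_lines": [
//     { "line_type": "same",    "content": "Hello World", "old_content": null, "highlight_ranges": null },
//     { "line_type": "changed", "content": "Bye!", "old_content": "Bye",
//       "highlight_ranges": [ { "start": 3, "end": 4 } ] }
//   ]
// }
if (result.has_diff) {
  for (const line of result.diff_lines) {
    console.log(line.line_type, line.content);
  }
}
```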
+
+### Internal Functions
+
+The library also implements several helper functions:
+
+- **`replace_vars_to_patterns`**: Replaces occurrences of `%{VAR_NAME}` in the text using the provided configuration.
+- **`split_into_parts`**: Splits a line into alternating static and pattern parts based on custom delimiters (`#!/` and `/!#`).
+- **`has_diff`**: Uses the aforementioned functions to determine whether a line in the actual text differs from the expected text after applying variable replacements.
+- **`compute_diff_ranges`**: Performs a simple character-level diff (via common prefix/suffix detection) to compute ranges for highlighting.
+
+## How It Works
+
+1. **Variable Replacement**:
+   When calling `diff_text`, the expected text lines are processed to replace any variable patterns of the form `%{VAR_NAME}` with custom regex patterns sourced from the provided configuration. Each matching variable is transformed into a token with custom delimiters (`#!/` and `/!#`), allowing later splitting.
+
+2. **Parsing and Matching**:
+   Each processed expected line is split into static and pattern parts. The comparison function (`has_diff`) then iterates over these parts. For static parts, an exact match is expected at the corresponding location in the actual line. For dynamic pattern parts, the function builds a regex to match the expected content. If any segment does not match, the line is flagged as different.
+
+3. **Line Diff Calculation**:
+   Once a difference is found, the `compute_diff_ranges` function compares the two lines character by character. It determines the common prefix and suffix and highlights the middle "changed" segment in the result. This information is provided back in the `diff_lines` array inside the diff result.
+
+4. **Result Serialization**:
+   The final diff result is a JSON string that can be deserialized and easily used in JavaScript applications to render differences, e.g., with syntax highlighting or diff views.
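Steps 1 and 2 are easiest to see on a single line. The JavaScript sketch below mimics, purely for illustration, what the Rust internals do with the delimiters (`#!/`, `/!#`); the real implementation also handles multiple pattern segments per line and verifies that the whole line is consumed:

```javascript
// Step 1: variable replacement. "%{B}" becomes a delimited regex token.
const patterns = { B: "\\d+" };
const expected = "Bye %{B}";
const replaced = expected.replace(/%\{[A-Z][A-Z_0-9]*\}/g, (m) => {
  const key = m.slice(2, -1); // strip "%{" and "}"
  return key in patterns ? `#!/${patterns[key]}/!#` : m;
});
// replaced is now 'Bye #!/\d+/!#': a static part "Bye " plus a pattern part "\d+"

// Step 2: split into static and pattern parts, then match against the actual line.
const actual = "Bye 5678";
const [staticPart, rest] = replaced.split("#!/"); // ["Bye ", "\\d+/!#"]
const pattern = rest.split("/!#")[0];             // "\\d+"
const matches =
  actual.startsWith(staticPart) &&
  new RegExp(pattern).test(actual.slice(staticPart.length));
console.log(matches); // true: the lines are treated as equal
```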
+
+## License
+
+[Apache-2.0 License](LICENSE)
diff --git a/wasm/src/lib.rs b/wasm/src/lib.rs
new file mode 100644
index 0000000..da89be4
--- /dev/null
+++ b/wasm/src/lib.rs
@@ -0,0 +1,406 @@
+use wasm_bindgen::prelude::*;
+use regex::Regex;
+use std::collections::HashMap;
+use serde::{Serialize, Deserialize};
+use once_cell::sync::Lazy;
+use parser::{TestStructure, read_test_file, write_test_file, replace_test_structure, append_test_structure, get_patterns, read_test_file_from_map, write_test_file_to_map, validate_test_from_map, validate_test_from_map_with_patterns};
+
+static VAR_REGEX: Lazy<Regex> = Lazy::new(|| {
+    Regex::new(r"%\{[A-Z]{1}[A-Z_0-9]*\}").unwrap()
+});
+
+// ===== EXISTING DIFF TYPES =====
+#[derive(Serialize, Deserialize)]
+pub struct DiffResult {
+    has_diff: bool,
+    diff_lines: Vec<DiffLine>,
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct DiffLine {
+    line_type: String, // "same", "added", "removed", or "changed"
+    content: String,
+    old_content: Option<String>, // Only used for "changed" lines
+    highlight_ranges: Option<Vec<HighlightRange>>,
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct HighlightRange {
+    start: usize,
+    end: usize,
+}
+
+// ===== EXISTING PATTERN MATCHER =====
+#[wasm_bindgen]
+pub struct PatternMatcher {
+    config: HashMap<String, String>,
+}
+
+#[wasm_bindgen]
+impl PatternMatcher {
+    #[wasm_bindgen(constructor)]
+    pub fn new(patterns_json: Option<String>) -> Self {
+        let config = match patterns_json {
+            Some(json) => {
+                let patterns: HashMap<String, String> = serde_json::from_str(&json).unwrap_or_default();
+                patterns.into_iter().map(|(k, v)| {
+                    (k, format!("#!/{}/!#", v))
+                }).collect()
+            },
+            None => HashMap::new(),
+        };
+
+        Self { config }
+    }
+
+    #[wasm_bindgen]
+    pub fn diff_text(&self, expected: &str, actual: &str) -> String {
+        let expected_lines: Vec<&str> = expected.lines().collect();
+        let actual_lines: Vec<&str> = actual.lines().collect();
+
+        let mut result = DiffResult {
+            has_diff: false,
+            diff_lines: Vec::new(),
+        };
+
+        let max_lines = std::cmp::max(expected_lines.len(), actual_lines.len());
+        for i in 0..max_lines {
+            match (expected_lines.get(i), actual_lines.get(i)) {
+                (Some(exp), Some(act)) => {
+                    if self.has_diff(exp.to_string(), act.to_string()) {
+                        result.has_diff = true;
+                        // Lines are different
+                        let (_ranges1, ranges2) = self.compute_diff_ranges(exp, act);
+
+                        result.diff_lines.push(DiffLine {
+                            line_type: "changed".to_string(),
+                            content: act.to_string(),
+                            old_content: Some(exp.to_string()),
+                            highlight_ranges: Some(ranges2),
+                        });
+                    } else {
+                        // Lines are same
+                        result.diff_lines.push(DiffLine {
+                            line_type: "same".to_string(),
+                            content: act.to_string(),
+                            old_content: None,
+                            highlight_ranges: None,
+                        });
+                    }
+                },
+                (Some(exp), None) => {
+                    // Line removed
+                    result.has_diff = true;
+                    result.diff_lines.push(DiffLine {
+                        line_type: "removed".to_string(),
+                        content: exp.to_string(),
+                        old_content: None,
+                        highlight_ranges: None,
+                    });
+                },
+                (None, Some(act)) => {
+                    // Line added
+                    result.has_diff = true;
+                    result.diff_lines.push(DiffLine {
+                        line_type: "added".to_string(),
+                        content: act.to_string(),
+                        old_content: None,
+                        highlight_ranges: None,
+                    });
+                },
+                _ => {},
+            }
+        }
+
+        serde_json::to_string(&result).unwrap_or_else(|_| "{\"error\": \"Failed to serialize diff result\"}".to_string())
+    }
+
+    fn has_diff(&self, rec_line: String, rep_line: String) -> bool {
+        let rec_line = self.replace_vars_to_patterns(rec_line);
+        let parts = self.split_into_parts(&rec_line);
+        let mut last_index = 0;
+
+        for part in parts {
+            match part {
+                MatchingPart::Static(static_part) => {
+                    if rep_line[last_index..].starts_with(&static_part) {
+                        last_index += static_part.len();
+                    } else {
+                        return true;
+                    }
+                }
+                MatchingPart::Pattern(pattern) => {
+                    let pattern_regex = Regex::new(&pattern).unwrap_or(Regex::new(".*").unwrap());
+                    if let Some(mat) = pattern_regex.find(&rep_line[last_index..]) {
+                        last_index += mat.end();
+                    } else {
+                        return true;
+                    }
+                }
+            }
+        }
+
+        last_index != rep_line.len()
+    }
+
+    fn split_into_parts(&self, rec_line: &str) -> Vec<MatchingPart> {
+        let mut parts = Vec::new();
+
+        let first_splits: Vec<&str> = rec_line.split("#!/").collect();
+        for (i, first_split) in first_splits.iter().enumerate() {
+            if i == 0 {
+                // First part is always static
+                if !first_split.is_empty() {
+                    parts.push(MatchingPart::Static(first_split.to_string()));
+                }
+                continue;
+            }
+
+            let second_splits: Vec<&str> = first_split.split("/!#").collect();
+            if second_splits.len() >= 2 {
+                // First part is the pattern
+                parts.push(MatchingPart::Pattern(second_splits[0].to_string()));
+                // Second part is static text
+                if second_splits.len() > 1 && !second_splits[1].is_empty() {
+                    parts.push(MatchingPart::Static(second_splits[1..].join("/!#")));
+                }
+            } else {
+                // If no closing pattern delimiter, treat as static
+                parts.push(MatchingPart::Static(format!("#!/{}", first_split)));
+            }
+        }
+        parts
+    }
+
+    fn replace_vars_to_patterns(&self, line: String) -> String {
+        VAR_REGEX.replace_all(&line, |caps: &regex::Captures| {
+            let matched = &caps[0];
+            let key = matched[2..matched.len() - 1].to_string();
+            self.config.get(&key).unwrap_or(&matched.to_string()).clone()
+        }).into_owned()
+    }
+
+    fn compute_diff_ranges(&self, old_line: &str, new_line: &str) -> (Vec<HighlightRange>, Vec<HighlightRange>) {
+        // Simple char-by-char diff implementation
+        let old_chars: Vec<char> = old_line.chars().collect();
+        let new_chars: Vec<char> = new_line.chars().collect();
+
+        // Compute common prefix length
+        let prefix_len = old_chars
+            .iter()
+            .zip(new_chars.iter())
+            .take_while(|(c1, c2)| c1 == c2)
+            .count();
+
+        // Compute common suffix length
+        let mut suffix_len = 0;
+        let max_suffix = std::cmp::min(
+            old_chars.len().saturating_sub(prefix_len),
+            new_chars.len().saturating_sub(prefix_len)
+        );
+
+        for i in 0..max_suffix {
+            let old_idx = old_chars.len() - 1 - i;
+            let new_idx = new_chars.len() - 1 - i;
+            if old_chars[old_idx] != new_chars[new_idx] {
+                break;
+            }
+            suffix_len += 1;
+        }
+
+        // Create highlight ranges
+        let old_ranges = if prefix_len < old_chars.len().saturating_sub(suffix_len) {
+            vec![HighlightRange {
+                start: prefix_len,
+                end: old_chars.len().saturating_sub(suffix_len),
+            }]
+        } else {
+            vec![]
+        };
+
+        let new_ranges = if prefix_len < new_chars.len().saturating_sub(suffix_len) {
+            vec![HighlightRange {
+                start: prefix_len,
+                end: new_chars.len().saturating_sub(suffix_len),
+            }]
+        } else {
+            vec![]
+        };
+
+        (old_ranges, new_ranges)
+    }
+}
+
+enum MatchingPart {
+    Static(String),
+    Pattern(String),
+}
+
+// ===== REC FILE PARSING WASM BINDINGS =====
+
+/// Convert a .rec file to structured JSON format
+#[wasm_bindgen]
+pub fn read_test_file_wasm(test_file_path: &str) -> String {
+    match read_test_file(test_file_path) {
+        Ok(structure) => serde_json::to_string(&structure).unwrap_or_else(|e| {
+            format!("{{\"error\": \"Failed to serialize result: {}\"}}", e)
+        }),
+        Err(e) => format!("{{\"error\": \"{}\"}}", e),
+    }
+}
+
+/// Convert structured JSON format back to .rec file content
+#[wasm_bindgen]
+pub fn write_test_file_wasm(test_file_path: &str, test_structure_json: &str) -> String {
+    let test_structure: TestStructure = match serde_json::from_str(test_structure_json) {
+        Ok(s) => s,
+        Err(e) => return format!("{{\"error\": \"Invalid JSON: {}\"}}", e),
+    };
+
+    match write_test_file(test_file_path, &test_structure) {
+        Ok(()) => "{\"success\": true}".to_string(),
+        Err(e) => format!("{{\"error\": \"{}\"}}", e),
+    }
+}
+
+/// Replace old test structure with new test structure in existing file
+#[wasm_bindgen]
+pub fn replace_test_structure_wasm(
+    test_file_path: &str,
+    old_structure_json: &str,
+    new_structure_json: &str,
+) -> String {
+    let old_structure: TestStructure = match serde_json::from_str(old_structure_json) {
+        Ok(s) => s,
+        Err(e) => return format!("{{\"error\": \"Invalid old structure JSON: {}\"}}", e),
+    };
+
+    let new_structure: TestStructure = match serde_json::from_str(new_structure_json) {
+        Ok(s) => s,
+        Err(e) => return format!("{{\"error\": \"Invalid new structure JSON: {}\"}}", e),
+    };
+
+    match replace_test_structure(test_file_path, &old_structure, &new_structure) {
+        Ok(()) => "{\"success\": true}".to_string(),
+        Err(e) => format!("{{\"error\": \"{}\"}}", e),
+    }
+}
+
+/// Append test structure to existing file
+#[wasm_bindgen]
+pub fn append_test_structure_wasm(test_file_path: &str, append_structure_json: &str) -> String {
+    let append_structure: TestStructure = match serde_json::from_str(append_structure_json) {
+        Ok(s) => s,
+        Err(e) => return format!("{{\"error\": \"Invalid structure JSON: {}\"}}", e),
+    };
+
+    match append_test_structure(test_file_path, &append_structure) {
+        Ok(steps_added) => format!("{{\"success\": true, \"steps_added\": {}}}", steps_added),
+        Err(e) => format!("{{\"error\": \"{}\"}}", e),
+    }
+}
+
+/// Get all available patterns from system and project .clt/patterns files
+#[wasm_bindgen]
+pub fn get_patterns_wasm(clt_binary_path: Option<String>) -> String {
+    match get_patterns(clt_binary_path.as_deref()) {
+        Ok(patterns) => serde_json::to_string(&patterns).unwrap_or_else(|e| {
+            format!("{{\"error\": \"Failed to serialize patterns: {}\"}}", e)
+        }),
+        Err(e) => format!("{{\"error\": \"{}\"}}", e),
+    }
+}
+
+/// Validate a test by comparing .rec file with its .rep result file (WASM binding)
+#[wasm_bindgen]
+pub fn validate_test_wasm(rec_file_path: &str) -> String {
+    match parser::validate_test(rec_file_path) {
+        Ok(result) => serde_json::to_string(&result).unwrap_or_else(|e| {
+            format!("{{\"error\": \"Failed to serialize result: {}\"}}", e)
+        }),
+        Err(e) => format!("{{\"error\": \"{}\"}}", e),
+    }
+}
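All of the bindings above return a JSON string rather than throwing, and signal failure via an `{"error": ...}` object; they also assume an environment where the parser can reach a real file system (in the browser, prefer the map-based variants that follow). A small hypothetical helper, not part of the package, makes the convention explicit on the JavaScript side (the `.rec` path is illustrative):

```javascript
import init, { read_test_file_wasm } from "./pkg/wasm.js";

// Hypothetical helper: parse a binding's JSON-string result and
// turn the {"error": ...} convention into a thrown exception.
function unwrap(jsonString) {
  const value = JSON.parse(jsonString);
  if (value && value.error) {
    throw new Error(value.error);
  }
  return value;
}

await init();
const structure = unwrap(read_test_file_wasm("tests/demo.rec")); // illustrative path
console.log(structure);
```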
+
+// ===== NEW WASM-COMPATIBLE FUNCTIONS (NO FILE SYSTEM OPERATIONS) =====
+
+/// WASM-compatible function to parse .rec file using file content map
+/// This avoids file system operations that are not supported in WASM
+#[wasm_bindgen]
+pub fn read_test_file_from_map_wasm(main_file_path: &str, file_map_json: &str) -> String {
+    // Parse the file map from JSON
+    let file_map: HashMap<String, String> = match serde_json::from_str(file_map_json) {
+        Ok(map) => map,
+        Err(e) => return format!("{{\"error\": \"Invalid file map JSON: {}\"}}", e),
+    };
+
+    match read_test_file_from_map(main_file_path, &file_map) {
+        Ok(structure) => serde_json::to_string(&structure).unwrap_or_else(|e| {
+            format!("{{\"error\": \"Failed to serialize result: {}\"}}", e)
+        }),
+        Err(e) => format!("{{\"error\": \"{}\"}}", e),
+    }
+}
+
+/// WASM-compatible function to convert test structure to file content map
+/// Returns a JSON object with file paths as keys and content as values
+#[wasm_bindgen]
+pub fn write_test_file_to_map_wasm(test_file_path: &str, test_structure_json: &str) -> String {
+    let test_structure: TestStructure = match serde_json::from_str(test_structure_json) {
+        Ok(s) => s,
+        Err(e) => return format!("{{\"error\": \"Invalid JSON: {}\"}}", e),
+    };
+
+    match write_test_file_to_map(test_file_path, &test_structure) {
+        Ok(file_map) => serde_json::to_string(&file_map).unwrap_or_else(|e| {
+            format!("{{\"error\": \"Failed to serialize file map: {}\"}}", e)
+        }),
+        Err(e) => format!("{{\"error\": \"{}\"}}", e),
+    }
+}
+
+/// WASM-compatible function to validate test using file content map
+/// This avoids file system operations that are not supported in WASM
+#[wasm_bindgen]
+pub fn validate_test_from_map_wasm(
+    rec_file_path: &str,
+    file_map_json: &str,
+    patterns_json: Option<String>
+) -> String {
+    // Parse the file map from JSON
+    let file_map: HashMap<String, String> = match serde_json::from_str(file_map_json) {
+        Ok(map) => map,
+        Err(e) => return format!("{{\"error\": \"Invalid file map JSON: {}\"}}", e),
+    };
+
+    // Parse patterns if provided
+    let patterns = if let Some(patterns_str) = patterns_json {
+        // Log patterns received in WASM
+        web_sys::console::log_1(&format!("πŸ”₯ WASM RECEIVED PATTERNS JSON: {}", patterns_str).into());
+
+        match serde_json::from_str::<HashMap<String, String>>(&patterns_str) {
+            Ok(p) => {
+                web_sys::console::log_1(&format!("πŸ”₯ WASM PARSED {} PATTERNS", p.len()).into());
+                web_sys::console::log_1(&format!("πŸ”₯ WASM PATTERN KEYS: {:?}", p.keys().collect::<Vec<_>>()).into());
+                if let Some(version_pattern) = p.get("VERSION") {
+                    web_sys::console::log_1(&format!("πŸ”₯ WASM VERSION PATTERN: {}", version_pattern).into());
+                }
+                Some(p)
+            },
+            Err(e) => {
+                web_sys::console::log_1(&format!("πŸ”₯ WASM PATTERN PARSE ERROR: {}", e).into());
+                return format!("{{\"error\": \"Invalid patterns JSON: {}\"}}", e);
+            }
+        }
+    } else {
+        web_sys::console::log_1(&"πŸ”₯ WASM NO PATTERNS PROVIDED".into());
+        None
+    };
+
+    match validate_test_from_map_with_patterns(rec_file_path, &file_map, patterns) {
+        Ok(result) => serde_json::to_string(&result).unwrap_or_else(|e| {
+            format!("{{\"error\": \"Failed to serialize validation result: {}\"}}", e)
+        }),
+        Err(e) => format!("{{\"error\": \"{}\"}}", e),
+    }
+}
\ No newline at end of file
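For browser use, the map-based bindings take a JSON object mapping file paths to file contents instead of touching the file system. Below is a minimal sketch of validating a test entirely in memory; the paths, file contents, and the `VERSION` regex value are illustrative placeholders (only the `VERSION` key itself is referenced by the library's logging above):

```javascript
import init, { read_test_file_from_map_wasm, validate_test_from_map_wasm } from "./pkg/wasm.js";

await init();

// In-memory "file system": paths mapped to file contents (contents illustrative).
const fileMap = JSON.stringify({
  "test.rec": "<recorded .rec content>",
  "test.rep": "<replayed .rep content>",
});

// Parse the .rec file into its structured JSON form.
const structure = JSON.parse(read_test_file_from_map_wasm("test.rec", fileMap));

// Validate the recording against its .rep result, supplying optional patterns.
const patterns = JSON.stringify({ "VERSION": "\\d+\\.\\d+\\.\\d+" });
const validation = JSON.parse(validate_test_from_map_wasm("test.rec", fileMap, patterns));

console.log(structure, validation);
```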