
Commit

Fix clippy warnings
Glyphack committed Sep 1, 2024
1 parent 9182078 commit 0257150
Showing 14 changed files with 129 additions and 120 deletions.
45 changes: 33 additions & 12 deletions parser/ast_python.py → compat/ast_python.py
@@ -1,25 +1,30 @@
import sys
import ast
from _ast import AST # Python internals I guess?
from _ast import AST # Python internals I guess?
import argparse
import pathlib
import codecs
import json

arg_parser = argparse.ArgumentParser(
    description="Parse a Python program to AST."
)
arg_parser = argparse.ArgumentParser(description="Parse a Python program to AST.")
arg_parser.add_argument("--input-file", help="Read and parse input file.")
arg_parser.add_argument("--stdin", action="store_true", help="Read and parse input from stdin.")
arg_parser.add_argument("--type-comments", action="store_true", help="Produce an AST with type comments.")
arg_parser.add_argument(
    "--stdin", action="store_true", help="Read and parse input from stdin."
)
arg_parser.add_argument(
    "--type-comments", action="store_true", help="Produce an AST with type comments."
)
args = arg_parser.parse_args()

if args.input_file is not None:
    source = pathlib.Path(args.input_file).read_text()
elif args.stdin:
    source = sys.stdin.read()
else:
    print("Missing input parameter. Please specify one of --input-file or --stdin.", file=sys.stderr)
    print(
        "Missing input parameter. Please specify one of --input-file or --stdin.",
        file=sys.stderr,
    )
    sys.exit(1)

# ----- Begin inline dependency -------------------------------------------------------------------
@@ -53,16 +58,19 @@

BUILTIN_PURE = (int, float, bool)
BUILTIN_BYTES = (bytearray, bytes)
BUILTIN_STR = (str)
BUILTIN_STR = str


def decode_str(value):
    return value


def decode_bytes(value):
    try:
        return value.decode('utf-8')
        return value.decode("utf-8")
    except:
        return codecs.getencoder('hex_codec')(value)[0].decode('utf-8')
        return codecs.getencoder("hex_codec")(value)[0].decode("utf-8")


def ast2json(node):
    assert isinstance(node, AST)
@@ -72,8 +80,13 @@ def ast2json(node):
        if attr.startswith("_") or attr == "n" or attr == "s":
            continue
        to_return[attr] = get_value(getattr(node, attr))
    # Drop position attributes so AST comparisons ignore source locations.
    to_return.pop("lineno", None)
    to_return.pop("end_lineno", None)
    to_return.pop("col_offset", None)
    to_return.pop("end_col_offset", None)
    return to_return


def get_value(attr_value):
    if attr_value is None:
        return attr_value
@@ -92,11 +105,19 @@ def get_value(attr_value):
    if isinstance(attr_value, type(Ellipsis)):
        return "..."
    else:
        raise Exception("Unknown case for '%s' of type '%s'" % (attr_value, type(attr_value)))
        raise Exception(
            "Unknown case for '%s' of type '%s'" % (attr_value, type(attr_value))
        )


# -------------------------------------------------------------------- End inline dependency ------


tree = ast.parse(source, filename=args.input_file or "stdin", mode="exec", type_comments=args.type_comments)
tree = ast.parse(
    source,
    filename=args.input_file or "stdin",
    mode="exec",
    type_comments=args.type_comments,
)
tree_json = ast2json(tree)
print(json.dumps(tree_json, indent=4))
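
For reference, the script reads Python source (from a file or stdin) and prints the AST as JSON, with the location fields popped above so downstream comparisons ignore positions. A minimal sketch of driving it from a harness; the repo-root working directory and the sample input are assumptions:

```python
import json
import subprocess
import sys

# Hypothetical driver: pipe a snippet into the compat script and parse its JSON output.
# Assumes it is run from the repository root, where compat/ast_python.py lives.
result = subprocess.run(
    [sys.executable, "compat/ast_python.py", "--stdin"],
    input="x = 1\n",
    capture_output=True,
    text=True,
    check=True,
)
tree = json.loads(result.stdout)
print(sorted(tree))  # typically includes 'body' and 'type_ignores' for a Module
```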
File renamed without changes.
25 changes: 8 additions & 17 deletions compat/src/lexer_compat.rs
@@ -122,7 +122,7 @@ pub struct PythonToken {

pub fn lex_python_source(source: &str) -> Result<Vec<PythonToken>> {
    let mut process = spawn_python_script_command(
        "parser/lex_python.py",
        "compat/lex_python.py",
        vec!["--stdin", "--output-format", "json"],
        default_python_path()?,
    )?;
@@ -131,7 +131,7 @@ pub fn lex_python_source(source: &str) -> Result<Vec<PythonToken>> {
    if let Some(mut stdin) = process.stdin.take() {
        stdin.write_all(source.as_bytes()).into_diagnostic()?;
    } else {
        bail!("Failed to open stdin when running `parser/lex_python.py`");
        bail!("Failed to open stdin when running `compat/lex_python.py`");
    }
    // Get process stdout and parse result.
    let output = process.wait_with_output().into_diagnostic()?;
@@ -166,6 +166,9 @@ pub fn assert_tokens_eq(
        } else {
            let mut python_token = python_token.unwrap();
            let mut enderpy_token = enderpy_token.unwrap();
            // (compat_fstrings) TODO: Python's f-string lexing differs from enderpy's.
            // We merge consecutive f-string middle tokens and emit a single token, but
            // Python emits multiple f-string middle tokens. Skip to the end of the
            // f-string here and do not check its tokens.
            if python_token.kind == PythonKind::FstringStart {
                if enderpy_token.kind == Kind::FStringStart {
                    // Python tokenizes fstring with more tokens than needed.
@@ -191,8 +194,6 @@
            } else if is_python_fstring_mismatch(
                &mismatch,
                &enderpy_tokens[enderpy_index + 1..],
                &python_tokens[python_index + 1..],
                &mut python_index,
                &mut enderpy_index, // <-- `enderpy_index` may be updated
            ) {
                // Nothing, but don't add the mismatch.
@@ -607,8 +608,6 @@ fn is_python_trailing_newline_mismatch(
fn is_python_fstring_mismatch(
    mismatch: &TokenMismatch,
    remaining_tokens: &[Token],
    remaining_python_tokens: &[PythonToken],
    python_index: &mut usize,
    enderpy_index: &mut usize,
) -> bool {
    match mismatch {
@@ -630,12 +629,6 @@
            *enderpy_index += num_skipped;
            return true;
        }
        // TokenMismatch::WrongValue(python_token, token, python_value, enderpy_value) => {
        //     if python_value == "{" {
        //         *python_index += 1;
        //         return true;
        //     }
        // }
        _ => (),
    }
    false
@@ -875,12 +868,10 @@ def",
"a = f\"hello\"",
"f\"\"\"hello\"\"\"",
"f'''hello'''",
// TODO lex_python: Python lexes these poorly.
// "f\"{{hey}}\"",
// "f\"oh_{{hey}}\"",
"f\"{{hey}}\"",
"f\"oh_{{hey}}\"",
"f'a' 'c'",
// TODO lex_python: Python 3.11 chokes on this input.
// "f'hello_{f'''{a}'''}'",
"f'hello_{f'''{a}'''}'",
]);

// Raw F-strings
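
The f-string special cases above exist because CPython 3.12+ (PEP 701) emits a separate FSTRING_MIDDLE token for each literal chunk, while enderpy merges consecutive middles into a single token. A quick way to inspect the Python side, assuming a 3.12+ interpreter:

```python
import io
import tokenize

# On CPython 3.12+ this prints FSTRING_START, then FSTRING_MIDDLE tokens for the
# literal chunks interleaved with the brace-wrapped expressions, then FSTRING_END.
source = 'f"a{x}b{y}c"\n'
for token in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[token.type], repr(token.string))
```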
2 changes: 0 additions & 2 deletions compat/src/main.rs
@@ -7,7 +7,6 @@ use std::path::Path;
use zip::ZipArchive;

use self::lexer_compat::{assert_tokens_eq, lex_python_source};
use self::parser_compat::python_parser_test_ast;

pub mod lexer_compat;
pub mod parser_compat;
@@ -68,7 +67,6 @@ fn run_compatibility_test(file: &str) -> Result<()> {
    let python_tokens = lex_python_source(&source)?;

    assert_tokens_eq(python_tokens, enderpy_tokens, &lexer);
    python_parser_test_ast(&vec![source.as_str()]);

    Ok(())
}
100 changes: 47 additions & 53 deletions compat/src/parser_compat.rs
@@ -1,3 +1,4 @@
#![allow(clippy::all)]
use assert_json_diff::assert_json_matches_no_panic;
use miette::{bail, IntoDiagnostic, Result};
use serde_json::Value;
@@ -17,7 +18,7 @@ use terminal_size::{terminal_size, Width as TerminalWidth};

fn parse_python_source(source: &str) -> Result<Value> {
    let mut process = spawn_python_script_command(
        "parser/ast_python.py",
        "compat/ast_python.py",
        vec!["--stdin"],
        default_python_path()?,
    )?;
@@ -26,7 +27,7 @@ fn parse_python_source(source: &str) -> Result<Value> {
    if let Some(mut stdin) = process.stdin.take() {
        stdin.write_all(source.as_bytes()).into_diagnostic()?;
    } else {
        bail!("Failed to open stdin when running `parser/ast_python.py`");
        bail!("Failed to open stdin when running `compat/ast_python.py`");
    }
    // Get process stdout and parse result.
    let output = process.wait_with_output().into_diagnostic()?;
@@ -115,6 +116,7 @@ fn parse_enderpy_source(source: &str) -> Result<Value> {
    Ok(ast)
}

#[allow(unused_macros)]
macro_rules! parser_test {
    ($test_name:ident, $test_file:expr) => {
        #[test]
@@ -219,11 +221,10 @@ mod tests {
        python_parser_test_ast(&[
            "a or b",
            "a and b",
            // TODO ast_python: Python parses this as a BoolOp with 3 values.
            // TODO: Python parses this as a BoolOp with 3 values.
            // i.e. {"op": "or", "values": ["a", "b", "c"]}
            // Enderpy parses this as a nested set of BoolOps.
            // i.e. {"op": "or", "values": ["a", {"op": "or", "values": ["b", "c"]}]}
            // I'm not sure which is correct.
            // "a or b or c",
            "a and b or c",
        ]);
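
CPython itself produces the flat form, which is easy to confirm with the stdlib ast module:

```python
import ast

# CPython parses a chained `or` into one BoolOp whose `values` list has three entries.
tree = ast.parse("a or b or c", mode="eval")
print(ast.dump(tree.body))
# BoolOp(op=Or(), values=[Name(id='a', ctx=Load()), Name(id='b', ctx=Load()),
#                         Name(id='c', ctx=Load())])
```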
@@ -236,24 +237,22 @@

    #[test]
    fn test_named_expression() {
        // TODO ast_python: Enderpy chokes on this.
        // python_parser_test_ast(&["(a := b)"]);
        python_parser_test_ast(&["(a := b)"]);
    }

    #[test]
    fn test_tuple() {
        python_parser_test_ast(&[
            "(a, b, c)",
            // TODO ast_python: Enderpy doesn't handle newlines within a nested context.
            "(a,
b, c)",
            "(a
, b, c)",
            // "(a,
            // b,
            // c)",
            "(a,
b,
c)",
            "(a,
)",
            "(a, b, c,)",
        ]);
    }
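
The newline cases pass on the Python side because newlines inside parentheses are implicitly joined before parsing, so the AST is identical however the tuple is split:

```python
import ast

# Newlines inside parentheses are implicitly joined; both spellings
# produce the same Tuple node (ast.dump omits line/column info by default).
flat = ast.dump(ast.parse("(a, b, c)", mode="eval"))
split = ast.dump(ast.parse("(a,\nb,\nc)", mode="eval"))
assert flat == split
```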
@@ -263,12 +262,6 @@
python_parser_test_ast(&["yield", "yield a", "yield from a"]);
}

#[test]
fn test_starred() {
// TODO ast_python: Enderpy chokes on this.
// python_parser_test_ast(&["(*a)"]);
}

#[test]
fn test_await_expression() {
python_parser_test_ast(&["await a"]);
@@ -326,14 +319,13 @@
"'a' 'b'",
// TODO ast_python: Enderpy evaluates this as 'r"a"b'. This seems wrong.
// "r'a' 'b'",
// TODO ast_python: Enderpy doesn't handle newlines within a nested context.
// "('a'
// 'b')",
// "('a'
// 'b', 'c')",
// "('a'
// 'b'
// 'c')",
"('a'
'b')",
"('a'
'b', 'c')",
"('a'
'b'
'c')",
// TODO ast_python: Python evaluates this as "ac". Enderpy creates 2 constants.
// "f'a' 'c'",
// TODO ast_python: Python evaluates this as "abc". Enderpy creates 3 constants.
@@ -351,8 +343,7 @@
"f'hello_{a}'",
"f'hello_{a} {b}'",
"f'hello_{a} {b} {c}'",
// unsupported
// "f'hello_{f'''{a}'''}'",
"f'hello_{f'''{a}'''}'",
]);
}
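
For the concatenation TODOs above, CPython folds adjacent string literals into a single Constant at parse time, which is the behavior the comparison expects:

```python
import ast

# Adjacent string literals are merged during parsing into one Constant node.
print(ast.dump(ast.parse("'a' 'b'", mode="eval").body))
# Constant(value='ab')
```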

@@ -435,35 +426,38 @@ except *Exception as e:
        ]);
    }

    // parser_test!(test_functions, "test_data/inputs/functions.py");
    // parser_test!(test_if, "test_data/inputs/if.py");
    // parser_test!(test_indentation, "test_data/inputs/indentation.py");
    // parser_test!(
    //     test_separate_statements,
    //     "test_data/inputs/separate_statements.py"
    // );
    // parser_test!(test_try, "test_data/inputs/try.py");
    parser_test!(test_functions, "../parser/test_data/inputs/functions.py");
    parser_test!(test_if, "../parser/test_data/inputs/if.py");
    parser_test!(
        test_indentation,
        "../parser/test_data/inputs/indentation.py"
    );
    parser_test!(
        test_separate_statements,
        "../parser/test_data/inputs/separate_statements.py"
    );
    // parser_test!(test_try, "../parser/test_data/inputs/try.py");
    // parser_test!(
    //     annotated_assignment,
    //     "test_data/inputs/annotated_assignment.py"
    //     "../parser/test_data/inputs/annotated_assignment.py"
    // );
    // parser_test!(binary_op, "test_data/inputs/binary_op.py");
    // parser_test!(class, "test_data/inputs/class.py");
    // parser_test!(dict, "test_data/inputs/dict.py");
    // parser_test!(test_for, "test_data/inputs/for.py");
    // parser_test!(from_import, "test_data/inputs/from_import.py");
    // parser_test!(function_def, "test_data/inputs/function_def.py");
    parser_test!(binary_op, "../parser/test_data/inputs/binary_op.py");
    parser_test!(class, "../parser/test_data/inputs/class.py");
    // parser_test!(dict, "../parser/test_data/inputs/dict.py");
    // parser_test!(test_for, "../parser/test_data/inputs/for.py");
    parser_test!(from_import, "../parser/test_data/inputs/from_import.py");
    parser_test!(function_def, "../parser/test_data/inputs/function_def.py");
    // parser_test!(
    //     generator_expressions,
    //     "test_data/inputs/generator_expressions.py"
    //     "../parser/test_data/inputs/generator_expressions.py"
    // );
    // parser_test!(lists, "test_data/inputs/lists.py");
    // parser_test!(test_match, "test_data/inputs/match.py");
    // parser_test!(sets, "test_data/inputs/sets.py");
    // parser_test!(string, "test_data/inputs/string.py");
    // parser_test!(subscript, "test_data/inputs/subscript.py");
    // parser_test!(with, "test_data/inputs/with.py");
    // parser_test!(newlines, "test_data/inputs/newlines.py");
    // parser_test!(comments, "test_data/inputs/comments.py");
    // parser_test!(types_alias, "test_data/inputs/type_alias.py");
    // parser_test!(lists, "../parser/test_data/inputs/lists.py");
    // parser_test!(test_match, "../parser/test_data/inputs/match.py");
    // parser_test!(sets, "../parser/test_data/inputs/sets.py");
    // parser_test!(string, "../parser/test_data/inputs/string.py");
    // parser_test!(subscript, "../parser/test_data/inputs/subscript.py");
    // parser_test!(with, "../parser/test_data/inputs/with.py");
    // parser_test!(newlines, "../parser/test_data/inputs/newlines.py");
    parser_test!(comments, "../parser/test_data/inputs/comments.py");
    // parser_test!(types_alias, "../parser/test_data/inputs/type_alias.py");
}
8 changes: 4 additions & 4 deletions lsp/src/main.rs
@@ -6,12 +6,12 @@ use log::LevelFilter;
use tower_lsp::{jsonrpc::Result, lsp_types::*, Client, LanguageServer, LspService, Server};

#[derive(Debug)]
struct Backend<'a> {
struct Backend {
    client: Client,
    manager: BuildManager<'a>,
    manager: BuildManager,
}

impl<'a> Backend<'a> {
impl Backend {
    fn build(&self, path: PathBuf) {
        let root = find_project_root(&path);
        self.manager.build_one(root, &path);
self.manager.build_one(root, &path);
Expand All @@ -20,7 +20,7 @@ impl<'a> Backend<'a> {
}

#[tower_lsp::async_trait]
impl LanguageServer for Backend<'static> {
impl LanguageServer for Backend {
async fn initialize(&self, i: InitializeParams) -> Result<InitializeResult> {
let root = match i.root_uri {
Some(v) => v.to_file_path().unwrap_or(PathBuf::from("")),
