From 8f0fff3da396f8dd5395284ac3b9d0675a6df0ea Mon Sep 17 00:00:00 2001
From: Alexandre Eichenberger
Date: Tue, 20 Sep 2022 13:41:38 -0400
Subject: [PATCH] Spelling and variable names (#1712)

* cleanup of spelling and variable naming convention in gen_onnx_mlir and a few other places

Signed-off-by: Alexandre Eichenberger
---
 src/Conversion/ONNXToKrnl/Math/Gemm.cpp |  2 +-
 .../ONNXToKrnl/ONNXToKrnlCommon.cpp     |  2 +-
 src/Dialect/Mlir/IndexExpr.cpp          |  2 +-
 .../onnx_to_mhlo/Tensor/Concat.mlir     |  2 +-
 utils/gen_onnx_mlir.py                  | 54 +++++++++----------
 5 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/src/Conversion/ONNXToKrnl/Math/Gemm.cpp b/src/Conversion/ONNXToKrnl/Math/Gemm.cpp
index 2c117c9152..142364e826 100644
--- a/src/Conversion/ONNXToKrnl/Math/Gemm.cpp
+++ b/src/Conversion/ONNXToKrnl/Math/Gemm.cpp
@@ -367,7 +367,7 @@ struct ONNXGemmOpLowering : public ConversionPattern {
                    << betaLit << (betaLit == 1.0 ? " (skip)" : "") << ", c, "
                    << cDim0 << ", " << cDim1 << "\n";
     } else {
-      llvm::dbgs() << "OP-STATS: gemm of unkown sizes "
+      llvm::dbgs() << "OP-STATS: gemm of unknown sizes "
                    << (aTrans ? ", a trans" : "")
                    << (bTrans ? ", b trans" : "") << ", alpha " << alphaLit
                    << ", beta " << betaLit << "\n";
diff --git a/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.cpp b/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.cpp
index 9144f419ae..65937ecaaa 100644
--- a/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.cpp
+++ b/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.cpp
@@ -176,7 +176,7 @@ Value insertAllocAndDeallocSimple(PatternRewriter &rewriter, Operation *op,
   if (hasAllConstantDimensions(type))
     return insertAllocAndDealloc(
         type, loc, rewriter, insertDealloc, nullptr, alignment);
-  // Otherwise, take the unkown operands from the output dim IndexExpressions
+  // Otherwise, take the unknown operands from the output dim IndexExpressions
   SmallVector allocOperands;
   auto memRefShape = type.getShape();
   auto rank = memRefShape.size();
diff --git a/src/Dialect/Mlir/IndexExpr.cpp b/src/Dialect/Mlir/IndexExpr.cpp
index 7d1db63113..481ac5148c 100644
--- a/src/Dialect/Mlir/IndexExpr.cpp
+++ b/src/Dialect/Mlir/IndexExpr.cpp
@@ -295,7 +295,7 @@ bool IndexExpr::canBeUsedInScope() const {
       // out of current scope.
      return false;
    }
-  llvm_unreachable("unkown kind");
+  llvm_unreachable("unknown kind");
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/test/mlir/conversion/onnx_to_mhlo/Tensor/Concat.mlir b/test/mlir/conversion/onnx_to_mhlo/Tensor/Concat.mlir
index 64b840c067..9da511df59 100644
--- a/test/mlir/conversion/onnx_to_mhlo/Tensor/Concat.mlir
+++ b/test/mlir/conversion/onnx_to_mhlo/Tensor/Concat.mlir
@@ -1,6 +1,6 @@
 // RUN: onnx-mlir-opt --convert-onnx-to-mhlo %s -split-input-file | FileCheck %s
 
-// Test when output shape is unkown
+// Test when output shape is unknown
 func.func @test_concat_dynamic_shape(%arg0 : tensor<5x5x?x32xf32>, %arg1 : tensor<5x5x?x32xf32>) -> tensor<5x5x?x32xf32> {
   %0 = "onnx.Concat"(%arg0, %arg1) { axis = 2 : si64} : (tensor<5x5x?x32xf32>, tensor<5x5x?x32xf32>) -> tensor<5x5x?x32xf32>
   "func.return"(%0) : (tensor<5x5x?x32xf32>) -> ()
diff --git a/utils/gen_onnx_mlir.py b/utils/gen_onnx_mlir.py
index 045dbe10cc..733cc8a3be 100755
--- a/utils/gen_onnx_mlir.py
+++ b/utils/gen_onnx_mlir.py
@@ -465,7 +465,7 @@
 )])
 
 onnx_types = (
-    'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',
+    'bool', 'int8', 'int16', 'int32', 'int64', 'unknown', 'float16',
     'float', 'double', 'complex64', 'complex128', 'string'
 )
 tblgen_types = ('AnyI1', 'AnyI8', 'AnyI16', 'AnyI32', 'AnyI64',
@@ -621,7 +621,7 @@ def get_allowed_elem_types(schema, input):
         if t == None :
             return allowed_structure, None
         if not t in allowed_type_list :
-            allowed_tyoe_list = allowed_type_list.append(t)
+            allowed_type_list.append(t)
     return allowed_structure,allowed_type_list
 
 
@@ -748,9 +748,9 @@ def format_value(value): # type: (Any) -> Text
         name_to_type[attr.name] = get_attr_type_optional(attr.type)
     return name_to_type
 
-def get_numberof_list(mylist):
-    expected_num = len(mylist)
-    for element in mylist :
+def get_numberof_list(my_list):
+    expected_num = len(my_list)
+    for element in my_list :
         if OpSchema.FormalParameterOption.Variadic == element.option:
             expected_num = -1
     return expected_num
@@ -866,7 +866,7 @@ def parse_type_str(allowedType):
         'bfloat16' : 'BF16',
         'float' : 'F32',
         'double' : 'F64',
-        'unkown' : 'BF16',
+        'unknown' : 'BF16',
         'complex64' : 'Complex',
         'complex128' : 'Complex',
         'string' : 'StringType'}
@@ -892,7 +892,7 @@ def parse_a_type_constraint(constraint):
             mlirTypes.append(mlirType)
 
     # Remove redundant and sort.
-    # However onnx keeps a consitently meaningful order
+    # However onnx keeps a consistently meaningful order
     # There is no redundancy as long as each onnx type is mapped uniquely
     # mlirTypes = sorted(list(set(mlirTypes)))
 
@@ -1175,24 +1175,24 @@ def build_operator_schemas():
     # [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]
     operator_schemas = list(
     )  # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]
-    exsting_ops = set()  # type: Set[Text]
-    for domain, _supportmap in sorted(index.items()):
+    existing_ops = set()  # type: Set[Text]
+    for domain, _support_map in sorted(index.items()):
         if not should_render_domain(domain):
             continue
 
-        processed_supportmap = list()
-        for _support, _namemap in sorted(_supportmap.items()):
-            processed_namemap = list()
-            for n, unsorted_versions in sorted(_namemap.items()):
+        processed_support_map = list()
+        for _support, _name_map in sorted(_support_map.items()):
+            processed_name_map = list()
+            for n, unsorted_versions in sorted(_name_map.items()):
                 versions = sorted(unsorted_versions, key=lambda s: s.since_version)
                 schema = versions[-1]
-                if schema.name in exsting_ops:
+                if schema.name in existing_ops:
                     continue
 
                 if check_operation_version:
                     # Generate operation of the latest version of your onnx.
-                    exsting_ops.add(schema.name)
-                    processed_namemap.append((n, schema, versions))
+                    existing_ops.add(schema.name)
+                    processed_name_map.append((n, schema, versions))
 
                     # Add checks against version_dict
                     if schema.name not in version_dict :
@@ -1208,24 +1208,24 @@ def build_operator_schemas():
                 if schema.name not in version_dict :
                     continue
                 found = False
-                vcounter = 0
+                v_counter = 0
                 for schema in reversed(versions):
                     # Check the version number against the version_dict
-                    specified_version = version_dict[schema.name][vcounter]
+                    specified_version = version_dict[schema.name][v_counter]
                     if schema.since_version == specified_version:
-                        exsting_ops.add(schema.name)
-                        processed_namemap.append((n, schema, versions))
+                        existing_ops.add(schema.name)
+                        processed_name_map.append((n, schema, versions))
                         found = True
-                    vcounter += 1
-                    if len(version_dict[schema.name]) == vcounter :
+                    v_counter += 1
+                    if len(version_dict[schema.name]) == v_counter :
                         break
                 if not found:
                     print("Your onnx installation may be too old. "
                           "The desired version for operation {} is not found.".format(
                               schema.name))
                     sys.exit()
-            processed_supportmap.append((_support, processed_namemap))
-        operator_schemas.append((domain, processed_supportmap))
+            processed_support_map.append((_support, processed_name_map))
+        operator_schemas.append((domain, processed_support_map))
     return operator_schemas
 
@@ -1252,11 +1252,11 @@ def main(args): # type: (Type[Args]) -> None
     gen_op_versions(op_importer)
     new_version_dict = dict()
 
-    for domain, supportmap in build_operator_schemas():
-        for _, namemap in supportmap:
+    for domain, support_map in build_operator_schemas():
+        for _, name_map in support_map:
             # Generate Op with version number if not the latest version
             previous_name = ""
-            for op_type, schema, versions in namemap:
+            for op_type, schema, versions in name_map:
                 new_version_dict[schema.name] = [schema.since_version]
                 if not check_operation_version :
                     with_version = previous_name == schema.name