Commit cd92392

[Refactor] Clean up Relay references in the codebase (#17733)
Removing Relay references and statements from the codebase.
Parent: 6b89f95

50 files changed: +80 / -288 lines

cmake/config.cmake

Lines changed: 3 additions & 3 deletions
@@ -248,12 +248,12 @@ set(USE_SORT ON)
 
 # Whether to use Arm Compute Library (ACL) codegen
 # We provide 2 separate flags since we cannot build the ACL runtime on x86.
-# This is useful for cases where you want to cross-compile a relay graph
+# This is useful for cases where you want to cross-compile a graph
 # on x86 then run on AArch.
 #
 # An example of how to use this can be found here: docs/deploy/arm_compute_lib.rst.
 #
-# USE_ARM_COMPUTE_LIB - Support for compiling a relay graph offloading supported
+# USE_ARM_COMPUTE_LIB - Support for compiling a graph offloading supported
 # operators to Arm Compute Library. OFF/ON
 # USE_ARM_COMPUTE_LIB_GRAPH_EXECUTOR - Run Arm Compute Library annotated functions via the ACL
 # runtime. OFF/ON/"path/to/ACL"
@@ -263,7 +263,7 @@ set(USE_ARM_COMPUTE_LIB_GRAPH_EXECUTOR OFF)
 # Whether to build with TensorRT codegen or runtime
 # Examples are available here: docs/deploy/tensorrt.rst.
 #
-# USE_TENSORRT_CODEGEN - Support for compiling a relay graph where supported operators are
+# USE_TENSORRT_CODEGEN - Support for compiling a graph where supported operators are
 # offloaded to TensorRT. OFF/ON
 # USE_TENSORRT_RUNTIME - Support for running TensorRT compiled modules, requires presence of
 # TensorRT library. OFF/ON/"path/to/TensorRT"

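A quick way to check which of these optional codegens a given TVM build actually enabled is to query the build flags recorded at configure time from Python. A minimal sketch, assuming tvm.support.libinfo() reports the cmake options under the same names used in config.cmake (as recent TVM releases do):

import tvm

# Print the cmake flags this TVM build was configured with.
# Flag names are assumed to match the options shown in config.cmake above.
info = tvm.support.libinfo()
for flag in ("USE_ARM_COMPUTE_LIB", "USE_TENSORRT_CODEGEN", "USE_TENSORRT_RUNTIME"):
    print(flag, "=", info.get(flag, "<not recorded>"))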
cmake/modules/CUDA.cmake

Lines changed: 3 additions & 3 deletions
@@ -70,8 +70,8 @@ if(USE_CUDA)
 if(USE_CUDNN)
 message(STATUS "Build with cuDNN support")
 include_directories(SYSTEM ${CUDA_CUDNN_INCLUDE_DIRS})
-tvm_file_glob(GLOB CUDNN_RELAY_CONTRIB_SRC src/relay/backend/contrib/cudnn/*.cc src/relax/backend/contrib/cudnn/*.cc)
-list(APPEND COMPILER_SRCS ${CUDNN_RELAY_CONTRIB_SRC})
+tvm_file_glob(GLOB CUDNN_RELAX_CONTRIB_SRC src/relax/backend/contrib/cudnn/*.cc)
+list(APPEND COMPILER_SRCS ${CUDNN_RELAX_CONTRIB_SRC})
 tvm_file_glob(GLOB CONTRIB_CUDNN_SRCS src/runtime/contrib/cudnn/*.cc)
 list(APPEND RUNTIME_SRCS ${CONTRIB_CUDNN_SRCS})
 list(APPEND TVM_RUNTIME_LINKER_LIBS ${CUDA_CUDNN_LIBRARY})
@@ -95,7 +95,7 @@ if(USE_CUDA)
 
 if(USE_CUBLAS)
 message(STATUS "Build with cuBLAS support")
-tvm_file_glob(GLOB CUBLAS_CONTRIB_SRC src/relay/backend/contrib/cublas/*.cc src/relax/backend/contrib/cublas/*.cc)
+tvm_file_glob(GLOB CUBLAS_CONTRIB_SRC src/relax/backend/contrib/cublas/*.cc)
 list(APPEND COMPILER_SRCS ${CUBLAS_CONTRIB_SRC})
 tvm_file_glob(GLOB CONTRIB_CUBLAS_SRCS src/runtime/contrib/cublas/*.cc)
 list(APPEND RUNTIME_SRCS ${CONTRIB_CUBLAS_SRCS})

cmake/modules/contrib/ArmComputeLib.cmake

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 # We separate the codegen and runtime build since ACL can only be built
 # for AArch. In the world where we take the cross compilation approach,
 # which is common with arm devices, we need to be able to cross-compile
-# a relay graph on x86 for AArch and then run the graph on AArch.
+# a graph on x86 for AArch and then run the graph on AArch.
 if(USE_ARM_COMPUTE_LIB)
 tvm_file_glob(GLOB ACL_RUNTIME_MODULE src/runtime/contrib/arm_compute_lib/acl_runtime.cc)

cmake/modules/contrib/BNNS.cmake

Lines changed: 1 addition & 2 deletions
@@ -17,9 +17,8 @@
 
 if(USE_BNNS STREQUAL "ON")
 add_definitions(-DUSE_JSON_RUNTIME=1)
-tvm_file_glob(GLOB BNNS_RELAY_CONTRIB_SRC src/relay/backend/contrib/bnns/*.cc)
+tvm_file_glob(GLOB BNNS_RELAY_CONTRIB_SRC "")
 list(APPEND COMPILER_SRCS ${BNNS_RELAY_CONTRIB_SRC})
-list(APPEND COMPILER_SRCS ${JSON_RELAY_CONTRIB_SRC})
 
 list(APPEND TVM_RUNTIME_LINKER_LIBS "-framework Accelerate")

cmake/modules/contrib/DNNL.cmake

Lines changed: 2 additions & 5 deletions
@@ -21,7 +21,7 @@ if(IS_DIRECTORY ${USE_DNNL})
 message(WARNING "Cannot find DNNL library at ${USE_DNNL}.")
 else()
 add_definitions(-DUSE_JSON_RUNTIME=1)
-tvm_file_glob(GLOB DNNL_CONTRIB_SRC src/relay/backend/contrib/dnnl/*.cc src/relax/backend/contrib/dnnl/*.cc)
+tvm_file_glob(GLOB DNNL_CONTRIB_SRC src/relax/backend/contrib/dnnl/*.cc)
 list(APPEND COMPILER_SRCS ${DNNL_CONTRIB_SRC})
 
 list(APPEND TVM_RUNTIME_LINKER_LIBS ${EXTERN_LIBRARY_DNNL})
@@ -34,7 +34,7 @@ if(IS_DIRECTORY ${USE_DNNL})
 endif()
 elseif((USE_DNNL STREQUAL "ON") OR (USE_DNNL STREQUAL "JSON"))
 add_definitions(-DUSE_JSON_RUNTIME=1)
-tvm_file_glob(GLOB DNNL_CONTRIB_SRC src/relay/backend/contrib/dnnl/*.cc src/relax/backend/contrib/dnnl/*.cc)
+tvm_file_glob(GLOB DNNL_CONTRIB_SRC src/relax/backend/contrib/dnnl/*.cc)
 list(APPEND COMPILER_SRCS ${DNNL_CONTRIB_SRC})
 
 find_library(EXTERN_LIBRARY_DNNL dnnl)
@@ -46,9 +46,6 @@ elseif((USE_DNNL STREQUAL "ON") OR (USE_DNNL STREQUAL "JSON"))
 list(APPEND RUNTIME_SRCS ${DNNL_CONTRIB_SRC})
 message(STATUS "Build with DNNL JSON runtime: " ${EXTERN_LIBRARY_DNNL})
 elseif(USE_DNNL STREQUAL "C_SRC")
-tvm_file_glob(GLOB DNNL_RELAY_CONTRIB_SRC src/relay/backend/contrib/dnnl/*.cc)
-list(APPEND COMPILER_SRCS ${DNNL_RELAY_CONTRIB_SRC})
-
 find_library(EXTERN_LIBRARY_DNNL dnnl)
 list(APPEND TVM_RUNTIME_LINKER_LIBS ${EXTERN_LIBRARY_DNNL})
 tvm_file_glob(GLOB DNNL_CONTRIB_SRC src/runtime/contrib/dnnl/dnnl.cc

cmake/modules/contrib/Mrvl.cmake

Lines changed: 0 additions & 5 deletions
@@ -24,9 +24,4 @@ if(USE_MRVL)
 src/runtime/contrib/mrvl/mrvl_sw_runtime_lib.cc
 )
 list(APPEND RUNTIME_SRCS ${RUNTIME_MRVL_SRCS})
-file(GLOB COMPILER_MRVL_SRCS
-src/relay/backend/contrib/mrvl/codegen.cc
-src/relay/backend/contrib/mrvl/compiler_attr.cc
-)
-list(APPEND COMPILER_SRCS ${COMPILER_MRVL_SRCS})
 endif(USE_MRVL)

cmake/modules/contrib/TensorRT.cmake

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ include (FindPackageHandleStandardArgs)
 
 if(USE_TENSORRT_CODEGEN)
 message(STATUS "Build with TensorRT codegen")
-tvm_file_glob(GLOB COMPILER_TENSORRT_SRCS src/relay/backend/contrib/tensorrt/*.cc src/relax/backend/contrib/tensorrt/*.cc)
+tvm_file_glob(GLOB COMPILER_TENSORRT_SRCS src/relax/backend/contrib/tensorrt/*.cc)
 set_source_files_properties(${COMPILER_TENSORRT_SRCS} PROPERTIES COMPILE_FLAGS "-Wno-deprecated-declarations")
 tvm_file_glob(GLOB RUNTIME_TENSORRT_SRCS src/runtime/contrib/tensorrt/tensorrt_runtime.cc)
 set_source_files_properties(${RUNTIME_TENSORRT_SRCS} PROPERTIES COMPILE_FLAGS "-Wno-deprecated-declarations")

docs/arch/index.rst

Lines changed: 4 additions & 4 deletions
@@ -114,7 +114,7 @@ Target Translation
 The target translation phase transforms an IRModule to the corresponding target executable format.
 For backends such as x86 and ARM, we use the LLVM IRBuilder to build in-memory LLVM IR.
 We can also generate source-level languages such as CUDA C and OpenCL.
-Finally, we support direct translations of a Relay function (sub-graph) to specific targets via external code generators.
+Finally, we support direct translations of a Relax function (sub-graph) to specific targets via external code generators.
 It is important that the final code generation phase is as lightweight as possible. Vast majority of transformations
 and lowering should be performed before the target translation phase.
 
@@ -175,8 +175,8 @@ In summary, the key data structures in the compilation flows are:
 
 Most parts of the compilation are transformations among the key data structures.
 
-- relay/transform and tir/transform are determinstic rule-based transformations
-- auto_scheduler and autotvm contains the search-based transformations
+- relax/transform and tir/transform are determinstic rule-based transformations
+- meta-schedule contains the search-based transformations
 
 Finally, the compilation flow example is only a typical use-case of the TVM stack.
 We expose these key data structures and transformations to python and C++ APIs. As a result, you can use TVM just like the way you use numpy,
@@ -247,7 +247,7 @@ The ability to save/store, and inspect an IR node provides a foundation for maki
 tvm/ir
 ------
 The `tvm/ir` folder contains the unified data structure and interfaces across for all IR function variants.
-The components in `tvm/ir` are shared by `tvm/relay` and `tvm/tir`, notable ones include
+The components in `tvm/ir` are shared by `tvm/relax` and `tvm/tir`, notable ones include
 
 - IRModule
 - Type

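As a concrete, minimal illustration of the target-translation step described above, the snippet below lowers a tiny tensor-expression workload to a TIR PrimFunc, builds it for the LLVM backend, and runs the resulting runtime module. This is a generic sketch of the flow, not part of this commit:

import numpy as np
import tvm
from tvm import te

# Define a trivial workload and lower it to a TIR PrimFunc.
n = 16
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute((n,), lambda i: A[i] + 1.0, name="B")
prim_func = te.create_prim_func([A, B])

# Target translation: IRModule/PrimFunc -> runtime module for the LLVM backend.
lib = tvm.build(prim_func, target="llvm")

dev = tvm.cpu()
a = tvm.nd.array(np.arange(n, dtype="float32"), dev)
b = tvm.nd.empty((n,), "float32", dev)
lib(a, b)  # invoke the module's entry function
print(b.numpy())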
docs/arch/pass_infra.rst

Lines changed: 9 additions & 9 deletions
@@ -20,7 +20,7 @@
 Pass Infrastructure
 ===================
 
-Both Relay and TVM IR contain a series of optimization passes which improve performance metrics
+Both Relax and TVM IR contain a series of optimization passes which improve performance metrics
 of models such as mean inference, memory footprint, or power consumption for
 specific devices. There is a suite of standard optimizations as well as machine
 learning-specific optimizations including constant folding, dead code
@@ -31,7 +31,7 @@ transformation using the analysis result collected during and/or before traversa
 However, as TVM evolves quickly, the need for a more systematic and efficient
 way to manage these passes is becoming apparent. In addition, a generic
 framework that manages the passes across different layers of the TVM stack (e.g.
-Relay and tir) paves the way for developers to quickly prototype and plug the
+Relax and tir) paves the way for developers to quickly prototype and plug the
 implemented passes into the system.
 
 This doc describes the design of such an infra that takes the advantage of the
@@ -51,7 +51,7 @@ scheme through `Sequential`_ and `Block`_, respectively. With such constructs,
 these modern frameworks are able to conveniently add modules/layers to their
 containers and build up neural networks easily.
 
-The design of the Relay pass infra is largely inspired by the hierarchical
+The design of the TVM pass infra is largely inspired by the hierarchical
 pass manager used in LLVM and the block-style containers used in the popular
 deep learning frameworks. The major goals of the pass infra include:
 
@@ -170,7 +170,7 @@ Pass Constructs
 ^^^^^^^^^^^^^^^
 
 The pass infra is designed in a hierarchical manner, and it could work at
-different granularities of Relay/tir programs. A pure virtual class ``PassNode`` is
+different granularities of Relax/tir programs. A pure virtual class ``PassNode`` is
 introduced to serve as the base of the different optimization passes. This class
 contains several virtual methods that must be implemented by the
 subclasses at the level of modules, functions, or sequences of passes.
@@ -200,7 +200,7 @@ Module-Level Passes
 
 Module level passes are geared mainly for global and inter-procedural
 optimizations (IPO), which are similar to the module pass used in LLVM. Some
-typical passes in Relay that need the global picture of a module, such as
+typical passes in Relax that need the global picture of a module, such as
 A-normal form conversion and lambda lifting, etc., fall into this set. At this
 level, users can even add and/or delete functions in a module. Note that all
 passes
@@ -226,13 +226,13 @@ Function-Level Passes
 ^^^^^^^^^^^^^^^^^^^^^
 
 Function-level passes are used to implement various intra-function level
-optimizations for a given Relay/tir module. It fetches one function at a time from
-the function list of a module for optimization and yields a rewritten Relay
+optimizations for a given Relax/tir module. It fetches one function at a time from
+the function list of a module for optimization and yields a rewritten Relax
 ``Function`` or tir ``PrimFunc``. Most of passes can be classified into this category, such as
-common subexpression elimination and inference simplification in Relay as well as vectorization
+common subexpression elimination and inference simplification in Relax as well as vectorization
 and flattening storage in tir, etc.
 
-Note that the scope of passes at this level is either a Relay function or a tir primitive function.
+Note that the scope of passes at this level is either a Relax function or a tir primitive function.
 Therefore, we cannot add or delete a function through these passes as they are not aware of
 the global information.

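To make the module-level vs. function-level distinction concrete, here is a minimal, illustrative sketch of a custom module-level pass written against the Python pass infra (tvm.transform.module_pass and PassContext are the documented entry points; the pass body itself is only an example):

import tvm

@tvm.transform.module_pass(opt_level=1, name="CountFunctions")
def count_functions(mod, ctx):
    # A module-level pass receives the whole IRModule, so it may inspect,
    # add, or delete functions; here it only reports how many there are.
    print("IRModule contains", len(mod.functions), "functions")
    return mod

mod = tvm.IRModule()
with tvm.transform.PassContext(opt_level=3):
    mod = count_functions(mod)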
include/tvm/ir/transform.h

Lines changed: 5 additions & 5 deletions
@@ -32,18 +32,18 @@
 * - Reducing the effort required to implement new passes for compiler
 * developers, etc.
 *
-* Similar to LLVM's pass manager, we designed the Relay/Relax pass manager to work
+* Similar to LLVM's pass manager, we designed the Relax pass manager to work
 * different granularity, i.e. module level, function level, and even sequential
 * passe that contains a host of passes.
 *
 * However, we also extend the functionality of the traditional pass manager
 * with the consideration of requirements/convention from deep learning
-* frameworks, such as Pytorch and Gluon, etc. Each pass in the Relay/Relax pass
+* frameworks, such as Pytorch and Gluon, etc. Each pass in the Relax pass
 * manager performs the IRModule -> IRModule transformation. All
 * different types of passes, including the sequential-level pass object, are
 * essentially pass objects. This design, therefore, effectively provides users
 * a consistent and convenient interface, i.e. Pass, to play with. It offers a
-* means to ease the development and testing of Relay/Relax passes. For example, with
+* means to ease the development and testing of Relax passes. For example, with
 * the pass manager, external users will be able to have custom passes correctly
 * scheduled without having to modify a single handcrafted pass order.
 *
@@ -387,7 +387,7 @@ class PassInfo : public ObjectRef {
 /*!
 * \brief PassNode is the base type of differnt types of optimization passes.
 * It is designed as a pure class and implemented by different pass subclasses
-* at different granularity of Relay/Relax nodes.
+* at different granularity of Relax nodes.
 */
 class PassNode : public Object {
 public:
@@ -460,7 +460,7 @@ class Pass : public ObjectRef {
 };
 
 /*!
-* \brief The SequentialNode contains a set of passes that transform Relay/Relax
+* \brief The SequentialNode contains a set of passes that transform Relax
 * programs from one AST to another semantically equivalent one.
 *
 * One example of this level of pass is that the pass manager needs to correctly

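On the Python side, SequentialNode is exposed as tvm.transform.Sequential, which composes a list of IRModule-to-IRModule passes into a single pass object. A small sketch (the relax passes named here are assumed to be available, as in recent TVM releases; any list of passes works the same way):

import tvm
from tvm import relax

# Compose several IRModule -> IRModule passes into one Pass object.
seq = tvm.transform.Sequential(
    [
        relax.transform.LegalizeOps(),
        relax.transform.FoldConstant(),
        relax.transform.DeadCodeElimination(),
    ],
    name="ExamplePipeline",
)

# `seq` is itself a Pass; applying it honors the surrounding PassContext,
# e.g. with tvm.transform.PassContext(opt_level=3): mod = seq(mod)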
include/tvm/ir/type.h

Lines changed: 2 additions & 2 deletions
@@ -21,7 +21,7 @@
 * \file tvm/ir/type.h
 * \brief IR/AST nodes for the unified type system in TVM.
 *
-* We use Relay's type system as the unified type system
+* We use TVM's type system as the unified type system
 * throughout the stack.
 *
 * This file contains types that are common across IR variants.
@@ -62,7 +62,7 @@ namespace tvm {
 /*!
 * \brief Type is the base type of all types.
 *
-* Relay's type system contains following subclasses:
+* TVM's type system contains following subclasses:
 *
 * - PrimType: type of primitive type values used in the low-level IR.
 * - FuncType: type of a function.

include/tvm/meta_schedule/builder.h

Lines changed: 2 additions & 2 deletions
@@ -40,7 +40,7 @@ class BuilderInputNode : public runtime::Object {
 IRModule mod;
 /*! \brief The target to be built for. */
 Target target;
-/*! \brief Parameters for Relay build module. */
+/*! \brief Parameters for Relax build module. */
 Optional<Map<String, runtime::NDArray>> params;
 
 void VisitAttrs(tvm::AttrVisitor* v) {
@@ -63,7 +63,7 @@ class BuilderInput : public runtime::ObjectRef {
 * \brief Constructor of BuilderInput.
 * \param mod The IRModule to be built.
 * \param target The target to be built for.
-* \param params Parameters for Relay build module.
+* \param params Parameters for Relax build module.
 */
 TVM_DLL explicit BuilderInput(IRModule mod, Target target,
 Optional<Map<String, runtime::NDArray>> params = NullOpt);

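The Python bindings mirror this header: a BuilderInput bundles the IRModule, the Target, and the optional params, and a builder maps a list of inputs to BuilderResults. A rough, illustrative sketch using the local builder (module paths and defaults assumed from recent tvm.meta_schedule):

import tvm
from tvm import te
from tvm import meta_schedule as ms

# A tiny PrimFunc to act as the module being built.
A = te.placeholder((128,), name="A", dtype="float32")
B = te.compute((128,), lambda i: A[i] + 1.0, name="B")
mod = tvm.IRModule({"main": te.create_prim_func([A, B])})

builder = ms.builder.LocalBuilder()
(result,) = builder.build([ms.builder.BuilderInput(mod, tvm.target.Target("llvm"))])
print("artifact:", result.artifact_path, "error:", result.error_msg)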
include/tvm/target/virtual_device.h

Lines changed: 2 additions & 2 deletions
@@ -58,7 +58,7 @@ constexpr int kInvalidDeviceType = -1;
 * \brief Describes at compile time the constraints on where data is to be stored at runtime
 * down to the (virtual) device and memory scope level, and how to compile code to compute that
 * data. Used by the \p PlanDevices pass to collect and solve (virtual) device constraints for
-* the whole Relay program.
+* the whole Relax program.
 *
 * Is a quadruple of:
 * - A \p device_type (\p DLDeviceType). May be \p kInvalidDeviceType if unconstrained.
@@ -79,7 +79,7 @@ constexpr int kInvalidDeviceType = -1;
 * then the function body is also executed on that device.
 *
 *
-* By 'execution' we include both (fused) primitive operators, and all the Relay expressions
+* By 'execution' we include both (fused) primitive operators, and all the Relax expressions
 * surrounding them which coordinates data and control flow. Again, typically non-primitive
 * operators must be executed on a 'CPU'-like device with good support for control flow.
 *

include/tvm/topi/transform.h

Lines changed: 2 additions & 2 deletions
@@ -775,7 +775,7 @@ inline Tensor dynamic_strided_slice(const Tensor& x, const Array<PrimExpr>& begi
 
 arith::Analyzer analyzer;
 for (size_t i = 0; i < num_slice_axes; ++i) {
-// Check ProducerLoad to keep backward compatibility for Relay.
+// Check ProducerLoad to keep backward compatibility for Relax.
 if (!begin[i]->IsInstance<ProducerLoadNode>() && !end[i]->IsInstance<ProducerLoadNode>() &&
 !strides[i]->IsInstance<ProducerLoadNode>()) {
 out_shape.push_back(
@@ -840,7 +840,7 @@ inline te::Tensor dynamic_strided_slice(const te::Tensor& x, const te::Tensor& b
 }
 
 /*!
-* \brief Calculate the output shape of strided_slice, the entry point for Relay type relation
+* \brief Calculate the output shape of strided_slice, the entry point for Relax type relation
 *
 * \param ishape The input tensor shape
 * \param begin The indices to begin with in the slicing

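For reference, the same strided-slice shape computation is reachable from Python through tvm.topi.strided_slice on a tensor-expression input; a small sketch (arguments assumed from the current topi Python API):

import tvm
from tvm import te, topi

# Static strided slice: rows 0..4 (exclusive) with stride 2, all columns.
x = te.placeholder((8, 16), name="x", dtype="float32")
y = topi.strided_slice(x, begin=[0, 0], end=[4, 16], strides=[2, 1])
print(y.shape)  # the output shape is derived at compile time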
python/gen_requirements.py

Lines changed: 3 additions & 3 deletions
@@ -26,9 +26,9 @@
 TVM can be roughly broken into these named pieces along the lines of Python dependencies:
 
 - "core": A core piece, which is intended to be buildable with very few external dependencies. Users
-  can use Relay, compile models, and run autotuning with this part.
+  can use Relax, compile models, and run autotuning with this part.
 - "importer-<tool>": Model importers, which convert models defined in various other tools (i.e.
-  TensorFlow, PyTorch, etc) into Relay models.
+  TensorFlow, PyTorch, etc) into Relax models.
 - Extra features (i.e. XGBoost in AutoTVM). These enhance TVM's functionality, but aren't required
   for basic operation.
 
@@ -74,7 +74,7 @@
 ],
 ),
 ),
-# Relay frontends.
+# frontends.
 ("importer-coreml", ("Requirements for the CoreML importer", ["coremltools"])),
 (
 "importer-keras",

python/setup.py

Lines changed: 0 additions & 6 deletions
@@ -206,11 +206,6 @@ def is_pure(self):
 setup_kwargs = {"include_package_data": True}
 
 
-def get_package_data_files():
-    # Relay standard libraries
-    return []
-
-
 def long_description_contents():
     with open(pathlib.Path(CURRENT_DIR).resolve().parent / "README.md", encoding="utf-8") as readme:
         description = readme.read()
@@ -255,7 +250,6 @@ def long_description_contents():
     extras_require=extras_require,
     packages=find_packages(),
     package_dir={"tvm": "tvm"},
-    package_data={"tvm": get_package_data_files()},
     distclass=BinaryDistribution,
     ext_modules=config_cython(),
     **setup_kwargs,

python/tvm/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@
 # Contrib initializers
 from .contrib import rocm as _rocm, nvcc as _nvcc
 
-# Relay and Relax contain modules that are only available in compiler package
+# Relax contain modules that are only available in compiler package
 # Do not import them if TVM is built with runtime only
 if not _RUNTIME_ONLY:
     from . import relax

python/tvm/contrib/cutlass/build.py

Lines changed: 2 additions & 2 deletions
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 # pylint: disable=invalid-name, dangerous-default-value, arguments-differ
-"""Driver for partitioning and building a Relay module for CUTLASS offload."""
+"""Driver for partitioning and building a Relax module for CUTLASS offload."""
 import itertools
 import logging
 import multiprocessing
@@ -298,7 +298,7 @@ def tune_cutlass_kernels(
     Parameters
     ----------
     mod : IRModule
-        The Relay module with cutlass partitions.
+        The IRModule with cutlass partitions.
 
     sm : int
         An integer specifying the compute capability. For example, 75 for Turing and

python/tvm/contrib/mrvl.py

Lines changed: 1 addition & 1 deletion
@@ -208,7 +208,7 @@ def get_nodes_json_string(graph_json):
 
 @tvm._ffi.register_func("tvm.mrvl.ModifyConstNames")
 def modify_const_names(nodes_json_str, consts_json_str):
-    """This takes the graph module returned by relay.build an generates nodes and constant
+    """This takes the graph module returned by build an generates nodes and constant
     meta data suitable for compilation by the back end.
 
     Parameters
