diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt index 7f19dee7568..4622723a72b 100755 --- a/doc/CMakeLists.txt +++ b/doc/CMakeLists.txt @@ -1,25 +1,24 @@ - project(migraphx-doc) find_package(ROCM REQUIRED) include(ROCMDoxygenDoc) -set(DOXYGEN_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/doxygen/) +set(DOXYGEN_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/doxygen) rocm_add_doxygen_doc( OUTPUT_DIRECTORY ${DOXYGEN_OUTPUT} INPUT - ${PROJECT_SOURCE_DIR}/src + ${CMAKE_SOURCE_DIR}/src INCLUDE_PATH - ${PROJECT_SOURCE_DIR}/src/include - ${PROJECT_SOURCE_DIR}/src/targets/cpu/include - ${PROJECT_SOURCE_DIR}/src/targets/gpu/include + ${CMAKE_SOURCE_DIR}/src/include + ${CMAKE_SOURCE_DIR}/src/targets/cpu/include + ${CMAKE_SOURCE_DIR}/src/targets/gpu/include STRIP_FROM_INC_PATH - ${PROJECT_SOURCE_DIR}/src/include - ${PROJECT_SOURCE_DIR}/src/targets/cpu/include - ${PROJECT_SOURCE_DIR}/src/targets/gpu/include + ${CMAKE_SOURCE_DIR}/src/include + ${CMAKE_SOURCE_DIR}/src/targets/cpu/include + ${CMAKE_SOURCE_DIR}/src/targets/gpu/include EXCLUDE_PATTERNS - ${PROJECT_SOURCE_DIR}/src/targets/gpu/kernels - ${PROJECT_SOURCE_DIR}/src/targets/gpu/device + ${CMAKE_SOURCE_DIR}/src/targets/gpu/kernels + ${CMAKE_SOURCE_DIR}/src/targets/gpu/device SEARCH_INCLUDES YES MACRO_EXPANSION YES RECURSIVE YES @@ -39,13 +38,14 @@ rocm_add_doxygen_doc( EXTRACT_ALL YES ENUM_VALUES_PER_LINE 1 FULL_PATH_NAMES YES + WARN_LOGFILE "${DOXYGEN_OUTPUT}/DoxygenWarningLog.txt" PREDEFINED DOXYGEN ) include(ROCMSphinxDoc) rocm_add_sphinx_doc(src BUILDER html - OUTPUT_DIR html + OUTPUT_DIR html VARS breathe_projects.proj=${DOXYGEN_OUTPUT}/xml breathe_default_project=proj @@ -63,6 +63,6 @@ if(LATEX_FOUND) DEPENDS doxygen ) else() - message("Latex builder not found. Latex builder is required only for building the PDF documentation for MIGraph and is not necessary for building the library, or any other components. 
To build PDF documentation run make in ${CMAKE_CURRENT_SOURCE_DIR}/pdf, once a latex builder is installed.") + message("Latex builder not found. Latex builder is required only for building the PDF documentation for MIGraphX and is not necessary for building the library, or any other components. To build PDF documentation run make in ${CMAKE_CURRENT_SOURCE_DIR}/pdf, once a latex builder is installed.") endif() diff --git a/doc/src/developer_guide.rst b/doc/src/contributor_guide.rst similarity index 85% rename from doc/src/developer_guide.rst rename to doc/src/contributor_guide.rst index 81dae43aa01..c863b880033 100755 --- a/doc/src/developer_guide.rst +++ b/doc/src/contributor_guide.rst @@ -1,11 +1,11 @@ -Developer Guide +Contributor Guide =============== .. toctree:: :maxdepth: 2 :caption: Contents: - overview + dev_intro dev/data dev/operators dev/program diff --git a/doc/src/dev_intro.rst b/doc/src/dev_intro.rst new file mode 100644 index 00000000000..2b78c303cee --- /dev/null +++ b/doc/src/dev_intro.rst @@ -0,0 +1,152 @@ +MIGraphX Fundamentals +====================== + +MIGraphX provides an optimized execution engine for deep learning neural networks. +We will cover some simple operations in the MIGraphX framework here. +For a quick start guide to using MIGraphX, look in the examples directory: ``https://github.com/ROCmSoftwarePlatform/AMDMIGraphX/tree/develop/examples/migraphx``. + + +Location of the Examples +------------------------- + +The ``ref_dev_examples.cpp`` can be found in the test directory (``/test``). +The executable file ``test_ref_dev_examples`` based on this file will be created in the ``bin/`` of the build directory after running ``make -j$(nproc) test_ref_dev_examples``. +The executable will also be created when running ``make -j$(nproc) check``, alongside all the other tests. +Directions for building MIGraphX from source can be found in the main README file: ``https://github.com/ROCmSoftwarePlatform/AMDMIGraphX#readme``. 
+ + +Adding Two Literals +-------------------- + +A program is a collection of modules, which are collections of instructions to be executed when calling `eval `. +Each instruction has an associated `operation ` which represents the computation to be performed by the instruction. + +We start with a snippet of the simple ``add_two_literals()`` function:: + + // create the program and get a pointer to the main module + migraphx::program p; + auto* mm = p.get_main_module(); + + // add two literals to the program + auto one = mm->add_literal(1); + auto two = mm->add_literal(2); + + // make the add operation between the two literals and add it to the program + mm->add_instruction(migraphx::make_op("add"), one, two); + + // compile the program on the reference device + p.compile(migraphx::ref::target{}); + + // evaluate the program and retrieve the result + auto result = p.eval({}).back(); + std::cout << "add_two_literals: 1 + 2 = " << result << "\n"; + +We start by creating a simple ``migraphx::program`` object and then getting a pointer to the main module of it. +The program is a collection of ``modules`` that start executing from the main module, so instructions are added to the modules rather than directly onto the program object. +We then use the `add_literal ` function to add an instruction that stores the literal number ``1`` while returning an `instruction_ref `. +The returned `instruction_ref ` can be used in another instruction as an input. +We use the same `add_literal ` function to add a ``2`` to the program. +After creating the literals, we then create the instruction to add the numbers together. +This is done by using the `add_instruction ` function with the ``"add"`` `operation ` created by `make_op ` along with the previous `add_literal` `instruction_ref ` for the input arguments of the instruction. 
+Finally, we can run this `program ` by compiling it for the reference target (CPU) and then running it with `eval `. +The result is then retrieved and printed to the console. + +We can compile the program for the GPU as well, but the file will have to be moved to the ``test/gpu/`` directory and the correct target must be included:: + + #include + + +Using Parameters +----------------- + +The previous program will always produce the same value of adding ``1`` and ``2``. +In the next program we want to pass an input to a program and compute a value based on the input. +We can modify the program to take an input parameter ``x``, as seen in the ``add_parameter()`` function:: + + migraphx::program p; + auto* mm = p.get_main_module(); + migraphx::shape s{migraphx::shape::int32_type, {1}}; + + // add a "x" parameter with the shape s + auto x = mm->add_parameter("x", s); + auto two = mm->add_literal(2); + + // add the "add" instruction between the "x" parameter and "two" to the module + mm->add_instruction(migraphx::make_op("add"), x, two); + p.compile(migraphx::ref::target{}); + +This adds a parameter of type ``int32``, and compiles it for the CPU. +To run the program, we need to pass the parameter as a ``parameter_map`` when we call `eval `. +We create the ``parameter_map`` by setting the ``x`` key to an `argument ` object with an ``int`` data type:: + + // create a parameter_map object for passing a value to the "x" parameter + std::vector data = {4}; + migraphx::parameter_map params; + params["x"] = migraphx::argument(s, data.data()); + + auto result = p.eval(params).back(); + std::cout << "add_parameters: 4 + 2 = " << result << "\n"; + EXPECT(result.at() == 6); + + +Handling Tensor Data +--------------------- + +In the previous examples we have only been dealing with scalars, but the `shape ` class can describe multi-dimensional tensors. 
+For example, we can compute a simple convolution:: + + migraphx::program p; + auto* mm = p.get_main_module(); + + // create shape objects for the input tensor and weights + migraphx::shape input_shape{migraphx::shape::float_type, {2, 3, 4, 4}}; + migraphx::shape weights_shape{migraphx::shape::float_type, {3, 3, 3, 3}}; + + // create the parameters and add the "convolution" operation to the module + auto input = mm->add_parameter("X", input_shape); + auto weights = mm->add_parameter("W", weights_shape); + mm->add_instruction(migraphx::make_op("convolution", {{"padding", {1, 1}}, {"stride", {2, 2}}}), input, weights); + +Here we create two parameters for both the ``input`` and ``weights``. +In the previous examples, we created simple literals, however, most programs will take data from allocated buffers (usually on the GPU). +In this case, we can create `argument ` objects directly from the pointers to the buffers:: + + // Compile the program + p.compile(migraphx::ref::target{}); + + // Allocated buffers by the user + std::vector a = ...; + std::vector c = ...; + + // Solution vector + std::vector sol = ...; + + // Create the arguments in a parameter_map + migraphx::parameter_map params; + params["X"] = migraphx::argument(input_shape, a.data()); + params["W"] = migraphx::argument(weights_shape, c.data()); + + // Evaluate and confirm the result + auto result = p.eval(params).back(); + std::vector results_vector(64); + result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); + + EXPECT(migraphx::verify_range(results_vector, sol)); + +An `argument ` can handle memory buffers from either the GPU or the CPU. +By default when running the `program `, buffers are allocated on the corresponding target. +When compiling for the CPU, the buffers by default will be allocated on the CPU. +When compiling for the GPU, the buffers by default will be allocated on the GPU. 
+With the option ``offload_copy=true`` set while compiling for the GPU, the buffers will be located on the CPU. + + +Importing From ONNX +-------------------- + +A `program ` can be built directly from an onnx file using the MIGraphX ONNX parser. +This makes it easier to use neural networks directly from other frameworks. +In this case, there is a ``parse_onnx`` function:: + + program p = migraphx::parse_onnx("model.onnx"); + p.compile(migraphx::gpu::target{}); + diff --git a/doc/src/index.rst b/doc/src/index.rst index 199fee9db73..781e7bd157a 100755 --- a/doc/src/index.rst +++ b/doc/src/index.rst @@ -13,7 +13,7 @@ Welcome to AMD MIGraphX's documentation! py_user_guide cpp_user_guide driver - developer_guide + contributor_guide Indices and tables diff --git a/doc/src/overview.rst b/doc/src/overview.rst deleted file mode 100644 index f5a3f7c1708..00000000000 --- a/doc/src/overview.rst +++ /dev/null @@ -1,89 +0,0 @@ -Overview -======== - -MIGraphX provides an optimized execution engine for deep learning neural networks. - -Building a program ------------------- - -A program consists of a set of instructions to be executed when calling `eval `. Each instruction has an associated `operation ` which represents the computation to be performed by the instruction. - -We can start by building a simple program to add two numbers together:: - - program p; - instruction_ref one = p.add_literal(1); - instruction_ref two = p.add_literal(2); - p.add_instruction(add{}, one, two); - -The `add_literal ` function will add an instruction to the program to store a literal number. The `instruction_ref ` is a reference to the instruction in the program, which can be used to compose the output of the instruction with another instruction. - -After creating the literals, we then create the instruction to add the numbers together. This is done by using the `add{} ` operation class along with the `instruction_ref ` for the input arguments of the instruction. 
- -Finally, we can run this `program ` by compiling it for the cpu and then running it with `eval `:: - - p.compile(cpu::target{}); - argument result = p.eval({}); - -The easiest way to see the result is to print it:: - - std::cout << result; - -Which will print ``3``. - -We can also compile the program for the gpu as well. - -Adding parameters ------------------ - -Of course, this program will always produce the same value which is quite uninteresting. Instead, we want to pass an input to a program and compute a value based on the input. This can be done with a parameter. For example, we can modify the program to take an input ``x``:: - - program p; - instruction_ref x = p.add_parameter("x", {shape::int64_type}); - instruction_ref two = p.add_literal(2); - p.add_instruction(add{}, x, two); - p.compile(cpu::target{}); - -This adds a parameter of type ``int64``, and compiles it for the ``cpu``. To run the program, we need to pass the parameter to it when we call `eval `:: - - argument result = p.eval({ - {"x", literal{1}.get_argument()} - }); - std::cout << result; - -This will print ``3``. - -A parameter is given as an `argument `. In this case, the simplest way of creating an `argument ` is from a `literal `. - -Tensor data ------------ - -In this example we are just creating numbers, but the `shape ` class can describe multi-dimensional tensors. For example, we can build a simple network with convolution and relu:: - - program p; - instruction_ref input = p.add_parameter("x", shape{shape::float_type, {1, 3, 32, 32}}); - instruction_ref weights = p.add_parameter("w", shape{shape::float_type, {1, 3, 5, 5}}); - instruction_ref conv = p.add_instruction(convolution{}, input, weights); - p.add_instruction(activation{"relu"}, conv); - -Here we create two parameters for both the ``input`` and ``weights``. In the previous examples, we just created simple literals, however, most programs will take data from already allocated buffers(usually on the GPU). 
In this case, we can create `argument ` objects directly from the pointers to the buffers:: - - // Compile the program - p.compile(gpu::target{}); - // Allocated buffers by the user - float* input = ...; - float* weights = ...; - // Create the arguments - argument input_arg{shape{shape::float_type, {1, 3, 32, 32}}, input}; - argument weights_arg{shape{shape::float_type, {1, 3, 32, 32}}, weights}; - p.eval({{"x", input_arg}, {"w", weights_arg}}) - -An `argument ` can handle memory buffers from either the GPU or the CPU, but when running the `program `, buffers should be allocated for the corresponding target. That is, when compiling for the CPU, the buffers should be allocated on the CPU, and when compiling for the GPU the buffers should be allocated on the GPU. - -Importing from onnx -------------------- - -A `program ` can be built directly from an onnx file, which makes it easier to use neural networks directly from other frameworks. In this case, there is an ``parse_onnx`` function:: - - program p = parse_onnx("model.onnx"); - p.compile(gpu::target{}); - diff --git a/test/ref_dev_examples.cpp b/test/ref_dev_examples.cpp new file mode 100644 index 00000000000..d6d4a4a6890 --- /dev/null +++ b/test/ref_dev_examples.cpp @@ -0,0 +1,151 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "test.hpp" + +/*! + * Example MIGraphX programs for following the Contributor's Guide. + */ + +TEST_CASE(add_two_literals) +{ + /*! + * Simple MIGraphX program to add two literal values. + * Equivalent to adding two constant scalar values together. 
+ */ + // create the program and get a pointer to the main module + migraphx::program p; + auto* mm = p.get_main_module(); + + // add two literals to the program + auto one = mm->add_literal(1); + auto two = mm->add_literal(2); + + // make the "add" operation between the two literals and add it to the program + mm->add_instruction(migraphx::make_op("add"), one, two); + + // compile the program on the reference device + p.compile(migraphx::ref::target{}); + + // evaluate the program and retrieve the result + auto result = p.eval({}).back(); + std::cout << "add_two_literals: 1 + 2 = " << result << "\n"; + EXPECT(result.at() == 3); +} + +TEST_CASE(add_parameters) +{ + /*! + * Modified version of MIGraphX program seen in add_two_literals to accept a parameter. + * Equivalent to adding a constant scalar value with another scalar input. + */ + migraphx::program p; + auto* mm = p.get_main_module(); + migraphx::shape s{migraphx::shape::int32_type, {1}}; + + // add a "x" parameter with the shape s + auto x = mm->add_parameter("x", s); + auto two = mm->add_literal(2); + + // add the "add" instruction between the "x" parameter and "two" to the module + mm->add_instruction(migraphx::make_op("add"), x, two); + p.compile(migraphx::ref::target{}); + + // create a parameter_map object for passing a value to the "x" parameter + std::vector data = {4}; + migraphx::parameter_map params; + params["x"] = migraphx::argument(s, data.data()); + + auto result = p.eval(params).back(); + std::cout << "add_parameters: 4 + 2 = " << result << "\n"; + EXPECT(result.at() == 6); +} + +TEST_CASE(handling_tensors) +{ + /*! + * This example does a convolution operation over an input tensor using the given weighting + * tensor. This is meant to show an example of working with tensors in MIGraphX. The output + * tensor is compared against a precomputed solution tensor at the end of the program. 
+ */ + migraphx::program p; + auto* mm = p.get_main_module(); + + // create shape objects for the input tensor and weights + migraphx::shape input_shape{migraphx::shape::float_type, {2, 3, 4, 4}}; + migraphx::shape weights_shape{migraphx::shape::float_type, {2, 3, 3, 3}}; + + // create the parameters and add the "convolution" operation to the module + auto input = mm->add_parameter("X", input_shape); + auto weights = mm->add_parameter("W", weights_shape); + mm->add_instruction(migraphx::make_op("convolution", {{"padding", {1, 1}}, {"stride", {2, 2}}}), + input, + weights); + + p.compile(migraphx::ref::target{}); + + // Allocated buffers by the user + std::vector a = { + 2.71567607, -0.9960829, 0.91671127, 0.28140706, 0.63235772, 0.08077253, 0.80927712, + -0.59108931, -1.05421555, -2.76622486, -0.85044265, -0.52049929, 0.67726439, -0.65290606, + 0.02345525, -0.33579525, 0.38901961, 1.05473483, -1.31188095, 1.8963089, -0.07265259, + 0.947339, 0.41949373, -0.70814759, 0.25892952, 1.07311416, 1.2571274, -0.62318051, + -0.19951548, -0.94232577, -0.29393643, 0.42292568, -0.80230367, 1.40909171, 0.63617158, + 0.13900366, 1.09253144, -0.15265895, 1.54781747, 0.72780299, 1.09189606, -0.38068101, + 0.97057933, -0.58958799, 1.56188643, 0.21474874, 0.58725154, -1.27097559, -0.03024297, + 1.09437096, -0.4897908, 0.34838957, -1.31042492, -1.69069934, 0.86956722, -0.40457946, + 0.46691212, 1.29273605, 0.26464137, 0.22073045, -1.02178168, 0.22163901, -1.84387338, + 0.75522131, -0.45775682, -0.42241111, -1.50944722, 1.07256448, -1.95876884, -0.28106022, + 0.3341668, 2.13129425, -1.14728117, -1.06555498, -0.298444, -0.88322699, -0.65866792, + -2.06007552, 0.01374334, 0.45612028, 0.52715492, 1.01914406, -1.72659791, 0.80650896, + 0.16860051, 2.24112225, -0.78620857, 0.36566174, -0.07020134, -0.47976932, -0.68230027, + -0.94711417, -0.54506505, 1.66504931, -0.71860826, 0.61132306}; + + std::vector c = { + -0.14601797, -0.13000923, 0.06521662, 0.06178288, -0.11083675, 0.10154136, 
0.09990512, + 0.06030385, -0.11374587, -0.17523311, -0.14344215, 0.17802463, 0.06300922, -0.15325832, + 0.07066704, 0.05166031, 0.00615084, -0.02606523, 0.08083995, -0.17913306, 0.0624622, + 0.0735731, -0.04198661, -0.0164391, -0.06374192, 0.16569914, 0.10681538, 0.07370754, + 0.02802075, 0.00282027, 0.15104802, -0.11084409, -0.00197773, 0.07924436, 0.03528272, + 0.04765259, -0.15896152, 0.07917164, 0.12125669, -0.1154705, -0.11999125, 0.12749968, + -0.06269585, 0.18658121, -0.03944227, 0.0111798, -0.17731084, 0.11789055, -0.09982193, + 0.08142821, 0.0729029, 0.11303909, 0.12735154, 0.03885292}; + + // Solution vector + std::vector sol = {-0.20817225, + 0.87965256, + 0.14958936, + -1.24887264, + -0.06540672, + 0.20778663, + 0.40456355, + -0.99900877, + 0.4917807, + 0.1994698, + 0.64205718, + 0.37798831, + -0.25315839, + 0.44276932, + -0.16138598, + 0.79344082}; + + // Create the arguments in a parameter_map + migraphx::parameter_map params; + params["X"] = migraphx::argument(input_shape, a.data()); + params["W"] = migraphx::argument(weights_shape, c.data()); + + // Evaluate and confirm the result + auto result = p.eval(params).back(); + std::vector results_vector(64); + result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); + + EXPECT(migraphx::verify_range(results_vector, sol)); +} + +int main(int argc, const char* argv[]) { test::run(argc, argv); }