From 044539b4e3684690000eac6340fcb88e96c26700 Mon Sep 17 00:00:00 2001 From: Alex McCaskey Date: Fri, 8 Nov 2024 18:20:19 +0000 Subject: [PATCH] Initial commit. Hello world! Signed-off-by: boschmitt <7152025+boschmitt@users.noreply.github.com> --- .clang-format | 11 + .cudaq_version | 11 + .github/actions/build-lib/action.yaml | 80 ++ .github/actions/build-lib/build_all.sh | 15 + .github/actions/build-lib/build_qec.sh | 14 + .github/actions/build-lib/build_solvers.sh | 14 + .github/actions/get-cudaq-build/action.yaml | 168 +++ .../actions/get-cudaq-build/build_cudaq.sh | 75 ++ .github/actions/get-cudaq-build/get_assets.py | 57 + .github/actions/get-cudaq-version/action.yaml | 30 + .github/copy-pr-bot.yaml | 1 + .github/workflows/all_libs.yaml | 81 ++ .github/workflows/build_wheels.yaml | 51 + .github/workflows/cudaq_cache.yaml | 47 + .github/workflows/docs.yaml | 104 ++ .github/workflows/lib_qec.yaml | 77 ++ .github/workflows/lib_solvers.yaml | 81 ++ .github/workflows/pr_cache_cleanup.yaml | 31 + .github/workflows/pr_workflow.yaml | 129 ++ .github/workflows/scripts/build_cudaq.sh | 107 ++ .github/workflows/scripts/build_wheels.sh | 127 ++ .github/workflows/scripts/install_git_cli.sh | 18 + .gitignore | 98 ++ CMakeLists.txt | 106 ++ Contributing.md | 95 ++ Contributor_License_Agreement.md | 70 + LICENSE | 16 + NOTICE | 70 + README.md | 32 + cmake/Modules/CUDA-QX.cmake | 167 +++ cmake/Modules/FindSphinx.cmake | 14 + docker/build_env/Dockerfile | 22 + docker/release/Dockerfile | 58 + docker/release/Dockerfile.wheel | 98 ++ docs/CMakeLists.txt | 110 ++ docs/Doxyfile.in | 56 + docs/README.md | 67 + docs/sphinx/_static/cuda_quantum_icon.svg | 553 ++++++++ docs/sphinx/_static/cudaq_override.css | 27 + docs/sphinx/_templates/autosummary/class.rst | 26 + .../_templates/autosummary/dataclass.rst | 10 + docs/sphinx/_templates/layout.html | 62 + docs/sphinx/_templates/openapi.html | 17 + docs/sphinx/api/core/cpp_api.rst | 44 + docs/sphinx/api/qec/cpp_api.rst | 34 + docs/sphinx/api/qec/python_api.rst | 18 + docs/sphinx/api/solvers/cpp_api.rst | 77 ++ docs/sphinx/api/solvers/python_api.rst | 30 + docs/sphinx/components/qec/introduction.rst | 967 ++++++++++++++ .../components/solvers/introduction.rst | 554 ++++++++ docs/sphinx/conf.py.in | 189 +++ .../examples/qec/cpp/circuit_level_noise.cpp | 126 ++ .../examples/qec/cpp/code_capacity_noise.cpp | 105 ++ .../qec/python/circuit_level_noise.py | 82 ++ .../qec/python/code_capacity_noise.py | 56 + .../examples/qec/python/pseudo_threshold.py | 60 + docs/sphinx/examples/solvers/cpp/adapt_h2.cpp | 39 + .../solvers/cpp/molecular_docking_qaoa.cpp | 60 + .../sphinx/examples/solvers/cpp/uccsd_vqe.cpp | 53 + .../examples/solvers/python/adapt_h2.py | 56 + .../python/generate_molecular_hamiltonians.py | 85 ++ .../solvers/python/molecular_docking_qaoa.py | 51 + .../examples/solvers/python/uccsd_vqe.py | 45 + .../examples_rst/qec/circuit_level_noise.rst | 77 ++ .../examples_rst/qec/code_capacity_noise.rst | 84 ++ docs/sphinx/examples_rst/qec/examples.rst | 11 + docs/sphinx/examples_rst/solvers/adapt.rst | 39 + docs/sphinx/examples_rst/solvers/examples.rst | 13 + .../solvers/molecular_hamiltonians.rst | 103 ++ docs/sphinx/examples_rst/solvers/qaoa.rst | 60 + docs/sphinx/examples_rst/solvers/vqe.rst | 66 + docs/sphinx/index.rst | 66 + docs/sphinx/quickstart/installation.rst | 136 ++ examples | 1 + libs/core/CMakeLists.txt | 73 ++ .../include/cuda-qx/core/extension_point.h | 201 +++ libs/core/include/cuda-qx/core/graph.h | 122 ++ .../include/cuda-qx/core/heterogeneous_map.h 
| 206 +++ libs/core/include/cuda-qx/core/tear_down.h | 35 + libs/core/include/cuda-qx/core/tensor.h | 231 ++++ libs/core/include/cuda-qx/core/tensor_impl.h | 159 +++ libs/core/include/cuda-qx/core/tuple_utils.h | 39 + libs/core/include/cuda-qx/core/type_traits.h | 119 ++ libs/core/lib/CMakeLists.txt | 33 + libs/core/lib/graph.cpp | 215 ++++ libs/core/lib/tear_down.cpp | 27 + libs/core/lib/tensor_impls/xtensor_impl.cpp | 315 +++++ libs/core/unittests/CMakeLists.txt | 37 + libs/core/unittests/test_core.cpp | 1121 +++++++++++++++++ libs/qec/CMakeLists.txt | 113 ++ libs/qec/README.md | 31 + libs/qec/include/cudaq/qec/code.h | 239 ++++ libs/qec/include/cudaq/qec/codes/repetition.h | 77 ++ libs/qec/include/cudaq/qec/codes/steane.h | 134 ++ libs/qec/include/cudaq/qec/decoder.h | 234 ++++ libs/qec/include/cudaq/qec/experiments.h | 103 ++ libs/qec/include/cudaq/qec/noise_model.h | 103 ++ libs/qec/include/cudaq/qec/patch.h | 32 + libs/qec/include/cudaq/qec/stabilizer_utils.h | 30 + libs/qec/lib/CMakeLists.txt | 83 ++ libs/qec/lib/code.cpp | 76 ++ libs/qec/lib/codes/CMakeLists.txt | 15 + libs/qec/lib/codes/repetition.cpp | 52 + libs/qec/lib/codes/repetition_device.cpp | 43 + libs/qec/lib/codes/steane.cpp | 38 + libs/qec/lib/codes/steane_device.cpp | 101 ++ libs/qec/lib/decoder.cpp | 73 ++ libs/qec/lib/decoders/single_error_lut.cpp | 91 ++ libs/qec/lib/device/CMakeLists.txt | 12 + libs/qec/lib/device/memory_circuit.cpp | 120 ++ libs/qec/lib/device/memory_circuit.h | 66 + libs/qec/lib/experiments.cpp | 198 +++ libs/qec/lib/stabilizer_utils.cpp | 135 ++ libs/qec/pyproject.toml | 47 + libs/qec/python/CMakeLists.txt | 87 ++ libs/qec/python/bindings/cudaqx_qec.cpp | 19 + libs/qec/python/bindings/py_code.cpp | 452 +++++++ libs/qec/python/bindings/py_code.h | 15 + libs/qec/python/bindings/py_decoder.cpp | 172 +++ libs/qec/python/bindings/py_decoder.h | 15 + libs/qec/python/bindings/type_casters.h | 82 ++ libs/qec/python/bindings/utils.h | 68 + libs/qec/python/cudaq_qec/__init__.py | 45 + libs/qec/python/cudaq_qec/patch.py | 49 + .../python/cudaq_qec/plugins/codes/example.py | 70 + .../cudaq_qec/plugins/decoders/example.py | 23 + libs/qec/python/tests/test_code.py | 227 ++++ libs/qec/python/tests/test_decoder.py | 98 ++ libs/qec/unittests/CMakeLists.txt | 46 + .../unittests/backend-specific/CMakeLists.txt | 9 + .../backend-specific/stim/CMakeLists.txt | 26 + .../backend-specific/stim/test_qec_stim.cpp | 537 ++++++++ .../qec/unittests/decoders/sample_decoder.cpp | 46 + libs/qec/unittests/test_decoders.cpp | 149 +++ libs/qec/unittests/test_qec.cpp | 584 +++++++++ libs/solvers/CMakeLists.txt | 145 +++ libs/solvers/README.md | 32 + libs/solvers/include/cudaq/solvers/adapt.h | 141 +++ .../cudaq/solvers/adapt/adapt_simulator.h | 49 + .../include/cudaq/solvers/observe_gradient.h | 149 +++ .../observe_gradients/central_difference.h | 37 + .../observe_gradients/forward_difference.h | 34 + .../observe_gradients/parameter_shift.h | 34 + .../solvers/include/cudaq/solvers/operators.h | 14 + .../cudaq/solvers/operators/graph/clique.h | 43 + .../cudaq/solvers/operators/graph/max_cut.h | 48 + .../cudaq/solvers/operators/molecule.h | 203 +++ .../operators/molecule/fermion_compiler.h | 32 + .../fermion_compilers/jordan_wigner.h | 23 + .../molecule/molecule_package_driver.h | 43 + .../cudaq/solvers/operators/operator_pool.h | 45 + .../operator_pools/qaoa_operator_pool.h | 27 + .../operator_pools/spin_complement_gsd.h | 27 + .../operator_pools/uccsd_operator_pool.h | 25 + .../solvers/include/cudaq/solvers/optimizer.h | 129 ++ 
.../include/cudaq/solvers/optimizers/cobyla.h | 37 + .../include/cudaq/solvers/optimizers/lbfgs.h | 36 + libs/solvers/include/cudaq/solvers/qaoa.h | 160 +++ .../include/cudaq/solvers/qaoa/qaoa_device.h | 51 + .../include/cudaq/solvers/stateprep/uccsd.h | 80 ++ libs/solvers/include/cudaq/solvers/vqe.h | 404 ++++++ libs/solvers/lib/CMakeLists.txt | 91 ++ libs/solvers/lib/adapt/CMakeLists.txt | 11 + libs/solvers/lib/adapt/adapt.cpp | 14 + libs/solvers/lib/adapt/adapt_simulator.cpp | 219 ++++ libs/solvers/lib/adapt/device/CMakeLists.txt | 13 + libs/solvers/lib/adapt/device/adapt.cpp | 27 + libs/solvers/lib/adapt/device/adapt.h | 42 + .../lib/adapt/device/prepare_state.cpp | 12 + libs/solvers/lib/adapt/device/prepare_state.h | 19 + .../observe_gradients/central_difference.cpp | 32 + .../observe_gradients/forward_difference.cpp | 28 + .../observe_gradients/observe_gradient.cpp | 14 + .../lib/observe_gradients/parameter_shift.cpp | 31 + libs/solvers/lib/operators/graph/clique.cpp | 51 + libs/solvers/lib/operators/graph/max_cut.cpp | 45 + .../molecule/drivers/library_utils.h | 74 ++ .../operators/molecule/drivers/process.cpp | 80 ++ .../lib/operators/molecule/drivers/process.h | 33 + .../molecule/drivers/pyscf_driver.cpp | 192 +++ .../fermion_compilers/fermion_compiler.cpp | 10 + .../fermion_compilers/jordan_wigner.cpp | 443 +++++++ .../lib/operators/molecule/molecule.cpp | 144 +++ .../operator_pools/operator_pool.cpp | 11 + .../operator_pools/qaoa_operator_pool.cpp | 94 ++ .../operator_pools/spin_complement_gsd.cpp | 160 +++ .../operator_pools/uccsd_operator_pool.cpp | 122 ++ libs/solvers/lib/optimizers/CMakeLists.txt | 11 + .../lib/optimizers/lbfgs/CMakeLists.txt | 25 + .../lib/optimizers/lbfgs/LBFGSObjective.cpp | 25 + .../lib/optimizers/lbfgs/LBFGSObjective.h | 119 ++ libs/solvers/lib/optimizers/lbfgs/lbfgs.cpp | 25 + libs/solvers/lib/optimizers/optimizer.cpp | 11 + .../lib/optimizers/prima/CMakeLists.txt | 38 + libs/solvers/lib/optimizers/prima/cobyla.cpp | 70 + libs/solvers/lib/qaoa/CMakeLists.txt | 11 + libs/solvers/lib/qaoa/device/CMakeLists.txt | 10 + libs/solvers/lib/qaoa/device/qaoa_device.cpp | 45 + libs/solvers/lib/qaoa/qaoa.cpp | 158 +++ libs/solvers/lib/stateprep/CMakeLists.txt | 12 + libs/solvers/lib/stateprep/uccsd.cpp | 584 +++++++++ libs/solvers/pyproject.toml | 52 + libs/solvers/python/CMakeLists.txt | 90 ++ .../python/bindings/cudaqx_solvers.cpp | 19 + .../python/bindings/solvers/py_optim.cpp | 179 +++ .../python/bindings/solvers/py_optim.h | 15 + .../python/bindings/solvers/py_solvers.cpp | 946 ++++++++++++++ .../python/bindings/solvers/py_solvers.h | 15 + .../python/bindings/utils/kwargs_utils.h | 53 + .../python/bindings/utils/type_casters.h | 82 ++ libs/solvers/python/cudaq_solvers/__init__.py | 10 + .../pyscf/generators/gas_phase_generator.py | 670 ++++++++++ .../molecule/pyscf/hamiltonian_generator.py | 18 + libs/solvers/python/tests/resources/LiH.xyz | 4 + libs/solvers/python/tests/test_adapt.py | 79 ++ libs/solvers/python/tests/test_molecule.py | 181 +++ .../python/tests/test_operator_pools.py | 103 ++ libs/solvers/python/tests/test_optim.py | 34 + libs/solvers/python/tests/test_qaoa.py | 379 ++++++ libs/solvers/python/tests/test_uccsd.py | 143 +++ libs/solvers/python/tests/test_vqe.py | 104 ++ libs/solvers/tools/CMakeLists.txt | 8 + libs/solvers/tools/molecule/CMakeLists.txt | 27 + libs/solvers/tools/molecule/cudaq-pyscf.py | 172 +++ libs/solvers/unittests/CMakeLists.txt | 69 + libs/solvers/unittests/nvqpp/CMakeLists.txt | 11 + 
libs/solvers/unittests/nvqpp/test_kernels.cpp | 40 + libs/solvers/unittests/nvqpp/test_kernels.h | 9 + libs/solvers/unittests/test_adapt.cpp | 48 + libs/solvers/unittests/test_molecule.cpp | 138 ++ .../solvers/unittests/test_operator_pools.cpp | 188 +++ libs/solvers/unittests/test_optimizers.cpp | 86 ++ libs/solvers/unittests/test_qaoa.cpp | 473 +++++++ libs/solvers/unittests/test_uccsd.cpp | 67 + libs/solvers/unittests/test_vqe.cpp | 93 ++ scripts/build_docs.sh | 95 ++ scripts/build_wheels.sh | 67 + scripts/ci/build_cudaq_wheel.sh | 157 +++ scripts/ci/build_qec_wheel.sh | 48 + scripts/ci/build_solvers_wheel.sh | 51 + scripts/ci/test_wheels.sh | 38 + scripts/run_clang_format.sh | 41 + scripts/test_cudaqx_build.sh | 99 ++ scripts/test_libs_builds.sh | 121 ++ scripts/test_wheels.sh | 53 + .../container/validate_container.sh | 129 ++ scripts/validation/wheel/install_packages.sh | 28 + scripts/validation/wheel/validate_wheels.sh | 112 ++ 248 files changed, 25336 insertions(+) create mode 100644 .clang-format create mode 100644 .cudaq_version create mode 100644 .github/actions/build-lib/action.yaml create mode 100755 .github/actions/build-lib/build_all.sh create mode 100755 .github/actions/build-lib/build_qec.sh create mode 100755 .github/actions/build-lib/build_solvers.sh create mode 100644 .github/actions/get-cudaq-build/action.yaml create mode 100644 .github/actions/get-cudaq-build/build_cudaq.sh create mode 100644 .github/actions/get-cudaq-build/get_assets.py create mode 100644 .github/actions/get-cudaq-version/action.yaml create mode 100644 .github/copy-pr-bot.yaml create mode 100644 .github/workflows/all_libs.yaml create mode 100644 .github/workflows/build_wheels.yaml create mode 100644 .github/workflows/cudaq_cache.yaml create mode 100644 .github/workflows/docs.yaml create mode 100644 .github/workflows/lib_qec.yaml create mode 100644 .github/workflows/lib_solvers.yaml create mode 100644 .github/workflows/pr_cache_cleanup.yaml create mode 100644 .github/workflows/pr_workflow.yaml create mode 100755 .github/workflows/scripts/build_cudaq.sh create mode 100755 .github/workflows/scripts/build_wheels.sh create mode 100644 .github/workflows/scripts/install_git_cli.sh create mode 100644 .gitignore create mode 100644 CMakeLists.txt create mode 100644 Contributing.md create mode 100644 Contributor_License_Agreement.md create mode 100644 LICENSE create mode 100644 NOTICE create mode 100644 README.md create mode 100644 cmake/Modules/CUDA-QX.cmake create mode 100644 cmake/Modules/FindSphinx.cmake create mode 100644 docker/build_env/Dockerfile create mode 100644 docker/release/Dockerfile create mode 100644 docker/release/Dockerfile.wheel create mode 100644 docs/CMakeLists.txt create mode 100644 docs/Doxyfile.in create mode 100644 docs/README.md create mode 100644 docs/sphinx/_static/cuda_quantum_icon.svg create mode 100644 docs/sphinx/_static/cudaq_override.css create mode 100644 docs/sphinx/_templates/autosummary/class.rst create mode 100644 docs/sphinx/_templates/autosummary/dataclass.rst create mode 100644 docs/sphinx/_templates/layout.html create mode 100644 docs/sphinx/_templates/openapi.html create mode 100644 docs/sphinx/api/core/cpp_api.rst create mode 100644 docs/sphinx/api/qec/cpp_api.rst create mode 100644 docs/sphinx/api/qec/python_api.rst create mode 100644 docs/sphinx/api/solvers/cpp_api.rst create mode 100644 docs/sphinx/api/solvers/python_api.rst create mode 100644 docs/sphinx/components/qec/introduction.rst create mode 100644 docs/sphinx/components/solvers/introduction.rst create mode 
100644 docs/sphinx/conf.py.in create mode 100644 docs/sphinx/examples/qec/cpp/circuit_level_noise.cpp create mode 100644 docs/sphinx/examples/qec/cpp/code_capacity_noise.cpp create mode 100644 docs/sphinx/examples/qec/python/circuit_level_noise.py create mode 100644 docs/sphinx/examples/qec/python/code_capacity_noise.py create mode 100644 docs/sphinx/examples/qec/python/pseudo_threshold.py create mode 100644 docs/sphinx/examples/solvers/cpp/adapt_h2.cpp create mode 100644 docs/sphinx/examples/solvers/cpp/molecular_docking_qaoa.cpp create mode 100644 docs/sphinx/examples/solvers/cpp/uccsd_vqe.cpp create mode 100644 docs/sphinx/examples/solvers/python/adapt_h2.py create mode 100644 docs/sphinx/examples/solvers/python/generate_molecular_hamiltonians.py create mode 100644 docs/sphinx/examples/solvers/python/molecular_docking_qaoa.py create mode 100644 docs/sphinx/examples/solvers/python/uccsd_vqe.py create mode 100644 docs/sphinx/examples_rst/qec/circuit_level_noise.rst create mode 100644 docs/sphinx/examples_rst/qec/code_capacity_noise.rst create mode 100644 docs/sphinx/examples_rst/qec/examples.rst create mode 100644 docs/sphinx/examples_rst/solvers/adapt.rst create mode 100644 docs/sphinx/examples_rst/solvers/examples.rst create mode 100644 docs/sphinx/examples_rst/solvers/molecular_hamiltonians.rst create mode 100644 docs/sphinx/examples_rst/solvers/qaoa.rst create mode 100644 docs/sphinx/examples_rst/solvers/vqe.rst create mode 100644 docs/sphinx/index.rst create mode 100644 docs/sphinx/quickstart/installation.rst create mode 120000 examples create mode 100644 libs/core/CMakeLists.txt create mode 100644 libs/core/include/cuda-qx/core/extension_point.h create mode 100644 libs/core/include/cuda-qx/core/graph.h create mode 100644 libs/core/include/cuda-qx/core/heterogeneous_map.h create mode 100644 libs/core/include/cuda-qx/core/tear_down.h create mode 100644 libs/core/include/cuda-qx/core/tensor.h create mode 100644 libs/core/include/cuda-qx/core/tensor_impl.h create mode 100644 libs/core/include/cuda-qx/core/tuple_utils.h create mode 100644 libs/core/include/cuda-qx/core/type_traits.h create mode 100644 libs/core/lib/CMakeLists.txt create mode 100644 libs/core/lib/graph.cpp create mode 100644 libs/core/lib/tear_down.cpp create mode 100644 libs/core/lib/tensor_impls/xtensor_impl.cpp create mode 100644 libs/core/unittests/CMakeLists.txt create mode 100644 libs/core/unittests/test_core.cpp create mode 100644 libs/qec/CMakeLists.txt create mode 100644 libs/qec/README.md create mode 100644 libs/qec/include/cudaq/qec/code.h create mode 100644 libs/qec/include/cudaq/qec/codes/repetition.h create mode 100644 libs/qec/include/cudaq/qec/codes/steane.h create mode 100644 libs/qec/include/cudaq/qec/decoder.h create mode 100644 libs/qec/include/cudaq/qec/experiments.h create mode 100644 libs/qec/include/cudaq/qec/noise_model.h create mode 100644 libs/qec/include/cudaq/qec/patch.h create mode 100644 libs/qec/include/cudaq/qec/stabilizer_utils.h create mode 100644 libs/qec/lib/CMakeLists.txt create mode 100644 libs/qec/lib/code.cpp create mode 100644 libs/qec/lib/codes/CMakeLists.txt create mode 100644 libs/qec/lib/codes/repetition.cpp create mode 100644 libs/qec/lib/codes/repetition_device.cpp create mode 100644 libs/qec/lib/codes/steane.cpp create mode 100644 libs/qec/lib/codes/steane_device.cpp create mode 100644 libs/qec/lib/decoder.cpp create mode 100644 libs/qec/lib/decoders/single_error_lut.cpp create mode 100644 libs/qec/lib/device/CMakeLists.txt create mode 100644 
libs/qec/lib/device/memory_circuit.cpp create mode 100644 libs/qec/lib/device/memory_circuit.h create mode 100644 libs/qec/lib/experiments.cpp create mode 100644 libs/qec/lib/stabilizer_utils.cpp create mode 100644 libs/qec/pyproject.toml create mode 100644 libs/qec/python/CMakeLists.txt create mode 100644 libs/qec/python/bindings/cudaqx_qec.cpp create mode 100644 libs/qec/python/bindings/py_code.cpp create mode 100644 libs/qec/python/bindings/py_code.h create mode 100644 libs/qec/python/bindings/py_decoder.cpp create mode 100644 libs/qec/python/bindings/py_decoder.h create mode 100644 libs/qec/python/bindings/type_casters.h create mode 100644 libs/qec/python/bindings/utils.h create mode 100644 libs/qec/python/cudaq_qec/__init__.py create mode 100644 libs/qec/python/cudaq_qec/patch.py create mode 100644 libs/qec/python/cudaq_qec/plugins/codes/example.py create mode 100644 libs/qec/python/cudaq_qec/plugins/decoders/example.py create mode 100644 libs/qec/python/tests/test_code.py create mode 100644 libs/qec/python/tests/test_decoder.py create mode 100644 libs/qec/unittests/CMakeLists.txt create mode 100644 libs/qec/unittests/backend-specific/CMakeLists.txt create mode 100644 libs/qec/unittests/backend-specific/stim/CMakeLists.txt create mode 100644 libs/qec/unittests/backend-specific/stim/test_qec_stim.cpp create mode 100644 libs/qec/unittests/decoders/sample_decoder.cpp create mode 100644 libs/qec/unittests/test_decoders.cpp create mode 100644 libs/qec/unittests/test_qec.cpp create mode 100644 libs/solvers/CMakeLists.txt create mode 100644 libs/solvers/README.md create mode 100644 libs/solvers/include/cudaq/solvers/adapt.h create mode 100644 libs/solvers/include/cudaq/solvers/adapt/adapt_simulator.h create mode 100644 libs/solvers/include/cudaq/solvers/observe_gradient.h create mode 100644 libs/solvers/include/cudaq/solvers/observe_gradients/central_difference.h create mode 100644 libs/solvers/include/cudaq/solvers/observe_gradients/forward_difference.h create mode 100644 libs/solvers/include/cudaq/solvers/observe_gradients/parameter_shift.h create mode 100644 libs/solvers/include/cudaq/solvers/operators.h create mode 100644 libs/solvers/include/cudaq/solvers/operators/graph/clique.h create mode 100644 libs/solvers/include/cudaq/solvers/operators/graph/max_cut.h create mode 100644 libs/solvers/include/cudaq/solvers/operators/molecule.h create mode 100644 libs/solvers/include/cudaq/solvers/operators/molecule/fermion_compiler.h create mode 100644 libs/solvers/include/cudaq/solvers/operators/molecule/fermion_compilers/jordan_wigner.h create mode 100644 libs/solvers/include/cudaq/solvers/operators/molecule/molecule_package_driver.h create mode 100644 libs/solvers/include/cudaq/solvers/operators/operator_pool.h create mode 100644 libs/solvers/include/cudaq/solvers/operators/operator_pools/qaoa_operator_pool.h create mode 100644 libs/solvers/include/cudaq/solvers/operators/operator_pools/spin_complement_gsd.h create mode 100644 libs/solvers/include/cudaq/solvers/operators/operator_pools/uccsd_operator_pool.h create mode 100644 libs/solvers/include/cudaq/solvers/optimizer.h create mode 100644 libs/solvers/include/cudaq/solvers/optimizers/cobyla.h create mode 100644 libs/solvers/include/cudaq/solvers/optimizers/lbfgs.h create mode 100644 libs/solvers/include/cudaq/solvers/qaoa.h create mode 100644 libs/solvers/include/cudaq/solvers/qaoa/qaoa_device.h create mode 100644 libs/solvers/include/cudaq/solvers/stateprep/uccsd.h create mode 100644 libs/solvers/include/cudaq/solvers/vqe.h create mode 100644 
libs/solvers/lib/CMakeLists.txt create mode 100644 libs/solvers/lib/adapt/CMakeLists.txt create mode 100644 libs/solvers/lib/adapt/adapt.cpp create mode 100644 libs/solvers/lib/adapt/adapt_simulator.cpp create mode 100644 libs/solvers/lib/adapt/device/CMakeLists.txt create mode 100644 libs/solvers/lib/adapt/device/adapt.cpp create mode 100644 libs/solvers/lib/adapt/device/adapt.h create mode 100644 libs/solvers/lib/adapt/device/prepare_state.cpp create mode 100644 libs/solvers/lib/adapt/device/prepare_state.h create mode 100644 libs/solvers/lib/observe_gradients/central_difference.cpp create mode 100644 libs/solvers/lib/observe_gradients/forward_difference.cpp create mode 100644 libs/solvers/lib/observe_gradients/observe_gradient.cpp create mode 100644 libs/solvers/lib/observe_gradients/parameter_shift.cpp create mode 100644 libs/solvers/lib/operators/graph/clique.cpp create mode 100644 libs/solvers/lib/operators/graph/max_cut.cpp create mode 100644 libs/solvers/lib/operators/molecule/drivers/library_utils.h create mode 100644 libs/solvers/lib/operators/molecule/drivers/process.cpp create mode 100644 libs/solvers/lib/operators/molecule/drivers/process.h create mode 100644 libs/solvers/lib/operators/molecule/drivers/pyscf_driver.cpp create mode 100644 libs/solvers/lib/operators/molecule/fermion_compilers/fermion_compiler.cpp create mode 100644 libs/solvers/lib/operators/molecule/fermion_compilers/jordan_wigner.cpp create mode 100644 libs/solvers/lib/operators/molecule/molecule.cpp create mode 100644 libs/solvers/lib/operators/operator_pools/operator_pool.cpp create mode 100644 libs/solvers/lib/operators/operator_pools/qaoa_operator_pool.cpp create mode 100644 libs/solvers/lib/operators/operator_pools/spin_complement_gsd.cpp create mode 100644 libs/solvers/lib/operators/operator_pools/uccsd_operator_pool.cpp create mode 100644 libs/solvers/lib/optimizers/CMakeLists.txt create mode 100644 libs/solvers/lib/optimizers/lbfgs/CMakeLists.txt create mode 100644 libs/solvers/lib/optimizers/lbfgs/LBFGSObjective.cpp create mode 100644 libs/solvers/lib/optimizers/lbfgs/LBFGSObjective.h create mode 100644 libs/solvers/lib/optimizers/lbfgs/lbfgs.cpp create mode 100644 libs/solvers/lib/optimizers/optimizer.cpp create mode 100644 libs/solvers/lib/optimizers/prima/CMakeLists.txt create mode 100644 libs/solvers/lib/optimizers/prima/cobyla.cpp create mode 100644 libs/solvers/lib/qaoa/CMakeLists.txt create mode 100644 libs/solvers/lib/qaoa/device/CMakeLists.txt create mode 100644 libs/solvers/lib/qaoa/device/qaoa_device.cpp create mode 100644 libs/solvers/lib/qaoa/qaoa.cpp create mode 100644 libs/solvers/lib/stateprep/CMakeLists.txt create mode 100644 libs/solvers/lib/stateprep/uccsd.cpp create mode 100644 libs/solvers/pyproject.toml create mode 100644 libs/solvers/python/CMakeLists.txt create mode 100644 libs/solvers/python/bindings/cudaqx_solvers.cpp create mode 100644 libs/solvers/python/bindings/solvers/py_optim.cpp create mode 100644 libs/solvers/python/bindings/solvers/py_optim.h create mode 100644 libs/solvers/python/bindings/solvers/py_solvers.cpp create mode 100644 libs/solvers/python/bindings/solvers/py_solvers.h create mode 100644 libs/solvers/python/bindings/utils/kwargs_utils.h create mode 100644 libs/solvers/python/bindings/utils/type_casters.h create mode 100644 libs/solvers/python/cudaq_solvers/__init__.py create mode 100644 libs/solvers/python/cudaq_solvers/tools/molecule/pyscf/generators/gas_phase_generator.py create mode 100644 
libs/solvers/python/cudaq_solvers/tools/molecule/pyscf/hamiltonian_generator.py create mode 100644 libs/solvers/python/tests/resources/LiH.xyz create mode 100644 libs/solvers/python/tests/test_adapt.py create mode 100644 libs/solvers/python/tests/test_molecule.py create mode 100644 libs/solvers/python/tests/test_operator_pools.py create mode 100644 libs/solvers/python/tests/test_optim.py create mode 100644 libs/solvers/python/tests/test_qaoa.py create mode 100644 libs/solvers/python/tests/test_uccsd.py create mode 100644 libs/solvers/python/tests/test_vqe.py create mode 100644 libs/solvers/tools/CMakeLists.txt create mode 100644 libs/solvers/tools/molecule/CMakeLists.txt create mode 100644 libs/solvers/tools/molecule/cudaq-pyscf.py create mode 100644 libs/solvers/unittests/CMakeLists.txt create mode 100644 libs/solvers/unittests/nvqpp/CMakeLists.txt create mode 100644 libs/solvers/unittests/nvqpp/test_kernels.cpp create mode 100644 libs/solvers/unittests/nvqpp/test_kernels.h create mode 100644 libs/solvers/unittests/test_adapt.cpp create mode 100644 libs/solvers/unittests/test_molecule.cpp create mode 100644 libs/solvers/unittests/test_operator_pools.cpp create mode 100644 libs/solvers/unittests/test_optimizers.cpp create mode 100644 libs/solvers/unittests/test_qaoa.cpp create mode 100644 libs/solvers/unittests/test_uccsd.cpp create mode 100644 libs/solvers/unittests/test_vqe.cpp create mode 100644 scripts/build_docs.sh create mode 100755 scripts/build_wheels.sh create mode 100644 scripts/ci/build_cudaq_wheel.sh create mode 100644 scripts/ci/build_qec_wheel.sh create mode 100644 scripts/ci/build_solvers_wheel.sh create mode 100644 scripts/ci/test_wheels.sh create mode 100644 scripts/run_clang_format.sh create mode 100755 scripts/test_cudaqx_build.sh create mode 100755 scripts/test_libs_builds.sh create mode 100755 scripts/test_wheels.sh create mode 100755 scripts/validation/container/validate_container.sh create mode 100644 scripts/validation/wheel/install_packages.sh create mode 100755 scripts/validation/wheel/validate_wheels.sh diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..e695289 --- /dev/null +++ b/.clang-format @@ -0,0 +1,11 @@ +BasedOnStyle: LLVM +AlwaysBreakTemplateDeclarations: Yes +IncludeCategories: + - Regex: '^<' + Priority: 4 + - Regex: '^"(llvm|llvm-c|clang|clang-c|mlir|mlir-c)/' + Priority: 3 + - Regex: '^"(qoda|\.\.)/' + Priority: 2 + - Regex: '.*' + Priority: 1 diff --git a/.cudaq_version b/.cudaq_version new file mode 100644 index 0000000..5a525a4 --- /dev/null +++ b/.cudaq_version @@ -0,0 +1,11 @@ +{ + "cudaq": { + "repository": "NVIDIA/cuda-quantum", + "ref": "d63dc8d93b4f9c95677aad2ddad2f9020cde45d0" + }, + "cuquantum": { + "url": "https://developer.download.nvidia.com/compute/cuquantum/redist/cuquantum/linux-x86_64/", + "pattern": "cuquantum-linux-x86_64-24.11.0.21_cuda12-archive.tar.xz" + } +} + diff --git a/.github/actions/build-lib/action.yaml b/.github/actions/build-lib/action.yaml new file mode 100644 index 0000000..1e12662 --- /dev/null +++ b/.github/actions/build-lib/action.yaml @@ -0,0 +1,80 @@ +name: Build a CUDAQX library + +inputs: + lib: + description: 'Library name to build. (e.g, all, qec or solvers)' + required: true + pr-number: + description: 'Unique pull request identifier.' + default: '' + required: false + save-ccache: + description: 'Indicates whether to save the compilation cache' + default: 'false' + required: false +outputs: + build-dir: + description: 'Build dir.' 
+ value: ${{ steps.build-lib.outputs.build_dir }} + + +runs: + using: "composite" + steps: + - name: Install requirements + run: | + apt update + apt install -y --no-install-recommends ccache + python3 -m pip install cmake --user + echo "$HOME/.local/bin:$PATH" >> $GITHUB_PATH + shell: bash + + - name: Compilation cache key + id: ccache-key + run: | + echo "main=ccache-${{ inputs.lib }}-cu12.0-gcc11" >> $GITHUB_OUTPUT + if [[ -n "${{ inputs.pr-number }}" ]]; then + echo "pr=-pr${{ inputs.pr-number }}" >> $GITHUB_OUTPUT + fi + shell: bash + + - name: Try to restore previous compilation cache + id: restore-ccache + uses: actions/cache/restore@v4 + with: + fail-on-cache-miss: false + path: /ccache-${{ inputs.lib }} + key: ${{ steps.ccache-key.outputs.main }}${{ steps.ccache-key.outputs.pr }} + restore-keys: | + ${{ steps.ccache-key.outputs.main }} + + - name: Build library + id: build-lib + env: + CCACHE_DIR: /ccache-${{ inputs.lib }} + run: | + build_dir=build_${{ inputs.lib }} + .github/actions/build-lib/build_${{ inputs.lib }}.sh $build_dir + echo "build_dir=$build_dir" >> $GITHUB_OUTPUT + shell: bash + + # We need to delete the previous cache entry, otherwise the new one won't be stored + - name: Delete previous main compilation cache + if: steps.restore-ccache.outputs.cache-hit == 'true' && inputs.save-ccache == 'true' + env: + GH_TOKEN: ${{ github.token }} + run: | + bash .github/workflows/scripts/install_git_cli.sh + gh cache delete \ + ${{ steps.ccache-key.outputs.main }}${{ steps.ccache-key.outputs.pr }} \ + --repo ${{ github.repository }} + shell: bash + + - name: Store compilation cache + if: inputs.save-ccache == 'true' + uses: actions/cache/save@v4 + continue-on-error: true + with: + path: /ccache-${{ inputs.lib }} + key: ${{ steps.ccache-key.outputs.main }}${{ steps.ccache-key.outputs.pr }} + diff --git a/.github/actions/build-lib/build_all.sh b/.github/actions/build-lib/build_all.sh new file mode 100755 index 0000000..a790209 --- /dev/null +++ b/.github/actions/build-lib/build_all.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +cmake -S .
-B "$1" \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_C_COMPILER=gcc-11 \ + -DCMAKE_CXX_COMPILER=g++-11 \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \ + -DCUDAQ_DIR=/cudaq-install/lib/cmake/cudaq/ \ + -DCUDAQX_ENABLE_LIBS="all" \ + -DCUDAQX_INCLUDE_TESTS=ON \ + -DCUDAQX_BINDINGS_PYTHON=ON + +cmake --build "$1" --target install + diff --git a/.github/actions/build-lib/build_qec.sh b/.github/actions/build-lib/build_qec.sh new file mode 100755 index 0000000..b609aac --- /dev/null +++ b/.github/actions/build-lib/build_qec.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +cmake -S libs/qec -B "$1" \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_C_COMPILER=gcc-11 \ + -DCMAKE_CXX_COMPILER=g++-11 \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \ + -DCUDAQ_DIR=/cudaq-install/lib/cmake/cudaq/ \ + -DCUDAQX_INCLUDE_TESTS=ON \ + -DCUDAQX_BINDINGS_PYTHON=ON + +cmake --build "$1" --target install + diff --git a/.github/actions/build-lib/build_solvers.sh b/.github/actions/build-lib/build_solvers.sh new file mode 100755 index 0000000..ce4a614 --- /dev/null +++ b/.github/actions/build-lib/build_solvers.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +cmake -S libs/solvers -B "$1" \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_C_COMPILER=gcc-11 \ + -DCMAKE_CXX_COMPILER=g++-11 \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \ + -DCUDAQ_DIR=/cudaq-install/lib/cmake/cudaq/ \ + -DCUDAQX_INCLUDE_TESTS=ON \ + -DCUDAQX_BINDINGS_PYTHON=ON + +cmake --build "$1" --target install + diff --git a/.github/actions/get-cudaq-build/action.yaml b/.github/actions/get-cudaq-build/action.yaml new file mode 100644 index 0000000..729d70f --- /dev/null +++ b/.github/actions/get-cudaq-build/action.yaml @@ -0,0 +1,168 @@ +name: Get CUDAQ build +description: 'Either restore CUDAQ from cache or build it' + +inputs: + repo: + description: 'CUDAQ repository.' + required: true + ref: + description: 'The branch, tag or SHA to checkout.' + required: true + token: + description: 'CUDAQ repository access token.' + default: '' + required: false + pr-number: + description: 'Unique pull request identifier.' + default: '' + required: false + save-build: + description: 'Indicates whether to save the build' + default: 'false' + required: false + save-ccache: + description: 'Indicates whether to save the compilation cache' + default: 'false' + required: false + lookup-only: + description: 'Check if a cache entry exists without downloading the cache' + default: 'false' + required: false +outputs: + found-cache: + description: 'A boolean value to indicate that a cache entry was found.' 
+ value: ${{ steps.check-cache.outputs.valid }} + +runs: + using: "composite" + steps: + # ========================================================================== + # Try to restore from cache + # ========================================================================== + + - name: Create CUDAQ build cache key + id: cudaq-build-key + env: + # This is a list of files that, when changed, should require a new cudaq build + to_hash: | + .github/actions/get-cudaq-build/** + .cudaq_version + run: | + hash=${{ hashFiles(format('{0}', env.to_hash)) }} + echo "main=cudaq-${{ inputs.ref }}-$hash" >> $GITHUB_OUTPUT + if [[ -n "${{ inputs.pr-number }}" ]]; then + echo "pr=-pr${{ inputs.pr-number }}" >> $GITHUB_OUTPUT + fi + shell: bash --noprofile --norc -euo pipefail {0} + + - name: Try to restore CUDAQ from cache + id: restore-cudaq-build + uses: actions/cache/restore@v4 + with: + fail-on-cache-miss: false + path: /cudaq-install + key: ${{ steps.cudaq-build-key.outputs.main }}${{ steps.cudaq-build-key.outputs.pr }} + restore-keys: ${{ steps.cudaq-build-key.outputs.main }} + lookup-only: ${{ inputs.lookup-only }} + + # The restore action could find a partial match using the `restore-keys`. In such cases + # it would still report `cache-hit` as false, but would load the cache from the partial + # one. Thus, we need to check whether the cache is valid by other means. + - name: Check if cache is valid + id: check-cache + run: | + if [[ "${{ steps.restore-cudaq-build.outputs.cache-matched-key }}" == "" ]]; then + echo "valid=false" >> $GITHUB_OUTPUT + else + echo "valid=true" >> $GITHUB_OUTPUT + fi + shell: bash --noprofile --norc -euo pipefail {0} + + # ========================================================================== + # Get cuQuantum + # ========================================================================== + + - name: Download assets + if: steps.check-cache.outputs.valid == 'false' && inputs.lookup-only == 'false' + env: + GITHUB_TOKEN: ${{ inputs.token }} + CUQUANTUM_INSTALL_PREFIX: /cudaq-install + run: | + bash .github/workflows/scripts/install_git_cli.sh + mkdir -p ${CUQUANTUM_INSTALL_PREFIX} + python3 .github/actions/get-cudaq-build/get_assets.py + cuquantum_archive=$(jq -r '.cuquantum.pattern' .cudaq_version) + tar xf "${cuquantum_archive}" --strip-components 1 -C "${CUQUANTUM_INSTALL_PREFIX}" + shell: bash --noprofile --norc -euo pipefail {0} + + # ========================================================================== + # Build CUDAQ + # ========================================================================== + + - name: Get CUDAQ code + if: steps.check-cache.outputs.valid == 'false' && inputs.lookup-only == 'false' + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repo }} + ref: ${{ inputs.ref }} + path: cudaq + set-safe-directory: true + + - name: Try to restore CUDAQ compilation cache + if: steps.check-cache.outputs.valid == 'false' && inputs.lookup-only == 'false' + id: restore-ccache + uses: actions/cache/restore@v4 + with: + fail-on-cache-miss: false + path: /cudaq-ccache + key: ccache-cudaq + + - name: Install CUDAQ build requirements + if: steps.check-cache.outputs.valid == 'false' && inputs.lookup-only == 'false' + run: | + bash .github/workflows/scripts/install_git_cli.sh + apt install -y --no-install-recommends ccache + shell: bash --noprofile --norc -euo pipefail {0} + + - name: Build CUDAQ + if: steps.check-cache.outputs.valid == 'false' && inputs.lookup-only == 'false' + env: + CCACHE_DIR: /cudaq-ccache + cudaq-build-script:
.github/actions/get-cudaq-build/build_cudaq.sh + CUQUANTUM_INSTALL_PREFIX: /cudaq-install + CUTENSOR_INSTALL_PREFIX: /cudaq-install + CUDAQ_INSTALL_PREFIX: /cudaq-install + run: bash ${{ env.cudaq-build-script }} Release ccache gcc-11 g++-11 + shell: bash --noprofile --norc -euo pipefail {0} + + # ========================================================================== + # Store CUDAQ compilation cache + # ========================================================================== + + # We need to delete previous cache entry otherwise the new one won't be stored + - name: Delete previous compilation cache + if: steps.restore-ccache.outputs.cache-hit == 'true' && inputs.save-ccache == 'true' + env: + GH_TOKEN: ${{ github.token }} + run: | + gh cache delete ccache-cudaq --repo ${{ github.repository }} + shell: bash --noprofile --norc -euo pipefail {0} + + - name: Store compilation (CCache) + if: steps.check-cache.outputs.valid == 'false' && inputs.save-ccache == 'true' && inputs.lookup-only == 'false' + uses: actions/cache/save@v4 + with: + path: /cudaq-ccache + key: ccache-cudaq + + + # ========================================================================== + # Store CUDAQ build cache + # ========================================================================== + + - name: Store CUDAQ build in the cache + if: steps.check-cache.outputs.valid == 'false' && inputs.save-build == 'true' && inputs.lookup-only == 'false' + uses: actions/cache/save@v4 + with: + path: /cudaq-install + key: ${{ steps.cudaq-build-key.outputs.main }}${{ steps.cudaq-build-key.outputs.pr }} diff --git a/.github/actions/get-cudaq-build/build_cudaq.sh b/.github/actions/get-cudaq-build/build_cudaq.sh new file mode 100644 index 0000000..e9ef22b --- /dev/null +++ b/.github/actions/get-cudaq-build/build_cudaq.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +BUILD_TYPE=${1:-"Release"} +LAUNCHER=${2:-""} +CC=${3:-"gcc"} +CXX=${4:-"g++"} + +LLVM_INSTALL_PREFIX=/usr/local/llvm +CUTENSOR_INSTALL_PREFIX=/opt/nvidia/cutensor + +cd cudaq + +# Determine linker and linker flags +if [ -x "$(command -v "$LLVM_INSTALL_PREFIX/bin/ld.lld")" ]; then + echo "Configuring nvq++ to use the lld linker by default." + NVQPP_LD_PATH="$LLVM_INSTALL_PREFIX/bin/ld.lld" +fi + + +# Determine CUDA flags +cuda_driver=${CUDACXX:-${CUDA_HOME:-/usr/local/cuda}/bin/nvcc} + +if [ -z "$CUDAHOSTCXX" ] && [ -z "$CUDAFLAGS" ]; then + CUDAFLAGS='-allow-unsupported-compiler' + if [ -x "$CXX" ] && [ -n "$("$CXX" --version | grep -i clang)" ]; then + CUDAFLAGS+=" --compiler-options --stdlib=libstdc++" + fi + if [ -d "$GCC_TOOLCHAIN" ]; then + # e.g. GCC_TOOLCHAIN=/opt/rh/gcc-toolset-11/root/usr/ + CUDAFLAGS+=" --compiler-options --gcc-toolchain=\"$GCC_TOOLCHAIN\"" + fi +fi + +# Determine OpenMP flags +if [ -n "$(find "$LLVM_INSTALL_PREFIX" -name 'libomp.so')" ]; then + OMP_LIBRARY=${OMP_LIBRARY:-libomp} + OpenMP_libomp_LIBRARY=${OMP_LIBRARY#lib} + OpenMP_FLAGS="${OpenMP_FLAGS:-'-fopenmp'}" +fi + +echo "Preparing CUDA-Q build with LLVM installation in $LLVM_INSTALL_PREFIX..." 
+cmake_args="-G Ninja \ + -DCMAKE_INSTALL_PREFIX='"$CUDAQ_INSTALL_PREFIX"' \ + -DCMAKE_BUILD_TYPE=$BUILD_TYPE \ + -DCMAKE_C_COMPILER=$CC \ + -DCMAKE_CXX_COMPILER=$CXX \ + -DCMAKE_C_COMPILER_LAUNCHER=$LAUNCHER \ + -DCMAKE_CXX_COMPILER_LAUNCHER=$LAUNCHER \ + -DNVQPP_LD_PATH='"$NVQPP_LD_PATH"' \ + -DCMAKE_CUDA_COMPILER='"$cuda_driver"' \ + -DCMAKE_CUDA_FLAGS='"$CUDAFLAGS"' \ + -DCMAKE_CUDA_HOST_COMPILER='"${CUDAHOSTCXX:-$CXX}"' \ + ${OpenMP_libomp_LIBRARY:+-DOpenMP_C_LIB_NAMES=lib$OpenMP_libomp_LIBRARY} \ + ${OpenMP_libomp_LIBRARY:+-DOpenMP_CXX_LIB_NAMES=lib$OpenMP_libomp_LIBRARY} \ + ${OpenMP_libomp_LIBRARY:+-DOpenMP_libomp_LIBRARY=$OpenMP_libomp_LIBRARY} \ + ${OpenMP_FLAGS:+"-DOpenMP_C_FLAGS='"$OpenMP_FLAGS"'"} \ + ${OpenMP_FLAGS:+"-DOpenMP_CXX_FLAGS='"$OpenMP_FLAGS"'"} \ + -DCUDAQ_REQUIRE_OPENMP=TRUE \ + -DCUDAQ_ENABLE_PYTHON=TRUE \ + -DCUDAQ_BUILD_TESTS=FALSE \ + -DCUDAQ_TEST_MOCK_SERVERS=FALSE \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF" + +echo $cmake_args | xargs cmake -S . -B "build" + +cmake --build "build" --target install + diff --git a/.github/actions/get-cudaq-build/get_assets.py b/.github/actions/get-cudaq-build/get_assets.py new file mode 100644 index 0000000..c9e2ff9 --- /dev/null +++ b/.github/actions/get-cudaq-build/get_assets.py @@ -0,0 +1,57 @@ +import json +import sys +import subprocess +import zipfile + +def download_asset_github(repo, tag, pattern, install_dir=None): + # Construct the gh command + if tag: + gh_command = f"gh release download {tag} --repo {repo} -p '{pattern}'" + else: + gh_command = f"gh release download --repo {repo} -p '{pattern}'" + + print(f"Executing command: {gh_command}") + + # Execute the gh command + try: + subprocess.run(gh_command, shell=True, check=True) + print(f"Asset(s) matching '{pattern}' downloaded successfully.") + except subprocess.CalledProcessError as e: + print(f"Error downloading asset: {e}") + sys.exit(1) + + # Add output directory to the command + if install_dir and pattern.endswith(".whl"): + # Extract the contents of the wheel file + with zipfile.ZipFile(pattern, 'r') as zip_ref: + zip_ref.extractall(install_dir) + +def download_asset_wget(url, pattern): + try: + result = subprocess.run(['wget', url + pattern], capture_output=True, text=True, check=True) + except subprocess.CalledProcessError as e: + print(f"An error occurred: {e}") + print(f"wget output: {e.output}") + +def main(): + # Read the entry from a JSON file + with open('.cudaq_version', 'r') as json_file: + assets = json.load(json_file) + + for name, info in assets.items(): + if "tag" in info: + download_asset_github( + info["repository"], + info["tag"], + info["pattern"], + info.get("install_dir") + ) + if "url" in info: + download_asset_wget( + info["url"], + info["pattern"] + ) + +if __name__ == "__main__": + main() + diff --git a/.github/actions/get-cudaq-version/action.yaml b/.github/actions/get-cudaq-version/action.yaml new file mode 100644 index 0000000..7b98448 --- /dev/null +++ b/.github/actions/get-cudaq-version/action.yaml @@ -0,0 +1,30 @@ +name: Get CUDAQ version + +outputs: + repo: + description: 'CUDAQ repository' + value: ${{ steps.get-cudaq-version.outputs.repo }} + ref: + description: 'The branch, tag or SHA to checkout' + value: ${{ steps.get-cudaq-version.outputs.ref }} + +runs: + using: "composite" + steps: + + - name: Install jq + run: | + apt-get update + apt-get install -y --no-install-recommends jq + shell: bash + + - name: Get required CUDAQ version + id: get-cudaq-version + run: | + repo=$(jq -r '.cudaq.repository' .cudaq_version) + ref=$(jq 
-r '.cudaq.ref' .cudaq_version) + echo "repo=$repo" >> $GITHUB_OUTPUT + echo "ref=$ref" >> $GITHUB_OUTPUT + shell: bash + + diff --git a/.github/copy-pr-bot.yaml b/.github/copy-pr-bot.yaml new file mode 100644 index 0000000..d4ca941 --- /dev/null +++ b/.github/copy-pr-bot.yaml @@ -0,0 +1 @@ +enabled: true diff --git a/.github/workflows/all_libs.yaml b/.github/workflows/all_libs.yaml new file mode 100644 index 0000000..2b4a8cd --- /dev/null +++ b/.github/workflows/all_libs.yaml @@ -0,0 +1,81 @@ +name: All libs + +on: + workflow_call: + +jobs: + pr-build: + name: Build and test + if: startsWith(github.ref, 'refs/heads/pull-request/') + runs-on: ${{ startsWith(github.repository, 'NVIDIA/cudaqx') && 'linux-amd64-cpu8' || 'ubuntu-latest' }} + container: ghcr.io/nvidia/cuda-quantum-devdeps:ext-cu12.0-gcc11-main + permissions: + actions: write + contents: read + pull-requests: read + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + set-safe-directory: true + + - name: Lookup PR info + id: get-pr-info + env: + GH_TOKEN: ${{ github.token }} + uses: nv-gha-runners/get-pr-info@main + + - name: Export PR info + id: export-pr-info + run: | + echo "pr_number=${{ fromJSON(steps.get-pr-info.outputs.pr-info).number }}" >> $GITHUB_OUTPUT + + # ======================================================================== + # CUDA Quantum build + # ======================================================================== + + - name: Get required CUDAQ version + id: get-cudaq-version + uses: ./.github/actions/get-cudaq-version + + - name: Get CUDAQ build + uses: ./.github/actions/get-cudaq-build + with: + repo: ${{ steps.get-cudaq-version.outputs.repo }} + ref: ${{ steps.get-cudaq-version.outputs.ref }} + token: ${{ secrets.CUDAQ_ACCESS_TOKEN }} + pr-number: ${{ steps.export-pr-info.outputs.pr_number }} + + # ======================================================================== + # Build + # ======================================================================== + + - name: Install build requirements + run: | + apt install -y --no-install-recommends gfortran libblas-dev libcusolver-dev-12-0 + + - name: Build + id: build + uses: ./.github/actions/build-lib + with: + lib: "all" + pr-number: ${{ steps.export-pr-info.outputs.pr_number }} + save-ccache: true + + # ======================================================================== + # Run tests + # ======================================================================== + # + - name: Run tests + run: cmake --build ${{ steps.build.outputs.build-dir }} --target run_tests + + # ======================================================================== + # Run python tests + # ======================================================================== + + - name: Install python requirements + run: pip install numpy pytest cupy-cuda12x cuquantum-python-cu12 + + - name: Run Python tests + run: cmake --build ${{ steps.build.outputs.build-dir }} --target run_python_tests + diff --git a/.github/workflows/build_wheels.yaml b/.github/workflows/build_wheels.yaml new file mode 100644 index 0000000..71092f3 --- /dev/null +++ b/.github/workflows/build_wheels.yaml @@ -0,0 +1,51 @@ +name: Build wheels + +on: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + linux-build: + name: Linux build + runs-on: ubuntu-latest + # CUDAQ requires a highly specialized environment to build. Thus, it is much + # easier to rely on their's devdeps images to do the building. 
+ container: ghcr.io/nvidia/cuda-quantum-devdeps:manylinux-amd64-${{ matrix.toolchain.id }}-main + permissions: + actions: write + contents: read + strategy: + fail-fast: false + matrix: + toolchain: + - id: cu12.0-gcc11 + cc: gcc-11 + cxx: g++-11 + build-type: Release + + steps: + - name: Get code + uses: actions/checkout@v4 + with: + set-safe-directory: true + + - name: Get CUDAQ code + uses: actions/checkout@v4 + with: + repository: 'NVIDIA/cuda-quantum' + ref: ${{ inputs.ref }} + path: cudaq + set-safe-directory: true + + - name: Build CUDAQ toolchain + run: | + .github/workflows/scripts/build_cudaq.sh + + - name: Build wheels + run: | + .github/workflows/scripts/build_wheels.sh \ + --cudaq-prefix $HOME/.cudaq \ + diff --git a/.github/workflows/cudaq_cache.yaml b/.github/workflows/cudaq_cache.yaml new file mode 100644 index 0000000..35d9dfa --- /dev/null +++ b/.github/workflows/cudaq_cache.yaml @@ -0,0 +1,47 @@ +name: CUDAQ cache + +on: + workflow_dispatch: + branches: + - main + + push: + branches: + - main + paths: + - '.github/workflows/cudaq_cache.yaml' + - '.github/actions/get-cudaq-build/**' + - '.cudaq_version' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-cudaq: + name: Build CUDAQ + runs-on: ${{ startsWith(github.repository, 'NVIDIA/cudaqx') && 'linux-amd64-cpu32' || 'ubuntu-latest' }} + container: ghcr.io/nvidia/cuda-quantum-devdeps:ext-cu12.0-gcc11-main + permissions: + actions: write + contents: read + pull-requests: read + steps: + - name: Get code + uses: actions/checkout@v4 + with: + set-safe-directory: true + + - name: Get required CUDAQ version + id: get-cudaq-version + uses: ./.github/actions/get-cudaq-version + + - name: Get CUDAQ build + uses: ./.github/actions/get-cudaq-build + with: + repo: ${{ steps.get-cudaq-version.outputs.repo }} + ref: ${{ steps.get-cudaq-version.outputs.ref }} + token: ${{ secrets.CUDAQ_ACCESS_TOKEN }} + save-build: true + save-ccache: true + diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml new file mode 100644 index 0000000..adaee19 --- /dev/null +++ b/.github/workflows/docs.yaml @@ -0,0 +1,104 @@ +name: Documentation + +on: + workflow_dispatch: + branches: + - main + + push: + branches: + - main + # FIXME: remove? 
+ - "pull-request/[0-9]+" + paths: + - '.github/workflows/docs.yaml' + - 'docs/**' + - '**/*.cpp' + - '**/*.h' + - '**/*.py' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build: + name: Build + runs-on: ${{ startsWith(github.repository, 'NVIDIA/cudaqx') && 'linux-amd64-cpu8' || 'ubuntu-latest' }} + container: ghcr.io/nvidia/cuda-quantum-devdeps:ext-cu12.0-gcc11-main + permissions: + actions: write + contents: read + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + set-safe-directory: true + + # ======================================================================== + # CUDA Quantum build + # ======================================================================== + + - name: Get required CUDAQ version + id: get-cudaq-version + uses: ./.github/actions/get-cudaq-version + + - name: Get CUDAQ build + uses: ./.github/actions/get-cudaq-build + with: + repo: ${{ steps.get-cudaq-version.outputs.repo }} + ref: ${{ steps.get-cudaq-version.outputs.ref }} + token: ${{ secrets.CUDAQ_ACCESS_TOKEN }} + + # ======================================================================== + # Build docs + # ======================================================================== + + - name: Install requirements + run: | + apt install -y --no-install-recommends \ + gfortran libblas-dev libcusolver-dev-12-0 doxygen + + python3 -m pip install IPython breathe enum_tools myst_parser nbsphinx \ + sphinx_copybutton sphinx_inline_tabs sphinx_gallery sphinx_rtd_theme \ + sphinx_reredirects sphinx_toolbox cupy-cuda12x + + python3 -m pip install cmake --user + echo "$HOME/.local/bin:$PATH" >> $GITHUB_PATH + + - name: Build docs + run: | + cmake -S . -B "build" \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_C_COMPILER=gcc-11 \ + -DCMAKE_CXX_COMPILER=g++-11 \ + -DCUDAQ_DIR=/cudaq-install/lib/cmake/cudaq/ \ + -DCUDAQX_ENABLE_LIBS="all" \ + -DCUDAQX_INCLUDE_DOCS=ON \ + -DCUDAQX_BINDINGS_PYTHON=ON + + cmake --build "build" --target docs + + # ======================================================================== + + - name: Upload + uses: actions/upload-pages-artifact@v3 + with: + path: build/docs/build/ + + deploy: + name: Deploy latest docs + if: github.ref == 'refs/heads/main' + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + permissions: + pages: write + id-token: write + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 + diff --git a/.github/workflows/lib_qec.yaml b/.github/workflows/lib_qec.yaml new file mode 100644 index 0000000..4900b8a --- /dev/null +++ b/.github/workflows/lib_qec.yaml @@ -0,0 +1,77 @@ +name: QEC lib + +on: + workflow_call: + +jobs: + pr-build: + name: Build and test + if: startsWith(github.ref, 'refs/heads/pull-request/') + runs-on: ${{ startsWith(github.repository, 'NVIDIA/cudaqx') && 'linux-amd64-cpu8' || 'ubuntu-latest' }} + container: ghcr.io/nvidia/cuda-quantum-devdeps:ext-cu12.0-gcc11-main + permissions: + actions: write + contents: read + pull-requests: read + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + set-safe-directory: true + + - name: Lookup PR info + id: get-pr-info + env: + GH_TOKEN: ${{ github.token }} + uses: nv-gha-runners/get-pr-info@main + + - name: Export PR info + id: export-pr-info + run: | + echo "pr_number=${{ fromJSON(steps.get-pr-info.outputs.pr-info).number }}" >> $GITHUB_OUTPUT + + # 
======================================================================== + # CUDA Quantum build + # ======================================================================== + + - name: Get required CUDAQ version + id: get-cudaq-version + uses: ./.github/actions/get-cudaq-version + + - name: Get CUDAQ build + uses: ./.github/actions/get-cudaq-build + with: + repo: ${{ steps.get-cudaq-version.outputs.repo }} + ref: ${{ steps.get-cudaq-version.outputs.ref }} + token: ${{ secrets.CUDAQ_ACCESS_TOKEN }} + pr-number: ${{ steps.export-pr-info.outputs.pr_number }} + + # ======================================================================== + # Build library + # ======================================================================== + + - name: Build + id: build + uses: ./.github/actions/build-lib + with: + lib: "qec" + pr-number: ${{ steps.export-pr-info.outputs.pr_number }} + save-ccache: true + + # ======================================================================== + # Run tests + # ======================================================================== + # + - name: Run tests + run: cmake --build ${{ steps.build.outputs.build-dir }} --target run_tests + + # ======================================================================== + # Run python tests + # ======================================================================== + + - name: Install python requirements + run: pip install numpy pytest cupy-cuda12x cuquantum-python-cu12 + + - name: Run Python tests + run: cmake --build ${{ steps.build.outputs.build-dir }} --target run_python_tests + diff --git a/.github/workflows/lib_solvers.yaml b/.github/workflows/lib_solvers.yaml new file mode 100644 index 0000000..2dbb7a6 --- /dev/null +++ b/.github/workflows/lib_solvers.yaml @@ -0,0 +1,81 @@ +name: Solvers lib + +on: + workflow_call: + +jobs: + pr-build: + name: Build and test + if: startsWith(github.ref, 'refs/heads/pull-request/') + runs-on: ${{ startsWith(github.repository, 'NVIDIA/cudaqx') && 'linux-amd64-cpu8' || 'ubuntu-latest' }} + container: ghcr.io/nvidia/cuda-quantum-devdeps:ext-cu12.0-gcc11-main + permissions: + actions: write + contents: read + pull-requests: read + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + set-safe-directory: true + + - name: Lookup PR info + id: get-pr-info + env: + GH_TOKEN: ${{ github.token }} + uses: nv-gha-runners/get-pr-info@main + + - name: Export PR info + id: export-pr-info + run: | + echo "pr_number=${{ fromJSON(steps.get-pr-info.outputs.pr-info).number }}" >> $GITHUB_OUTPUT + + # ======================================================================== + # CUDA Quantum build + # ======================================================================== + + - name: Get required CUDAQ version + id: get-cudaq-version + uses: ./.github/actions/get-cudaq-version + + - name: Get CUDAQ build + uses: ./.github/actions/get-cudaq-build + with: + repo: ${{ steps.get-cudaq-version.outputs.repo }} + ref: ${{ steps.get-cudaq-version.outputs.ref }} + token: ${{ secrets.CUDAQ_ACCESS_TOKEN }} + pr-number: ${{ steps.export-pr-info.outputs.pr_number }} + + # ======================================================================== + # Build library + # ======================================================================== + + - name: Install build requirements + run: | + apt install -y --no-install-recommends gfortran libblas-dev libcusolver-dev-12-0 + + - name: Build + id: build + uses: ./.github/actions/build-lib + with: + lib: "solvers" + pr-number: ${{ 
steps.export-pr-info.outputs.pr_number }} + save-ccache: true + + # ======================================================================== + # Run tests + # ======================================================================== + # + - name: Run tests + run: cmake --build ${{ steps.build.outputs.build-dir }} --target run_tests + + # ======================================================================== + # Run python tests + # ======================================================================== + + - name: Install python requirements + run: pip install numpy pytest cupy-cuda12x cuquantum-python-cu12 + + - name: Run Python tests + run: cmake --build ${{ steps.build.outputs.build-dir }} --target run_python_tests + diff --git a/.github/workflows/pr_cache_cleanup.yaml b/.github/workflows/pr_cache_cleanup.yaml new file mode 100644 index 0000000..37dda94 --- /dev/null +++ b/.github/workflows/pr_cache_cleanup.yaml @@ -0,0 +1,31 @@ +name: PR cleanup + +on: delete + +jobs: + pr-cleanup: + name: Cleanup cache + if: github.event.ref_type == 'branch' && startsWith(github.event.ref, 'pull-request/') + runs-on: ubuntu-latest + permissions: + actions: write + contents: read + steps: + - name: Delete cache entries + env: + GH_TOKEN: ${{ github.token }} + run: | + bash .github/workflows/scripts/install_git_cli.sh + + pr_number=$(echo ${{ github.event.ref }} | sed 's/.*\///') + + # Fetch the list of cache keys + cache_keys=$(gh cache list --repo ${{ github.repository }} | cut -f 2) + + for key in $cache_keys + do + if [[ $key =~ pr$pr_number ]]; then + gh cache delete $key --repo ${{ github.repository }} + echo "Deleted cache entry: $key" + fi + done diff --git a/.github/workflows/pr_workflow.yaml b/.github/workflows/pr_workflow.yaml new file mode 100644 index 0000000..86955d5 --- /dev/null +++ b/.github/workflows/pr_workflow.yaml @@ -0,0 +1,129 @@ +name: PR workflow + +on: + push: + branches: + - "pull-request/[0-9]+" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + check-changes: + name: Check changes + runs-on: ubuntu-latest + outputs: + build-cudaq: ${{ steps.filter.outputs.build-cudaq }} + build-all: ${{ steps.filter.outputs.build-all }} + build-qec: ${{ steps.filter.outputs.build-qec }} + build-solvers: ${{ steps.filter.outputs.build-solvers }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + set-safe-directory: true + + - name: Lookup PR info + id: get-pr-info + env: + GH_TOKEN: ${{ github.token }} + uses: nv-gha-runners/get-pr-info@main + + - name: Check what needs testing + uses: dorny/paths-filter@v3 + id: filter + with: + base: ${{ fromJSON(steps.get-pr-info.outputs.pr-info).base.sha }} + filters: | + build-cudaq: + - '.github/workflows/cudaq_bump.yml' + - '.github/actions/get-cudaq-build/**' + - '.cudaq_version' + build-all: + - '.github/actions/build-lib/action.yaml' + - '.github/actions/build-lib/build_all.yaml' + - '.github/workflows/all_libs.yaml' + - 'cmake/Modules/**' + - '**/CMakeLists.txt' + build-qec: + - '.github/actions/build-lib/action.yaml' + - '.github/actions/build-lib/build_qec.sh' + - '.github/workflows/lib_qec.yaml' + - 'cmake/Modules/**' + - 'libs/core/**.cpp' + - 'libs/core/**.h' + - 'libs/core/**/CMakeLists.txt' + - 'libs/qec/**.cpp' + - 'libs/qec/**.h' + - 'libs/qec/**.in' + - 'libs/qec/**.py' + - 'libs/qec/**/CMakeLists.txt' + build-solvers: + - '.github/actions/build-lib/action.yaml' + - '.github/actions/build-lib/build_solvers.sh' + - '.github/workflows/lib_solvers.yaml' + - 
'cmake/Modules/**' + - 'libs/core/**.cpp' + - 'libs/core/**.h' + - 'libs/core/**/CMakeLists.txt' + - 'libs/solvers/**.cpp' + - 'libs/solvers/**.h' + - 'libs/solvers/**.in' + - 'libs/solvers/**.py' + - 'libs/solvers/**/CMakeLists.txt' + + build-cudaq: + name: Build CUDAQ + needs: [check-changes] + if: needs.check-changes.outputs.build-cudaq == 'true' + runs-on: ${{ startsWith(github.repository, 'NVIDIA/cudaqx') && 'linux-amd64-cpu32' || 'ubuntu-latest' }} + container: ghcr.io/nvidia/cuda-quantum-devdeps:ext-cu12.0-gcc11-main + permissions: + actions: write + contents: read + pull-requests: read + steps: + - name: Get code + uses: actions/checkout@v4 + with: + set-safe-directory: true + + - name: Lookup PR info + id: get-pr-info + env: + GH_TOKEN: ${{ github.token }} + uses: nv-gha-runners/get-pr-info@main + + - name: Get required CUDAQ version + id: get-cudaq-version + uses: ./.github/actions/get-cudaq-version + + - name: Get CUDAQ build + uses: ./.github/actions/get-cudaq-build + with: + repo: ${{ steps.get-cudaq-version.outputs.repo }} + ref: ${{ steps.get-cudaq-version.outputs.ref }} + token: ${{ secrets.CUDAQ_ACCESS_TOKEN }} + pr-number: ${{ fromJSON(steps.get-pr-info.outputs.pr-info).number }} + save-build: true + save-ccache: false + + build-all: + name: All libs + needs: [check-changes, build-cudaq] + if: needs.check-changes.outputs.build-all == 'true' || needs.check-changes.outputs.build-cudaq == 'true' + uses: ./.github/workflows/all_libs.yaml + + build-qec: + name: QEC + needs: [check-changes, build-cudaq] + if: needs.check-changes.outputs.build-qec == 'true' || needs.check-changes.outputs.build-cudaq == 'true' + uses: ./.github/workflows/lib_qec.yaml + + build-solvers: + name: Solvers + needs: [check-changes, build-cudaq] + if: needs.check-changes.outputs.build-solvers == 'true' || needs.check-changes.outputs.build-cudaq == 'true' + uses: ./.github/workflows/lib_solvers.yaml + diff --git a/.github/workflows/scripts/build_cudaq.sh b/.github/workflows/scripts/build_cudaq.sh new file mode 100755 index 0000000..551d41e --- /dev/null +++ b/.github/workflows/scripts/build_cudaq.sh @@ -0,0 +1,107 @@ +#!/bin/sh + +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + + +export CUDA_VERSION=12.0 + +# We need to use a newer toolchain because CUDA-QX libraries rely on c++20 +source /opt/rh/gcc-toolset-11/enable + +export CC=gcc +export CXX=g++ + +python_version=3.10 +python=python${python_version} +${python} -m pip install --no-cache-dir numpy auditwheel + +echo "Building CUDA-Q." 
+cd cudaq + +# ============================================================================== +# Building MLIR bindings +# ============================================================================== + +echo "Building MLIR bindings for ${python}" && \ + rm -rf "$LLVM_INSTALL_PREFIX/src" "$LLVM_INSTALL_PREFIX/python_packages" && \ + Python3_EXECUTABLE="$(which ${python})" \ + LLVM_PROJECTS='clang;mlir;python-bindings' \ + LLVM_CMAKE_CACHE=/cmake/caches/LLVM.cmake LLVM_SOURCE=/llvm-project \ + bash /scripts/build_llvm.sh -c Release -v + +# ============================================================================== +# Building CUDA-Q +# ============================================================================== + +CUDAQ_PATCH='diff --git a/CMakeLists.txt b/CMakeLists.txt +index 3f2c138..ddb15b3 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -540,7 +540,7 @@ add_subdirectory(tools) + add_subdirectory(utils) + + if (CUDAQ_ENABLE_PYTHON) +- find_package(Python 3 COMPONENTS Interpreter Development) ++ find_package(Python 3 COMPONENTS Interpreter Development.Module) + + # Apply specific patch to pybind11 for our documentation. + # Only apply the patch if not already applied. +diff --git a/python/runtime/cudaq/domains/plugins/CMakeLists.txt b/python/runtime/cudaq/domains/plugins/CMakeLists.txt +index 7b7541d..2261334 100644 +--- a/python/runtime/cudaq/domains/plugins/CMakeLists.txt ++++ b/python/runtime/cudaq/domains/plugins/CMakeLists.txt +@@ -17,6 +17,6 @@ if (SKBUILD) + if (NOT Python_FOUND) + message(FATAL_ERROR "find_package(Python) not run?") + endif() +- target_link_libraries(cudaq-pyscf PRIVATE Python::Python pybind11::pybind11 cudaq-chemistry cudaq-spin cudaq cudaq-py-utils) ++ target_link_libraries(cudaq-pyscf PRIVATE Python::Module pybind11::pybind11 cudaq-chemistry cudaq-spin cudaq cudaq-py-utils) + endif() + install(TARGETS cudaq-pyscf DESTINATION lib/plugins)' + +CUDAQ_PATCH2='diff --git a/lib/Frontend/nvqpp/ConvertDecl.cpp b/lib/Frontend/nvqpp/ConvertDecl.cpp +index 149959c8e..ea23990f6 100644 +--- a/lib/Frontend/nvqpp/ConvertDecl.cpp ++++ b/lib/Frontend/nvqpp/ConvertDecl.cpp +@@ -169,8 +169,10 @@ bool QuakeBridgeVisitor::interceptRecordDecl(clang::RecordDecl *x) { + auto fnTy = cast(popType()); + return pushType(cc::IndirectCallableType::get(fnTy)); + } +- auto loc = toLocation(x); +- TODO_loc(loc, "unhandled type, " + name + ", in cudaq namespace"); ++ if (!isInNamespace(x, "solvers") && !isInNamespace(x, "qec")) { ++ auto loc = toLocation(x); ++ TODO_loc(loc, "unhandled type, " + name + ", in cudaq namespace"); ++ } + } + if (isInNamespace(x, "std")) { + if (name.equals("vector")) { +diff --git a/lib/Frontend/nvqpp/ConvertExpr.cpp b/lib/Frontend/nvqpp/ConvertExpr.cpp +index e6350d1c5..28c98c6cb 100644 +--- a/lib/Frontend/nvqpp/ConvertExpr.cpp ++++ b/lib/Frontend/nvqpp/ConvertExpr.cpp +@@ -2050,7 +2050,9 @@ bool QuakeBridgeVisitor::VisitCallExpr(clang::CallExpr *x) { + return pushValue(call.getResult(0)); + } + +- TODO_loc(loc, "unknown function, " + funcName + ", in cudaq namespace"); ++ if (!isInNamespace(func, "solvers") && !isInNamespace(func, "qec")) { ++ TODO_loc(loc, "unknown function, " + funcName + ", in cudaq namespace"); ++ } + } // end in cudaq namespace + + if (isInNamespace(func, "std")) {' + +echo "$CUDAQ_PATCH" | git apply --verbose +echo "$CUDAQ_PATCH2" | git apply --verbose + +$python -m venv --system-site-packages .venv +source .venv/bin/activate +CUDAQ_BUILD_TESTS=FALSE bash scripts/build_cudaq.sh -v + diff --git 
a/.github/workflows/scripts/build_wheels.sh b/.github/workflows/scripts/build_wheels.sh new file mode 100755 index 0000000..58b1912 --- /dev/null +++ b/.github/workflows/scripts/build_wheels.sh @@ -0,0 +1,127 @@ +#!/bin/sh + +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + + +# ============================================================================== +# Handling options +# ============================================================================== + +show_help() { + echo "Usage: $0 [options]" + echo "Options:" + echo " --build-type Build type (e.g., Release)" + echo " --cudaq-prefix Path to CUDA-Q's install prefix" + echo " (default: \$HOME/.cudaq)" +} + +parse_options() { + while (( $# > 0 )); do + case "$1" in + --build-type) + if [[ -n "$2" && "$2" != -* ]]; then + build_type=("$2") + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; + --cudaq-prefix) + if [[ -n "$2" && "$2" != -* ]]; then + cudaq_prefix=("$2") + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; + -*) + echo "Error: Unknown option $1" >&2 + show_help + exit 1 + ;; + *) + echo "Error: Unknown argument $1" >&2 + show_help + exit 1 + ;; + esac + done +} + +# Initialize an empty array to store libs names +cudaq_prefix=$HOME/.cudaq +build_type=Release + +# Parse options +parse_options "$@" + +# ============================================================================== +# Helpers +# ============================================================================== + +python_version=3.10 +python=python${python_version} + +# We need to use a newer toolchain because CUDA-QX libraries rely on c++20 +source /opt/rh/gcc-toolset-11/enable + +export CC=gcc +export CXX=g++ + +# ============================================================================== +# QEC library +# ============================================================================== + +cd libs/qec + +SKBUILD_CMAKE_ARGS="-DCUDAQ_DIR=$cudaq_prefix/lib/cmake/cudaq;-DCMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN=/opt/rh/gcc-toolset-11/root/usr/lib/gcc/x86_64-redhat-linux/11/" \ +$python -m build --wheel + +LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$(pwd)/_skbuild/lib" \ +$python -m auditwheel -v repair dist/*.whl \ + --exclude libcudaq-em-default.so \ + --exclude libcudaq-python-interop.so \ + --exclude libcudaq-ensmallen.so \ + --exclude libcudaq-common.so \ + --exclude libcudaq-platform-default.so \ + --exclude libnvqir-qpp.so \ + --exclude libnvqir.so \ + --exclude libcudaq.so \ + --exclude libcudaq-spin.so \ + --exclude libcudaq-nlopt.so \ + --wheel-dir /wheels + +# ============================================================================== +# Solvers library +# ============================================================================== + +cd ../solvers + +SKBUILD_CMAKE_ARGS="-DCUDAQ_DIR=$cudaq_prefix/lib/cmake/cudaq;-DCMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN=/opt/rh/gcc-toolset-11/root/usr/lib/gcc/x86_64-redhat-linux/11/" \ +$python -m build --wheel + +LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$(pwd)/_skbuild/lib" \ +$python -m auditwheel -v repair dist/*.whl \ + --exclude libcudaq-em-default.so \ + --exclude libcudaq-python-interop.so \ + --exclude 
libcudaq-ensmallen.so \ + --exclude libcudaq-common.so \ + --exclude libcudaq-platform-default.so \ + --exclude libnvqir-qpp.so \ + --exclude libnvqir.so \ + --exclude libcudaq.so \ + --exclude libcudaq-spin.so \ + --exclude libcudaq-nlopt.so \ + --exclude libgfortran.so.5 \ + --exclude libquadmath.so.0 \ + --exclude libmvec.so.1 \ + --wheel-dir /wheels + diff --git a/.github/workflows/scripts/install_git_cli.sh b/.github/workflows/scripts/install_git_cli.sh new file mode 100644 index 0000000..4542d12 --- /dev/null +++ b/.github/workflows/scripts/install_git_cli.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +(type -p wget >/dev/null || (apt update && apt-get install wget -y)) \ + && mkdir -p -m 755 /etc/apt/keyrings \ + && wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \ + && chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \ + && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \ + && apt update \ + && apt install -y --no-install-recommends gh + diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..506d28e --- /dev/null +++ b/.gitignore @@ -0,0 +1,98 @@ +# Editor backup files +*~ + +# Patch files +*.orig +*.rej + +# Compiled Object files +*.slo +*.lo +*.o +*.obj +*.x +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +**/Output/ +**/.lit*.txt + +# Executables +*.exe +*.out +*.app +**/out/ +/*build*/ +/*Build/ +/plugins/ +/other_library_builds/ +/.cproject +/.project +/.settings/ +**/*.jar +**/.ptp* +*.ab +/dist/ +/*egg*/ +/python/*egg* +/*tmp*/ +/wheelhouse/ +**/.ipynb_checkpoints +compile_commands.json +**/*.dat +**/.antlr +__pycache__/ + +# IDE files +.vscode/* +.theia/* + +# Container files +**/.docker/* + +# LSP files +.cache/* + +# LLVM/MLIR files +*.ll +*.bc + +# Build results +[Bb]in/ +[Oo]bj/ +*.bson +*.csv +*.bin +docs/sphinx/_doxygen +docs/sphinx/_mdgen +**/_build/* +_version.py + +# third party integrations +simulators/ +apps/ + +# macOS +.DS_Store + +# JetBrains IDE files +.idea + +# vim files +*.tmp diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..4e822d1 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,106 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +set(CMAKE_WARN_DEPRECATED OFF CACHE BOOL "" FORCE) + +# We need 3.28 because of the `EXCLUDE_FROM_ALL` in FetchContent_Declare +cmake_minimum_required(VERSION 3.28 FATAL_ERROR) + +project(CUDAQX + VERSION 0.0.0 + LANGUAGES C CXX) + +set(CUDAQX_ALL_LIBS "qec;solvers") +set(CUDAQX_ENABLE_LIBS "" CACHE STRING + "Semicolon-separated list of libs to build (${CUDAQX_ALL_LIBS}), or \"all\".") + +# We don't want to handle "all" later, thus expand it here. +if(CUDAQX_ENABLE_LIBS STREQUAL "all" ) + set(CUDAQX_ENABLE_LIBS ${CUDAQX_ALL_LIBS}) +endif() + +# Sanity check. +foreach(lib ${CUDAQX_ENABLE_LIBS}) + if (NOT "${lib}" IN_LIST CUDAQX_ALL_LIBS) + message(FATAL_ERROR "${lib} isn't a known library: ${CUDAQX_ALL_LIBS}.") + endif() +endforeach() + +# Project setup +# ============================================================================== + +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +# Add our Modules to the path +list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules") + +# Include custom CUDA-QX modules +include(CUDA-QX) + +# Options +# ============================================================================== + +option(CUDAQX_INCLUDE_TESTS "Generate build targets for unit tests." OFF) +option(CUDAQX_INCLUDE_DOCS "Generate build targets for the docs." OFF) +option(CUDAQX_BINDINGS_PYTHON "Generate build targets for python bindings." OFF) + +# Top-level External Dependencies +# ============================================================================== + +# FIXME for now, we only use library mode +set(CUDAQ_LIBRARY_MODE ON) +find_package(CUDAQ REQUIRED) + +# Top-level targets +# ============================================================================== + +# Helper targets to collect libraries and python modules +add_custom_target(cudaqx-pymodules) + +# Top-level testing +# ============================================================================== + +if (CUDAQX_INCLUDE_TESTS) + include(CTest) + add_custom_target(CUDAQXUnitTests) + add_custom_target(run_tests + COMMAND ${CMAKE_COMMAND} -E env + PYTHONPATH="${CUDAQ_INSTALL_DIR}:${CMAKE_BINARY_DIR}/python" + ${CMAKE_CTEST_COMMAND} --output-on-failure + DEPENDS CUDAQXUnitTests + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + if (CUDAQX_BINDINGS_PYTHON) + set(PYTHON_TEST_DIRS "") + foreach(lib ${CUDAQX_ENABLE_LIBS}) + list(APPEND PYTHON_TEST_DIRS ../libs/${lib}/python/tests) + endforeach() + add_custom_target(run_python_tests + COMMAND ${CMAKE_COMMAND} -E env + PYTHONPATH="${CUDAQ_INSTALL_DIR}:${CMAKE_BINARY_DIR}/python" + pytest -v ${PYTHON_TEST_DIRS} + DEPENDS cudaqx-pymodules + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + endif() +endif() + +# Directory setup +# ============================================================================== + +add_subdirectory(libs/core) + +if (CUDAQX_INCLUDE_DOCS) + add_subdirectory(docs) +endif() + +foreach(lib ${CUDAQX_ENABLE_LIBS}) + add_subdirectory(libs/${lib}) +endforeach() + diff --git a/Contributing.md b/Contributing.md new file mode 100644 index 0000000..287e083 --- /dev/null +++ b/Contributing.md @@ -0,0 +1,95 @@ +# Contributing + +Thank you for your interest in contributing to CUDA-QX! Based on the type of +contribution, it will fall into three categories: + +1. Report a bug, feature request, or documentation issue: + + File an [issue][cuda_qx_issues] describing what you encountered or what + you want to see changed. 
The NVIDIA team will evaluate the issues and triage + them, scheduling them for a release. If you believe the issue needs priority + attention comment on the issue to notify the team. + +1. Share your work built upon CUDA-QX: + + We would love to hear more about your work! Please share with us on + [NVIDIA/cuda-qx GitHub + Discussions](https://github.com/NVIDIA/cuda-qx/discussions) or consider + contributing to our [examples](./docs/sphinx/examples/)! We also take any + CUDA-QX related questions on this forum. + +1. Implement a feature or bug-fix: + + Please file an [issue][cuda_qx_issues] on the repository and express + your interest in contributing to its implementation. Someone from the CUDA-QX + team will respond on the issue to discuss how to best proceed with the + suggestion. + +When you contribute code to this repository, whether be it an example, bug fix, +or feature, make sure that you can contribute your work under the used +[open-source license](./LICENSE), that is make sure no license and/or patent +conflict is introduced by your pull-request. To confirm this, you will need to +[sign off on your commits](#commit-sign-off) as described below. Thanks in advance +for your patience as we review your contributions; we do appreciate them! + +[cuda_qx_issues]: https://github.com/NVIDIA/cuda-qx/issues + +## Commit Sign-off + +We require that all contributors "sign-off" on their commits. This certifies +that the contribution is your original work, or you have rights to submit it +under the same license, or a compatible license. Any contribution which contains +commits that are not signed off will not be accepted. + +To sign off on a commit you simply use the `--signoff` (or `-s`) option when +committing your changes: + +```bash +git commit -s -m "Add cool feature." +``` + +This will append the following to your commit message: + +```txt +Signed-off-by: Your Name +``` + +By signing off on your commits you attest to the [Developer Certificate of Origin +(DCO)](https://developercertificate.org/). Full text of the DCO: + +```txt +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. 
+``` \ No newline at end of file diff --git a/Contributor_License_Agreement.md b/Contributor_License_Agreement.md new file mode 100644 index 0000000..c808502 --- /dev/null +++ b/Contributor_License_Agreement.md @@ -0,0 +1,70 @@ +# Individual Contributor License Agreement (CLA) + +**Thank you for submitting your contributions to this project.** + +By signing this CLA, you agree that the following terms apply to all of your +past, present and future contributions to the project. + +## License + +You hereby represent that all present, past and future contributions are +governed by the [Apache 2.0 License](http://www.apache.org/licenses/LICENSE-2.0) +copyright statement. + +This entails that to the extent possible under law, you transfer all copyright +and related or neighboring rights of the code or documents you contribute to the +project itself or its maintainers. Furthermore you also represent that you have +the authority to perform the above waiver with respect to the entirety of you +contributions. + +## Moral Rights + +To the fullest extent permitted under applicable law, you hereby waive, and +agree not to assert, all of your “moral rights” in or relating to your +contributions for the benefit of the project. + +## Third Party Content + +If your Contribution includes or is based on any source code, object code, bug +fixes, configuration changes, tools, specifications, documentation, data, +materials, feedback, information or other works of authorship that were not +authored by you (“Third Party Content”) or if you are aware of any third party +intellectual property or proprietary rights associated with your Contribution +(“Third Party Rights”), then you agree to include with the submission of your +Contribution full details respecting such Third Party Content and Third Party +Rights, including, without limitation, identification of which aspects of your +Contribution contain Third Party Content or are associated with Third Party +Rights, the owner/author of the Third Party Content and Third Party Rights, +where you obtained the Third Party Content, and any applicable third party +license terms or restrictions respecting the Third Party Content and Third Party +Rights. For greater certainty, the foregoing obligations respecting the +identification of Third Party Content and Third Party Rights do not apply to any +portion of a Project that is incorporated into your Contribution to that same +Project. + +## Representations + +You represent that, other than the Third Party Content and Third Party Rights +identified by you in accordance with this Agreement, you are the sole author of +your Contributions and are legally entitled to grant the foregoing licenses and +waivers in respect of your Contributions. If your Contributions were created in +the course of your employment with your past or present employer(s), you +represent that such employer(s) has authorized you to make your Contributions on +behalf of such employer(s) or such employer (s) has waived all of their right, +title or interest in or to your Contributions. + +## Disclaimer + +To the fullest extent permitted under applicable law, your Contributions are +provided on an "as is" basis, without any warranties or conditions, express or +implied, including, without limitation, any implied warranties or conditions of +non-infringement, merchantability or fitness for a particular purpose. You are +not required to provide support for your Contributions, except to the extent you +desire to provide support. 
+ +## No Obligation + +You acknowledge that the maintainers of this project are under no obligation to +use or incorporate your contributions into the project. The decision to use or +incorporate your contributions into the project will be made at the sole +discretion of the maintainers or their authorized delegates. \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..e667172 --- /dev/null +++ b/LICENSE @@ -0,0 +1,16 @@ +Apache License Version 2.0 + +Copyright (c) 2024 NVIDIA Corporation & Affiliates +All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..8a6a0a5 --- /dev/null +++ b/NOTICE @@ -0,0 +1,70 @@ +CUDA-QX + +This product includes software developed by NVIDIA corporation and affiliates and includes material from third parties. It includes work released under the following licenses: + +PyBind11 - BSD-style license + + +The source code is based on the work originally developed by Wenzel Jakob +License at + +---------------------------------------------------------------- + +NLohmann JSON - MIT License + + +The source code is based on the work originally developed by Niels Lohmann +License at + +---------------------------------------------------------------- + +{fmt} - MIT License + + +Originally developed by Victor Zverovich and contributors. +The incorporated source code and its license can be found as a submodule on the CUDA-Q repository. +License at + +---------------------------------------------------------------- + +BLAS - custom license + + +See also + +---------------------------------------------------------------- + +XTL - BSD-3-Clause + + +License at + +---------------------------------------------------------------- + +XTensor - BSD-3-Clause + + +License at + +---------------------------------------------------------------- + +liblbfgs - MIT License + + +License at + +---------------------------------------------------------------- + +Prima - BSD 3-Clause + + +License at + +---------------------------------------------------------------- + +Stim - Apache 2.0 + + +License at + +---------------------------------------------------------------- diff --git a/README.md b/README.md new file mode 100644 index 0000000..934b90a --- /dev/null +++ b/README.md @@ -0,0 +1,32 @@ +# Welcome to the CUDA-QX repository + +This repository contains a set of libraries that build on +NVIDIA CUDA-Q. These libraries enable the rapid development of hybrid quantum-classical +application code leveraging state-of-the-art CPUs, GPUs, and QPUs. + +## Getting Started +To learn more about how to work with the CUDA-QX libraries, please take a look at the +[CUDA-QX Documentation][cudaqx_docs]. The page contains detailed +[installation instructions][official_install] for officially released packages. 
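As a quick orientation (not part of the official instructions linked above), installing the released Python wheels might look like the sketch below. The package names mirror the `cudaq-qec` and `cudaq-solvers` library names used throughout this patch, so treat them as assumptions and defer to the linked installation guide for the authoritative names and supported platforms.

```bash
# Hypothetical quick-start, assuming the libraries are published as
# cudaq-qec and cudaq-solvers wheels (see the installation guide for the
# official package names and supported Python/CUDA versions).
pip install cudaq-qec cudaq-solvers

# Smoke-test the Python modules exposed by the libraries in this patch.
python3 -c "import cudaq_qec, cudaq_solvers"
```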
+ +[cudaqx_docs]: https://nvidia.github.io/cudaqx +[official_install]: https://nvidia.github.io/cudaqx/quickstart/installation.html + +## Contributing + +There are many ways in which you can get involved with CUDA-QX. If you are +interested in developing quantum applications with the CUDA-QX libraries, +this repository is a great place to get started! For more information about +contributing to the CUDA-QX platform, please take a look at +[Contributing.md](./Contributing.md). + +## License + +The code in this repository is licensed under [Apache License 2.0](./LICENSE). + +Contributing a pull request to this repository requires accepting the +Contributor License Agreement (CLA) declaring that you have the right to, and +actually do, grant us the rights to use your contribution. A CLA-bot will +automatically determine whether you need to provide a CLA and decorate the PR +appropriately. Simply follow the instructions provided by the bot. You will only +need to do this once. diff --git a/cmake/Modules/CUDA-QX.cmake b/cmake/Modules/CUDA-QX.cmake new file mode 100644 index 0000000..6ce932e --- /dev/null +++ b/cmake/Modules/CUDA-QX.cmake @@ -0,0 +1,167 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +#[=======================================================================[.rst: +cudaqx_add_device_code +---------------------------- + +Add NVQ++ custom-compiled quantum device source files to a target library using nvq++. + +This function compiles specified source files using the nvq++ compiler +from the CUDAQ installation and adds the resulting object files to the +given library target. + +.. command:: add_custom_compiled_sources + + .. code-block:: cmake + + cudaqx_add_device_code( + + SOURCES [ ...] + [COMPILER_FLAGS [ ...]] + ) + + ```` + The name of the existing library target to which the compiled + sources will be added. + + ``SOURCES [ ...]`` + A list of source files to be compiled. + + ``COMPILER_FLAGS [ ...]`` + Optional. A list of compiler flags to be passed to nvq++. + +This function creates custom commands to compile each source file using +nvq++, generates custom targets for each compilation, and adds the +resulting object files to the specified library target. + +Note: This function assumes that the CUDAQ_INSTALL_DIR variable is set +to the CUDAQ installation directory. 
+ +Example usage: + cudaqx_add_device_code( + my_library + SOURCES + ${CMAKE_CURRENT_SOURCE_DIR}/file1.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/file2.cpp + COMPILER_FLAGS + --enable-mlir + -v + ) + +#]=======================================================================] +function(cudaqx_add_device_code LIBRARY_NAME) + set(options) + set(oneValueArgs) + set(multiValueArgs SOURCES COMPILER_FLAGS) + cmake_parse_arguments(ARGS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + if(NOT DEFINED CUDAQ_INSTALL_DIR) + message(FATAL_ERROR "CUDAQ_INSTALL_DIR must be defined") + endif() + + if(NOT ARGS_SOURCES) + message(FATAL_ERROR "At least one SOURCE file is required") + endif() + + set(COMPILER ${CUDAQ_INSTALL_DIR}/bin/nvq++) + + # It might be that our CXX toolchain is installed in non-standard path and + # `cudaq-quake`, being a clang-based compiler, won't be able to find it. In + # such cases, setting CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN will allows us + # to tell `cudaq-quake` where to look for the toolchain. (This happens when + # building wheels inside the manylinux container, for example.) + if (CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN) + set(ARGS_COMPILER_FLAGS "${ARGS_COMPILER_FLAGS} --gcc-install-dir=${CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN}") + endif() + + set(prop "$") + foreach(source ${ARGS_SOURCES}) + get_filename_component(filename ${source} NAME_WE) + set(output_file "${CMAKE_CURRENT_BINARY_DIR}/${filename}.o") + cmake_path(GET output_file FILENAME baseName) + + add_custom_command( + OUTPUT ${output_file} + COMMAND ${COMPILER} + ${ARGS_COMPILER_FLAGS} -c -fPIC --enable-mlir + ${CMAKE_CURRENT_SOURCE_DIR}/${source} -o ${baseName} + "$<$:-I $>" + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${source} + COMMENT "Compiling ${source} with nvq++" + VERBATIM + ) + + list(APPEND object_files ${output_file}) + list(APPEND custom_targets ${filename}_target) + + add_custom_target(${filename}_target DEPENDS ${output_file}) + endforeach() + + add_dependencies(${LIBRARY_NAME} ${custom_targets}) + target_sources(${LIBRARY_NAME} PRIVATE ${object_files}) +endfunction() + +#[=======================================================================[.rst: +cudaqx_set_target +------------------------- + Set up a CUDA-QX target with the specified name. + + This function creates an interface library for the given CUDA-QX + target, links it to the main CUDAQ library, and adds target-specific libraries. + + :param TARGETNAME: The name of the CUDA-QX target to set up. + + .. note:: + This function will create an interface library + named ``cudaq_${TARGETNAME}`` and an alias target ``cudaq::cudaq_${TARGETNAME}``. + + **Example:** + + .. code-block:: cmake + + cudaqx_set_target(my_target) + + This will: + + 1. Create an interface library ``cudaq_my_target`` + 2. Link it to ``cudaq::cudaq`` + 3. Link it to ``cudaq::cudaq-my_target-target`` + 4. Create an alias target ``cudaq::cudaq_my_target`` + + This function simplifies the process of setting up CUDA-QX targets by + automating the creation of interface libraries and establishing the necessary linkages. 
+#]=======================================================================] +function(cudaqx_set_target TARGETNAME) + message(STATUS "Setting CUDA-QX Target = ${TARGETNAME}") + + # Create a new interface target + add_library(cudaq_${TARGETNAME} INTERFACE) + + # Link to the original cudaq target + target_link_libraries(cudaq_${TARGETNAME} INTERFACE cudaq::cudaq) + + # Add the additional target-specific library + target_link_libraries(cudaq_${TARGETNAME} INTERFACE cudaq::cudaq-${TARGETNAME}-target) + + # Create an alias target to make it easier to use + add_library(cudaq::cudaq_${TARGETNAME} ALIAS cudaq_${TARGETNAME}) +endfunction() + +#[=======================================================================[.rst: +cudaqx_add_pymodule +------------------------- + This is a helper function to add CUDAQ-QX libraries' python modules. It's + main purpose is to create a custom target, cudaqx-pymodules, which depends + on all libraries' python modules. + +#]=======================================================================] +function(cudaqx_add_pymodule module) + pybind11_add_module(${module} ${ARGN}) + add_dependencies(cudaqx-pymodules ${module}) +endfunction() diff --git a/cmake/Modules/FindSphinx.cmake b/cmake/Modules/FindSphinx.cmake new file mode 100644 index 0000000..ac03000 --- /dev/null +++ b/cmake/Modules/FindSphinx.cmake @@ -0,0 +1,14 @@ +# Unfortunately, there is no "standard way to find sphinx" + +find_program(SPHINX_EXECUTABLE + NAMES sphinx-build + DOC "Path to sphinx-build executable" +) + +include(FindPackageHandleStandardArgs) + +find_package_handle_standard_args( + Sphinx "Failed to find sphinx-build executable" SPHINX_EXECUTABLE +) + +mark_as_advanced(SPHINX_EXECUTABLE) diff --git a/docker/build_env/Dockerfile b/docker/build_env/Dockerfile new file mode 100644 index 0000000..979834a --- /dev/null +++ b/docker/build_env/Dockerfile @@ -0,0 +1,22 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +FROM ghcr.io/nvidia/cuda-quantum-devdeps:ext-cu11.8-gcc11-main + +RUN apt-get update && apt-get install -y gfortran libblas-dev libcusolver-dev-11-8 \ + && python3 -m pip install cmake --user \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +RUN git clone https://github.com/nvidia/cuda-quantum \ + && cd cuda-quantum && mkdir build && cd build \ + && ~/.local/bin/cmake -G Ninja .. -DLLVM_DIR=/opt/llvm/lib/cmake \ + -DCUDAQ_ENABLE_PYTHON=TRUE -DCMAKE_INSTALL_PREFIX=$HOME/.cudaq \ + && ninja install + + + diff --git a/docker/release/Dockerfile b/docker/release/Dockerfile new file mode 100644 index 0000000..a6b1593 --- /dev/null +++ b/docker/release/Dockerfile @@ -0,0 +1,58 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + + +# To build the release image run the following from the top-level of the repo (must clone the cudaqx-private repo) +# +# docker build -t ghcr.io/nvidia/cudaqx-private:latest-$(uname -m) . -f docker/release/Dockerfile +# +# Test the image with +# +# scripts/validation/container/validate_container.sh + +FROM ghcr.io/nvidia/private/cuda-quantum:cu12-0.9.0 as build + +ARG GITHUB_UNAME +ARG GITHUB_API_TOKEN +ARG DEBIAN_FRONTEND=noninteractive +USER root + +RUN apt-get update && apt-get install -y g++ gfortran libblas-dev ninja-build \ + && python3 -m pip install cmake --user + +ADD . . + +RUN mkdir build && cd build \ + && ~/.local/bin/cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=/opt/nvidia/cudaq \ + -DCUDAQ_DIR=/opt/nvidia/cudaq/lib/cmake/cudaq \ + -DCUDAQX_ENABLE_LIBS="all" \ + -DCUDAQX_INCLUDE_TESTS=ON \ + -DCUDAQX_BINDINGS_PYTHON=ON \ + -DCUDAQX_INSTALL_PYTHON=ON \ + -DCMAKE_BUILD_TYPE=Release \ + && ninja install && pwd + +FROM ghcr.io/nvidia/private/cuda-quantum:cu12-0.9.0 + +USER root + +COPY --from=build "/opt/nvidia/cudaq/" "/opt/nvidia/cudaq" +COPY --from=build "/home/cudaq/docs/sphinx/examples" "/home/cudaq/cudaqx-examples" +COPY --from=build "/home/cudaq/libs/qec/python/tests" "/opt/nvidia/cudaq/cudaqx_pytests/qec" +COPY --from=build "/home/cudaq/libs/solvers/python/tests" "/opt/nvidia/cudaq/cudaqx_pytests/solvers" + +RUN chown -R cudaq /home/cudaq && chgrp -R cudaq /home/cudaq +RUN apt-get update && apt-get install -y g++ gfortran + +USER cudaq + +RUN python3 -m pip install --user mpi4py \ + pyscf fastapi uvicorn cupy-cuda12x nvidia-cublas-cu12 networkx + +WORKDIR /home/cudaq +ENTRYPOINT ["bash", "-l"] diff --git a/docker/release/Dockerfile.wheel b/docker/release/Dockerfile.wheel new file mode 100644 index 0000000..c855a82 --- /dev/null +++ b/docker/release/Dockerfile.wheel @@ -0,0 +1,98 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# To build the image that tests the wheels, run the following from the top-level of the repo +# +# docker build -t ghcr.io/nvidia/cudaqx-private-wheels-test:latest-$(uname -m) . -f docker/release/Dockerfile.wheel --build-arg GITHUB_API_TOKEN=TOKEN +# +# Test the wheels with +# +# scripts/validation/wheel/validate_wheels.sh +# +# This tests wheels for 3.10, 3.11, and 3.12 +# +# Potential Build Arguments: +# CUDAQX_PRIVATE_ARTIFACTS_ID - this is the run ID of the workflow that built the wheels. +# i.e. 
https://github.com/NVIDIA/cudaqx-private/actions/runs/11810855185, it is 11810855185 +# CUQUANTUM_RELEASE - the name of the cuQuantum release uploaded to cudaq-private +# CUDAQ_RELEASE - the name of the CUDA-Q release uploaded to cudaq-private + +FROM ubuntu:22.04 +ARG DEBIAN_FRONTEND=noninteractive + +ARG CUDAQX_PRIVATE_ARTIFACTS_ID=11871513805 +ARG CUQUANTUM_RELEASE=cuquantum-24.11 +ARG CUDAQ_RELEASE=0.9.0-rc4 +ARG GITHUB_API_TOKEN + +# Install build dependencies +RUN apt-get update && apt-get install -y \ + software-properties-common \ + build-essential \ + libssl-dev \ + zlib1g-dev \ + libbz2-dev \ + libreadline-dev \ + libsqlite3-dev \ + curl \ + libncursesw5-dev \ + xz-utils \ + tk-dev \ + libxml2-dev \ + libxmlsec1-dev \ + libffi-dev \ + liblzma-dev \ + unzip \ + sudo \ + wget git + +# Install pyenv +RUN curl https://pyenv.run | bash +ENV PYENV_ROOT="/root/.pyenv" +ENV PATH="${PYENV_ROOT}/bin:${PATH}" +RUN echo 'eval "$(pyenv init -)"' >> ~/.bashrc + +# Install Python versions +RUN pyenv install 3.10.13 && \ + pyenv install 3.11.7 && \ + pyenv install 3.12.1 && \ + pyenv global 3.10.13 + +# Install GitHub CLI +RUN mkdir -p -m 755 /etc/apt/keyrings && \ + wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null && \ + chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg && \ + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null && \ + apt-get update && apt-get install -y gh + +# Setup GitHub auth +RUN echo "${GITHUB_API_TOKEN}" | gh auth login --with-token + +# Download required releases +RUN gh release download ${CUQUANTUM_RELEASE} --repo NVIDIA/cudaq-private \ + && gh release download ${CUDAQ_RELEASE} --repo NVIDIA/cudaq-private + +RUN gh run download ${CUDAQX_PRIVATE_ARTIFACTS_ID} --repo NVIDIA/cudaqx-private + +# Function to install packages for a specific Python version +COPY scripts/validation/wheel/install_packages.sh / +RUN chmod +x /install_packages.sh + +# Install packages for each Python version + +# Configure pyenv shell integration +RUN unzip wheelhouse.zip + +RUN for version in 3.10.13 3.11.7 3.12.1; do \ + PYTHON_WHEEL_VER="cp$(echo $version | cut -d. -f1-2 | sed 's/\.//')" && \ + /install_packages.sh $PYTHON_WHEEL_VER ${version}; \ + done + +WORKDIR /workspace +RUN gh repo clone https://github.com/NVIDIA/cudaqx-private -- -b staging/2024.11 --depth 1 +RUN apt-get update && apt-get install -y gfortran diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt new file mode 100644 index 0000000..bd9f52b --- /dev/null +++ b/docs/CMakeLists.txt @@ -0,0 +1,110 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +find_package(Doxygen REQUIRED) +find_package(Sphinx REQUIRED) + +# Find all the public headers +set(CUDAQX_PUBLIC_HEADERS) +file(GLOB_RECURSE LIB_PUBLIC_HEADERS "../libs/core/include/*.h") +list(APPEND CUDAQX_PUBLIC_HEADERS ${LIB_PUBLIC_HEADERS}) +foreach(lib ${CUDAQX_ALL_LIBS}) + file(GLOB_RECURSE LIB_PUBLIC_HEADERS "../libs/${lib}/include/*.h") + list(APPEND CUDAQX_PUBLIC_HEADERS ${LIB_PUBLIC_HEADERS}) +endforeach() + +# ============================================================================== +# Doxygen +# ============================================================================== + +# We need to give doxygen a space separated list of files +list(JOIN CUDAQX_PUBLIC_HEADERS " " HEADERS_LIST) + +set(DOXYGEN_INPUT ${HEADERS_LIST}) +set(DOXYGEN_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/doxygen) +set(DOXYGEN_INDEX_FILE ${DOXYGEN_OUTPUT_DIR}/xml/index.xml) +set(DOXYFILE_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in) +set(DOXYFILE_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile) + +configure_file(${DOXYFILE_IN} ${DOXYFILE_OUT} @ONLY) + +# Only regenerate Doxygen when the Doxyfile or public headers change +add_custom_command( + OUTPUT ${DOXYGEN_INDEX_FILE} + DEPENDS ${CUDAQX_PUBLIC_HEADERS} + COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYFILE_OUT} + MAIN_DEPENDENCY ${DOXYFILE_OUT} ${DOXYFILE_IN} + COMMENT "Generating docs" +) + +add_custom_target(doxygen_docs DEPENDS ${DOXYGEN_INDEX_FILE}) + +# ============================================================================== +# Sphinx +# ============================================================================== + +find_package(Python COMPONENTS Interpreter REQUIRED) + +function(require_python_module module_name) + execute_process( + COMMAND ${Python_EXECUTABLE} -c "import ${module_name}" + RESULT_VARIABLE result + OUTPUT_QUIET + ERROR_QUIET + ) + if(NOT result EQUAL 0) + message(FATAL_ERROR "Python module '${module_name}' not found") + endif() +endfunction() + +require_python_module(IPython) +require_python_module(breathe) +require_python_module(enum_tools) +require_python_module(myst_parser) +require_python_module(nbsphinx) +require_python_module(sphinx_copybutton) +require_python_module(sphinx_inline_tabs) +require_python_module(sphinx_gallery) +require_python_module(sphinx_rtd_theme) +require_python_module(sphinx_reredirects) +require_python_module(sphinx_toolbox) + +set(SPHINX_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/sphinx) +set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR}/build) +set(SPHINX_INDEX_FILE ${SPHINX_BUILD}/index.html) + +set(SPHINX_CONF_IN ${SPHINX_SOURCE}/conf.py.in) +set(SPHINX_CONF ${CMAKE_CURRENT_BINARY_DIR}/conf.py) + +configure_file(${SPHINX_CONF_IN} ${SPHINX_CONF} @ONLY) + +# Only regenerate Sphinx when: +# - Doxygen has rerun +# - Our doc files have been updated +# - The Sphinx config has been updated +# TODO: set warning as error (-W flag) +add_custom_command( + OUTPUT ${SPHINX_INDEX_FILE} + COMMAND ${SPHINX_EXECUTABLE} -v -n --keep-going -b html + -c ${CMAKE_CURRENT_BINARY_DIR} + -Dbreathe_projects.cudaqx=${DOXYGEN_OUTPUT_DIR}/xml + ${SPHINX_SOURCE} ${SPHINX_BUILD} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS + ${SPHINX_SOURCE}/index.rst + ${DOXYGEN_INDEX_FILE} + cudaqx-pymodules + MAIN_DEPENDENCY ${SPHINX_CONF_IN} ${SPHINX_CONF} + COMMENT "Generating documentation with Sphinx" +) + +add_custom_target(sphinx_docs DEPENDS ${SPHINX_INDEX_FILE}) + +# ============================================================================== + 
+add_custom_target(docs DEPENDS doxygen_docs sphinx_docs) diff --git a/docs/Doxyfile.in b/docs/Doxyfile.in new file mode 100644 index 0000000..29b697c --- /dev/null +++ b/docs/Doxyfile.in @@ -0,0 +1,56 @@ +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +PROJECT_NAME = "CUDA-QX" +INPUT = @DOXYGEN_INPUT@ +FILE_PATTERNS = *.h \ + *.hpp +RECURSIVE = YES +OUTPUT_DIRECTORY = @DOXYGEN_OUTPUT_DIR@ + +ALIASES = "entry_point_kernel=\par This function is an entry-point CUDA-Q quantum kernel. It can be invoked from host." +ALIASES += "pure_device_kernel=\par This function is a pure-device CUDA-Q quantum kernel. It cannot be invoked from host. It can only be invoked from other CUDA-Q kernel code." + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +EXTRACT_ALL = YES +EXTRACT_PRIVATE = YES +EXTRACT_STATIC = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +ENABLE_PREPROCESSING = YES +MACRO_EXPANSION = YES +EXPAND_ONLY_PREDEF = YES +PREDEFINED = "__qpu__=" + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +GENERATE_HTML = NO + +#--------------------------------------------------------------------------- +# Configuration options related to the XML output +#--------------------------------------------------------------------------- + +GENERATE_XML = YES +XML_NS_MEMB_FILE_SCOPE = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +GENERATE_LATEX = NO + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +HAVE_DOT = NO + diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..f262d53 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,67 @@ +# CUDA-QX Documentation + +This folder contains tools and content to build the CUDA-QX documentation. +The [script for building docs](../scripts/build_docs.sh) can be used to build +the complete CUDA-QX documentation. Please see the comment in that script +for more detail. + +We use [Sphinx](https://www.sphinx-doc.org/) to produce documentation in HTML +format. This documentation includes conceptual documentation in the form of +Markdown or reStructuredText format, API documentation generated based on doc +comments in the source code, as well as potentially source code examples and +snippets. + +## API Documentation + +We use [Sphinx](https://www.sphinx-doc.org/) to include documentation defined in +the form of doc comments in the source code for all of our APIs. The build is +configured by the settings in the [sphinx/conf.py](./sphinx/conf.py) file. + +- **C++ source code**:
+ As part of the build, [Doxygen](https://www.doxygen.org/) is used to generate + documentation based on doc comments. The documentation generation is + configured in the [Doxyfile.in](./Doxyfile.in) file - see the manual for + [possible configurations](https://www.doxygen.nl/manual/config.html). Our + build replaces the environment variables used in that file to produce the + final `Doxyfile` with which `doxygen` is invoked. We use the [Breathe + extension](https://breathe.readthedocs.io/) for Sphinx to incorporate content + from the generated XML files in our docs. + +- **Python bindings**:
+ We use [pybind11](https://github.com/pybind/pybind11) to define Python + bindings for the CUDA-QX API. Doc comments are defined as part of + defining these bindings in C++. To incorporate the API documentation, the + `cudaqx` Python package needs to be built and installed prior to generating the + CUDA-QX documentation. The [build_docs.sh](../scripts/build_docs.sh) + script will automatically do that if necessary. This project generates + ReStructuredText documentation from Python `docstrings` using `autodoc`. + The generated `.rst` files are in `docs/source/apidoc`. The files are + generated automatically by `doc-build` and `doc-clean` will remove them. + +## Sphinx Extensions + +The extensions we use to generate API docs are outlined and linked in the +section above. The full list of built-in Sphinx tensions can be found +[here](https://www.sphinx-doc.org/en/master/usage/extensions/index.html). The +list of extensions that are enabled for building CUDA-QX documentation is +defined by the value of the `extensions` configuration in +[conf.py](./sphinx/conf.py). + +## References + +Additional links that may be helpful that are not listed above: + +- [References and automatic link generation in + Doxygen](https://www.star.bnl.gov/public/comp/sofi/doxygen/autolink.html) +- [Using Napoleon style for Python doc + comments](https://docs.softwareheritage.org/devel/contributing/sphinx.html) +- [Cross-referencing Python + objects](https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html#cross-referencing-python-objects) +- [Cross-referencing C++ + objects](https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html#cross-referencing) +- [Sphinx configuration + options](https://www.sphinx-doc.org/en/master/usage/configuration.html) +- [Syntax highlighting in inline + code](https://sphinxawesome.xyz/demo/inline-code/#syntax-highlighting-in-inline-code) +- [Test examples in Python + documentation](https://docs.python.org/3/library/doctest.html) diff --git a/docs/sphinx/_static/cuda_quantum_icon.svg b/docs/sphinx/_static/cuda_quantum_icon.svg new file mode 100644 index 0000000..e7f2901 --- /dev/null +++ b/docs/sphinx/_static/cuda_quantum_icon.svg @@ -0,0 +1,553 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/sphinx/_static/cudaq_override.css b/docs/sphinx/_static/cudaq_override.css new file mode 100644 index 0000000..4ab0ac1 --- /dev/null +++ b/docs/sphinx/_static/cudaq_override.css @@ -0,0 +1,27 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + * * + * File: cudaq_override.css * + * Created Date: 16 Feb 2023 * + * Last Modified: 16 Feb 2023 * + ******************************************************************************/ + +.wy-nav-content { +max-width: 1240px !important; +} + +.wy-menu-vertical a { + font-size: 92%; +} + +.wy-menu-vertical li code, .wy-menu-vertical li .rst-content tt, .rst-content .wy-menu-vertical li tt { + font-size: 100%; +} + +code.code span.pre, code.cpp span.pre, code.docutils span.pre{ + color: darkgreen; +} diff --git a/docs/sphinx/_templates/autosummary/class.rst b/docs/sphinx/_templates/autosummary/class.rst new file mode 100644 index 0000000..b45a3fd --- /dev/null +++ b/docs/sphinx/_templates/autosummary/class.rst @@ -0,0 +1,26 @@ +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + + {% block methods %} + {% if methods %} + .. rubric:: {{ _('Methods') }} + + {% for item in methods %} + .. automethod:: {{ item }} + {%- endfor %} + + {% endif %} + {% endblock %} + + {% block attributes %} + {% if attributes %} + .. rubric:: {{ _('Attributes') }} + + {% for item in attributes %} + .. autoattribute:: {{ item }} + {%- endfor %} + {% endif %} + {% endblock %} diff --git a/docs/sphinx/_templates/autosummary/dataclass.rst b/docs/sphinx/_templates/autosummary/dataclass.rst new file mode 100644 index 0000000..b8c3532 --- /dev/null +++ b/docs/sphinx/_templates/autosummary/dataclass.rst @@ -0,0 +1,10 @@ +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + + {% block methods %} + .. automethod:: __init__ + {% endblock %} + diff --git a/docs/sphinx/_templates/layout.html b/docs/sphinx/_templates/layout.html new file mode 100644 index 0000000..c0f02f2 --- /dev/null +++ b/docs/sphinx/_templates/layout.html @@ -0,0 +1,62 @@ +{% extends "!layout.html" %} + {% block sidebartitle %} {{ super() }} + + + {% endblock %} + + {% block footer %} {{ super() }} + + + {% endblock %} diff --git a/docs/sphinx/_templates/openapi.html b/docs/sphinx/_templates/openapi.html new file mode 100644 index 0000000..d50627f --- /dev/null +++ b/docs/sphinx/_templates/openapi.html @@ -0,0 +1,17 @@ + + + + + +
+ + + + \ No newline at end of file diff --git a/docs/sphinx/api/core/cpp_api.rst b/docs/sphinx/api/core/cpp_api.rst new file mode 100644 index 0000000..c400ea2 --- /dev/null +++ b/docs/sphinx/api/core/cpp_api.rst @@ -0,0 +1,44 @@ +CUDA-QX Namespaces and Core Library C++ API +******************************************** + +Namespaces +========== +.. doxygennamespace:: cudaqx + :desc-only: +.. doxygennamespace:: cudaq + :desc-only: +.. doxygennamespace:: cudaq::qec + :desc-only: +.. doxygennamespace:: cudaq::qec::steane + :desc-only: +.. doxygennamespace:: cudaq::qec::repetition + :desc-only: +.. doxygennamespace:: cudaq::solvers + :desc-only: +.. doxygennamespace:: cudaq::solvers::stateprep + :desc-only: +.. doxygennamespace:: cudaq::solvers::adapt + :desc-only: +.. doxygennamespace:: cudaq::optim + :desc-only: + +Core +============= + +.. doxygenclass:: cudaqx::extension_point + :members: + +.. doxygenclass:: cudaqx::heterogeneous_map + :members: + +.. doxygenclass:: cudaqx::tear_down + :members: + +.. doxygenclass:: cudaqx::details::tensor_impl + :members: + +.. doxygenclass:: cudaqx::tensor + :members: + +.. doxygenclass:: cudaqx::graph + :members: \ No newline at end of file diff --git a/docs/sphinx/api/qec/cpp_api.rst b/docs/sphinx/api/qec/cpp_api.rst new file mode 100644 index 0000000..04a379d --- /dev/null +++ b/docs/sphinx/api/qec/cpp_api.rst @@ -0,0 +1,34 @@ +CUDA-Q QEC C++ API +****************************** + +.. doxygentypedef:: cudaq::qec::float_t + +.. doxygenstruct:: cudaq::qec::decoder_result + :members: + +.. doxygenclass:: cudaq::qec::decoder + :members: + +.. doxygenstruct:: cudaq::qec::patch + :members: + +.. doxygenclass:: cudaq::qec::steane::steane + :members: + +.. doxygenclass:: cudaq::qec::repetition::repetition + :members: + +.. doxygenclass:: cudaq::qec::code + :members: + +.. doxygenenum:: cudaq::qec::operation + +.. doxygenfunction:: cudaq::qec::sample_code_capacity(const cudaqx::tensor &, std::size_t, double) +.. doxygenfunction:: cudaq::qec::sample_code_capacity(const cudaqx::tensor &, std::size_t, double, unsigned) +.. doxygenfunction:: cudaq::qec::sample_code_capacity(const code &, std::size_t, double) +.. doxygenfunction:: cudaq::qec::sample_code_capacity(const code &, std::size_t, double, unsigned) + +.. doxygenfunction:: cudaq::qec::sample_memory_circuit(const code &, std::size_t, std::size_t) +.. doxygenfunction:: cudaq::qec::sample_memory_circuit(const code &, std::size_t, std::size_t, cudaq::noise_model &) +.. doxygenfunction:: cudaq::qec::sample_memory_circuit(const code &, operation, std::size_t, std::size_t) +.. doxygenfunction:: cudaq::qec::sample_memory_circuit(const code &, operation, std::size_t, std::size_t, cudaq::noise_model &) diff --git a/docs/sphinx/api/qec/python_api.rst b/docs/sphinx/api/qec/python_api.rst new file mode 100644 index 0000000..1ea1e62 --- /dev/null +++ b/docs/sphinx/api/qec/python_api.rst @@ -0,0 +1,18 @@ +CUDA-Q QEC Python API +****************************** + +.. automodule:: cudaq_qec + :members: + +.. autoclass:: cudaq_qec.Code + :members: + +.. autoclass:: cudaq_qec.Decoder + :members: + +.. autoclass:: cudaq_qec.DecoderResult + :members: + +.. autofunction:: cudaq_qec.sample_memory_circuit + +.. autofunction:: cudaq_qec.sample_code_capacity diff --git a/docs/sphinx/api/solvers/cpp_api.rst b/docs/sphinx/api/solvers/cpp_api.rst new file mode 100644 index 0000000..623fecb --- /dev/null +++ b/docs/sphinx/api/solvers/cpp_api.rst @@ -0,0 +1,77 @@ +CUDA-Q Solvers C++ API +****************************** + +.. 
doxygenclass:: cudaq::solvers::operator_pool + :members: + +.. doxygenclass:: cudaq::solvers::spin_complement_gsd +.. doxygenclass:: cudaq::solvers::uccsd +.. doxygenclass:: cudaq::solvers::qaoa_pool + +.. doxygenstruct:: cudaq::solvers::atom + :members: + +.. doxygenclass:: cudaq::solvers::molecular_geometry + :members: + +.. doxygenstruct:: cudaq::solvers::molecular_hamiltonian + :members: + +.. doxygenstruct:: cudaq::solvers::molecule_options + :members: + +.. doxygenfunction:: cudaq::solvers::create_molecule + +.. doxygenfunction:: cudaq::solvers::get_maxcut_hamiltonian + +.. doxygenfunction:: cudaq::solvers::get_clique_hamiltonian + +.. doxygenfunction:: cudaq::solvers::one_particle_op + +.. doxygentypedef:: cudaq::ParameterizedKernel +.. doxygentypedef:: cudaq::optim::optimization_result +.. doxygenclass:: cudaq::optim::optimizable_function +.. doxygenclass:: cudaq::optim::optimizer + :members: +.. doxygenclass:: cudaq::optim::cobyla +.. doxygenclass:: cudaq::optim::lbfgs +.. doxygenclass:: cudaq::observe_gradient + :members: +.. doxygenstruct:: cudaq::observe_iteration + :members: +.. doxygenclass:: cudaq::central_difference +.. doxygenclass:: cudaq::forward_difference +.. doxygenclass:: cudaq::parameter_shift + +.. doxygenenum:: cudaq::observe_execution_type + +.. doxygenstruct:: cudaq::solvers::vqe_result +.. doxygenfunction:: cudaq::solvers::vqe(QuantumKernel &&, const spin_op &, const std::string &, const std::string &, const std::vector &, heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::vqe(QuantumKernel &&, const spin_op &, const std::string &, const std::vector &, heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::vqe(QuantumKernel &&, const spin_op &, const std::string &, observe_gradient &, const std::vector &, heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::vqe(QuantumKernel &&, const spin_op &, optim::optimizer &, const std::string &, const std::vector &, heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::vqe(QuantumKernel &&, const spin_op &, optim::optimizer &, const std::vector &, heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::vqe(QuantumKernel &&, const spin_op &, optim::optimizer &, observe_gradient &, const std::vector &, heterogeneous_map) + +.. doxygentypedef:: cudaq::solvers::adapt::result +.. doxygenfunction:: cudaq::solvers::adapt_vqe(const cudaq::qkernel&)> &, const spin_op &, const std::vector &, const heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::adapt_vqe(const cudaq::qkernel&)> &, const spin_op &, const std::vector &, const optim::optimizer&, const heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::adapt_vqe(const cudaq::qkernel&)> &, const spin_op &, const std::vector &, const optim::optimizer&, const std::string&, const heterogeneous_map) + +.. doxygentypedef:: cudaq::solvers::stateprep::excitation_list +.. doxygenfunction:: cudaq::solvers::stateprep::get_uccsd_excitations +.. doxygenfunction:: cudaq::solvers::stateprep::get_num_uccsd_parameters +.. doxygenfunction:: cudaq::solvers::stateprep::single_excitation +.. doxygenfunction:: cudaq::solvers::stateprep::double_excitation +.. doxygenfunction:: cudaq::solvers::stateprep::uccsd(cudaq::qview<>, const std::vector&, std::size_t, std::size_t) +.. doxygenfunction:: cudaq::solvers::stateprep::uccsd(cudaq::qview<>, const std::vector&, std::size_t) + + +.. doxygenstruct:: cudaq::solvers::qaoa_result + :members: +.. 
doxygenfunction:: cudaq::solvers::qaoa(const cudaq::spin_op &, const cudaq::spin_op &, const optim::optimizer &, std::size_t, const std::vector &, const heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::qaoa(const cudaq::spin_op &, const optim::optimizer &, std::size_t, const std::vector &, const heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::qaoa(const cudaq::spin_op &, std::size_t, const std::vector &, const heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::qaoa(const cudaq::spin_op &, const cudaq::spin_op &, std::size_t, const std::vector &, const heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::get_num_qaoa_parameters(const cudaq::spin_op &, const cudaq::spin_op &, std::size_t, const heterogeneous_map) +.. doxygenfunction:: cudaq::solvers::get_num_qaoa_parameters(const cudaq::spin_op &, std::size_t, const heterogeneous_map) diff --git a/docs/sphinx/api/solvers/python_api.rst b/docs/sphinx/api/solvers/python_api.rst new file mode 100644 index 0000000..27e4da3 --- /dev/null +++ b/docs/sphinx/api/solvers/python_api.rst @@ -0,0 +1,30 @@ +CUDA-Q Solvers Python API +****************************** + +.. automodule:: cudaq_solvers + :members: + +.. autofunction:: cudaq_solvers.jordan_wigner + +.. autoclass:: cudaq_solvers.MolecularHamiltonian + :members: + +.. autofunction:: cudaq_solvers.get_operator_pool + +.. autofunction:: cudaq_solvers.optim.optimize +.. autoclass:: cudaq_solvers.ObserveExecutionType + :members: + :undoc-members: +.. autoclass:: cudaq_solvers.ObserveIteration + :members: +.. autofunction:: cudaq_solvers.vqe +.. autofunction:: cudaq_solvers.adapt_vqe + +.. autofunction:: cudaq_solvers.stateprep.uccsd +.. autofunction:: cudaq_solvers.stateprep.single_excitation +.. autofunction:: cudaq_solvers.stateprep.double_excitation +.. autofunction:: cudaq_solvers.stateprep.get_num_uccsd_parameters +.. autofunction:: cudaq_solvers.stateprep.get_uccsd_excitations + +.. autofunction:: cudaq_solvers.get_num_qaoa_parameters + diff --git a/docs/sphinx/components/qec/introduction.rst b/docs/sphinx/components/qec/introduction.rst new file mode 100644 index 0000000..a12bbee --- /dev/null +++ b/docs/sphinx/components/qec/introduction.rst @@ -0,0 +1,967 @@ +CUDA-Q QEC - Quantum Error Correction Library +============================================= + +Overview +-------- +The ``cudaq-qec`` library provides a comprehensive framework for quantum +error correction research and development. It leverages GPU acceleration +for efficient syndrome decoding and error correction simulations (coming soon). + +Core Components +---------------- +``cudaq-qec`` is composed of two main interfaces - the :code:`cudaq::qec::code` and +:code:`cudaq::qec::decoder` types. These types are meant to be extended by developers +to provide new error correcting codes and new decoding strategies. + +QEC Code Framework :code:`cudaq::qec::code` +------------------------------------------- + +The :code:`cudaq::qec::code` class serves as the base class for all quantum error correcting codes in CUDA-Q QEC. It provides +a flexible extension point for implementing new codes and defines the core interface that all QEC codes must support. + +The core abstraction here is that of a mapping or dictionary of logical operations to their +corresponding physical implementation in the error correcting code as CUDA-Q quantum kernels. + +Class Structure +^^^^^^^^^^^^^^^ + +The code base class provides: + +1. **Operation Enumeration**: Defines supported logical operations + + .. 
code-block:: cpp + + enum class operation { + x, // Logical X gate + y, // Logical Y gate + z, // Logical Z gate + h, // Logical Hadamard gate + s, // Logical S gate + cx, // Logical CNOT gate + cy, // Logical CY gate + cz, // Logical CZ gate + stabilizer_round, // Stabilizer measurement round + prep0, // Prepare |0⟩ state + prep1, // Prepare |1⟩ state + prepp, // Prepare |+⟩ state + prepm // Prepare |-⟩ state + }; + + +2. **Patch Type**: Defines the structure of a logical qubit patch + + .. code-block:: cpp + + struct patch { + cudaq::qview<> data; // View of data qubits + cudaq::qview<> ancx; // View of X stabilizer ancilla qubits + cudaq::qview<> ancz; // View of Z stabilizer ancilla qubits + }; + + The `patch` type represents a logical qubit in quantum error correction codes. It contains: + - `data`: A view of the data qubits in the patch + - `ancx`: A view of the ancilla qubits used for X stabilizer measurements + - `ancz`: A view of the ancilla qubits used for Z stabilizer measurements + + This structure is designed for use within CUDA-Q kernel code and provides a + convenient way to access different qubit subsets within a logical qubit patch. + + +3. **Kernel Type Aliases**: Defines quantum kernel signatures + + .. code-block:: cpp + + using one_qubit_encoding = cudaq::qkernel; + using two_qubit_encoding = cudaq::qkernel; + using stabilizer_round = cudaq::qkernel( + patch, const std::vector&, const std::vector&)>; + +4. **Protected Members**: + + - :code:`operation_encodings`: Maps operations to their quantum kernel implementations. The key is the ``operation`` enum and the value is a variant on the above kernel type aliases. + - :code:`m_stabilizers`: Stores the code's stabilizer generators + +Implementing a New Code +^^^^^^^^^^^^^^^^^^^^^^^ + +To implement a new quantum error correcting code: + +1. **Create a New Class**: + + .. code-block:: cpp + + class my_code : public qec::code { + protected: + // Implement required virtual methods + public: + my_code(const heterogeneous_map& options); + }; + +2. **Implement Required Virtual Methods**: + + .. code-block:: cpp + + // Number of physical data qubits + std::size_t get_num_data_qubits() const override; + + // Total number of ancilla qubits + std::size_t get_num_ancilla_qubits() const override; + + // Number of X-type ancilla qubits + std::size_t get_num_ancilla_x_qubits() const override; + + // Number of Z-type ancilla qubits + std::size_t get_num_ancilla_z_qubits() const override; + +3. **Define Quantum Kernels**: + + Create CUDA-Q kernels for each logical operation: + + .. code-block:: cpp + + __qpu__ void x(patch p) { + // Implement logical X + } + + __qpu__ std::vector stabilizer(patch p, + const std::vector& x_stabs, + const std::vector& z_stabs) { + // Implement stabilizer measurements + } + +4. **Register Operations**: + + In the constructor, register quantum kernels for each operation: + + .. code-block:: cpp + + my_code::my_code(const heterogeneous_map& options) : code() { + // Register operations + operation_encodings.insert( + std::make_pair(operation::x, x)); + operation_encodings.insert( + std::make_pair(operation::stabilizer_round, stabilizer)); + + // Define stabilizer generators + m_stabilizers = qec::stabilizers({"XXXX", "ZZZZ"}); + } + + + Note that in your constructor, you have access to user-provided ``options``. For + example, if your code depends on an integer paramter called ``distance``, you can + retrieve that from the user via + + .. 
code-block:: cpp + + my_code::my_code(const heterogeneous_map& options) : code() { + // ... fill the map and stabilizers ... + + // Get the user-provided distance, or just + // set to 3 if user did not provide one + this->distance = options.get("distance", /*defaultValue*/ 3); + } + +5. **Register Extension Point**: + + Add extension point registration: + + .. code-block:: cpp + + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( + my_code, + static std::unique_ptr create( + const heterogeneous_map &options) { + return std::make_unique(options); + } + ) + + CUDAQ_REGISTER_TYPE(my_code) + +Example: Steane Code +^^^^^^^^^^^^^^^^^^^^^ + +The Steane [[7,1,3]] code provides a complete example implementation: + +1. **Header Definition**: + + - Declares quantum kernels for all logical operations + - Defines the code class with required virtual methods + - Specifies 7 data qubits and 6 ancilla qubits (3 X-type, 3 Z-type) + +2. **Implementation**: + + .. code-block:: cpp + + steane::steane(const heterogeneous_map &options) : code() { + // Register all logical operations + operation_encodings.insert( + std::make_pair(operation::x, x)); + // ... register other operations ... + + // Define stabilizer generators + m_stabilizers = qec::stabilizers({ + "XXXXIII", "IXXIXXI", "IIXXIXX", + "ZZZZIII", "IZZIZZI", "IIZZIZZ" + }); + } + +3. **Quantum Kernels**: + + Implements fault-tolerant logical operations: + + .. code-block:: cpp + + __qpu__ void x(patch logicalQubit) { + // Apply logical X to specific data qubits + x(logicalQubit.data[4], logicalQubit.data[5], + logicalQubit.data[6]); + } + + __qpu__ std::vector stabilizer(patch logicalQubit, + const std::vector& x_stabilizers, + const std::vector& z_stabilizers) { + // Measure X stabilizers + h(logicalQubit.ancx); + // ... apply controlled-X gates ... + h(logicalQubit.ancx); + + // Measure Z stabilizers + // ... apply controlled-X gates ... + + // Return measurement results + return mz(logicalQubit.ancz, logicalQubit.ancx); + } + +Implementing a New Code in Python +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +CUDA-Q QEC supports implementing quantum error correction codes in Python +using the :code:`@qec.code` decorator. This provides a more accessible way +to prototype and develop new codes. + +1. **Create a New Python File**: + + Create a new file (e.g., :code:`my_steane.py`) with your code implementation: + + .. code-block:: python + + import cudaq + import cudaq_qec as qec + from cudaq_qec import patch + +2. **Define Quantum Kernels**: + + Implement the required quantum kernels using the :code:`@cudaq.kernel` decorator: + + .. code-block:: python + + @cudaq.kernel + def prep0(logicalQubit: patch): + h(logicalQubit.data[0], logicalQubit.data[4], logicalQubit.data[6]) + x.ctrl(logicalQubit.data[0], logicalQubit.data[1]) + x.ctrl(logicalQubit.data[4], logicalQubit.data[5]) + # ... additional initialization gates ... 
+ + @cudaq.kernel + def stabilizer(logicalQubit: patch, + x_stabilizers: list[int], + z_stabilizers: list[int]) -> list[bool]: + # Measure X stabilizers + h(logicalQubit.ancx) + for xi in range(len(logicalQubit.ancx)): + for di in range(len(logicalQubit.data)): + if x_stabilizers[xi * len(logicalQubit.data) + di] == 1: + x.ctrl(logicalQubit.ancx[xi], logicalQubit.data[di]) + h(logicalQubit.ancx) + + # Measure Z stabilizers + for zi in range(len(logicalQubit.ancx)): + for di in range(len(logicalQubit.data)): + if z_stabilizers[zi * len(logicalQubit.data) + di] == 1: + x.ctrl(logicalQubit.data[di], logicalQubit.ancz[zi]) + + # Get and reset ancillas + results = mz(logicalQubit.ancz, logicalQubit.ancx) + reset(logicalQubit.ancx) + reset(logicalQubit.ancz) + return results + +3. **Implement the Code Class**: + + Create a class decorated with :code:`@qec.code` that implements the required interface: + + .. code-block:: python + + @qec.code('py-steane-example') + class MySteaneCodeImpl: + def __init__(self, **kwargs): + qec.Code.__init__(self, **kwargs) + + # Define stabilizer generators + self.stabilizers = qec.Stabilizers([ + "XXXXIII", "IXXIXXI", "IIXXIXX", + "ZZZZIII", "IZZIZZI", "IIZZIZZ" + ]) + + # Register quantum kernels + self.operation_encodings = { + qec.operation.prep0: prep0, + qec.operation.stabilizer_round: stabilizer + } + + def get_num_data_qubits(self): + return 7 + + def get_num_ancilla_x_qubits(self): + return 3 + + def get_num_ancilla_z_qubits(self): + return 3 + + def get_num_ancilla_qubits(self): + return 6 + +4. **Install the Code**: + + Install your Python-implemented code using :code:`cudaqx-config`: + + .. code-block:: bash + + cudaqx-config --install-code my_steane.py + +5. **Using the Code**: + + The code can now be used like any other CUDA-Q QEC code: + + .. code-block:: python + + import cudaq_qec as qec + + # Create instance of your code + code = qec.get_code('py-steane-example') + + # Use the code for various numerical experiments + +Key Points +^^^^^^^^^^^ + +* The :code:`@qec.code` decorator takes the name of the code as an argument +* Operation encodings are registered via the :code:`operation_encodings` dictionary +* Stabilizer generators are defined using the :code:`qec.Stabilizers` class +* The code must implement all required methods from the base class interface + + +Using the Code Framework +^^^^^^^^^^^^^^^^^^^^^^^^^ + +To use an implemented code: + +.. tab:: Python + + .. code-block:: python + + # Create a code instance + code = qec.get_code("steane") + + # Access stabilizer information + stabilizers = code.get_stabilizers() + parity = code.get_parity() + + # The code can now be used for various numerical + # experiments - see section below. + +.. tab:: C++ + + .. code-block:: cpp + + // Create a code instance + auto code = cudaq::qec::get_code("steane"); + + // Access stabilizer information + auto stabilizers = code->get_stabilizers(); + auto parity = code->get_parity(); + + // The code can now be used for various numerical + // experiments - see section below. + +Pre-built Quantum Error Correction Codes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +CUDA-Q QEC provides several well-studied quantum error correction codes out of the box. Here's a detailed overview of each: + +Steane Code +~~~~~~~~~~~ + +The Steane code is a ``[[7,1,3]]`` CSS (Calderbank-Shor-Steane) code that encodes +one logical qubit into seven physical qubits with a code distance of 3. 
+
+**Key Properties**:
+
+* Data qubits: 7
+* Encoded qubits: 1
+* Code distance: 3
+* Ancilla qubits: 6 (3 for X stabilizers, 3 for Z stabilizers)
+
+**Stabilizer Generators**:
+
+* X-type: ``["XXXXIII", "IXXIXXI", "IIXXIXX"]``
+* Z-type: ``["ZZZZIII", "IZZIZZI", "IIZZIZZ"]``
+
+The Steane code can correct any single-qubit error and detect up to two errors.
+It is particularly notable as the smallest CSS code that implements the full Clifford group transversally.
+
+Usage:
+
+.. tab:: Python
+
+   .. code-block:: python
+
+      import cudaq_qec as qec
+
+      # Create Steane code instance
+      steane = qec.get_code("steane")
+
+.. tab:: C++
+
+   .. code-block:: cpp
+
+      auto steane = cudaq::qec::get_code("steane");
+
+Repetition Code
+~~~~~~~~~~~~~~~
+The repetition code is a simple [[n,1,n]] code that protects against
+bit-flip (X) errors by encoding one logical qubit into n physical qubits, where n is the code distance.
+
+**Key Properties**:
+
+* Data qubits: n (distance)
+* Encoded qubits: 1
+* Code distance: n
+* Ancilla qubits: n-1 (all for Z stabilizers)
+
+**Stabilizer Generators**:
+
+* For distance 3: ``["ZZI", "IZZ"]``
+* For distance 5: ``["ZZIII", "IZZII", "IIZZI", "IIIZZ"]``
+
+The repetition code is primarily educational as it can only correct
+X errors. However, it serves as an excellent introduction to QEC concepts.
+
+Usage:
+
+.. tab:: Python
+
+   .. code-block:: python
+
+      import cudaq_qec as qec
+
+      # Create distance-3 repetition code
+      code = qec.get_code('repetition', distance=3)
+
+      # Access stabilizers
+      stabilizers = code.get_stabilizers()  # Returns ["ZZI", "IZZ"]
+
+.. tab:: C++
+
+   .. code-block:: cpp
+
+      auto code = qec::get_code("repetition", {{"distance", 3}});
+
+      // Access stabilizers
+      auto stabilizers = code->get_stabilizers();
+
+
+Decoder Framework :code:`cudaq::qec::decoder`
+----------------------------------------------
+
+The CUDA-Q QEC decoder framework provides an extensible system for implementing
+quantum error correction decoders through the :code:`cudaq::qec::decoder` base class.
+
+Class Structure
+^^^^^^^^^^^^^^^
+
+The decoder base class defines the core interface for syndrome decoding:
+
+.. code-block:: cpp
+
+    class decoder {
+    protected:
+      std::size_t block_size;     // For [n,k] code, this is n
+      std::size_t syndrome_size;  // For [n,k] code, this is n-k
+      tensor<uint8_t> H;          // Parity check matrix
+
+    public:
+      struct decoder_result {
+        bool converged;              // Decoder convergence status
+        std::vector<float_t> result; // Soft error probabilities
+      };
+
+      virtual decoder_result decode(
+          const std::vector<float_t>& syndrome) = 0;
+
+      virtual std::vector<decoder_result> decode_multi(
+          const std::vector<std::vector<float_t>>& syndrome);
+    };
+
+Key Components:
+
+* **Parity Check Matrix**: Defines the code structure via :code:`H`
+* **Block Size**: Number of physical qubits in the code
+* **Syndrome Size**: Number of stabilizer measurements
+* **Decoder Result**: Contains convergence status and error probabilities
+* **Multiple Decoding Modes**: Single syndrome or batch processing
+
+Implementing a New Decoder in C++
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To implement a new decoder:
+
+1. **Create Decoder Class**:
+
+.. code-block:: cpp
+
+    class my_decoder : public qec::decoder {
+    private:
+      // Decoder-specific members
+
+    public:
+      my_decoder(const tensor<uint8_t>& H,
+                 const heterogeneous_map& params)
+          : decoder(H) {
+        // Initialize decoder
+      }
+
+      decoder_result decode(
+          const std::vector<float_t>& syndrome) override {
+        // Implement decoding logic
+      }
+    };
+
+2. **Register Extension Point**:
+
+.. code-block:: cpp
+
+    CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION(
+        my_decoder,
+        static std::unique_ptr<decoder> create(
+            const tensor<uint8_t>& H,
+            const heterogeneous_map& params) {
+          return std::make_unique<my_decoder>(H, params);
+        }
+    )
+
+    CUDAQ_REGISTER_TYPE(my_decoder)
+
+Example: Lookup Table Decoder
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Here's a simple lookup table decoder for the Steane code:
+
+.. code-block:: cpp
+
+    class single_error_lut : public decoder {
+    private:
+      std::map<std::string, std::size_t> single_qubit_err_signatures;
+
+    public:
+      single_error_lut(const tensor<uint8_t>& H,
+                       const heterogeneous_map& params)
+          : decoder(H) {
+        // Build lookup table for single-qubit errors
+        for (std::size_t qErr = 0; qErr < block_size; qErr++) {
+          std::string err_sig(syndrome_size, '0');
+          for (std::size_t r = 0; r < syndrome_size; r++) {
+            bool syndrome = 0;
+            for (std::size_t c = 0; c < block_size; c++)
+              syndrome ^= (c == qErr) && H.at({r, c});
+            err_sig[r] = syndrome ? '1' : '0';
+          }
+          single_qubit_err_signatures.insert({err_sig, qErr});
+        }
+      }
+
+      decoder_result decode(
+          const std::vector<float_t>& syndrome) override {
+        decoder_result result{false,
+                              std::vector<float_t>(block_size, 0.0)};
+
+        // Convert syndrome to string
+        std::string syndrome_str(syndrome_size, '0');
+        for (std::size_t i = 0; i < syndrome_size; i++)
+          syndrome_str[i] = (syndrome[i] >= 0.5) ? '1' : '0';
+
+        // Lookup error location
+        auto it = single_qubit_err_signatures.find(syndrome_str);
+        if (it != single_qubit_err_signatures.end()) {
+          result.converged = true;
+          result.result[it->second] = 1.0;
+        }
+
+        return result;
+      }
+    };
+
+Implementing a Decoder in Python
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+CUDA-Q QEC supports implementing decoders in Python using the :code:`@qec.decoder` decorator:
+
+1. **Create Decoder Class**:
+
+.. code-block:: python
+
+    @qec.decoder("my_decoder")
+    class MyDecoder:
+        def __init__(self, H, **kwargs):
+            qec.Decoder.__init__(self, H)
+            self.H = H
+            # Initialize with optional kwargs
+
+        def decode(self, syndrome):
+            # Create result object
+            result = qec.DecoderResult()
+
+            # Implement decoding logic
+            # ...
+
+            # Set results
+            result.converged = True
+            result.result = [0.0] * self.block_size
+
+            return result
+
+2. **Using Custom Parameters**:
+
+.. code-block:: python
+
+    # Create decoder with custom parameters
+    decoder = qec.get_decoder("my_decoder",
+                              H=parity_check_matrix,
+                              custom_param=42)
+
+Key Features
+^^^^^^^^^^^^^
+
+* **Soft Decision Decoding**: Results are probabilities in [0,1]
+* **Batch Processing**: Support for decoding multiple syndromes
+* **Asynchronous Decoding**: Optional async interface for parallel processing
+* **Custom Parameters**: Flexible configuration via heterogeneous_map
+* **Python Integration**: First-class support for Python implementations
+
+Usage Example
+^^^^^^^^^^^^^^
+
+.. tab:: Python
+
+   .. code-block:: python
+
+      import cudaq_qec as qec
+
+      # Get a code instance
+      code = qec.get_code('steane')
+
+      # Create decoder with code's parity matrix
+      decoder = qec.get_decoder('single_error_lut',
+                                H=code.get_parity())
+
+      # Run stabilizer measurements
+      numShots, numRounds = 1000, 2
+      syndromes, dataQubitResults = qec.sample_memory_circuit(code, numShots, numRounds)
+
+      # Decode syndrome
+      result = decoder.decode(syndromes[0])
+      if result.converged:
+          print("Error locations:",
+                [i for i, p in enumerate(result.result) if p > 0.5])
+
+.. tab:: C++
+
+   .. code-block:: cpp
+
+      using namespace cudaq;
+
+      // Get a code instance
+      auto code = qec::get_code("steane");
+
+      // Create decoder with code's parity matrix
+      auto decoder = qec::get_decoder("single_error_lut",
+                                      code->get_parity());
+
+      // Run stabilizer measurements
+      auto [syndromes, dataQubitResults] =
+          qec::sample_memory_circuit(*code, /*numShots*/ 1000, /*numRounds*/ 1);
+
+      // Decode syndrome
+      auto result = decoder->decode(syndromes[0]);
+
+
+Numerical Experiments
+---------------------
+
+CUDA-Q QEC provides utilities for running numerical experiments with quantum error correction codes.
+
+Conventions
+^^^^^^^^^^^
+
+To address vectors of qubits (`cudaq::qvector`), CUDA-Q indexing starts from 0, and 0 corresponds
+to the leftmost position when working with Pauli strings (`cudaq::spin_op`). For example, applying a Pauli X operator
+to qubit 1 out of 7 would be `X_1 = IXIIIII`.
+
+While implementing your own codes and decoders, you are free to follow any convention that is convenient to you. However,
+to interact with the pre-built QEC codes and decoders within this library, the following conventions are used. All of these codes
+are CSS codes, and so we separate :math:`X`-type and :math:`Z`-type errors. For example, an error vector for 3 qubits will
+have 6 entries: 3 bits representing the presence of a bit-flip on each qubit, and 3 bits representing a phase-flip on each qubit.
+An error vector representing a bit-flip on qubit 0 and a phase-flip on qubit 1 would look like `E = 100010`. This means that this
+error vector is just two error vectors (`E_X, E_Z`) concatenated together (`E = E_X | E_Z`).
+
+These errors are detected by stabilizers. :math:`Z`-stabilizers detect :math:`X`-type errors and vice versa. Thus we write our
+CSS parity check matrices as
+
+.. math::
+   H_{CSS} = \begin{pmatrix}
+   H_Z & 0 \\
+   0 & H_X
+   \end{pmatrix},
+
+so that when we generate a syndrome vector by multiplying the parity check matrix by an error vector we get
+
+.. math::
+   \begin{align}
+   S &= H \cdot E\\
+   S_X &= H_Z \cdot E_X\\
+   S_Z &= H_X \cdot E_Z.
+   \end{align}
+
+This means that for the concatenated syndrome vector `S = S_X | S_Z`, the first part, `S_X`, contains the syndrome bits triggered by `Z`
+stabilizers detecting `X` errors. This is because `Z` stabilizers like `ZZI` and `IZZ` anti-commute with `X` errors like
+`IXI`.
+
+The decoder prediction as to what error happened is `D = D_X | D_Z`. A successful error decoding does not require that `D = E`,
+but that `D + E` is not a logical operator. There are a couple of ways to check this.
+For bit-flip errors, we check that the residual error `R = D_X + E_X` is not `L_X`. Since `X` anti-commutes
+with `Z`, we can check that `L_Z(D_X + E_X) = 0`. This is because we just need to check if they have mutual support on an even
+or odd number of qubits. We could also check that `R` is not a stabilizer.
+
+Similar to the parity check matrix, the logical observables are also stored in a matrix as
+
+.. math::
+   L = \begin{pmatrix}
+   L_Z & 0 \\
+   0 & L_X
+   \end{pmatrix},
+
+so that when determining logical errors, we can do matrix multiplication
+
+.. math::
+   \begin{align}
+   P &= L \cdot R\\
+   P_X &= L_Z \cdot R_X\\
+   P_Z &= L_X \cdot R_Z.
+   \end{align}
+
+Here we're using `P` as this can be stored in a Pauli frame tracker to track observable flips.
+
+Each logical qubit has logical observables associated with it. Depending on what basis the data qubits are measured in, either the
+`X` or `Z` logical observables can be measured.
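+
+To make these conventions concrete, here is a minimal, self-contained sketch (plain NumPy only; the
+distance-3 repetition code's `H_Z` and `L_Z` are written out by hand for illustration rather than taken
+from the library) that computes a syndrome from a single bit-flip error and checks the decoder's
+prediction for a logical flip:
+
+.. code-block:: python
+
+    import numpy as np
+
+    # Distance-3 repetition code: Z stabilizers ZZI and IZZ detect bit flips.
+    Hz = np.array([[1, 1, 0],
+                   [0, 1, 1]], dtype=np.uint8)
+    # Logical Z observable is supported on all three data qubits.
+    Lz = np.array([[1, 1, 1]], dtype=np.uint8)
+
+    # X error on qubit 0 (the E_X half of the error vector).
+    E_X = np.array([1, 0, 0], dtype=np.uint8)
+
+    # Syndrome S_X = H_Z . E_X (mod 2)
+    S_X = (Hz @ E_X) % 2          # -> [1, 0]
+
+    # Suppose the decoder predicts D_X; here it guesses the error exactly.
+    D_X = np.array([1, 0, 0], dtype=np.uint8)
+
+    # Residual error and logical check: P_X = L_Z . (D_X + E_X) (mod 2)
+    R = (D_X + E_X) % 2
+    P_X = (Lz @ R) % 2            # -> [0], i.e. no logical flip
+
+A zero `P_X` means the residual error commutes with the logical observable, so this shot does not count
+as a logical error.
+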
+The data qubits that support each logical observable are also available from the `qec::code` class.
+
+To do a logical `Z(X)` measurement, measure out all of the data qubits in the `Z(X)` basis. Then check support on the appropriate
+`Z(X)` observable.
+
+
+Memory Circuit Experiments
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Memory circuit experiments test a QEC code's ability to preserve quantum information over time by:
+
+1. Preparing an initial logical state
+2. Performing multiple rounds of stabilizer measurements
+3. Measuring data qubits to verify state preservation
+4. Optionally applying noise during the process
+
+Function Variants
+~~~~~~~~~~~~~~~~~
+
+.. tab:: Python
+
+   .. code-block:: python
+
+      import cudaq_qec as qec
+
+      # Basic memory circuit with |0⟩ state
+      syndromes, measurements = qec.sample_memory_circuit(
+          code,           # QEC code instance
+          numShots=1000,  # Number of circuit executions
+          numRounds=1     # Number of stabilizer rounds
+      )
+
+      # Memory circuit with custom initial state
+      syndromes, measurements = qec.sample_memory_circuit(
+          code,                            # QEC code instance
+          state_prep=qec.operation.prep1,  # Initial state
+          numShots=1000,                   # Number of shots
+          numRounds=1                      # Number of rounds
+      )
+
+      # Memory circuit with noise model
+      noise = cudaq.NoiseModel()
+      noise.add_channel(...)  # Configure noise
+      syndromes, measurements = qec.sample_memory_circuit(
+          code,           # QEC code instance
+          numShots=1000,  # Number of shots
+          numRounds=1,    # Number of rounds
+          noise=noise     # Noise model
+      )
+
+.. tab:: C++
+
+   .. code-block:: cpp
+
+      // Basic memory circuit with |0⟩ state
+      auto [syndromes, measurements] = qec::sample_memory_circuit(
+          code,      // QEC code instance
+          numShots,  // Number of circuit executions
+          numRounds  // Number of stabilizer rounds
+      );
+
+      // Memory circuit with custom initial state
+      auto [syndromes, measurements] = qec::sample_memory_circuit(
+          code,              // QEC code instance
+          operation::prep1,  // Initial state preparation
+          numShots,          // Number of circuit executions
+          numRounds          // Number of stabilizer rounds
+      );
+
+      // Memory circuit with noise model
+      auto noise_model = cudaq::noise_model();
+      noise_model.add_channel(...);  // Configure noise
+      auto [syndromes, measurements] = qec::sample_memory_circuit(
+          code,        // QEC code instance
+          numShots,    // Number of circuit executions
+          numRounds,   // Number of stabilizer rounds
+          noise_model  // Noise model to apply
+      );
+
+Return Values
+~~~~~~~~~~~~~
+
+The functions return a tuple containing:
+
+1. **Syndrome Measurements** (:code:`tensor`):
+
+   * Shape: :code:`(num_shots, (num_rounds-1) * syndrome_size)`
+   * Contains stabilizer measurement results
+   * Values are 0 or 1 representing measurement outcomes
+
+2. **Data Measurements** (:code:`tensor`):
+
+   * Shape: :code:`(num_shots, block_size)`
+   * Contains final data qubit measurements
+   * Used to verify logical state preservation
+
+Example Usage
+~~~~~~~~~~~~~
+
+Here's a complete example of running a memory experiment:
+
+.. tab:: Python
+
+   .. code-block:: python
+
+      import cudaq
+      import cudaq_qec as qec
+
+      # Create code and decoder
+      code = qec.get_code('steane')
+      decoder = qec.get_decoder('single_error_lut',
+                                code.get_parity())
+
+      # Configure noise
+      noise = cudaq.NoiseModel()
+      noise.add_all_qubit_channel('x', cudaq.DepolarizationChannel(0.001))
+
+      # Run memory experiment
+      syndromes, measurements = qec.sample_memory_circuit(
+          code,
+          state_prep=qec.operation.prep0,
+          num_shots=1000,
+          num_rounds=10,
+          noise=noise
+      )
+
+      # Analyze results
+      for shot in range(1000):
+          # Get syndrome for this shot
+          syndrome = syndromes[shot].tolist()
+
+          # Decode syndrome
+          result = decoder.decode(syndrome)
+          if result.converged:
+              # Process correction
+              pass
+
+.. tab:: C++
+
+   .. code-block:: cpp
+
+      // Top of file
+      #include "cudaq/qec/experiments.h"
+
+      // Create a Steane code instance
+      auto code = cudaq::qec::get_code("steane");
+
+      // Configure noise model
+      auto noise = cudaq::noise_model();
+      noise.add_all_qubit_channel("x", cudaq::qec::two_qubit_depolarization(0.1),
+                                  /*num_controls=*/1);
+
+      // Run memory experiment
+      auto [syndromes, measurements] = qec::sample_memory_circuit(
+          *code,             // Code instance
+          operation::prep0,  // Prepare |0⟩ state
+          1000,              // 1000 shots
+          10,                // 10 rounds
+          noise              // Apply noise
+      );
+
+      // Analyze results
+      auto decoder = qec::get_decoder("single_error_lut", code->get_parity());
+      for (std::size_t shot = 0; shot < 1000; shot++) {
+        // Get syndrome for this shot
+        std::vector<cudaq::qec::float_t> syndrome(code->get_syndrome_size());
+        for (std::size_t i = 0; i < syndrome.size(); i++)
+          syndrome[i] = syndromes.at({shot, i});
+
+        // Decode syndrome
+        auto result = decoder->decode(syndrome);
+        // Process correction
+        // ...
+      }
+
+Additional Noise Models
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. tab:: Python
+
+   .. code-block:: python
+
+      noise = cudaq.NoiseModel()
+      p = 0.01  # error probability
+
+      # Add multiple error channels
+      noise.add_all_qubit_channel('h', cudaq.BitFlipChannel(0.001))
+
+      # Specify two qubit errors
+      noise.add_all_qubit_channel("x", qec.TwoQubitDepolarization(p), 1)
+
+.. tab:: C++
+
+   .. code-block:: cpp
+
+      cudaq::noise_model noise;
+
+      // Add multiple error channels
+      noise.add_all_qubit_channel(
+          "x", cudaq::BitFlipChannel(/*probability*/ 0.01));
+
+      // Specify two qubit errors
+      noise.add_all_qubit_channel(
+          "x", cudaq::qec::two_qubit_depolarization(/*probability*/ 0.01),
+          /*numControls*/ 1);
+
diff --git a/docs/sphinx/components/solvers/introduction.rst b/docs/sphinx/components/solvers/introduction.rst
new file mode 100644
index 0000000..0bbc36c
--- /dev/null
+++ b/docs/sphinx/components/solvers/introduction.rst
@@ -0,0 +1,554 @@
+CUDA-Q Solvers Library
+=======================
+
+Overview
+--------
+The CUDA-Q Solvers library provides high-level quantum-classical hybrid
+algorithms and supporting infrastructure for quantum chemistry and
+optimization problems. It features implementations of VQE, ADAPT-VQE,
+and supporting utilities for Hamiltonian generation and operator pool management.
+
+Core Components
+-----------------
+
+1. **Variational Algorithms**:
+
+   * Variational Quantum Eigensolver (VQE)
+   * Adaptive Derivative-Assembled Pseudo-Trotter VQE (ADAPT-VQE)
+
+2. **Quantum Chemistry Tools**:
+
+   * Molecular Hamiltonian Generation
+   * One-Particle Operator Creation
+   * Geometry Management
+
+3.
**Operator Infrastructure**: + + * Operator Pool Generation + * Fermion-to-Qubit Mappings + * Gradient Computation + +Operator Infrastructure +------------------------ + +Molecular Hamiltonian Options +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :code:`molecule_options` structure provides extensive configuration for molecular calculations in CUDA-QX. + ++---------------------+---------------+------------------+------------------------------------------+ +| Option | Type | Default | Description | ++=====================+===============+==================+==========================================+ +| driver | string | "RESTPySCFDriver"| Quantum chemistry driver backend | ++---------------------+---------------+------------------+------------------------------------------+ +| fermion_to_spin | string | "jordan_wigner" | Fermionic to qubit operator mapping | ++---------------------+---------------+------------------+------------------------------------------+ +| type | string | "gas_phase" | Type of molecular system | ++---------------------+---------------+------------------+------------------------------------------+ +| symmetry | bool | false | Use molecular symmetry | ++---------------------+---------------+------------------+------------------------------------------+ +| memory | double | 4000.0 | Memory allocation (MB) | ++---------------------+---------------+------------------+------------------------------------------+ +| cycles | size_t | 100 | Maximum SCF cycles | ++---------------------+---------------+------------------+------------------------------------------+ +| initguess | string | "minao" | Initial SCF guess method | ++---------------------+---------------+------------------+------------------------------------------+ +| UR | bool | false | Enable unrestricted calculations | ++---------------------+---------------+------------------+------------------------------------------+ +| nele_cas | optional | nullopt | Number of electrons in active space | +| | | | | ++---------------------+---------------+------------------+------------------------------------------+ +| norb_cas | optional | nullopt | Number of spatial orbitals in | +| | | | in active space | ++---------------------+---------------+------------------+------------------------------------------+ +| MP2 | bool | false | Enable MP2 calculations | ++---------------------+---------------+------------------+------------------------------------------+ +| natorb | bool | false | Use natural orbitals | ++---------------------+---------------+------------------+------------------------------------------+ +| casci | bool | false | Perform CASCI calculations | ++---------------------+---------------+------------------+------------------------------------------+ +| ccsd | bool | false | Perform CCSD calculations | ++---------------------+---------------+------------------+------------------------------------------+ +| casscf | bool | false | Perform CASSCF calculations | ++---------------------+---------------+------------------+------------------------------------------+ +| integrals_natorb | bool | false | Use natural orbitals for integrals | ++---------------------+---------------+------------------+------------------------------------------+ +| integrals_casscf | bool | false | Use CASSCF orbitals for integrals | ++---------------------+---------------+------------------+------------------------------------------+ +| potfile | optional | nullopt | Path to external potential file | +| | | | | 
++---------------------+---------------+------------------+------------------------------------------+ +| verbose | bool | false | Enable detailed output logging | ++---------------------+---------------+------------------+------------------------------------------+ + +Example Usage +^^^^^^^^^^^^^ + +.. tab:: Python + + .. code-block:: python + + import cudaq_solvers as solvers + + # Configure molecular options + options = { + 'fermion_to_spin': 'jordan_wigner', + 'casci': True, + 'memory': 8000.0, + 'verbose': True + } + + # Create molecular Hamiltonian + molecule = solvers.create_molecule( + geometry=[('H', (0., 0., 0.)), + ('H', (0., 0., 0.7474))], + basis='sto-3g', + spin=0, + charge=0, + **options + ) + +.. tab:: C++ + + .. code-block:: cpp + + using namespace cudaq::solvers; + + // Configure molecular options + molecule_options options; + options.fermion_to_spin = "jordan_wigner"; + options.casci = true; + options.memory = 8000.0; + options.verbose = true; + + // Create molecular geometry + auto geometry = molecular_geometry({ + atom{"H", {0.0, 0.0, 0.0}}, + atom{"H", {0.0, 0.0, 0.7474}} + }); + + // Create molecular Hamiltonian + auto molecule = create_molecule( + geometry, + "sto-3g", + 0, // spin + 0, // charge + options + ); + +Variational Quantum Eigensolver (VQE) +-------------------------------------- + +The VQE algorithm finds the minimum eigenvalue of a +Hamiltonian using a hybrid quantum-classical approach. + +VQE Examples +------------- + +The VQE implementation supports multiple usage patterns with different levels of customization. + +Basic Usage +^^^^^^^^^^^ + +.. tab:: Python + + .. code-block:: python + + import cudaq + from cudaq import spin + import cudaq_solvers as solvers + + # Define quantum kernel (ansatz) + @cudaq.kernel + def ansatz(theta: float): + q = cudaq.qvector(2) + x(q[0]) + ry(theta, q[1]) + x.ctrl(q[1], q[0]) + + # Define Hamiltonian + H = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - \ + 2.1433 * spin.y(0) * spin.y(1) + \ + 0.21829 * spin.z(0) - 6.125 * spin.z(1) + + # Run VQE with defaults (cobyla optimizer) + energy, parameters, data = solvers.vqe( + lambda thetas: ansatz(thetas[0]), + H, + initial_parameters=[0.0], + verbose=True + ) + print(f"Ground state energy: {energy}") + +.. tab:: C++ + + .. code-block:: cpp + + #include "cudaq.h" + + #include "cudaq/solvers/operators.h" + #include "cudaq/solvers/vqe.h" + + // Define quantum kernel + struct ansatz { + void operator()(std::vector theta) __qpu__ { + cudaq::qvector q(2); + x(q[0]); + ry(theta[0], q[1]); + x(q[1], q[0]); + } + }; + + // Create Hamiltonian + auto H = 5.907 - 2.1433 * x(0) * x(1) - + 2.1433 * y(0) * y(1) + + 0.21829 * z(0) - 6.125 * z(1); + + // Run VQE with default optimizer + auto result = cudaq::solvers::vqe( + ansatz{}, + H, + {0.0}, // Initial parameters + {{"verbose", true}} + ); + printf("Ground state energy: %lf\n", result.energy); + +Custom Optimization +^^^^^^^^^^^^^^^^^^^ + +.. tab:: Python + + .. 
code-block:: python + + # Using L-BFGS-B optimizer with parameter-shift gradients + energy, parameters, data = solvers.vqe( + lambda thetas: ansatz(thetas[0]), + H, + initial_parameters=[0.0], + optimizer='lbfgs', + gradient='parameter_shift', + verbose=True + ) + + # Using SciPy optimizer directly + from scipy.optimize import minimize + + def callback(xk): + exp_val = cudaq.observe(ansatz, H, xk[0]).expectation() + print(f"Energy at iteration: {exp_val}") + + energy, parameters, data = solvers.vqe( + lambda thetas: ansatz(thetas[0]), + H, + initial_parameters=[0.0], + optimizer=minimize, + callback=callback, + method='L-BFGS-B', + jac='3-point', + tol=1e-4, + options={'disp': True} + ) + +.. tab:: C++ + + .. code-block:: cpp + + // Using L-BFGS optimizer with central difference gradients + auto optimizer = cudaq::optim::optimizer::get("lbfgs"); + auto gradient = cudaq::observe_gradient::get( + "central_difference", + ansatz{}, + H + ); + + auto result = cudaq::solvers::vqe( + ansatz{}, + H, + *optimizer, + *gradient, + {0.0}, // Initial parameters + {{"verbose", true}} + ); + +Shot-based Simulation +^^^^^^^^^^^^^^^^^^^^^ + +.. tab:: Python + + .. code-block:: python + + # Run VQE with finite shots + energy, parameters, data = solvers.vqe( + lambda thetas: ansatz(thetas[0]), + H, + initial_parameters=[0.0], + shots=10000, + max_iterations=10, + verbose=True + ) + + # Analyze measurement data + for iteration in data: + counts = iteration.result.counts() + print("\nMeasurement counts:") + print("XX basis:", counts.get_register_counts('XX')) + print("YY basis:", counts.get_register_counts('YY')) + print("ZI basis:", counts.get_register_counts('ZI')) + print("IZ basis:", counts.get_register_counts('IZ')) + +.. tab:: C++ + + .. code-block:: cpp + + // Run VQE with finite shots + auto optimizer = cudaq::optim::optimizer::get("lbfgs"); + auto gradient = cudaq::observe_gradient::get( + "parameter_shift", + ansatz{}, + H + ); + + auto result = cudaq::solvers::vqe( + ansatz{}, + H, + *optimizer, + *gradient, + {0.0}, + { + {"shots", 10000}, + {"verbose", true} + } + ); + + // Analyze measurement data + for (auto& iteration : result.iteration_data) { + std::cout << "Iteration type: " + << (iteration.type == observe_execution_type::gradient + ? "gradient" : "function") + << "\n"; + iteration.result.dump(); + } + +ADAPT-VQE +--------- + +The Adaptive Derivative-Assembled Pseudo-Trotter Variational Quantum Eigensolver (ADAPT-VQE) +is an advanced quantum algorithm that dynamically builds a problem-tailored ansatz +based on operator gradients. + +Key Features +^^^^^^^^^^^^ + +* Dynamic ansatz construction +* Gradient-based operator selection +* Automatic termination criteria +* Support for various operator pools +* Compatible with multiple optimizers + +Basic Usage +^^^^^^^^^^^^ + +.. tab:: Python + + .. 
code-block:: python + + import cudaq + import cudaq_solvers as solvers + + # Define molecular geometry + geometry = [ + ('H', (0., 0., 0.)), + ('H', (0., 0., 0.7474)) + ] + + # Create molecular Hamiltonian + molecule = solvers.create_molecule( + geometry, + 'sto-3g', + spin=0, + charge=0, + casci=True + ) + + # Generate operator pool + operators = solvers.get_operator_pool( + "spin_complement_gsd", + num_orbitals=molecule.n_orbitals + ) + + numElectrons = molecule.n_electrons + + # Define initial state preparation + @cudaq.kernel + def initial_state(q: cudaq.qview): + for i in range(numElectrons): + x(q[i]) + + # Run ADAPT-VQE + energy, parameters, operators = solvers.adapt_vqe( + initial_state, + molecule.hamiltonian, + operators, + verbose=True + ) + print(f"Ground state energy: {energy}") + +.. tab:: C++ + + .. code-block:: cpp + + #include "cudaq/solvers/adapt.h" + #include "cudaq/solvers/operators.h" + + // compile with + // nvq++ adaptEx.cpp --enable-mlir -lcudaq-solvers + // ./a.out + + int main() { + // Define initial state preparation + auto initial_state = [](cudaq::qvector<>& q) __qpu__ { + for (std::size_t i = 0; i < 2; ++i) + x(q[i]); + }; + + // Create Hamiltonian (H2 molecule example) + cudaq::solvers::molecular_geometry geometry{{"H", {0., 0., 0.}}, + {"H", {0., 0., .7474}}}; + auto molecule = cudaq::solvers::create_molecule( + geometry, "sto-3g", 0, 0, {.casci = true, .verbose = true}); + + auto h = molecule.hamiltonian; + + // Generate operator pool + auto pool = cudaq::solvers::operator_pool::get( + "spin_complement_gsd"); + auto operators = pool->generate({ + {"num-orbitals", h.num_qubits() / 2} + }); + + // Run ADAPT-VQE + auto [energy, parameters, selected_ops] = + cudaq::solvers::adapt_vqe( + initial_state, + h, + operators, + { + {"grad_norm_tolerance", 1e-3}, + {"verbose", true} + } + ); + } + +Advanced Usage +^^^^^^^^^^^^^^^ + +Custom Optimization Settings +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. tab:: Python + + .. code-block:: python + + # Using L-BFGS-B optimizer with central difference gradients + energy, parameters, operators = solvers.adapt_vqe( + initial_state, + molecule.hamiltonian, + operators, + optimizer='lbfgs', + gradient='central_difference', + verbose=True + ) + + # Using SciPy optimizer directly + from scipy.optimize import minimize + energy, parameters, operators = solvers.adapt_vqe( + initial_state, + molecule.hamiltonian, + operators, + optimizer=minimize, + method='L-BFGS-B', + jac='3-point', + tol=1e-8, + options={'disp': True} + ) + +.. tab:: C++ + + .. code-block:: cpp + + // Using L-BFGS optimizer with central difference gradients + auto optimizer = cudaq::optim::optimizer::get("lbfgs"); + auto [energy, parameters, operators] = + cudaq::solvers::adapt_vqe( + initial_state{}, + h, + operators, + *optimizer, + "central_difference", + { + {"grad_norm_tolerance", 1e-3}, + {"verbose", true} + } + ); + +Available Operator Pools +^^^^^^^^^^^^^^^^^^^^^^^^^ + +CUDA-QX provides several pre-built operator pools for ADAPT-VQE: + +* **spin_complement_gsd**: Spin-complemented generalized singles and doubles +* **uccsd**: UCCSD operators +* **qaoa**: QAOA mixer excitation operators + +.. 
code-block:: python + + # Generate different operator pools + gsd_ops = solvers.get_operator_pool( + "spin_complement_gsd", + num_orbitals=molecule.n_orbitals + ) + + uccsd_ops = solvers.get_operator_pool( + "uccsd", + num_orbitals=molecule.n_orbitals, + num_electrons=molecule.n_electrons + ) + +Algorithm Parameters +^^^^^^^^^^^^^^^^^^^^^^ + +ADAPT-VQE supports various configuration options: + +* **grad_norm_tolerance**: Convergence threshold for operator gradients +* **max_iterations**: Maximum number of ADAPT iterations +* **verbose**: Enable detailed output +* **shots**: Number of measurements for shot-based simulation + +.. code-block:: python + + energy, parameters, operators = solvers.adapt_vqe( + initial_state, + hamiltonian, + operators, + grad_norm_tolerance=1e-3, + max_iterations=20, + verbose=True, + shots=10000 + ) + +Results Analysis +^^^^^^^^^^^^^^^^^ + +The algorithm returns three components: + +1. **energy**: Final ground state energy +2. **parameters**: Optimized parameters for each selected operator +3. **operators**: List of selected operators in order of application + +.. code-block:: python + + # Analyze results + print(f"Final energy: {energy}") + print("\nSelected operators and parameters:") + for param, op in zip(parameters, operators): + print(f"θ = {param:.6f} : {op}") \ No newline at end of file diff --git a/docs/sphinx/conf.py.in b/docs/sphinx/conf.py.in new file mode 100644 index 0000000..7aae33f --- /dev/null +++ b/docs/sphinx/conf.py.in @@ -0,0 +1,189 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +import os +import sys + +sys.path.insert(0, os.path.abspath('@CUDAQ_INSTALL_DIR@')) +sys.path.insert(0, os.path.abspath('@CMAKE_BINARY_DIR@/python')) + +# -- Project information ----------------------------------------------------- + +project = 'NVIDIA CUDA-QX' +copyright = '2024, NVIDIA Corporation & Affiliates' +author = 'NVIDIA Corporation & Affiliates' + +# The version info for the project you're documenting, acts as replacement for +# |version| used in various places throughout the docs. + +# The short X.Y version. +version = os.getenv("CUDAQX_VERSION", "latest") + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + # 'sphinx.ext.imgmath', + 'sphinx.ext.ifconfig', + 'sphinx.ext.autodoc', # to get documentation from python doc comments + 'sphinx.ext.autosummary', + 'sphinx.ext.autosectionlabel', + 'sphinx.ext.doctest', # test example codes in docs + 'sphinx.ext.extlinks', + 'sphinx.ext.intersphinx', + 'sphinx.ext.mathjax', + 'sphinx.ext.napoleon', # support google/numpy style docstrings + #'sphinx.ext.linkcode', + 'sphinx_reredirects', + 'breathe', + 'enum_tools.autoenum', # for pretty-print Python enums + 'myst_parser', # for including markdown files + 'sphinx_inline_tabs', # showing code blocks in multiple languages + 'nbsphinx', # for supporting jupyter notebooks + 'sphinx_copybutton', # allows for copy/paste of code cells + "sphinx_gallery.load_style", + "IPython.sphinxext.ipython_console_highlighting", +] + +nbsphinx_allow_errors = False +nbsphinx_thumbnails = { + # Default thumbnail if the notebook does not define a cell tag to specify the thumbnail. + # See also: https://nbsphinx.readthedocs.io/en/latest/subdir/gallery.html + '**': '@SPHINX_SOURCE@/_static/cuda_quantum_icon.svg' +} + +imgmath_latex_preamble = r'\usepackage{braket}' + +imgmath_image_format = 'svg' +imgmath_font_size = 14 +#imgmath_dvipng_args = ['-gamma', '1.5', '-D', '110', '-bg', 'Transparent'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['@SPHINX_SOURCE@/_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The master toctree document. +master_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_templates'] + +# The reST default role (used for this markup: `text`) to use for all documents. +default_role = 'code' # NOTE: the following may be a better choice to error on the side of flagging anything that is referenced but but not declared +#default_role = 'cpp:any' # see https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html#cross-referencing + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'lightbulb' + +# autosummary is buggy: this must be py instead of cpp so that the domain setting +# can be propagated to the autogen'd rst files. +# primary_domain = 'py' + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. + +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "collapse_navigation": False, + "sticky_navigation": False, + "prev_next_buttons_location": "both", + "style_nav_header_background": + "#76b900" # Set upper left search bar to NVIDIA green +} + +html_css_files = ['@SPHINX_SOURCE@/_static/cudaq_override.css'] + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['@SPHINX_SOURCE@/_static'] + +# Output file base name for HTML help builder. 
+htmlhelp_basename = 'cudaqxDoc' + + +def setup(app): + app.add_css_file('cudaq_override.css') + + +# -- Options for BREATHE ------------------------------------------------- + +breathe_default_project = "cudaqx" + +breathe_show_enumvalue_initializer = True + +# -- Other options ------------------------------------------------- + +autosummary_generate = True + +intersphinx_mapping = { + 'python': ('https://docs.python.org/3/', None), + 'numpy': ('https://numpy.org/doc/stable/', None), + 'cudaq': ('https://nvidia.github.io/cuda-quantum/latest', None) +} + +redirects = {"versions": "../latest/releases.html"} + +nitpick_ignore = [ + ('cpp:identifier', 'pid_t'), + ('cpp:identifier', 'uint8_t'), + ('cpp:identifier', 'details::tensor_impl::scalar_type'), + ('cpp:identifier', 'cudaqx'), + ('cpp:identifier', 'size_t'), + ('cpp:identifier', 'details'), + ('cpp:identifier', 'spin_op'), + ('cpp:identifier', 'heterogeneous_map'), + ('cpp:identifier', 'cudaq::qkernel&)>'), + ('cpp:identifier', 'cudaq::qkernel'), + ('cpp:identifier', 'cudaq::qkernel'), + ('cpp:identifier', + 'cudaq::qkernel(patch, const std::vector&, const std::vector&)>' + ), + ('cpp:identifier', 'cudaq::qvector<>'), + ('cpp:identifier', 'cudaq::qview<>'), + ('cpp:identifier', 'cudaq::measure_result'), + ('py:class', 'SpinOperator'), + ('py:class', 'numpy.int32'), + ('py:class', 'numpy.uint8'), + ('py:class', 'cudaq.mlir._mlir_libs._quakeDialects.cudaq_runtime.qview') +] + +napoleon_google_docstring = True +napoleon_numpy_docstring = False +autosectionlabel_prefix_document = True +autosectionlabel_maxdepth = 2 +copybutton_copy_empty_lines = False +pybind11_compatibility = True diff --git a/docs/sphinx/examples/qec/cpp/circuit_level_noise.cpp b/docs/sphinx/examples/qec/cpp/circuit_level_noise.cpp new file mode 100644 index 0000000..668c02d --- /dev/null +++ b/docs/sphinx/examples/qec/cpp/circuit_level_noise.cpp @@ -0,0 +1,126 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +// Compile and run with +// nvq++ --enable-mlir -lcudaq-qec circuit_level_noise.cpp -o circuit_level +// ./circuit_level + +#include "cudaq.h" +#include "cudaq/qec/decoder.h" +#include "cudaq/qec/experiments.h" +#include "cudaq/qec/noise_model.h" + +int main() { + // Choose a QEC code + auto steane = cudaq::qec::get_code("steane"); + + // Access the parity check matrix + auto H = steane->get_parity(); + std::cout << "H:\n"; + H.dump(); + + // Access the logical observables + auto observables = steane->get_pauli_observables_matrix(); + auto Lz = steane->get_observables_z(); + + // Data qubits the logical Z observable is supported on + std::cout << "Lz:\n"; + Lz.dump(); + + // Observables are stacked as Z over X for mat-vec multiplication + std::cout << "Obs:\n"; + observables.dump(); + + // How many shots to run the experiment + int nShots = 3; + // For each shot, how many rounds of stabilizer measurements + int nRounds = 4; + + // can set seed for reproducibility + // cudaq::set_random_seed(1337); + cudaq::noise_model noise; + + // Add a depolarization noise channel after each cx gate + noise.add_all_qubit_channel( + "x", cudaq::qec::two_qubit_depolarization(/*probability*/ 0.01), + /*numControls*/ 1); + + // Perform a noisy z-basis memory circuit experiment + auto [syndromes, data] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prep0, nShots, nRounds, noise); + + // With noise, many syndromes will flip each QEC cycle, these are the + // syndrome differences from the previous cycle. + std::cout << "syndromes:\n"; + syndromes.dump(); + + // With noise, Lz will sometimes be flipped + std::cout << "data:\n"; + data.dump(); + + // Use z-measurements on data qubits to determine the logical mz + // In an x-basis experiment, use Lx. + auto logical_mz = Lz.dot(data.transpose()) % 2; + std::cout << "logical_mz each shot:\n"; + logical_mz.dump(); + + // Select a decoder + auto decoder = cudaq::qec::get_decoder("single_error_lut", H); + + // Initialize a pauli_frame to track the logical errors + cudaqx::tensor pauli_frame({observables.shape()[0]}); + + // Start a loop to count the number of logical errors + size_t numLerrors = 0; + for (size_t shot = 0; shot < nShots; ++shot) { + std::cout << "shot: " << shot << "\n"; + + for (size_t round = 0; round < nRounds - 1; ++round) { + std::cout << "round: " << round << "\n"; + + // Access one row of the syndrome tensor + size_t count = shot * (nRounds - 1) + round; + size_t stride = syndromes.shape()[1]; + cudaqx::tensor syndrome({stride}); + syndrome.borrow(syndromes.data() + stride * count); + std::cout << "syndrome:\n"; + syndrome.dump(); + + // Decode the syndrome + auto [converged, v_result] = decoder->decode(syndrome); + cudaqx::tensor result_tensor; + cudaq::qec::convert_vec_soft_to_tensor_hard(v_result, result_tensor); + std::cout << "decode result:\n"; + result_tensor.dump(); + + // See if the decoded result anti-commutes with observables + auto decoded_observables = observables.dot(result_tensor); + std::cout << "decoded observable:\n"; + decoded_observables.dump(); + + // update from previous stabilizer round + pauli_frame = (pauli_frame + decoded_observables) % 2; + std::cout << "pauli frame:\n"; + pauli_frame.dump(); + } + + // prep0 means we expected to measure out 0. 
+ uint8_t expected_mz = 0; + // Apply the pauli frame correction to our logical measurement + uint8_t corrected_mz = (logical_mz.at({0, shot}) + pauli_frame.at({0})) % 2; + + // Check if Logical_mz + pauli_frame_X = 0? + std::cout << "Corrected readout: " << +corrected_mz << "\n"; + std::cout << "Expected readout: " << +expected_mz << "\n"; + if (corrected_mz != expected_mz) + numLerrors++; + std::cout << "\n"; + } + + std::cout << "numLogicalErrors: " << numLerrors << "\n"; +} diff --git a/docs/sphinx/examples/qec/cpp/code_capacity_noise.cpp b/docs/sphinx/examples/qec/cpp/code_capacity_noise.cpp new file mode 100644 index 0000000..bb11865 --- /dev/null +++ b/docs/sphinx/examples/qec/cpp/code_capacity_noise.cpp @@ -0,0 +1,105 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +// This example shows the primary cudaq::qec types: +// decoder, code +// +// Compile and run with +// nvq++ --enable-mlir -lcudaq-qec code_capacity_noise.cpp -o +// code_capacity_noise +// ./code_capacity_noise + +#include +#include +#include + +#include "cudaq.h" +#include "cudaq/qec/decoder.h" +#include "cudaq/qec/experiments.h" + +int main() { + auto steane = cudaq::qec::get_code("steane"); + auto Hz = steane->get_parity_z(); + std::vector t_shape = Hz.shape(); + + std::cout << "Hz.shape():\n"; + for (size_t elem : t_shape) + std::cout << elem << " "; + std::cout << "\n"; + + std::cout << "Hz:\n"; + Hz.dump(); + + auto Lz = steane->get_observables_x(); + std::cout << "Lz:\n"; + Lz.dump(); + + double p = 0.2; + size_t nShots = 5; + auto lut_decoder = cudaq::qec::get_decoder("single_error_lut", Hz); + + std::cout << "nShots: " << nShots << "\n"; + + // May want a order-2 tensor of syndromes + // access tensor by stride to write in an entire syndrome + cudaqx::tensor syndrome({Hz.shape()[0]}); + + int nErrors = 0; + for (size_t shot = 0; shot < nShots; ++shot) { + std::cout << "shot: " << shot << "\n"; + auto shot_data = cudaq::qec::generate_random_bit_flips(Hz.shape()[1], p); + std::cout << "shot data\n"; + shot_data.dump(); + + auto observable_z_data = Lz.dot(shot_data); + observable_z_data = observable_z_data % 2; + std::cout << "Data Lz state:\n"; + observable_z_data.dump(); + + auto syndrome = Hz.dot(shot_data); + syndrome = syndrome % 2; + std::cout << "syndrome:\n"; + syndrome.dump(); + + auto [converged, v_result] = lut_decoder->decode(syndrome); + cudaqx::tensor result_tensor; + // v_result is a std::vector, of soft information. We'll convert + // this to hard information and store as a tensor. 
+ cudaq::qec::convert_vec_soft_to_tensor_hard(v_result, result_tensor); + std::cout << "decode result:\n"; + result_tensor.dump(); + + // check observable result + auto decoded_observable_z = Lz.dot(result_tensor); + std::cout << "decoded observable:\n"; + decoded_observable_z.dump(); + + // check how many observable operators were decoded correctly + // observable_z_data == decoded_observable_z This maps onto element wise + // addition (mod 2) + auto observable_flips = decoded_observable_z + observable_z_data; + observable_flips = observable_flips % 2; + std::cout << "Logical errors:\n"; + observable_flips.dump(); + std::cout << "\n"; + + // shot counts as a observable error unless all observables are correct + if (observable_flips.any()) { + nErrors++; + } + } + std::cout << "Total logical errors: " << nErrors << "\n"; + + // Full data gen in function call + auto [syn, data] = cudaq::qec::sample_code_capacity(Hz, nShots, p); + std::cout << "Numerical experiment:\n"; + std::cout << "Data:\n"; + data.dump(); + std::cout << "Syn:\n"; + syn.dump(); +} diff --git a/docs/sphinx/examples/qec/python/circuit_level_noise.py b/docs/sphinx/examples/qec/python/circuit_level_noise.py new file mode 100644 index 0000000..d65f456 --- /dev/null +++ b/docs/sphinx/examples/qec/python/circuit_level_noise.py @@ -0,0 +1,82 @@ +import numpy as np +import cudaq +import cudaq_qec as qec + +# Get a QEC code +cudaq.set_target("stim") +steane = qec.get_code("steane") + +# Get the parity check matrix of a code +# Can get the full code, or for CSS codes +# just the X or Z component +H = steane.get_parity() +print(f"H:\n{H}") +observables = steane.get_pauli_observables_matrix() +Lz = steane.get_observables_z() +print(f"observables:\n{observables}") +print(f"Lz:\n{Lz}") + +nShots = 3 +nRounds = 4 + +# error probabily +p = 0.01 +noise = cudaq.NoiseModel() +noise.add_all_qubit_channel("x", qec.TwoQubitDepolarization(p), 1) + +# prepare logical |0> state, tells the sampler to do z-basis experiment +statePrep = qec.operation.prep0 +# our expected measurement in this state is 0 +expected_value = 0 + +# sample the steane memory circuit with noise on each cx gate +# reading out the syndromes after each stabilizer round (xor'd against the previous) +# and readout out the data qubits at the end of the experiment +syndromes, data = qec.sample_memory_circuit(steane, statePrep, nShots, nRounds, noise) +print("From sample function:\n") +print("syndromes:\n",syndromes) +print("data:\n",data) + +# Get a decoder +decoder = qec.get_decoder("single_error_lut", H) +nLogicalErrors = 0 + +# Logical Mz each shot (use Lx if preparing in X-basis) +logical_measurements = (Lz@data.transpose()) % 2 +# only one logical qubit, so do not need the second axis +logical_measurements = logical_measurements.flatten() +print("LMz:\n", logical_measurements) + +# initialize a Pauli frame to track logical flips +# through the stabilizer rounds +pauli_frame = np.array([0,0], dtype=np.uint8) +for shot in range(0,nShots): + print("shot:", shot) + for syndrome in syndromes: + print("syndrome:", syndrome) + # decode the syndrome + result = decoder.decode(syndrome) + data_prediction = np.array(result.result, dtype=np.uint8) + + # see if the decoded result anti-commutes with the observables + print("decode result:", data_prediction) + decoded_observables = (observables@data_prediction) % 2 + print("decoded_observables:", decoded_observables) + + # update pauli frame + pauli_frame = (pauli_frame + decoded_observables) % 2 + print("pauli frame:", pauli_frame) + + # 
after pauli frame has tracked corrections through the rounds + # apply the pauli frame correction to the measurement, and see + # if this matches the state we intended to prepare + # We prepared |0>, so we check if logical measurement Mz + Pf_X = 0 + corrected_mz = (logical_measurements[shot] + pauli_frame[0]) % 2 + print("Expected value:", expected_value) + print("Corrected value:", corrected_mz) + if (corrected_mz != expected_value): + nLogicalErrors += 1 + +# Count how many shots the decoder failed to correct the errors +print("Number of logical errors:", nLogicalErrors) + diff --git a/docs/sphinx/examples/qec/python/code_capacity_noise.py b/docs/sphinx/examples/qec/python/code_capacity_noise.py new file mode 100644 index 0000000..95deaa2 --- /dev/null +++ b/docs/sphinx/examples/qec/python/code_capacity_noise.py @@ -0,0 +1,56 @@ +import numpy as np +import cudaq_qec as qec + +# Get a QEC code +steane = qec.get_code("steane") + +# Get the parity check matrix of a code +# Can get the full code, or for CSS codes +# just the X or Z component +Hz = steane.get_parity_z() +print(f"Hz:\n{Hz}") +observable = steane.get_observables_z() +print(f"observable:\n{observable}") + +# error probabily +p = 0.1 +# Get a decoder +decoder = qec.get_decoder("single_error_lut", Hz) + +# Perform a code capacity noise model numerical experiment +nShots = 10 +nLogicalErrors = 0 +for i in range(nShots): + print(f"shot: {i}") + + # Generate noisy data + data = qec.generate_random_bit_flips(Hz.shape[1], p) + print(f"data: {data}") + + # Calculate which syndromes are flagged. + syndrome = Hz@data % 2 + print(f"syndrome: {syndrome}") + + # Decode the syndrome to predict what happen to the data + result = decoder.decode(syndrome) + data_prediction = np.array(result.result, dtype=np.uint8) + print(f"data_prediction: {data_prediction}") + + # See if this prediction flipped the observable + predicted_observable = observable@data_prediction % 2 + print(f"predicted_observable: {predicted_observable}") + + # See if the observable was actually flipped + actual_observable = observable@data % 2 + print(f"actual_observable: {actual_observable}") + if (predicted_observable != actual_observable): + nLogicalErrors += 1 + +# Count how many shots the decoder failed to correct the errors +print(f"{nLogicalErrors} logical errors in {nShots} shots\n") + +# Can also generate syndromes and data from a single line with: +syndromes, data = qec.sample_code_capacity(Hz, nShots, p) +print("From sample function:") +print("syndromes:\n",syndromes) +print("data:\n",data) diff --git a/docs/sphinx/examples/qec/python/pseudo_threshold.py b/docs/sphinx/examples/qec/python/pseudo_threshold.py new file mode 100644 index 0000000..1bc20cd --- /dev/null +++ b/docs/sphinx/examples/qec/python/pseudo_threshold.py @@ -0,0 +1,60 @@ +import numpy as np +import cudaq_qec as qec +import matplotlib.pyplot as plt + +# Get a QEC code +# steane = qec.get_code("repetition", distance=9) +steane = qec.get_code("steane") + +# Get the parity check matrix of a code +# Can get the full code, or for CSS codes +# just the X or Z component +Hz = steane.get_parity_z() +observable = steane.get_observables_z() + +# Get a decoder +decoder = qec.get_decoder("single_error_lut", Hz) + +# Perform a code capacity noise model numerical experiment +nShots = 100000 +LERates = [] +# PERates = np.linspace(0.1, 0.50, num=20) +PERates = np.logspace(-2.0, -0.5, num=25) + +for p in PERates: + nLogicalErrors = 0 + for i in range(nShots): + data = qec.generate_random_bit_flips(Hz.shape[1], p) + # 
Calculate which syndromes are flagged. + syndrome = Hz@data % 2 + + result = decoder.decode(syndrome) + data_prediction = np.array(result.result) + + predicted_observable = observable@data_prediction % 2 + + actual_observable = observable@data % 2 + if (predicted_observable != actual_observable): + nLogicalErrors += 1 + LERates.append(nLogicalErrors/nShots) + +# Count how many shots the decoder failed to correct the errors +print("PERates:", PERates) +print("LERates:", LERates) + +# Create a figure and an axes object +fig, ax = plt.subplots() + +# Plot the data +ax.loglog(PERates, LERates) +ax.loglog(PERates, PERates, 'r--', label='y=x') + +# Add a title and labels +ax.set_title("Steane Code") +ax.set_xlabel("Physical Error Rate") +ax.set_ylabel("Logical Error Rate") + +# Show the plot +# plt.show() +# plt.savefig("myplot.png") + diff --git a/docs/sphinx/examples/solvers/cpp/adapt_h2.cpp b/docs/sphinx/examples/solvers/cpp/adapt_h2.cpp new file mode 100644 index 0000000..987e9ee --- /dev/null +++ b/docs/sphinx/examples/solvers/cpp/adapt_h2.cpp @@ -0,0 +1,39 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include "cudaq.h" +#include "cudaq/solvers/adapt.h" +#include "cudaq/solvers/operators.h" + +// Compile and run with +// nvq++ --enable-mlir -lcudaq-solvers adapt_h2.cpp -o adapt_ex +// ./adapt_ex + +int main() { + // Create the molecular hamiltonian + cudaq::solvers::molecular_geometry geometry{{"H", {0., 0., 0.}}, + {"H", {0., 0., .7474}}}; + auto molecule = cudaq::solvers::create_molecule( + geometry, "sto-3g", 0, 0, {.casci = true, .verbose = true}); + + // Get the spin operator + auto h = molecule.hamiltonian; + + // Create the operator pool + auto pool = cudaq::solvers::operator_pool::get("spin_complement_gsd"); + auto poolList = pool->generate({{"num-orbitals", h.num_qubits() / 2}}); + + // Run ADAPT + auto [energy, thetas, ops] = cudaq::solvers::adapt_vqe( + [](cudaq::qvector<> &q) __qpu__ { + x(q[0]); + x(q[1]); + }, + h, poolList, {{"grad_norm_tolerance", 1e-3}}); + + printf("Final = %.12lf\n", energy); +} \ No newline at end of file diff --git a/docs/sphinx/examples/solvers/cpp/molecular_docking_qaoa.cpp b/docs/sphinx/examples/solvers/cpp/molecular_docking_qaoa.cpp new file mode 100644 index 0000000..1d60eac --- /dev/null +++ b/docs/sphinx/examples/solvers/cpp/molecular_docking_qaoa.cpp @@ -0,0 +1,60 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "cudaq.h" +#include "cudaq/solvers/operators.h" +#include "cudaq/solvers/qaoa.h" + +// Compile and run with +// nvq++ --enable-mlir -lcudaq-solvers molecular_docking_qaoa.cpp +// ./a.out + +int main() { + + // Create the ligand-configuration graph + cudaqx::graph g; + std::vector weights{0.6686, 0.6686, 0.6686, 0.1453, 0.1453, 0.1453}; + std::vector> edges{{0, 1}, {0, 2}, {0, 4}, {0, 5}, + {1, 2}, {1, 3}, {1, 5}, {2, 3}, + {2, 4}, {3, 4}, {3, 5}, {4, 5}}; + for (std::size_t node = 0; auto weight : weights) + g.add_node(node++, weight); + + for (auto &edge : edges) + g.add_edge(edge.first, edge.second); + + // Set some parameters we'll need + double penalty = 6.0; + std::size_t numLayers = 3; + + // Create the Clique Hamiltonian + auto H = cudaq::solvers::get_clique_hamiltonian(g, penalty); + + // Get the number of required variational parameters + auto numParams = cudaq::solvers::get_num_qaoa_parameters( + H, numLayers, + {{"full_parameterization", true}, {"counterdiabatic", true}}); + + // Create the initial parameters to begin optimization + auto initParams = cudaq::random_vector(-M_PI / 8., M_PI / 8., numParams); + + // Run QAOA, specify full parameterization and counterdiabatic + // Full parameterization uses an optimization parameter for + // every term in the clique Hamiltonian and the mixer hamiltonian. + // Specifying counterdiabatic adds extra Ry rotations at the + // end of each layer. + auto [opt_value, opt_params, opt_config] = cudaq::solvers::qaoa( + H, numLayers, initParams, + {{"full_parameterization", true}, {"counterdiabatic", true}}); + + // Print out the results + std::cout << "Optimal energy: " << opt_value << "\n"; + std::cout << "Sampled states: "; + opt_config.dump(); + std::cout << "Optimal configuraiton: " << opt_config.most_probable() << "\n"; +} \ No newline at end of file diff --git a/docs/sphinx/examples/solvers/cpp/uccsd_vqe.cpp b/docs/sphinx/examples/solvers/cpp/uccsd_vqe.cpp new file mode 100644 index 0000000..5fe6690 --- /dev/null +++ b/docs/sphinx/examples/solvers/cpp/uccsd_vqe.cpp @@ -0,0 +1,53 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ +#include "cudaq.h" +#include "cudaq/solvers/operators.h" +#include "cudaq/solvers/stateprep/uccsd.h" +#include "cudaq/solvers/vqe.h" + +// Compile and run with +// nvq++ --enable-mlir -lcudaq-solvers uccsd_vqe.cpp -o uccsd_vqe +// ./uccsd_vqe + +int main() { + // Create the molecular hamiltonian + cudaq::solvers::molecular_geometry geometry{{"H", {0., 0., 0.}}, + {"H", {0., 0., .7474}}}; + auto molecule = cudaq::solvers::create_molecule( + geometry, "sto-3g", 0, 0, {.casci = true, .verbose = true}); + + // Get the spin operator + auto h = molecule.hamiltonian; + + // Get the number of electrons and qubits + auto numElectrons = molecule.n_electrons; + auto numQubits = molecule.n_orbitals * 2; + + // Create an initial set of parameters for the optimization + auto numParams = cudaq::solvers::stateprep::get_num_uccsd_parameters( + numElectrons, numQubits); + std::vector init(numParams, -2.); + + // Run VQE + auto [energy, thetas, ops] = cudaq::solvers::vqe( + [&](std::vector params, std::size_t numQubits, + std::size_t numElectrons) __qpu__ { + cudaq::qvector q(numQubits); + for (auto i : cudaq::range(numElectrons)) + x(q[i]); + + cudaq::solvers::stateprep::uccsd(q, params, numElectrons); + }, + molecule.hamiltonian, init, + [&](std::vector x) { + return std::make_tuple(x, numQubits, numElectrons); + }, + {{"verbose", true}}); + + printf("Final = %.12lf\n", energy); +} \ No newline at end of file diff --git a/docs/sphinx/examples/solvers/python/adapt_h2.py b/docs/sphinx/examples/solvers/python/adapt_h2.py new file mode 100644 index 0000000..51575a7 --- /dev/null +++ b/docs/sphinx/examples/solvers/python/adapt_h2.py @@ -0,0 +1,56 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # +import cudaq, cudaq_solvers as solvers + +# Run this script with +# python3 adapt_h2.py +# +# In order to leverage CUDA-Q MQPU and distribute the work across +# multiple QPUs (thereby observing a speed-up), set the target and +# use MPI: +# +# cudaq.set_target('nvidia', mqpu=True) +# cudaq.mpi.initialize() +# +# run with +# +# mpiexec -np N and vary N to see the speedup... +# e.g. mpiexec -np 2 python3 adapt_h2_mqpu.py +# +# End the script with +# cudaq.mpi.finalize() + +# Create the molecular hamiltonian +geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))] +molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + casci=True) + +# Create the ADAPT operator pool +operators = solvers.get_operator_pool( + "spin_complement_gsd", num_orbitals=molecule.n_orbitals) + +# Get the number of electrons so we can +# capture it in the initial state kernel +numElectrons = molecule.n_electrons + +# Define the initial Hartree Fock state +@cudaq.kernel +def initState(q: cudaq.qview): + for i in range(numElectrons): + x(q[i]) + +# Run ADAPT-VQE +energy, thetas, ops = solvers.adapt_vqe(initState, + molecule.hamiltonian, + operators) + +# Print the result. 
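+# (for this H2 geometry in the sto-3g basis, the converged ADAPT-VQE energy
+#  should land near the exact ground-state value of roughly -1.137 Hartree)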
+print(" = ", energy) diff --git a/docs/sphinx/examples/solvers/python/generate_molecular_hamiltonians.py b/docs/sphinx/examples/solvers/python/generate_molecular_hamiltonians.py new file mode 100644 index 0000000..e23f6e3 --- /dev/null +++ b/docs/sphinx/examples/solvers/python/generate_molecular_hamiltonians.py @@ -0,0 +1,85 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +import cudaq_solvers as solvers + +# Generate active space Hamiltonian using HF molecular orbitals + +geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] +molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=2, + norb_cas=3, + verbose=True) + +print('N2 HF Hamiltonian') +print('Energies : ', molecule.energies) +print('No. of orbitals: ', molecule.n_orbitals) +print('No. of electrons: ', molecule.n_electrons) + +# Generate active space Hamiltonian using natural orbitals from MP2 + +geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] +molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=2, + norb_cas=3, + MP2=True, + integrals_natorb=True, + verbose=True) + +print('N2 Natural Orbitals from MP2 Hamiltonian') +print('Energies: ', molecule.energies) +print('No. of orbitals: ', molecule.n_orbitals) +print('No. of electrons: ', molecule.n_electrons) + +# Generate active space Hamiltonian using casscf orbitals, +# where the active space of the casscf was defined from HF molecular orbitals + +geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] +molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=2, + norb_cas=3, + casscf=True, + integrals_casscf=True, + verbose=True) + + +print('N2 Active Space Hamiltonian Using CASSF Orbitals - HF orbitals') +print('Energies: ', molecule.energies) +print('No. of orbitals: ', molecule.n_orbitals) +print('No. of electrons: ', molecule.n_electrons) + +# Generate active space Hamiltonian using casscf orbitals, +# where the active space of the casscf was defined from the MP2 natural orbitals. + +geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] +molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=2, + norb_cas=3, + MP2=True, + natorb=True, + casscf=True, + integrals_casscf=True, + verbose=True) + +print('N2 Active Space Hamiltonian Using CASSF Orbitals - MP2 natural orbitals') +print('N2 HF Hamiltonian') +print('Energies: ', molecule.energies) +print('No. of orbitals: ', molecule.n_orbitals) +print('No. of electrons: ', molecule.n_electrons) diff --git a/docs/sphinx/examples/solvers/python/molecular_docking_qaoa.py b/docs/sphinx/examples/solvers/python/molecular_docking_qaoa.py new file mode 100644 index 0000000..c612f2e --- /dev/null +++ b/docs/sphinx/examples/solvers/python/molecular_docking_qaoa.py @@ -0,0 +1,51 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # +import cudaq, cudaq_solvers as solvers +import networkx as nx, numpy as np + +# Create the ligand-configuration graph +G = nx.Graph() +edges = [[0, 1], [0, 2], [0, 4], [0, 5], [1, 2], [1, 3], [1, 5], [2, 3], [2, 4], + [3, 4], [3, 5], [4, 5]] +weights = [0.6686, 0.6686, 0.6686, 0.1453, 0.1453, 0.1453] +for i, weight in enumerate(weights): + G.add_node(i, weight=weight) +G.add_edges_from(edges) + +# Set some parameters we'll need +penalty = 6.0 +num_layers = 3 + +# Create the Clique Hamiltonian +H = solvers.get_clique_hamiltonian(G, penalty=penalty) + +# Get the number of parameters we'll need +parameter_count = solvers.get_num_qaoa_parameters(H, + num_layers, + full_parameterization=True, + counterdiabatic=True) + +# Create the initial parameters to begin optimization +init_params = np.random.uniform(-np.pi / 8, np.pi / 8, parameter_count) + +# Run QAOA, specify full parameterization and counterdiabatic +# Full parameterization uses an optimization parameter for +# every term in the clique Hamiltonian and the mixer hamiltonian. +# Specifying counterdiabatic adds extra Ry rotations at the +# end of each layer. +opt_value, opt_params, opt_config = solvers.qaoa(H, + num_layers, + init_params, + full_parameterization=True, + counterdiabatic=True) + +# Print the results +print() +print('Optimal energy: ', opt_value) +print('Sampled states: ', opt_config) +print('Optimal Configuration: ', opt_config.most_probable()) diff --git a/docs/sphinx/examples/solvers/python/uccsd_vqe.py b/docs/sphinx/examples/solvers/python/uccsd_vqe.py new file mode 100644 index 0000000..8797880 --- /dev/null +++ b/docs/sphinx/examples/solvers/python/uccsd_vqe.py @@ -0,0 +1,45 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # +import cudaq, cudaq_solvers as solvers +from scipy.optimize import minimize + +# Create the molecular hamiltonian +geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))] +molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + casci=True) + +# Get the number of qubits and electrons +numQubits = molecule.n_orbitals * 2 +numElectrons = molecule.n_electrons +spin = 0 +initialX = [-.2] * solvers.stateprep.get_num_uccsd_parameters( + numElectrons, numQubits) + + +# Define the UCCSD ansatz +@cudaq.kernel +def ansatz(thetas: list[float]): + q = cudaq.qvector(numQubits) + for i in range(numElectrons): + x(q[i]) + solvers.stateprep.uccsd(q, thetas, numElectrons, spin) + + +# Run VQE +energy, params, all_data = solvers.vqe(ansatz, + molecule.hamiltonian, + initialX, + optimizer=minimize, + method='L-BFGS-B', + jac='3-point', + tol=1e-4, + options={'disp': True}) +print(f'Final = {energy}') diff --git a/docs/sphinx/examples_rst/qec/circuit_level_noise.rst b/docs/sphinx/examples_rst/qec/circuit_level_noise.rst new file mode 100644 index 0000000..857086a --- /dev/null +++ b/docs/sphinx/examples_rst/qec/circuit_level_noise.rst @@ -0,0 +1,77 @@ +Quantum Error Correction with Circuit-level Noise Modeling +---------------------------------------------------------- +This example builds upon the previous code-capacity noise model example. 
+In the circuit-level noise modeling experiment, we have many of the same components from the CUDA-Q QEC library: QEC codes, decoders, and noisy data.
+The primary difference here is that we can begin to run CUDA-Q kernels to generate noisy data, rather than just generating random bitstrings to represent our errors.
+
+Along with the stabilizers, parity check matrices, and logical observables, the QEC code type also has an encoding map.
+This map allows codes to define logical gates in terms of gates on the underlying physical qubits.
+These encodings operate on the `qec.patch` type, which represents the three registers of physical qubits making up a logical qubit: a data qubit register, an X-stabilizer ancilla register, and a Z-stabilizer ancilla register.
+
+The most notable encoding stored in the QEC map is `qec.operation.stabilizer_round`, which maps to a `cudaq.kernel` containing the gate-level instructions for performing a stabilizer measurement.
+These stabilizer rounds are the gate-level way to encode the parity check matrix of a QEC code into quantum circuits.
+
+This example walks through how to use the CUDA-Q QEC library to perform a quantum memory experiment simulation.
+These experiments model how well QEC cycles, or rounds of stabilizer measurements, can protect the information encoded in a logical qubit.
+If noise is turned off, then the information is protected indefinitely.
+Here, we will model depolarization noise after each CX gate, and track how many logical errors occur.
+
+
+CUDA-Q QEC Implementation
++++++++++++++++++++++++++++++
+Here's how to use CUDA-Q QEC to perform a circuit-level noise model experiment in both Python and C++:
+
+.. tab:: Python
+
+   .. literalinclude:: ../../examples/qec/python/circuit_level_noise.py
+      :language: python
+
+.. tab:: C++
+
+   .. literalinclude:: ../../examples/qec/cpp/circuit_level_noise.cpp
+      :language: cpp
+
+   Compile and run with
+
+   .. code-block:: bash
+
+      nvq++ --enable-mlir -lcudaq-qec circuit_level_noise.cpp -o circuit_level_noise
+      ./circuit_level_noise
+
+
+1. QEC Code and Decoder types:
+   - As in the code capacity example, our central objects are the `qec.code` and `qec.decoder` types.
+
+2. Clifford simulation backend:
+   - As the size of QEC circuits can grow quite large, Clifford simulation is often the best tool for these simulations.
+   - `cudaq.set_target("stim")` selects the highly performant Stim simulator as the simulation backend.
+
+3. Noise model:
+   - To add noisy gates we use the `cudaq.NoiseModel` type.
+   - CUDA-Q supports the generation of arbitrary noise channels, but here we use a `qec.TwoQubitDepolarization` channel to add depolarization noise.
+   - This is added to the `CX` gate by adding it to the `X` gate with 1 control.
+   - This noisy gate is added to every qubit via the `noise.add_all_qubit_channel` function.
+
+4. Getting circuit-level noisy data:
+   - The `qec.code` is the first input parameter here, as the code's `stabilizer_round` determines the circuits executed.
+   - Each memory circuit runs for an input number of rounds, `nRounds`, which specifies how many `stabilizer_round` kernels are run.
+   - After `nRounds` the data qubits are measured and the run is over.
+   - This is performed `nShots` times.
+   - During a shot, each syndrome is `xor`'d against the preceding syndrome, so that we can track a sparser flow of data showing in which round each parity check was violated.
+   - This means we will get a total of `nShots * (nRounds - 1)` syndromes to decode and analyze.
+
+5. Data qubit measurements:
+   - The data qubits are only read out after the end of each shot, so there are `nShots` worth of data readouts.
+   - The basis of the data qubit measurements depends on the state preparation used.
+   - Z-basis readout when preparing the logical `|0>` or logical `|1>` state with the `qec.operation.prep0` or `qec.operation.prep1` kernels.
+   - X-basis readout when preparing the logical `|+>` or logical `|->` state with the `qec.operation.prepp` or `qec.operation.prepm` kernels.
+
+6. Logical Errors:
+   - From here, the decoding procedure is again similar to the code capacity case, except that we use a Pauli frame to track errors that happen each QEC cycle.
+   - The final values of the Pauli frame tell us how our logical state flipped during the experiment, and what needs to be done to correct it.
+   - We compare our known initial state (corrected by the Pauli frame) against our measured data qubits to determine if a logical error occurred.
+
+
+The CUDA-Q QEC library thus provides a platform for numerical QEC experiments. The `qec.code` can be used to analyze a variety of QEC codes (both library and user provided), with a variety of decoders (both library and user provided).
+The CUDA-Q QEC library also provides tools to speed up the automation of generating noisy data and syndromes.
diff --git a/docs/sphinx/examples_rst/qec/code_capacity_noise.rst b/docs/sphinx/examples_rst/qec/code_capacity_noise.rst
new file mode 100644
index 0000000..e4f614f
--- /dev/null
+++ b/docs/sphinx/examples_rst/qec/code_capacity_noise.rst
@@ -0,0 +1,84 @@
+Quantum Error Correction with Code-Capacity Noise Modeling
+----------------------------------------------------------
+
+Quantum error correction (QEC) describes a set of tools used to detect and correct errors which occur to qubits on quantum computers.
+This example will walk through how the CUDA-Q QEC library handles two of the most common objects in QEC: stabilizer codes and decoders.
+A stabilizer code is the quantum generalization of linear codes in classical error correction, which use parity checks to detect errors on noisy bits.
+In QEC, we'll perform stabilizer measurements on ancilla qubits to check the parity of our data qubits.
+These stabilizer measurements are non-destructive, and thus allow us to check the relative parity of qubits without destroying our quantum information.
+
+For example, if we prepare two qubits in the state `\Psi = a|00> + b|11>`, we may want to check whether a bit-flip error happened.
+We can measure the stabilizer `ZZ`, which will return 0 if there are no errors or an even number of errors, but will return 1 if exactly one of the qubits has flipped.
+This is how we can perform parity checks in quantum computing, without performing destructive measurements which collapse our superposition.
+How these measurements are physically performed can be seen in the circuit-level noise QEC example.
+
+We can specify a stabilizer code with either a list of stabilizer operators (like `ZZ` above), or equivalently, a parity check matrix.
+We can think of the columns of a parity check matrix as the types of errors that can occur. In this case, each qubit can experience a bit flip `X` or a phase flip `Z` error, so the parity check matrix will have 2N columns where N is the number of data qubits.
+Each row represents a stabilizer, or a parity check.
+The values are either 0 or 1, where a 1 means the corresponding error participates in that parity check, and a 0 means it does not.
+Therefore, if a single `X/Z` error happens to a qubit, the rows of the parity check matrix with support on that qubit will be triggered.
+This is called the syndrome, a string of 0's and 1's corresponding to which parity checks were violated.
+A special class of stabilizer codes is called CSS (Calderbank-Shor-Steane) codes, meaning the `X` and `Z` components of their parity check matrix can be separated.
+
+This brings us to decoding. Decoding is the act of solving the problem: given a syndrome, which underlying errors are most likely?
+There are many decoding algorithms, but this example will use a simple single-error look-up table.
+This means that the decoder enumerates, for each single-error bit string, what the resulting syndrome is.
+Then, given a syndrome, it looks up the corresponding error string and returns it as the result.
+
+The last thing we need is a way to generate errors.
+This example will go through a code capacity noise model, where each qubit independently experiences an `X` or `Z` error with some probability `p`.
+
+CUDA-Q QEC Implementation
++++++++++++++++++++++++++++++
+Here's how to use CUDA-Q QEC to perform a code capacity noise model experiment in both Python and C++:
+
+.. tab:: Python
+
+   .. literalinclude:: ../../examples/qec/python/code_capacity_noise.py
+      :language: python
+
+.. tab:: C++
+
+   .. literalinclude:: ../../examples/qec/cpp/code_capacity_noise.cpp
+      :language: cpp
+
+   Compile and run with
+
+   .. code-block:: bash
+
+      nvq++ --enable-mlir -lcudaq-qec code_capacity_noise.cpp -o code_capacity_noise
+      ./code_capacity_noise
+
+
+Code Explanation
+++++++++++++++++
+
+1. QEC Code type:
+   - CUDA-Q QEC centers around the `qec.code` type, which contains the data relevant for a given code.
+   - In particular, it represents the collection of physical qubits which together make up a single logical qubit.
+   - Here we get one of the most well-known QEC codes, the Steane code, with the `qec.get_code` function.
+   - We can get the stabilizers from a code with the `code.get_stabilizers()` function.
+   - In this example, we get the parity check matrix of the code. Because the Steane code is a CSS code, we can extract just the `Z` components of the parity check matrix.
+   - Here, we see this matrix has 3 rows and 7 columns, which means there are 7 data qubits (7 possible single bit-flip errors) and 3 Z-stabilizers (parity checks). Note that `Z` stabilizers check for `X` type errors.
+   - Lastly, we get the logical `Z` observable for the code. This will allow us to see if the `Z` observable of our logical qubit has flipped.
+
+2. Decoder type:
+   - A single-error look-up table (LUT) decoder can be acquired with the `qec.get_decoder` call.
+   - Passing in the parity check matrix gives the decoder the required information to associate syndromes with underlying error mechanisms.
+   - Once the decoder has been constructed, the `decoder.decode(syndrome)` member function is called, which returns a predicted error given the syndrome.
+
+3. Noise model:
+   - To generate noisy data, we call `qec.generate_random_bit_flips(nBits, p)`, which returns an array of bits where each bit has a probability `p` of having been flipped to 1 and a `1-p` chance of having remained 0.
+   - Since we are using the `Z` parity check matrix `H_Z`, we want to simulate random `X` errors on our 7 data qubits.
+
+4. Logical Errors:
+   - Once we have noisy data, we see what the resulting syndromes are by multiplying our noisy data vector by our parity check matrix (mod 2).
+   - From this syndrome, we see which errors the decoder predicts occurred in the data.
+   - When classifying logical errors, the decoder does not need to guess exactly what happened to the data, only whether the logical observable was flipped or not.
+   - If the decoder guesses this successfully, we have corrected the quantum error. If not, we have incurred a logical error.
+
+5. Further automation:
+   - While this workflow is nice for seeing things step by step, the `qec.sample_code_capacity` API is provided to generate a batch of noisy data and their corresponding syndromes.
+
+The CUDA-Q QEC library thus provides a platform for numerical QEC experiments. The `qec.code` can be used to analyze a variety of QEC codes (both library and user provided), with a variety of decoders (both library and user provided).
+The CUDA-Q QEC library also provides tools to speed up the automation of generating noisy data and syndromes.
diff --git a/docs/sphinx/examples_rst/qec/examples.rst b/docs/sphinx/examples_rst/qec/examples.rst
new file mode 100644
index 0000000..606f2fa
--- /dev/null
+++ b/docs/sphinx/examples_rst/qec/examples.rst
@@ -0,0 +1,11 @@
+*************************
+CUDA-Q QEC by Example
+*************************
+
+Examples that illustrate how to use CUDA-QX for application development are available in C++ and Python.
+
+.. toctree::
+   :maxdepth: 1
+
+   Code-Capacity-QEC
+   Circuit-Level-QEC
diff --git a/docs/sphinx/examples_rst/solvers/adapt.rst b/docs/sphinx/examples_rst/solvers/adapt.rst
new file mode 100644
index 0000000..55da96f
--- /dev/null
+++ b/docs/sphinx/examples_rst/solvers/adapt.rst
@@ -0,0 +1,39 @@
+ADAPT-VQE
+---------
+
+ADAPT-VQE is an advanced quantum algorithm designed to improve upon the
+standard Variational Quantum Eigensolver (VQE) approach for solving quantum
+chemistry problems. It addresses key challenges faced by traditional VQE
+methods by dynamically constructing a problem-specific ansatz, offering
+several advantages:
+
+- Faster convergence: Adaptively selects the most impactful operators, potentially achieving convergence more quickly than fixed-ansatz VQE methods.
+- Enhanced efficiency: Builds a compact ansatz tailored to the specific problem, potentially reducing overall circuit depth.
+- Increased accuracy: Has demonstrated the ability to outperform standard VQE approaches in terms of accuracy for certain molecular systems.
+- Adaptability: Automatically adjusts to different molecular systems without requiring significant user intervention or prior knowledge of the system's electronic structure.
+
+The ADAPT-VQE algorithm works by iteratively growing the quantum circuit
+ansatz, selecting operators from a predefined pool based on their gradient
+magnitudes. This adaptive approach allows the algorithm to focus
+computational resources on the most relevant aspects of the problem,
+potentially leading to more efficient and accurate simulations of molecular
+systems on quantum computers.
+
+Here we demonstrate how to use the CUDA-Q Solvers library to execute the ADAPT-VQE algorithm.
+
+.. tab:: Python
+
+   .. literalinclude:: ../../examples/solvers/python/adapt_h2.py
+      :language: python
+
+.. tab:: C++
+
+   .. literalinclude:: ../../examples/solvers/cpp/adapt_h2.cpp
+      :language: cpp
+
+   Compile and run with
+
+   .. code:: bash
+
+      nvq++ --enable-mlir -lcudaq-solvers adapt_h2.cpp -o adapt_h2
+      ./adapt_h2
diff --git a/docs/sphinx/examples_rst/solvers/examples.rst b/docs/sphinx/examples_rst/solvers/examples.rst
new file mode 100644
index 0000000..75ec8d7
--- /dev/null
+++ b/docs/sphinx/examples_rst/solvers/examples.rst
@@ -0,0 +1,13 @@
+*************************
+CUDA-Q Solvers by Example
+*************************
+
+Examples that illustrate how to use CUDA-QX for application development are available in C++ and Python.
+
+.. toctree::
+   :maxdepth: 1
+
+   Molecular-Hamiltonians
+   ADAPT-VQE
+   VQE
+   QAOA
diff --git a/docs/sphinx/examples_rst/solvers/molecular_hamiltonians.rst b/docs/sphinx/examples_rst/solvers/molecular_hamiltonians.rst
new file mode 100644
index 0000000..8ba2d75
--- /dev/null
+++ b/docs/sphinx/examples_rst/solvers/molecular_hamiltonians.rst
@@ -0,0 +1,103 @@
+Generating Molecular Hamiltonians
+----------------------------------
+
+The CUDA-Q Solvers library accelerates a wide range of applications in the domain of quantum chemistry.
+To facilitate these calculations, CUDA-Q Solvers provides the `solvers.create_molecule` function to allow users to generate basis sets and Hamiltonians for many systems of interest.
+The molecule class contains the basis set information and the Hamiltonian (`molecule.hamiltonian`) for the target system.
+These Hamiltonians can then be used as input into the hybrid quantum-classical solvers that the CUDA-Q Solvers API provides.
+
+
+Molecular Orbitals and Hamiltonians
++++++++++++++++++++++++++++++++++++
+
+First we define the atomic geometry of the molecule by specifying an array of atomic symbols as strings and their coordinates in 3D space. We then get a molecule object from the `solvers.create_molecule` call.
+Here we create the "default" Hamiltonian for the N2 system using complete active space molecular orbitals constructed from Hartree-Fock atomic orbitals.
+
+.. tab:: Python
+
+   .. code-block:: python
+
+      geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))]
+      molecule = solvers.create_molecule(geometry,
+                                         'sto-3g',
+                                         0,
+                                         0,
+                                         nele_cas=2,
+                                         norb_cas=3,
+                                         verbose=True)
+
+We specify:
+  - The geometry previously created
+  - The single particle basis set (here STO-3G)
+  - The total spin
+  - The total charge
+  - The number of electrons in the complete active space
+  - The number of orbitals in the complete active space
+  - A verbosity flag to help introspect the data that was generated.
+
+Along with the orbitals and Hamiltonian, we can also view various properties like the Hartree-Fock energy and the energy of the frozen core orbitals by printing `molecule.energies`.
+
+Natural Orbitals from MP2
+++++++++++++++++++++++++++
+Now we take our same N2 molecule, but generate natural orbitals from second-order Møller–Plesset perturbation theory as the basis.
+
+.. tab:: Python
+
+   .. code-block:: python
+
+      geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))]
+      molecule = solvers.create_molecule(geometry,
+                                         'sto-3g',
+                                         0,
+                                         0,
+                                         nele_cas=2,
+                                         norb_cas=3,
+                                         MP2=True,
+                                         integrals_natorb=True,
+                                         verbose=True)
+
+Note that we use the same API but toggle `MP2=True` and `integrals_natorb=True`.
+
+CASSCF Orbitals
++++++++++++++++
+
+Next, we can start from either Hartree-Fock or perturbation theory atomic orbitals and build complete active space self-consistent field (CASSCF) molecular orbitals.
+
+.. tab:: Python
+
+   .. 
code-block:: python + + geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=2, + norb_cas=3, + casscf=True, + integrals_casscf=True, + verbose=True) + +For Hartree-Fock, or + +.. tab:: Python + + .. code-block:: python + + geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=2, + norb_cas=3, + MP2=True, + natorb=True, + casscf=True, + integrals_casscf=True, + verbose=True) + +for MP2. In these cases, printing the `molecule.energies` also shows the `R-CASSCF` energy for the system. + +Now that we have seen how to generate basis sets and Hamiltonians for quantum chemistry systems, we can use these as inputs to hybrid quantum-classical methods like VQE or adapt VQE via the CUDA-Q Solvers API. + diff --git a/docs/sphinx/examples_rst/solvers/qaoa.rst b/docs/sphinx/examples_rst/solvers/qaoa.rst new file mode 100644 index 0000000..907587c --- /dev/null +++ b/docs/sphinx/examples_rst/solvers/qaoa.rst @@ -0,0 +1,60 @@ +Quantum Approximate Optimization Algorithm (QAOA) +------------------------------------------------- + +The Quantum Approximate Optimization Algorithm (QAOA) is a hybrid quantum-classical algorithm that solves combinatorial optimization problems. + +Key features of QAOA: + +- Hybrid approach: Utilizes both quantum and classical resources efficiently. +- Iterative optimization: Classical optimizer adjusts circuit parameters to minimize energy. +- NISQ compatibility: This algorithm is designed to run on the noisy quantum computers of today. +- Flexibility: Can be applied to various problems in quantum chemistry and optimization problems broadly. + +.. tab:: Python + + .. literalinclude:: ../../examples/solvers/python/molecular_docking_qaoa.py + :language: python + +.. tab:: C++ + + .. literalinclude:: ../../examples/solvers/cpp/molecular_docking_qaoa.cpp + :language: cpp + + Compile and run with + + .. code-block:: bash + + nvq++ --enable-mlir -lcudaq-solvers molecular_docking_qaoa.cpp -o molecular_docking_qaoa + ./molecular_docking_qaoa + +CUDA-Q Solvers Implementation ++++++++++++++++++++++++++++++ +Here's how to use CUDA-Q Solvers to solve the Maximum Clique Problem using QAOA: + +Code Explanation +++++++++++++++++ +1. Graph Creation: + - A NetworkX graph is created to represent the problem. + - Nodes and edges are added with specific weights. + +2. Clique Hamiltonian Generation: + - `solvers.get_clique_hamiltonian` is used to create the Hamiltonian for the Maximum Clique Problem. + - The penalty term and number of QAOA layers are defined. + +3. QAOA Parameter Setup: + - The number of required parameters is calculated using `solvers.get_num_qaoa_parameters`. + - Randomly generate initial parameters. + +4. QAOA Execution with `solvers.qaoa`: + - Call the solver with the Hamiltonian, number of QAOA layers, and whether you want full parametrization and counterdiabatic driving. + - Full parameterization: Uses an optimization parameter for every term in the clique Hamiltonian and the mixer Hamiltonian. + - Counterdiabatic driving: Adds extra Ry rotations at the end of each layer. + +5. Results Analysis: + - The optimal energy, sampled states, and most probable configuration are printed. + +This implementation showcases the power of CUDA-Q Solvers in solving combinatorial optimization problems using hybrid quantum-classical algorithms. 
+By using CUDA-Q Solvers with the networkx library, we very quickly set up and ran a QAOA application to compute optimal configurations for a molecular docking problem. + + + diff --git a/docs/sphinx/examples_rst/solvers/vqe.rst b/docs/sphinx/examples_rst/solvers/vqe.rst new file mode 100644 index 0000000..ccf9c36 --- /dev/null +++ b/docs/sphinx/examples_rst/solvers/vqe.rst @@ -0,0 +1,66 @@ +Variational Quantum Eigensolver (VQE) +------------------------------------- + +The Variational Quantum Eigensolver (VQE) is a hybrid quantum-classical algorithm designed to find the ground state energy of a quantum system. It combines quantum computation with classical optimization to iteratively improve an approximation of the ground state. + +Key features of VQE: + +- Hybrid approach: Utilizes both quantum and classical resources efficiently. +- Variational method: Uses a parameterized quantum circuit (ansatz) to prepare trial states. +- Iterative optimization: Classical optimizer adjusts circuit parameters to minimize energy. +- Flexibility: Can be applied to various problems in quantum chemistry and materials science. + +VQE Algorithm Overview: + +1. Prepare an initial quantum state using a parameterized circuit (ansatz). +2. Measure the expectation value of the Hamiltonian. +3. Use a classical optimizer to adjust circuit parameters. +4. Repeat steps 1-3 until convergence or a stopping criterion is met. + +CUDA-Q Solvers Implementation ++++++++++++++++++++++++++++++ + +CUDA-Q Solvers provides a high-level interface for running VQE simulations. Here's how to use it in both Python and C++: + +.. tab:: Python + + .. literalinclude:: ../../examples/solvers/python/uccsd_vqe.py + :language: python + +.. tab:: C++ + + .. literalinclude:: ../../examples/solvers/cpp/uccsd_vqe.cpp + :language: cpp + + Compile and run with + + .. code-block:: bash + + nvq++ --enable-mlir -lcudaq-solvers uccsd_vqe.cpp -o uccsd_vqe + ./uccsd_vqe + +Code Explanation +++++++++++++++++ + +1. Molecule Creation: + - Both examples start by defining the molecular geometry (H2 molecule). + - The `create_molecule` function generates the molecular Hamiltonian. + +2. Ansatz Definition: + - The UCCSD (Unitary Coupled Cluster Singles and Doubles) ansatz is used. + - In Python, it's defined as a `cudaq.kernel`. + - In C++, it's defined as a lambda function within the VQE call. + +3. VQE Execution: + - The `solvers.vqe` function (Python) or `solvers::vqe` (C++) is called. + - It takes the ansatz, Hamiltonian, initial parameters, and optimization settings. + +4. Optimization: + - Python uses SciPy's `minimize` function with L-BFGS-B method. + - C++ uses CUDA-Q Solvers' built-in optimizer. + - Either language can make use of CUDA-QX builtin optimizers. + +5. Results: + - Both versions print the final ground state energy. + +The CUDA-Q Solvers implementation of VQE provides a high-level interface that handles the quantum-classical hybrid optimization loop, making it easy to apply VQE to molecular systems. Users can focus on defining the problem (molecule and ansatz) while CUDA-Q Solvers manages the complex interaction between quantum and classical resources. 
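+
+Sketching the Hybrid Loop Directly
+++++++++++++++++++++++++++++++++++
+
+To make the algorithm overview above concrete, the sketch below spells out the same
+prepare-measure-optimize cycle using plain CUDA-Q and SciPy rather than the
+`solvers.vqe` convenience API. The two-qubit Hamiltonian and one-parameter ansatz are
+illustrative stand-ins, not the UCCSD/H2 problem from the examples, and the numbers
+carry no particular physical significance here.
+
+.. code-block:: python
+
+   import cudaq
+   from cudaq import spin
+   from scipy.optimize import minimize
+
+   # A toy two-qubit Hamiltonian used only to illustrate the loop.
+   hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) \
+       - 2.1433 * spin.y(0) * spin.y(1) \
+       + 0.21829 * spin.z(0) - 6.125 * spin.z(1)
+
+   # Step 1: a parameterized circuit (ansatz) prepares the trial state.
+   @cudaq.kernel
+   def ansatz(theta: float):
+       q = cudaq.qvector(2)
+       x(q[0])
+       ry(theta, q[1])
+       x.ctrl(q[1], q[0])
+
+   # Step 2: measure the expectation value of the Hamiltonian.
+   def energy(params):
+       return cudaq.observe(ansatz, hamiltonian, float(params[0])).expectation()
+
+   # Steps 3-4: the classical optimizer adjusts the parameter and repeats
+   # until convergence.
+   result = minimize(energy, [0.0], method='COBYLA')
+   print('Estimated ground state energy:', result.fun)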
diff --git a/docs/sphinx/index.rst b/docs/sphinx/index.rst new file mode 100644 index 0000000..9b2826c --- /dev/null +++ b/docs/sphinx/index.rst @@ -0,0 +1,66 @@ +CUDA-QX - The CUDA-Q Libraries Collection +========================================== + +CUDA-QX is a collection of libraries that build upon the CUDA-Q programming model +to enable the rapid development of hybrid quantum-classical application code leveraging +state-of-the-art CPUs, GPUs, and QPUs. It provides a collection of C++ +libraries and Python packages that enable research, development, and application +creation for use cases in quantum error correction and hybrid quantum-classical +solvers. + +.. toctree:: + :maxdepth: 2 + :caption: Getting Started + + quickstart/installation + +.. toctree:: + :maxdepth: 1 + :caption: Libraries + + components/qec/introduction + components/solvers/introduction + +.. toctree:: + :maxdepth: 2 + :caption: Examples + + examples_rst/solvers/examples + examples_rst/qec/examples + +.. toctree:: + :maxdepth: 1 + :caption: API Reference + + api/core/cpp_api + api/qec/cpp_api + api/qec/python_api + api/solvers/cpp_api + api/solvers/python_api + +Key Features +------------- + +CUDA-QX is composed of two distinct libraries that build upon CUDA-Q programming model. +The libraries provided are cudaq-qec, a library enabling performant research workflows +for quantum error correction, and cudaq-solvers, a library that provides high-level +APIs for common quantum-classical solver workflows. + +* **cudaq-qec**: Quantum Error Correction Library + * Extensible framework describing quantum error correcting codes as a collection of CUDA-Q kernels. + * Extensible framework for describing syndrome decoders + * State-of-the-art, performant decoder implementations on NVIDIA GPUs (coming soon) + * Pre-built numerical experiment APIs + +* **cudaq-solvers**: Performant Quantum-Classical Simulation Workflows + * Variational Quantum Eigensolver (VQE) + * ADAPT-VQE implementation that scales via CUDA-Q MQPU. + * Quantum Approximate Optimization Algorithm (QAOA) + * More to come... + +Indices and Tables +------------------ + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/sphinx/quickstart/installation.rst b/docs/sphinx/quickstart/installation.rst new file mode 100644 index 0000000..75cc26b --- /dev/null +++ b/docs/sphinx/quickstart/installation.rst @@ -0,0 +1,136 @@ +Installation Guide +================== + +Installation Methods +-------------------- + +CUDA-QX provides multiple installation methods to suit your needs: + +pip install +^^^^^^^^^^^^ + +The simplest way to install CUDA-QX is via pip. You can install individual components: + +.. code-block:: bash + + # Install QEC library + pip install cudaq-qec + + # Install Solvers library + pip install cudaq-solvers + + # Install both libraries + pip install cudaq-qec cudaq-solvers + +Docker Container +^^^^^^^^^^^^^^^^ + +CUDA-QX is available as a Docker container with all dependencies pre-installed: + +1. Pull the container: + +.. code-block:: bash + + docker pull ghcr.io/nvidia/cudaqx + +2. Run the container: + +.. 
code-block:: bash + + docker run --gpus all -it ghcr.io/nvidia/cudaqx + +The container includes: + * CUDA-Q compiler and runtime + * CUDA-QX libraries (QEC and Solvers) + * All required dependencies + * Example notebooks and tutorials + +Building from Source +^^^^^^^^^^^^^^^^^^^^ + +Prerequisites +~~~~~~~~~~~~~ + +Before building CUDA-QX from source, ensure your system meets the following requirements: + +* **CUDA-Q**: The NVIDIA quantum-classical programming model +* **CMake**: Version 3.28 or higher (``pip install cmake>=3.28``) +* **GCC**: Version 11 or higher +* **Python**: Version 3.10, 3.11, or 3.12 +* **NVIDIA GPU**: CUDA-capable GPU with compute capability 12.0 or higher +* **Git**: For cloning the repository + +Build Instructions +~~~~~~~~~~~~~~~~~~~ + +1. Clone the repository: + +.. code-block:: bash + + git clone https://github.com/nvidia/cudaqx + cd cudaqx + +2. Create and enter build directory: + +.. code-block:: bash + + mkdir build && cd build + +3. Configure with CMake: + +.. code-block:: bash + + cmake .. -G Ninja \ + -DCUDAQX_ENABLE_LIBS="all" \ + -DCUDAQX_INCLUDE_TESTS=ON \ + -DCUDAQX_BINDINGS_PYTHON=ON \ + -DCUDAQ_DIR=$HOME/.cudaq/lib/cmake/cudaq \ + -DCMAKE_CXX_FLAGS="-Wno-attributes" \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=$HOME/.cudaqx + +4. Build and install: + +.. code-block:: bash + + ninja install + +CMake Build Options +~~~~~~~~~~~~~~~~~~~~ + +* ``CUDAQX_ENABLE_LIBS``: Specify which libraries to build (``all``, ``qec``, ``solvers``) +* ``CUDAQX_INCLUDE_TESTS``: Enable building of tests +* ``CUDAQX_BINDINGS_PYTHON``: Enable Python bindings +* ``CUDAQ_DIR``: Path to CUDA-Q installation +* ``CMAKE_INSTALL_PREFIX``: Installation directory + +Verifying Installation +----------------------- + +To verify your installation, run the following Python code: + +.. code-block:: python + + import cudaq_qec as qec + import cudaq_solvers as solvers + + +Troubleshooting (Common Issues) +-------------------------------- + +1. **CMake configuration fails**: + * Ensure CUDA-Q is properly installed + * Verify CMake version (``cmake --version``) + * Check GCC version (``gcc --version``) + +2. **CUDA device not found**: + * Verify NVIDIA driver installation + * Check CUDA toolkit installation + * Ensure GPU compute capability is supported + +3. **Python bindings not found**: + * Confirm ``CUDAQX_BINDINGS_PYTHON=ON`` during build + * Check Python environment activation + * Verify installation path is in ``PYTHONPATH`` + +For additional support, please visit our `GitHub Issues `_ page. diff --git a/examples b/examples new file mode 120000 index 0000000..2abc49c --- /dev/null +++ b/examples @@ -0,0 +1 @@ +docs/sphinx/examples \ No newline at end of file diff --git a/libs/core/CMakeLists.txt b/libs/core/CMakeLists.txt new file mode 100644 index 0000000..93911f1 --- /dev/null +++ b/libs/core/CMakeLists.txt @@ -0,0 +1,73 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +# We need 3.28 because of the `EXCLUDE_FROM_ALL` in FetchContent_Declare +cmake_minimum_required(VERSION 3.28 FATAL_ERROR) + +# Project setup +# ============================================================================== + +# Check if core is built as a standalone project. +if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) + project(cudaqx-core) + set(CUDAQX_CORE_STANDALONE_BUILD TRUE) +endif() + +# The following must go after `project(...)` +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED TRUE) +set(CMAKE_POSITION_INDEPENDENT_CODE TRUE) + +set(CUDAQX_CORE_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) +set(CUDAQX_CORE_INCLUDE_DIR ${CUDAQX_CORE_SOURCE_DIR}/include) + +# Options +# ============================================================================== + +option(CUDAQX_CORE_INCLUDE_TESTS + "Generate build targets for the CUDA-QX Core unit tests." + ${CUDAQX_INCLUDE_TESTS}) + +# External Dependencies +# ============================================================================== + +include(FetchContent) + +FetchContent_Declare( + xtl + GIT_REPOSITORY https://github.com/xtensor-stack/xtl + GIT_TAG 0.7.7 + EXCLUDE_FROM_ALL +) + +FetchContent_Declare( + xtensor + GIT_REPOSITORY https://github.com/xtensor-stack/xtensor + GIT_TAG 0.25.0 + EXCLUDE_FROM_ALL +) + +FetchContent_Declare( + xtensor-blas + GIT_REPOSITORY https://github.com/xtensor-stack/xtensor-blas + GIT_TAG 0.21.0 + EXCLUDE_FROM_ALL +) + +FetchContent_MakeAvailable(xtl xtensor xtensor-blas) + +# Directory setup +# ============================================================================== + +add_subdirectory(lib) + +if (CUDAQX_CORE_INCLUDE_TESTS) + add_custom_target(CUDAQXCoreUnitTests) + add_subdirectory(unittests) +endif() + diff --git a/libs/core/include/cuda-qx/core/extension_point.h b/libs/core/include/cuda-qx/core/extension_point.h new file mode 100644 index 0000000..98a2dda --- /dev/null +++ b/libs/core/include/cuda-qx/core/extension_point.h @@ -0,0 +1,201 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once + +#include +#include +#include + +namespace cudaqx { + +/// @brief A template class for implementing an extension point mechanism. +/// +/// This class provides a framework for registering and retrieving plugin-like +/// extensions. It allows dynamic creation of objects based on registered types. +/// +/// @tparam T The base type of the extensions. +/// @tparam CtorArgs Variadic template parameters for constructor arguments. +/// +/// How to use the extension_point class +/// +/// The extension_point class provides a mechanism for creating extensible +/// frameworks with plugin-like functionality. Here's how to use it: +/// +/// 1. Define your extension point: +/// Create a new class that inherits from cudaq::extension_point. +/// This class should declare pure virtual methods that extensions will +/// implement. +/// +/// @code +/// class MyExtensionPoint : public cudaq::extension_point { +/// public: +/// virtual std::string parrotBack(const std::string &msg) const = 0; +/// }; +/// @endcode +/// +/// 2. 
Implement concrete extensions: +/// Create classes that inherit from your extension point and implement its +/// methods. Use the CUDAQ_EXTENSION_CREATOR_FUNCTION macro to define a +/// creator function. +/// +/// @code +/// class RepeatBackOne : public MyExtensionPoint { +/// public: +/// std::string parrotBack(const std::string &msg) const override { +/// return msg + " from RepeatBackOne."; +/// } +/// +/// CUDAQ_EXTENSION_CREATOR_FUNCTION(MyExtensionPoint, RepeatBackOne) +/// }; +/// @endcode +/// +/// 3. Register your extensions: +/// Use the CUDAQ_REGISTER_TYPE macro to register each extension. +/// +/// @code +/// CUDAQ_REGISTER_TYPE(RepeatBackOne) +/// @endcode +/// +/// 4. Use your extensions: +/// You can now create instances of your extensions, check registrations, and +/// more. +/// +/// @code +/// auto extension = MyExtensionPoint::get("RepeatBackOne"); +/// std::cout << extension->parrotBack("Hello") << std::endl; +/// +/// auto registeredTypes = MyExtensionPoint::get_registered(); +/// bool isRegistered = MyExtensionPoint::is_registered("RepeatBackOne"); +/// @endcode +/// +/// This approach allows for a flexible, extensible design where new +/// functionality can be added without modifying existing code. +template +class extension_point { + + /// Type alias for the creator function. + using CreatorFunction = std::function(CtorArgs...)>; + +protected: + /// @brief Get the registry of creator functions. + /// @return A reference to the static registry map. + /// See INSTANTIATE_REGISTRY() macros below for sample implementations that + /// need to be included in C++ source files. + static std::unordered_map &get_registry(); + +public: + /// @brief Create an instance of a registered extension. + /// @param name The identifier of the registered extension. + /// @param args Constructor arguments for the extension. + /// @return A unique pointer to the created instance. + /// @throws std::runtime_error if the extension is not found. + static std::unique_ptr get(const std::string &name, CtorArgs... args) { + auto ®istry = get_registry(); + auto iter = registry.find(name); + if (iter == registry.end()) + throw std::runtime_error("Cannot find extension with name = " + name); + + return iter->second(std::forward(args)...); + } + + /// @brief Get a list of all registered extension names. + /// @return A vector of registered extension names. + static std::vector get_registered() { + std::vector names; + auto ®istry = get_registry(); + for (auto &[k, v] : registry) + names.push_back(k); + return names; + } + + /// @brief Check if an extension is registered. + /// @param name The identifier of the extension to check. + /// @return True if the extension is registered, false otherwise. + static bool is_registered(const std::string &name) { + auto ®istry = get_registry(); + return registry.find(name) != registry.end(); + } +}; + +/// @brief Macro for defining a creator function for an extension. +/// @param BASE The base class of the extension. +/// @param TYPE The derived class implementing the extension. +#define CUDAQ_EXTENSION_CREATOR_FUNCTION(BASE, TYPE) \ + static inline bool register_type() { \ + auto ®istry = get_registry(); \ + registry[TYPE::class_identifier] = TYPE::create; \ + return true; \ + } \ + static const bool registered_; \ + static inline const std::string class_identifier = #TYPE; \ + static std::unique_ptr create() { return std::make_unique(); } + +/// @brief Macro for defining a custom creator function for an extension. 
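+///
+/// As an illustration (a sketch modeled on this repository's unit tests; the
+/// class names and constructor signature below are examples, not part of the
+/// core API), a derived type whose constructor takes arguments can supply its
+/// own creator through this macro:
+///
+/// @code
+/// class RepeatBackOneWithArgs : public MyExtensionPointWithArgs {
+/// public:
+///   using MyExtensionPointWithArgs::MyExtensionPointWithArgs;
+///
+///   CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION(
+///       RepeatBackOneWithArgs,
+///       static std::unique_ptr<MyExtensionPointWithArgs> create(int i, double d) {
+///         return std::make_unique<RepeatBackOneWithArgs>(i, d);
+///       })
+/// };
+/// CUDAQ_REGISTER_TYPE(RepeatBackOneWithArgs)
+/// @endcode
+///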
+/// @param TYPE The class implementing the extension. +/// @param ... Custom implementation of the create function. +#define CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION(TYPE, ...) \ + static inline bool register_type() { \ + auto ®istry = get_registry(); \ + registry[TYPE::class_identifier] = TYPE::create; \ + return true; \ + } \ + static const bool registered_; \ + static inline const std::string class_identifier = #TYPE; \ + __VA_ARGS__ + +#define CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION_WITH_NAME(TYPE, NAME, ...) \ + static inline bool register_type() { \ + auto ®istry = TYPE::get_registry(); \ + registry.insert({NAME, TYPE::create}); \ + return true; \ + } \ + static const bool registered_; \ + static inline const std::string class_identifier = #TYPE; \ + __VA_ARGS__ + +/// @brief Macro for registering an extension type. +/// @param TYPE The class to be registered as an extension. +#define CUDAQ_REGISTER_TYPE(TYPE) \ + const bool TYPE::registered_ = TYPE::register_type(); + +/// In order to support building CUDA-QX libraries with g++ and building +/// application code with nvq++ (which uses clang++ under the hood), you must +/// implement the templated get_registry() function for every set of +/// extension_point. This *must* be done in a C++ file that is built +/// with the CUDA-QX libraries. +/// +/// Use this version of the helper macro if the only template argument to +/// extension_point<> is the derived class (with no additional creator args). +#define INSTANTIATE_REGISTRY_NO_ARGS(FULL_TYPE_NAME) \ + template <> \ + std::unordered_map()>> & \ + cudaqx::extension_point::get_registry() { \ + static std::unordered_map< \ + std::string, std::function()>> \ + registry; \ + return registry; \ + } + +/// Use this variadic version of the helper macro if there are additional +/// arguments for the creator function. +#define INSTANTIATE_REGISTRY(FULL_TYPE_NAME, ...) \ + template <> \ + std::unordered_map< \ + std::string, \ + std::function(__VA_ARGS__)>> & \ + cudaqx::extension_point::get_registry() { \ + static std::unordered_map< \ + std::string, \ + std::function(__VA_ARGS__)>> \ + registry; \ + return registry; \ + } + +} // namespace cudaqx \ No newline at end of file diff --git a/libs/core/include/cuda-qx/core/graph.h b/libs/core/include/cuda-qx/core/graph.h new file mode 100644 index 0000000..851fadc --- /dev/null +++ b/libs/core/include/cuda-qx/core/graph.h @@ -0,0 +1,122 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#pragma once + +#include +#include +#include +#include +#include + +namespace cudaqx { + +/// @brief A class representing an undirected weighted graph +class graph { +private: + /// Adjacency list representation of the graph with weights + /// Maps node ID to vector of (neighbor_id, weight) pairs + std::unordered_map>> adjacency_list; + + /// Node weights storage + std::unordered_map node_weights; + + /// @brief Depth-first search helper function + /// @param node The starting node for DFS + /// @param visited Set of visited nodes + void dfs(int node, std::unordered_set &visited) const; + +public: + /// @brief Add a weighted edge between two nodes + /// @param u First node + /// @param v Second node + /// @param weight Edge weight + void add_edge(int u, int v, double weight = 1.0); + + /// @brief Add a node to the graph + /// @param node The node to add + void add_node(int node, double weight = 1.0); + + /// @brief Check if an edge exists between two nodes + /// @param i First node + /// @param j Second node + /// @return True if edge exists, false otherwise + bool edge_exists(int i, int j) const; + + /// @brief Set the weight of a node + /// @param node The node to set weight for + /// @param weight The weight value + void set_node_weight(int node, double weight); + + /// @brief Get the weight of a node + /// @param node The node to get weight for + /// @return Node weight, or 0.0 if node doesn't exist + double get_node_weight(int node) const; + + /// @brief Remove an edge between two nodes + /// @param u First node + /// @param v Second node + void remove_edge(int u, int v); + + /// @brief Remove a node and all its incident edges from the graph + /// @param node The node to remove + void remove_node(int node); + + /// @brief Get the neighbors of a node + /// @param node The node to get neighbors for + /// @return Vector of neighboring node IDs + std::vector get_neighbors(int node) const; + + /// @brief Get the neighbors of a node with their weights + /// @param node The node to get neighbors for + /// @return Vector of pairs containing (neighbor_id, weight) + std::vector> get_weighted_neighbors(int node) const; + + /// @brief Get all pairs of vertices that are not connected + /// @return Vector of pairs representing disconnected vertices + std::vector> get_disconnected_vertices() const; + + /// @brief Get all nodes in the graph + /// @return Vector of all nodes + std::vector get_nodes() const; + + /// @brief Get the number of nodes in the graph + /// @return Number of nodes + int num_nodes() const; + + /// @brief Get the number of edges in the graph + /// @return Number of edges + int num_edges() const; + + /// @brief Check if the graph is connected + /// @return True if the graph is connected, false otherwise + bool is_connected() const; + + /// @brief Get the degree of a node + /// @param node The node to get the degree for + /// @return Degree of the node + int get_degree(int node) const; + + /// @brief Get the weight of an edge between two nodes + /// @param u First node + /// @param v Second node + /// @return Edge weight, or -1 if edge doesn't exist + double get_edge_weight(int u, int v) const; + + /// @brief Update the weight of an existing edge + /// @param u First node + /// @param v Second node + /// @param weight New edge weight + /// @return True if edge exists and weight was updated, false otherwise + bool update_edge_weight(int u, int v, double weight); + + /// @brief Clear all nodes and 
edges from the graph + void clear(); +}; + +} // namespace cudaqx \ No newline at end of file diff --git a/libs/core/include/cuda-qx/core/heterogeneous_map.h b/libs/core/include/cuda-qx/core/heterogeneous_map.h new file mode 100644 index 0000000..5edfcc5 --- /dev/null +++ b/libs/core/include/cuda-qx/core/heterogeneous_map.h @@ -0,0 +1,206 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include +#include +#include +#include +#include + +#include "cuda-qx/core/tuple_utils.h" +#include "cuda-qx/core/type_traits.h" + +namespace cudaqx { + +/// @brief A class that implements a heterogeneous map allowing string keys to +/// be mapped to any value type +class heterogeneous_map { +private: + std::unordered_map items; + + /// @brief Check if an std::any object can be cast to a specific type + /// @tparam T The type to cast to + /// @param t The std::any object to check + /// @return true if castable, false otherwise + template + bool isCastable(const std::any &t) const { + try { + std::any_cast(t); + } catch (...) { + return false; + } + return true; + } + +public: + /// @brief Default constructor + heterogeneous_map() = default; + + /// @brief Copy constructor + /// @param _other The map to copy from + heterogeneous_map(const heterogeneous_map &_other) { *this = _other; } + + /// @brief Move constructor + /// @param _other The map to move from + heterogeneous_map(heterogeneous_map &_other) { *this = _other; } + + /// @brief Constructor from initializer list + /// @param list The initializer list of key-value pairs + heterogeneous_map( + const std::initializer_list> &list) { + for (auto &l : list) + insert(l.first, l.second); + } + + /// @brief Clear the map + void clear() { items.clear(); } + + /// @brief Assignment operator + /// @param _other The map to assign from + /// @return Reference to this map + heterogeneous_map &operator=(const heterogeneous_map &_other) { + clear(); + items = _other.items; + return *this; + } + + /// @brief Insert a key-value pair into the map + /// @tparam T The type of the value + /// @param key The key + /// @param value The value + template + void insert(const std::string &key, const T &value) { + auto iter = items.find(key); + if (iter == items.end()) { + // Never insert a raw char array or char ptr, + // auto conver to a string + if constexpr (is_bounded_char_array{}) { + items.insert({key, std::string(value)}); + return; + } + + items.insert({key, value}); + return; + } + + items.at(key) = value; + } + + /// @brief Get a value from the map + /// @tparam T The type of the value to retrieve + /// @param key The key of the value to retrieve + /// @return The value associated with the key + /// @throw std::runtime_error if the key is invalid or the type doesn't match + template , int> = 0> + const T get(const KeyT &key) const { + auto iter = items.find(key); + if (iter == items.end()) + throw std::runtime_error("Invalid key."); + + if (isCastable(iter->second)) + return std::any_cast(iter->second); + + // It may be that user has requested a value of + // a type that is "related" to its actual type, e.g. + // we have a value of type int, but request here is std::size_t. 
+ // Handle that case, by getting T's map of related types, and checking + // if any of them are valid. + using RelatedTypes = + typename RelatedTypesMap>::types; + std::optional opt; + cudaqx::tuple_for_each(RelatedTypes(), [&](auto &&el) { + if (!opt.has_value() && + isCastable>(iter->second)) + opt = std::any_cast>(iter->second); + }); + + if (opt.has_value()) + return opt.value(); + + // Can't find it, throw an exception + throw std::runtime_error( + "heterogeneous_map::get() error - Invalid type or key (" + + std::string(key) + ")."); + + return T(); + } + + /// @brief Get a value from the map, search for the value + /// from any of the provided string keys + /// @tparam T The type of the value to retrieve + /// @param keys The keys to search for the desired value. + /// @return The value associated with the key + /// @throw std::runtime_error if the key is invalid or the type doesn't match + template + const T get(const std::vector &keys) const { + for (auto &key : keys) { + try { + return get(key); + } catch (...) { + // do nothing + } + } + // Can't find it, throw an exception + auto keyStr = std::accumulate(keys.begin(), keys.end(), std::string(), + [](std::string ss, std::string s) { + return ss.empty() ? s : ss + "," + s; + }); + + throw std::runtime_error( + "heterogeneous_map::get(keys) error - Invalid keys (" + keyStr + ")."); + + return T(); + } + + template + const T get(const std::vector &keys, + const T &defaultValue) const { + for (auto &key : keys) { + try { + return get(key); + } catch (...) { + // do nothing + } + } + return defaultValue; + } + + /// @brief Get a value from the map with a default value + /// @tparam T The type of the value to retrieve + /// @param key The key of the value to retrieve + /// @param defaultValue The default value to return if the key is not found + /// @return The value associated with the key or the default value + template + const T get(const std::string key, const T &defaultValue) const { + try { + return get(key); + } catch (...) { + } + return defaultValue; + } + + /// @brief Get the size of the map + /// @return The number of key-value pairs in the map + std::size_t size() const { return items.size(); } + + /// @brief Check if the map contains a key + /// @param key The key to check + /// @return true if the key exists, false otherwise + bool contains(const std::string &key) const { return items.contains(key); } + bool contains(const std::vector &keys) const { + for (auto &key : keys) + if (items.contains(key)) + return true; + + return false; + } +}; + +} // namespace cudaqx \ No newline at end of file diff --git a/libs/core/include/cuda-qx/core/tear_down.h b/libs/core/include/cuda-qx/core/tear_down.h new file mode 100644 index 0000000..e108e9c --- /dev/null +++ b/libs/core/include/cuda-qx/core/tear_down.h @@ -0,0 +1,35 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once + +#include + +namespace cudaqx { + +/// @brief A base class for tear down services to be run at application +/// shutdown. +/// +/// This class is designed to be subclassed with concrete tear down routines. 
+/// Instances of subclasses should be submitted to a global registry which will +/// be run at application shutdown. +class tear_down { +public: + /// @brief Pure virtual function to be implemented by derived classes. + /// + /// This function should contain the specific tear down logic for each + /// service. + virtual void runTearDown() const = 0; + + virtual ~tear_down() = default; +}; + +/// @brief Schedule a new tear down routine +void scheduleTearDown(std::unique_ptr); + +} // namespace cudaqx \ No newline at end of file diff --git a/libs/core/include/cuda-qx/core/tensor.h b/libs/core/include/cuda-qx/core/tensor.h new file mode 100644 index 0000000..b4a445c --- /dev/null +++ b/libs/core/include/cuda-qx/core/tensor.h @@ -0,0 +1,231 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include "extension_point.h" +#include "tensor_impl.h" +#include "type_traits.h" + +namespace cudaqx { + +/// @brief A tensor class implementing the PIMPL idiom. +template > +class tensor { +private: + std::shared_ptr> pimpl; + + tensor mv_product(const tensor &vec) const { + if (rank() != 2 || vec.rank() != 1) { + throw std::runtime_error( + "Matrix-vector product requires rank-2 matrix and rank-1 vector"); + } + if (shape()[1] != vec.shape()[0]) { + throw std::runtime_error("Invalid dimensions for matrix-vector product"); + } + + tensor result({shape()[0]}); + pimpl->matrix_vector_product(vec.pimpl.get(), result.pimpl.get()); + return result; + } + +public: + /// @brief Type alias for the scalar type used in the tensor + using scalar_type = details::tensor_impl::scalar_type; + static constexpr auto ScalarAsString = type_to_string(); + + /// @brief Construct an empty tensor + tensor() + : pimpl(std::shared_ptr>( + details::tensor_impl::get( + std::string("xtensor") + std::string(ScalarAsString), {}) + .release())) {} + + /// @brief Construct a tensor with the given shape + /// @param shape The shape of the tensor + tensor(const std::vector &shape) + : pimpl(std::shared_ptr>( + details::tensor_impl::get( + std::string("xtensor") + std::string(ScalarAsString), shape) + .release())) {} + + /// @brief Construct a tensor with the given data and shape + /// @param data Pointer to the tensor data + /// @param shape The shape of the tensor + tensor(const scalar_type *data, const std::vector &shape) + : pimpl(std::shared_ptr>( + details::tensor_impl::get(std::string("xtensor") + + std::string(ScalarAsString), + data, shape) + .release())) {} + + /// @brief Get the rank of the tensor + /// @return The rank of the tensor + std::size_t rank() const { return pimpl->rank(); } + + /// @brief Get the total size of the tensor + /// @return The total number of elements in the tensor + std::size_t size() const { return pimpl->size(); } + + /// @brief Get the shape of the tensor + /// @return A vector containing the dimensions of the tensor + std::vector shape() const { return pimpl->shape(); } + + /// @brief Access a mutable element of the tensor + /// @param indices The indices of the element to access + /// @return A reference to the element at the specified indices + scalar_type &at(const std::vector &indices) { + if (indices.size() != rank()) + 
throw std::runtime_error("Invalid indices provided to tensor::at(), size " + "must be equal to rank."); + return pimpl->at(indices); + } + + /// @brief Access a const element of the tensor + /// @param indices The indices of the element to access + /// @return A const reference to the element at the specified indices + const scalar_type &at(const std::vector &indices) const { + return pimpl->at(indices); + } + + /// @brief Copy data into the tensor + /// @param data Pointer to the source data + /// @param shape The shape of the source data + void copy(const scalar_type *data, + const std::vector shape = {}) { + if (pimpl->shape().empty() && shape.empty()) + throw std::runtime_error( + "This tensor does not have a shape yet, must provide one to copy()"); + + pimpl->copy(data, pimpl->shape().empty() ? shape : pimpl->shape()); + } + + /// @brief Take ownership of the given data + /// @param data Pointer to the source data + /// @param shape The shape of the source data + void take(const scalar_type *data, + const std::vector shape = {}) { + if (pimpl->shape().empty() && shape.empty()) + throw std::runtime_error( + "This tensor does not have a shape yet, must provide one to take()"); + + pimpl->take(data, pimpl->shape().empty() ? shape : pimpl->shape()); + } + + /// @brief Borrow the given data without taking ownership + /// @param data Pointer to the source data + /// @param shape The shape of the source data + void borrow(const scalar_type *data, + const std::vector shape = {}) { + if (pimpl->shape().empty() && shape.empty()) + throw std::runtime_error("This tensor does not have a shape yet, must " + "provide one to borrow()"); + + pimpl->borrow(data, pimpl->shape().empty() ? shape : pimpl->shape()); + } + + // Scalar-resulting operations + Scalar sum_all() const { return pimpl->sum_all(); } + + // Boolean-resulting operations + bool any() const { return pimpl->any(); } + + // Elementwise operations + tensor operator+(const tensor &other) const { + if (shape() != other.shape()) { + throw std::runtime_error("Tensor shapes must match for addition"); + } + tensor result(shape()); + pimpl->elementwise_add(other.pimpl.get(), result.pimpl.get()); + return result; + } + + tensor operator*(const tensor &other) const { + if (shape() != other.shape()) { + throw std::runtime_error("Tensor shapes must match for multiplication"); + } + tensor result(shape()); + pimpl->elementwise_multiply(other.pimpl.get(), result.pimpl.get()); + return result; + } + + tensor operator%(const tensor &other) const { + if (shape() != other.shape()) { + throw std::runtime_error("Tensor shapes must match for modulo"); + } + tensor result(shape()); + pimpl->elementwise_modulo(other.pimpl.get(), result.pimpl.get()); + return result; + } + + // Tensor-Scalar operations + tensor operator%(Scalar value) const { + tensor result(shape()); + pimpl->scalar_modulo(value, result.pimpl.get()); + return result; + } + + // Matrix operations (rank-2 specific) + tensor dot(const tensor &other) const { + + if (rank() == 2 && other.rank() == 1) + return mv_product(other); + + if (rank() != 2 || other.rank() != 2) { + throw std::runtime_error("Dot product requires rank-2 tensors"); + } + if (shape()[1] != other.shape()[0]) { + throw std::runtime_error("Invalid matrix dimensions for dot product"); + } + + std::vector result_shape = {shape()[0], other.shape()[1]}; + tensor result(result_shape); + pimpl->matrix_dot(other.pimpl.get(), result.pimpl.get()); + return result; + } + + tensor transpose() const { + if (rank() != 2) { + throw 
std::runtime_error("Transpose requires rank-2 tensors"); + } + + std::vector result_shape = {shape()[1], shape()[0]}; + tensor result(result_shape); + pimpl->matrix_transpose(result.pimpl.get()); + return result; + } + + /// @brief Get a pointer to the raw data of the tensor. + /// + /// This method provides direct access to the underlying data storage of the + /// tensor. It returns a pointer to the first element of the data array. + /// + /// @return scalar_type* A pointer to the mutable data of the tensor. + /// + /// @note Care should be taken when directly manipulating the raw data to + /// avoid + /// invalidating the tensor's internal state or violating its + /// invariants. + scalar_type *data() { return pimpl->data(); } + + /// @brief Get a const pointer to the raw data of the tensor. + /// + /// This method provides read-only access to the underlying data storage of + /// the tensor. It returns a const pointer to the first element of the data + /// array. + /// + /// @return const scalar_type * A const pointer to the immutable data of the + /// tensor. + /// + /// @note This const version ensures that the tensor's data cannot be modified + /// through the returned pointer, preserving const correctness. + const scalar_type *data() const { return pimpl->data(); } + + void dump() const { pimpl->dump(); } +}; + +} // namespace cudaqx diff --git a/libs/core/include/cuda-qx/core/tensor_impl.h b/libs/core/include/cuda-qx/core/tensor_impl.h new file mode 100644 index 0000000..9b82cc6 --- /dev/null +++ b/libs/core/include/cuda-qx/core/tensor_impl.h @@ -0,0 +1,159 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ +#pragma once + +#include "extension_point.h" + +#include +#include +#include +#include +#include +namespace cudaqx::details { + +/// @brief Implementation class for tensor operations following the PIMPL idiom +template > +class tensor_impl : public extension_point, const Scalar *, + const std::vector> { +public: + /// @brief Type alias for the scalar type used in the tensor + using scalar_type = Scalar; + using BaseExtensionPoint = + extension_point, const Scalar *, + const std::vector>; + + /// @brief Create a tensor implementation with the given name and shape + /// @param name The name of the tensor implementation + /// @param shape The shape of the tensor + /// @return A unique pointer to the created tensor implementation + /// @throws std::runtime_error if the requested tensor implementation is + /// invalid + static std::unique_ptr> + get(const std::string &name, const std::vector &shape) { + auto ®istry = BaseExtensionPoint::get_registry(); + auto iter = registry.find(name); + if (iter == registry.end()) + throw std::runtime_error("invalid tensor_impl requested: " + name); + + if (shape.empty()) + return iter->second(nullptr, {}); + + std::size_t size = std::accumulate(shape.begin(), shape.end(), 1, + std::multiplies()); + scalar_type *data = new scalar_type[size](); + return iter->second(data, shape); + } + + /// @brief Create a tensor implementation with the given name, data, and shape + /// @param name The name of the tensor implementation + /// @param data Pointer to the tensor data + /// @param shape The shape of the tensor + /// @return A unique pointer to the created tensor implementation + /// @throws std::runtime_error if the requested tensor implementation is + /// invalid + static std::unique_ptr> + get(const std::string &name, const scalar_type *data, + const std::vector &shape) { + auto ®istry = BaseExtensionPoint::get_registry(); + auto iter = registry.find(name); + if (iter == registry.end()) + throw std::runtime_error("invalid tensor_impl requested: " + name); + return iter->second(data, shape); + } + + /// @brief Get the rank of the tensor + /// @return The rank of the tensor + virtual std::size_t rank() const = 0; + + /// @brief Get the total size of the tensor + /// @return The total number of elements in the tensor + virtual std::size_t size() const = 0; + + /// @brief Get the shape of the tensor + /// @return A vector containing the dimensions of the tensor + virtual std::vector shape() const = 0; + + /// @brief Access a mutable element of the tensor + /// @param indices The indices of the element to access + /// @return A reference to the element at the specified indices + virtual scalar_type &at(const std::vector &indices) = 0; + + /// @brief Access a const element of the tensor + /// @param indices The indices of the element to access + /// @return A const reference to the element at the specified indices + virtual const scalar_type &at(const std::vector &indices) const = 0; + + /// @brief Copy data into the tensor + /// @param data Pointer to the source data + /// @param shape The shape of the source data + virtual void copy(const scalar_type *data, + const std::vector &shape) = 0; + + /// @brief Take ownership of the given data + /// @param data Pointer to the source data + /// @param shape The shape of the source data + virtual void take(const scalar_type *data, + const std::vector &shape) = 0; + + /// @brief Borrow the given data without taking ownership + /// @param data 
Pointer to the source data + /// @param shape The shape of the source data + virtual void borrow(const scalar_type *data, + const std::vector &shape) = 0; + + virtual scalar_type sum_all() const = 0; + + virtual bool any() const = 0; + + virtual void elementwise_add(const tensor_impl *other, + tensor_impl *result) const = 0; + + virtual void elementwise_multiply(const tensor_impl *other, + tensor_impl *result) const = 0; + + virtual void elementwise_modulo(const tensor_impl *other, + tensor_impl *result) const = 0; + + virtual void scalar_modulo(Scalar value, + tensor_impl *result) const = 0; + + virtual void matrix_dot(const tensor_impl *other, + tensor_impl *result) const = 0; + + virtual void matrix_vector_product(const tensor_impl *vec, + tensor_impl *result) const = 0; + + virtual void matrix_transpose(tensor_impl *result) const = 0; + + /// @brief Get a pointer to the raw data of the tensor. + /// This method provides direct access to the underlying data storage of the + /// tensor. It returns a pointer to the first element of the data array. + /// + /// @return scalar_type* A pointer to the mutable data of the tensor. + /// @note Care should be taken when directly manipulating the raw data to + /// avoid invalidating the tensor's internal state or violating its + /// invariants. + virtual scalar_type *data() = 0; + + /// @brief Get a const pointer to the raw data of the tensor. + /// This method provides read-only access to the underlying data storage of + /// the tensor. It returns a const pointer to the first element of the data + /// array. + /// + /// @return const scalar_type * A const pointer to the immutable data of the + /// tensor. + /// @note This const version ensures that the tensor's data cannot be modified + /// through the returned pointer, preserving const correctness. + virtual const scalar_type *data() const = 0; + + virtual void dump() const = 0; + + virtual ~tensor_impl() = default; +}; + +} // namespace cudaqx::details diff --git a/libs/core/include/cuda-qx/core/tuple_utils.h b/libs/core/include/cuda-qx/core/tuple_utils.h new file mode 100644 index 0000000..735cdc1 --- /dev/null +++ b/libs/core/include/cuda-qx/core/tuple_utils.h @@ -0,0 +1,39 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ +#pragma once + +#include + +namespace cudaqx { + +template +void tuple_for_each( + TupleType &&, FunctionType, + std::integral_constant::type>::value>) {} +// Utility function for looping over tuple elements +template ::type>::value>::type> +// Utility function for looping over tuple elements +void tuple_for_each(TupleType &&t, FunctionType f, + std::integral_constant) { + f(std::get(t)); + tuple_for_each(std::forward(t), f, + std::integral_constant()); +} +// Utility function for looping over tuple elements +template +void tuple_for_each(TupleType &&t, FunctionType f) { + tuple_for_each(std::forward(t), f, + std::integral_constant()); +} + +} // namespace cudaqx \ No newline at end of file diff --git a/libs/core/include/cuda-qx/core/type_traits.h b/libs/core/include/cuda-qx/core/type_traits.h new file mode 100644 index 0000000..0322fbe --- /dev/null +++ b/libs/core/include/cuda-qx/core/type_traits.h @@ -0,0 +1,119 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include +#include +#include + +namespace cudaqx { +/// @brief A template struct for mapping related types +/// @tparam T The base type +template +struct RelatedTypesMap { + using types = std::tuple<>; +}; + +/// @brief Specialization of RelatedTypesMap for int +template <> +struct RelatedTypesMap { + using types = std::tuple; +}; + +/// @brief Specialization of RelatedTypesMap for std::size_t +template <> +struct RelatedTypesMap { + using types = std::tuple; +}; + +/// @brief Specialization of RelatedTypesMap for long +template <> +struct RelatedTypesMap { + using types = std::tuple; +}; + +/// @brief Specialization of RelatedTypesMap for short +template <> +struct RelatedTypesMap { + using types = std::tuple; +}; + +/// @brief Specialization of RelatedTypesMap for double +template <> +struct RelatedTypesMap { + using types = std::tuple; +}; + +/// @brief Specialization of RelatedTypesMap for float +template <> +struct RelatedTypesMap { + using types = std::tuple; +}; + +/// @brief Specialization of RelatedTypesMap for std::string +template <> +struct RelatedTypesMap { + using types = std::tuple; +}; + +/// @brief Type trait to check if a type is a bounded char array +template +struct is_bounded_char_array : std::false_type {}; + +/// @brief Specialization for bounded char arrays +template +struct is_bounded_char_array : std::true_type {}; + +/// @brief Type trait to check if a type is a bounded array +template +struct is_bounded_array : std::false_type {}; + +/// @brief Specialization for bounded arrays +template +struct is_bounded_array : std::true_type {}; + +// Primary template (for unsupported types) +template +constexpr std::string_view type_to_string() { + return "unknown"; +} + +// Specializations for common scalar types +template <> +constexpr std::string_view type_to_string() { + return "int"; +} + +template <> +constexpr std::string_view type_to_string() { + return "double"; +} + +template <> +constexpr std::string_view type_to_string() { + return "float"; +} + +template <> +constexpr std::string_view type_to_string() { + return "long"; +} +template <> 
+constexpr std::string_view type_to_string() { + return "stdsizet"; +} +template <> +constexpr std::string_view type_to_string>() { + return "complex"; +} +template <> +constexpr std::string_view type_to_string>() { + return "complex"; +} + +} // namespace cudaqx \ No newline at end of file diff --git a/libs/core/lib/CMakeLists.txt b/libs/core/lib/CMakeLists.txt new file mode 100644 index 0000000..ae2ea87 --- /dev/null +++ b/libs/core/lib/CMakeLists.txt @@ -0,0 +1,33 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +add_library(cudaqx-core STATIC + tear_down.cpp + graph.cpp + tensor_impls/xtensor_impl.cpp +) + +target_include_directories(cudaqx-core + PUBLIC + $ + $ +) + +target_link_libraries(cudaqx-core + PRIVATE + xtensor + xtensor-blas + fmt::fmt-header-only +) + +install(DIRECTORY ${CUDAQX_CORE_INCLUDE_DIR}/cuda-qx + DESTINATION "${CUDAQ_INCLUDE_DIR}" + COMPONENT cudaqx-core + FILES_MATCHING PATTERN "*.h" +) + diff --git a/libs/core/lib/graph.cpp b/libs/core/lib/graph.cpp new file mode 100644 index 0000000..dd158a2 --- /dev/null +++ b/libs/core/lib/graph.cpp @@ -0,0 +1,215 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cuda-qx/core/graph.h" + +#include + +namespace cudaqx { + +void graph::add_edge(int u, int v, double weight) { + // Check if the edge already exists + auto it_u = std::find_if(adjacency_list[u].begin(), adjacency_list[u].end(), + [v](const auto &p) { return p.first == v; }); + + if (it_u == adjacency_list[u].end()) { + // Edge doesn't exist, so add it + adjacency_list[u].push_back({v, weight}); + adjacency_list[v].push_back({u, weight}); + } + // If the edge already exists, do nothing +} + +void graph::clear() { + adjacency_list.clear(); + node_weights.clear(); +} + +void graph::add_node(int node, double weight) { + if (adjacency_list.find(node) == adjacency_list.end()) { + adjacency_list[node] = std::vector>(); + node_weights[node] = weight; + } +} + +void graph::set_node_weight(int node, double weight) { + node_weights[node] = weight; +} + +double graph::get_node_weight(int node) const { + auto it = node_weights.find(node); + return (it != node_weights.end()) ? 
it->second : 0.0; +} + +bool graph::edge_exists(int i, int j) const { + auto it_i = adjacency_list.find(i); + if (it_i != adjacency_list.end()) { + // Search for node j in i's adjacency list + return std::any_of(it_i->second.begin(), it_i->second.end(), + [j](const auto &pair) { return pair.first == j; }); + } + return false; +} + +std::vector> graph::get_disconnected_vertices() const { + std::vector> disconnected_pairs; + auto nodes = get_nodes(); + for (auto ni : nodes) + for (auto nj : nodes) + if (ni < nj) + if (!edge_exists(ni, nj)) + disconnected_pairs.push_back({ni, nj}); + + // Sort the pairs for consistent ordering + std::sort(disconnected_pairs.begin(), disconnected_pairs.end()); + + return disconnected_pairs; +} + +std::vector graph::get_neighbors(int node) const { + auto it = adjacency_list.find(node); + if (it != adjacency_list.end()) { + std::vector neighbors; + neighbors.reserve(it->second.size()); + + // Extract node IDs from pairs + for (const auto &pair : it->second) { + neighbors.push_back(pair.first); + } + + // Sort to ensure consistent ordering + std::sort(neighbors.begin(), neighbors.end()); + + return neighbors; + } + return std::vector(); +} + +std::vector> +graph::get_weighted_neighbors(int node) const { + auto it = adjacency_list.find(node); + if (it != adjacency_list.end()) { + return it->second; + } + return std::vector>(); +} +std::vector graph::get_nodes() const { + std::vector nodes; + for (const auto &pair : adjacency_list) { + nodes.push_back(pair.first); + } + return nodes; +} + +int graph::num_nodes() const { return adjacency_list.size(); } + +int graph::num_edges() const { + int total = 0; + for (const auto &pair : adjacency_list) { + total += pair.second.size(); + } + return total / 2; // Each edge is counted twice +} + +void graph::remove_edge(int u, int v) { + auto it_u = adjacency_list.find(u); + auto it_v = adjacency_list.find(v); + + if (it_u != adjacency_list.end()) { + it_u->second.erase( + std::remove_if(it_u->second.begin(), it_u->second.end(), + [v](const auto &p) { return p.first == v; }), + it_u->second.end()); + } + + if (it_v != adjacency_list.end()) { + it_v->second.erase( + std::remove_if(it_v->second.begin(), it_v->second.end(), + [u](const auto &p) { return p.first == u; }), + it_v->second.end()); + } +} +void graph::remove_node(int node) { + adjacency_list.erase(node); + node_weights.erase(node); + for (auto &pair : adjacency_list) { + pair.second.erase( + std::remove_if(pair.second.begin(), pair.second.end(), + [node](const auto &p) { return p.first == node; }), + pair.second.end()); + } +} +void graph::dfs(int node, std::unordered_set &visited) const { + visited.insert(node); + for (int neighbor : get_neighbors(node)) { + if (visited.find(neighbor) == visited.end()) { + dfs(neighbor, visited); + } + } +} + +bool graph::is_connected() const { + if (adjacency_list.empty()) { + return true; // Empty graph is considered connected + } + + std::unordered_set visited; + int start_node = adjacency_list.begin()->first; + dfs(start_node, visited); + + return visited.size() == adjacency_list.size(); +} + +int graph::get_degree(int node) const { + auto it = adjacency_list.find(node); + if (it != adjacency_list.end()) { + return it->second.size(); + } + return 0; // Node not found +} + +double graph::get_edge_weight(int u, int v) const { + auto it_u = adjacency_list.find(u); + if (it_u != adjacency_list.end()) { + auto edge_it = std::find_if(it_u->second.begin(), it_u->second.end(), + [v](const auto &p) { return p.first == v; }); + if (edge_it 
!= it_u->second.end()) { + return edge_it->second; + } + } + return -1.0; // Edge not found +} + +bool graph::update_edge_weight(int u, int v, double weight) { + auto it_u = adjacency_list.find(u); + auto it_v = adjacency_list.find(v); + + bool updated = false; + + if (it_u != adjacency_list.end()) { + auto edge_it = std::find_if(it_u->second.begin(), it_u->second.end(), + [v](const auto &p) { return p.first == v; }); + if (edge_it != it_u->second.end()) { + edge_it->second = weight; + updated = true; + } + } + + if (it_v != adjacency_list.end()) { + auto edge_it = std::find_if(it_v->second.begin(), it_v->second.end(), + [u](const auto &p) { return p.first == u; }); + if (edge_it != it_v->second.end()) { + edge_it->second = weight; + updated = true; + } + } + + return updated; +} + +} // namespace cudaqx \ No newline at end of file diff --git a/libs/core/lib/tear_down.cpp b/libs/core/lib/tear_down.cpp new file mode 100644 index 0000000..a73c5a1 --- /dev/null +++ b/libs/core/lib/tear_down.cpp @@ -0,0 +1,27 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include "cuda-qx/core/tear_down.h" +#include +#include +#include + +namespace cudaqx { +class tear_down_holder { +public: + std::vector> services; + ~tear_down_holder() { + for (auto &s : services) + s->runTearDown(); + } +}; + +static tear_down_holder holder; +void scheduleTearDown(std::unique_ptr service) { + holder.services.emplace_back(std::move(service)); +} +} // namespace cudaqx diff --git a/libs/core/lib/tensor_impls/xtensor_impl.cpp b/libs/core/lib/tensor_impls/xtensor_impl.cpp new file mode 100644 index 0000000..eb96b4b --- /dev/null +++ b/libs/core/lib/tensor_impls/xtensor_impl.cpp @@ -0,0 +1,315 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "cuda-qx/core/tensor_impl.h" +#include "cuda-qx/core/type_traits.h" + +#include +#include +#include + +#include + +namespace cudaqx { + +/// @brief An implementation of tensor_impl using xtensor library +template +class xtensor : public cudaqx::details::tensor_impl { +private: + Scalar *m_data = nullptr; ///< Pointer to the tensor data + std::vector m_shape; ///< Shape of the tensor + bool ownsData = true; ///< Flag indicating if this object owns the data + + /// @brief Check if the given indices are valid for this tensor + /// @param idxs Vector of indices to check + /// @return true if indices are valid, false otherwise + bool validIndices(const std::vector &idxs) const { + if (idxs.size() != m_shape.size()) + return false; + for (std::size_t dim = 0; auto idx : idxs) + if (idx < 0 || idx >= m_shape[dim++]) + return false; + return true; + } + +public: + /// @brief Constructor for xtensor + /// @param d Pointer to the tensor data + /// @param s Shape of the tensor + xtensor(const Scalar *d, const std::vector &s) + : m_data(const_cast(d)), m_shape(s) {} + + /// @brief Get the rank of the tensor + /// @return The rank (number of dimensions) of the tensor + std::size_t rank() const override { return m_shape.size(); } + + /// @brief Get the total size of the tensor + /// @return The total number of elements in the tensor + std::size_t size() const override { + if (rank() == 0) + return 0; + return std::accumulate(m_shape.begin(), m_shape.end(), 1, + std::multiplies()); + } + + /// @brief Get the shape of the tensor + /// @return A vector containing the dimensions of the tensor + std::vector shape() const override { return m_shape; } + + /// @brief Access a mutable element of the tensor + /// @param indices The indices of the element to access + /// @return A reference to the element at the specified indices + /// @throws std::runtime_error if indices are invalid + Scalar &at(const std::vector &indices) override { + if (!validIndices(indices)) + throw std::runtime_error("Invalid tensor indices: " + + fmt::format("{}", fmt::join(indices, ", "))); + + return xt::adapt(m_data, size(), xt::no_ownership(), m_shape)[indices]; + } + + /// @brief Access a const element of the tensor + /// @param indices The indices of the element to access + /// @return A const reference to the element at the specified indices + /// @throws std::runtime_error if indices are invalid + const Scalar &at(const std::vector &indices) const override { + if (!validIndices(indices)) + throw std::runtime_error("Invalid constant tensor indices: " + + fmt::format("{}", fmt::join(indices, ", "))); + return xt::adapt(m_data, size(), xt::no_ownership(), m_shape)[indices]; + } + + /// @brief Copy data into the tensor + /// @param d Pointer to the source data + /// @param shape The shape of the source data + void copy(const Scalar *d, const std::vector &shape) override { + auto size = std::accumulate(shape.begin(), shape.end(), 1, + std::multiplies()); + if (m_data) + std::free(m_data); + + m_data = m_data = new Scalar[size]; + std::copy(d, d + size, m_data); + m_shape = shape; + ownsData = true; + } + + /// @brief Take ownership of the given data + /// @param d Pointer to the source data + /// @param shape The shape of the source data + void take(const Scalar *d, const std::vector &shape) override { + m_data = const_cast(d); + m_shape = shape; + ownsData = true; + } + + /// @brief Borrow the given data without taking ownership + /// 
@param d Pointer to the source data + /// @param shape The shape of the source data + void borrow(const Scalar *d, const std::vector &shape) override { + m_data = const_cast(d); + m_shape = shape; + ownsData = false; + } + + /// @brief Sum all elements of the tensor + /// @return A scalar sum of all elements of the tensor + Scalar sum_all() const override { + auto x = xt::adapt(m_data, size(), xt::no_ownership(), m_shape); + return xt::sum(x)[0]; + } + + /// @brief Check if any values are non-zero + /// @return Returns true if any value is truthy, false otherwise + bool any() const override { + auto x = xt::adapt(m_data, size(), xt::no_ownership(), m_shape); + bool result; + // For non-complex types, use regular bool casting + if constexpr (std::is_integral_v) { + result = xt::any(x); + } + // For complex types, implement custom ny + else { + throw std::runtime_error("any() not supported on non-integral types."); + } + + return result; + } + + void elementwise_add(const details::tensor_impl *other, + details::tensor_impl *result) const override { + auto *other_xt = dynamic_cast *>(other); + auto *result_xt = dynamic_cast *>(result); + + if (!other_xt || !result_xt) { + throw std::runtime_error("Invalid tensor implementation type"); + } + + auto x = xt::adapt(m_data, size(), xt::no_ownership(), m_shape); + auto y = xt::adapt(other_xt->data(), other_xt->size(), xt::no_ownership(), + other_xt->shape()); + auto z = x + y; + std::copy(z.begin(), z.end(), result_xt->data()); + } + + void + elementwise_multiply(const details::tensor_impl *other, + details::tensor_impl *result) const override { + auto *other_xt = dynamic_cast *>(other); + auto *result_xt = dynamic_cast *>(result); + + if (!other_xt || !result_xt) { + throw std::runtime_error("Invalid tensor implementation type"); + } + + auto x = xt::adapt(m_data, size(), xt::no_ownership(), m_shape); + auto y = xt::adapt(other_xt->data(), other_xt->size(), xt::no_ownership(), + other_xt->shape()); + auto z = x * y; + std::copy(z.begin(), z.end(), result_xt->data()); + } + + void elementwise_modulo(const details::tensor_impl *other, + details::tensor_impl *result) const override { + auto *other_xt = dynamic_cast *>(other); + auto *result_xt = dynamic_cast *>(result); + + if (!other_xt || !result_xt) { + throw std::runtime_error("Invalid tensor implementation type"); + } + + // For non-complex types, use regular modulo + if constexpr (std::is_integral_v) { + auto x = xt::adapt(m_data, size(), xt::no_ownership(), m_shape); + auto y = xt::adapt(other_xt->data(), other_xt->size(), xt::no_ownership(), + other_xt->shape()); + auto z = x % y; + std::copy(z.begin(), z.end(), result_xt->data()); + } + // For complex types, implement custom modulo + else { + throw std::runtime_error("modulo not supported on non-integral types."); + } + } + + void scalar_modulo(Scalar value, + details::tensor_impl *result) const override { + auto *result_xt = dynamic_cast *>(result); + + // For non-complex types, use regular modulo + if constexpr (std::is_integral_v) { + auto x = xt::adapt(m_data, size(), xt::no_ownership(), m_shape); + auto z = x % value; + std::copy(z.begin(), z.end(), result_xt->data()); + } + // For complex types, implement custom modulo + else { + throw std::runtime_error("modulo not supported on non-integral types."); + } + } + + void matrix_dot(const details::tensor_impl *other, + details::tensor_impl *result) const override { + auto *other_xt = dynamic_cast *>(other); + auto *result_xt = dynamic_cast *>(result); + + if (!other_xt || !result_xt) { + 
throw std::runtime_error("Invalid tensor implementation type"); + } + + auto x = xt::adapt(m_data, size(), xt::no_ownership(), m_shape); + auto y = xt::adapt(other_xt->data(), other_xt->size(), xt::no_ownership(), + other_xt->shape()); + auto z = xt::linalg::dot(x, y); + std::copy(z.begin(), z.end(), result_xt->data()); + } + + void + matrix_vector_product(const details::tensor_impl *vec, + details::tensor_impl *result) const override { + auto *vec_xt = dynamic_cast *>(vec); + auto *result_xt = dynamic_cast *>(result); + + if (!vec_xt || !result_xt) { + throw std::runtime_error("Invalid tensor implementation type"); + } + + auto x = xt::adapt(m_data, size(), xt::no_ownership(), m_shape); + auto v = xt::adapt(vec_xt->data(), vec_xt->size(), xt::no_ownership(), + vec_xt->shape()); + auto z = xt::linalg::dot(x, v); + std::copy(z.begin(), z.end(), result_xt->data()); + } + + void matrix_transpose(details::tensor_impl *result) const override { + auto *result_xt = dynamic_cast *>(result); + + auto x = xt::adapt(m_data, size(), xt::no_ownership(), m_shape); + auto z = xt::transpose(x, {1, 0}); + std::copy(z.begin(), z.end(), result_xt->data()); + } + + Scalar *data() override { return m_data; } + const Scalar *data() const override { return m_data; } + void dump() const override { + std::cerr << xt::adapt(m_data, size(), xt::no_ownership(), m_shape) << '\n'; + } + + static constexpr auto ScalarAsString = cudaqx::type_to_string(); + + /// @brief Custom creator function for xtensor + /// @param d Pointer to the tensor data + /// @param s Shape of the tensor + /// @return A unique pointer to the created xtensor object + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION_WITH_NAME( + xtensor, std::string("xtensor") + std::string(ScalarAsString), + static std::unique_ptr> create( + const Scalar *d, const std::vector s) { + return std::make_unique>(d, s); + }) + + /// @brief Destructor for xtensor + ~xtensor() { + if (ownsData) + delete m_data; + } +}; + +/// @brief Register the xtensor types + +#define INSTANTIATE_REGISTRY_TENSOR_IMPL(TYPE) \ + INSTANTIATE_REGISTRY(cudaqx::details::tensor_impl, const TYPE *, \ + const std::vector) + +INSTANTIATE_REGISTRY_TENSOR_IMPL(std::complex) +INSTANTIATE_REGISTRY_TENSOR_IMPL(std::complex) +INSTANTIATE_REGISTRY_TENSOR_IMPL(int) +INSTANTIATE_REGISTRY_TENSOR_IMPL(uint8_t) +INSTANTIATE_REGISTRY_TENSOR_IMPL(double) +INSTANTIATE_REGISTRY_TENSOR_IMPL(float) +INSTANTIATE_REGISTRY_TENSOR_IMPL(std::size_t) + +template <> +const bool xtensor>::registered_ = + xtensor>::register_type(); +template <> +const bool xtensor>::registered_ = + xtensor>::register_type(); +template <> +const bool xtensor::registered_ = xtensor::register_type(); +template <> +const bool xtensor::registered_ = xtensor::register_type(); +template <> +const bool xtensor::registered_ = xtensor::register_type(); +template <> +const bool xtensor::registered_ = xtensor::register_type(); +template <> +const bool xtensor::registered_ = + xtensor::register_type(); + +} // namespace cudaqx diff --git a/libs/core/unittests/CMakeLists.txt b/libs/core/unittests/CMakeLists.txt new file mode 100644 index 0000000..b930fe7 --- /dev/null +++ b/libs/core/unittests/CMakeLists.txt @@ -0,0 +1,37 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +# External Dependencies +# ============================================================================== + +FetchContent_Declare( + googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG v1.15.2 + EXCLUDE_FROM_ALL +) +FetchContent_MakeAvailable(googletest) + +set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + +# Bug in GCC 12 leads to spurious warnings (-Wrestrict) +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105329 +if (CMAKE_COMPILER_IS_GNUCXX + AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 12.0.0 + AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 13.0.0) + target_compile_options(gtest PUBLIC --param=evrp-mode=legacy) +endif() +include(GoogleTest) + +# ============================================================================== + +add_executable(test_core test_core.cpp) +target_link_libraries(test_core PRIVATE GTest::gtest_main cudaqx-core) +add_dependencies(CUDAQXCoreUnitTests test_core) +gtest_discover_tests(test_core) + diff --git a/libs/core/unittests/test_core.cpp b/libs/core/unittests/test_core.cpp new file mode 100644 index 0000000..639154e --- /dev/null +++ b/libs/core/unittests/test_core.cpp @@ -0,0 +1,1121 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include "cuda-qx/core/extension_point.h" +#include "cuda-qx/core/graph.h" +#include "cuda-qx/core/heterogeneous_map.h" +#include "cuda-qx/core/tensor.h" + +#include +#include + +#include + +namespace cudaqx::testing { + +// Define a new extension point for the framework +class MyExtensionPoint : public extension_point { +public: + virtual std::string parrotBack(const std::string &msg) const = 0; + virtual ~MyExtensionPoint() = default; +}; + +} // namespace cudaqx::testing + +INSTANTIATE_REGISTRY_NO_ARGS(cudaqx::testing::MyExtensionPoint) + +namespace cudaqx::testing { + +// Define a concrete realization of that extension point +class RepeatBackOne : public MyExtensionPoint { +public: + std::string parrotBack(const std::string &msg) const override { + return msg + " from RepeatBackOne."; + } + + // Extension must provide a creator function + CUDAQ_EXTENSION_CREATOR_FUNCTION(MyExtensionPoint, RepeatBackOne) +}; + +// Extensions must register themselves +CUDAQ_REGISTER_TYPE(RepeatBackOne) + +class RepeatBackTwo : public MyExtensionPoint { +public: + std::string parrotBack(const std::string &msg) const override { + return msg + " from RepeatBackTwo."; + } + CUDAQ_EXTENSION_CREATOR_FUNCTION(MyExtensionPoint, RepeatBackTwo) +}; +CUDAQ_REGISTER_TYPE(RepeatBackTwo) + +} // namespace cudaqx::testing + +TEST(CoreTester, checkSimpleExtensionPoint) { + + auto registeredNames = cudaqx::testing::MyExtensionPoint::get_registered(); + EXPECT_EQ(registeredNames.size(), 2); + EXPECT_TRUE(std::find(registeredNames.begin(), registeredNames.end(), + "RepeatBackTwo") != registeredNames.end()); + EXPECT_TRUE(std::find(registeredNames.begin(), registeredNames.end(), + "RepeatBackOne") != registeredNames.end()); + EXPECT_TRUE(std::find(registeredNames.begin(), registeredNames.end(), + "RepeatBackThree") == registeredNames.end()); + + { + auto var = 
cudaqx::testing::MyExtensionPoint::get("RepeatBackOne"); + EXPECT_EQ(var->parrotBack("Hello World"), + "Hello World from RepeatBackOne."); + } + { + auto var = cudaqx::testing::MyExtensionPoint::get("RepeatBackTwo"); + EXPECT_EQ(var->parrotBack("Hello World"), + "Hello World from RepeatBackTwo."); + } +} + +namespace cudaqx::testing { + +class MyExtensionPointWithArgs + : public extension_point { +protected: + int i; + double d; + +public: + MyExtensionPointWithArgs(int i, double d) : i(i), d(d) {} + virtual std::tuple parrotBack() const = 0; + virtual ~MyExtensionPointWithArgs() = default; +}; + +} // namespace cudaqx::testing + +INSTANTIATE_REGISTRY(cudaqx::testing::MyExtensionPointWithArgs, int, double) + +namespace cudaqx::testing { + +class RepeatBackOneWithArgs : public MyExtensionPointWithArgs { +public: + using MyExtensionPointWithArgs::MyExtensionPointWithArgs; + std::tuple parrotBack() const override { + return std::make_tuple(i, d, "RepeatBackOne"); + } + + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( + RepeatBackOneWithArgs, + static std::unique_ptr create(int i, double d) { + return std::make_unique(i, d); + }) +}; + +CUDAQ_REGISTER_TYPE(RepeatBackOneWithArgs) + +class RepeatBackTwoWithArgs : public MyExtensionPointWithArgs { +public: + using MyExtensionPointWithArgs::MyExtensionPointWithArgs; + std::tuple parrotBack() const override { + return std::make_tuple(i, d, "RepeatBackTwo"); + } + + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( + RepeatBackTwoWithArgs, + static std::unique_ptr create(int i, double d) { + return std::make_unique(i, d); + }) +}; + +CUDAQ_REGISTER_TYPE(RepeatBackTwoWithArgs) + +} // namespace cudaqx::testing + +TEST(CoreTester, checkSimpleExtensionPointWithArgs) { + + auto registeredNames = + cudaqx::testing::MyExtensionPointWithArgs::get_registered(); + EXPECT_EQ(registeredNames.size(), 2); + EXPECT_TRUE(std::find(registeredNames.begin(), registeredNames.end(), + "RepeatBackTwoWithArgs") != registeredNames.end()); + EXPECT_TRUE(std::find(registeredNames.begin(), registeredNames.end(), + "RepeatBackOneWithArgs") != registeredNames.end()); + EXPECT_TRUE(std::find(registeredNames.begin(), registeredNames.end(), + "RepeatBackThree") == registeredNames.end()); + + { + auto var = cudaqx::testing::MyExtensionPointWithArgs::get( + "RepeatBackOneWithArgs", 5, 2.2); + auto [i, d, msg] = var->parrotBack(); + EXPECT_EQ(msg, "RepeatBackOne"); + EXPECT_EQ(i, 5); + EXPECT_NEAR(d, 2.2, 1e-2); + } + { + auto var = cudaqx::testing::MyExtensionPointWithArgs::get( + "RepeatBackTwoWithArgs", 15, 12.2); + auto [i, d, msg] = var->parrotBack(); + EXPECT_EQ(msg, "RepeatBackTwo"); + EXPECT_EQ(i, 15); + EXPECT_NEAR(d, 12.2, 1e-2); + } +} + +TEST(CoreTester, checkTensorSimple) { + auto registeredNames = cudaqx::details::tensor_impl<>::get_registered(); + EXPECT_EQ(registeredNames.size(), 1); + EXPECT_TRUE(std::find(registeredNames.begin(), registeredNames.end(), + "xtensorcomplex") != registeredNames.end()); + + { + cudaqx::tensor t({1, 2, 1}); + EXPECT_EQ(t.rank(), 3); + EXPECT_EQ(t.size(), 2); + for (std::size_t i = 0; i < 1; i++) + for (std::size_t j = 0; j < 2; j++) + for (std::size_t k = 0; k < 1; k++) + EXPECT_NEAR(t.at({i, j, k}).real(), 0.0, 1e-8); + + t.at({0, 1, 0}) = 2.2; + EXPECT_NEAR(t.at({0, 1, 0}).real(), 2.2, 1e-8); + + EXPECT_ANY_THROW({ t.at({2, 2, 2}); }); + } + + { + cudaqx::tensor t({2, 2}); + EXPECT_EQ(t.rank(), 2); + EXPECT_EQ(t.size(), 4); + std::vector> data{1, 2, 3, 4}; + t.copy(data.data(), {2, 2}); + EXPECT_NEAR(t.at({0, 0}).real(), 1., 1e-8); + 
EXPECT_NEAR(t.at({0, 1}).real(), 2., 1e-8); + EXPECT_NEAR(t.at({1, 0}).real(), 3., 1e-8); + EXPECT_NEAR(t.at({1, 1}).real(), 4., 1e-8); + } + { + cudaqx::tensor t({2, 2}); + EXPECT_EQ(t.rank(), 2); + EXPECT_EQ(t.size(), 4); + std::vector> data{1, 2, 3, 4}; + t.copy(data.data()); + EXPECT_NEAR(t.at({0, 0}).real(), 1., 1e-8); + EXPECT_NEAR(t.at({0, 1}).real(), 2., 1e-8); + EXPECT_NEAR(t.at({1, 0}).real(), 3., 1e-8); + EXPECT_NEAR(t.at({1, 1}).real(), 4., 1e-8); + } + { + cudaqx::tensor t({2, 2}); + EXPECT_EQ(t.rank(), 2); + EXPECT_EQ(t.size(), 4); + std::vector> data{1, 2, 3, 4}; + t.borrow(data.data(), {2, 2}); + EXPECT_NEAR(t.at({0, 0}).real(), 1., 1e-8); + EXPECT_NEAR(t.at({0, 1}).real(), 2., 1e-8); + EXPECT_NEAR(t.at({1, 0}).real(), 3., 1e-8); + EXPECT_NEAR(t.at({1, 1}).real(), 4., 1e-8); + } + + { + cudaqx::tensor t({2, 2}); + EXPECT_EQ(t.rank(), 2); + EXPECT_EQ(t.size(), 4); + std::vector> data{1, 2, 3, 4}; + t.borrow(data.data()); + EXPECT_NEAR(t.at({0, 0}).real(), 1., 1e-8); + EXPECT_NEAR(t.at({0, 1}).real(), 2., 1e-8); + EXPECT_NEAR(t.at({1, 0}).real(), 3., 1e-8); + EXPECT_NEAR(t.at({1, 1}).real(), 4., 1e-8); + } + { + cudaqx::tensor t; + std::vector> data{1, 2, 3, 4}; + EXPECT_THROW({ t.borrow(data.data()); }, std::runtime_error); + } + { + cudaqx::tensor t; + std::vector> data{1, 2, 3, 4}; + EXPECT_THROW({ t.copy(data.data()); }, std::runtime_error); + } + { + cudaqx::tensor t; + std::vector> data{1, 2, 3, 4}; + EXPECT_THROW({ t.take(data.data()); }, std::runtime_error); + } + { + cudaqx::tensor t({2, 2}); + EXPECT_EQ(t.rank(), 2); + EXPECT_EQ(t.size(), 4); + std::complex *data = new std::complex[4]; + double count = 1.0; + std::generate_n(data, 4, [&]() { return count++; }); + t.take(data, {2, 2}); + EXPECT_NEAR(t.at({0, 0}).real(), 1., 1e-8); + EXPECT_NEAR(t.at({0, 1}).real(), 2., 1e-8); + EXPECT_NEAR(t.at({1, 0}).real(), 3., 1e-8); + EXPECT_NEAR(t.at({1, 1}).real(), 4., 1e-8); + } + + { + cudaqx::tensor t({1, 2, 1}); + EXPECT_EQ(t.rank(), 3); + EXPECT_EQ(t.size(), 2); + for (std::size_t i = 0; i < 1; i++) + for (std::size_t j = 0; j < 2; j++) + for (std::size_t k = 0; k < 1; k++) + EXPECT_NEAR(t.at({i, j, k}), 0.0, 1e-8); + + t.at({0, 1, 0}) = 2; + EXPECT_EQ(t.at({0, 1, 0}), 2); + + EXPECT_ANY_THROW({ t.at({2, 2, 2}); }); + } +} + +// Test elementwise operations +TEST(TensorTest, ElementwiseAddition) { + cudaqx::tensor a({2, 2}); + cudaqx::tensor b({2, 2}); + + // Initialize test data + double data_a[] = {1.0, 2.0, 3.0, 4.0}; + double data_b[] = {5.0, 6.0, 7.0, 8.0}; + a.copy(data_a); + b.copy(data_b); + + auto result = a + b; + + // Check result dimensions + EXPECT_EQ(result.rank(), 2); + EXPECT_EQ(result.shape()[0], 2); + EXPECT_EQ(result.shape()[1], 2); + + // Check elementwise addition results + EXPECT_DOUBLE_EQ(result.at({0, 0}), 6.0); // 1 + 5 + EXPECT_DOUBLE_EQ(result.at({0, 1}), 8.0); // 2 + 6 + EXPECT_DOUBLE_EQ(result.at({1, 0}), 10.0); // 3 + 7 + EXPECT_DOUBLE_EQ(result.at({1, 1}), 12.0); // 4 + 8 +} + +TEST(TensorTest, ElementwiseMultiplication) { + cudaqx::tensor a({2, 2}); + cudaqx::tensor b({2, 2}); + + double data_a[] = {1.0, 2.0, 3.0, 4.0}; + double data_b[] = {5.0, 6.0, 7.0, 8.0}; + a.copy(data_a); + b.copy(data_b); + + auto result = a * b; + + EXPECT_EQ(result.rank(), 2); + EXPECT_EQ(result.shape()[0], 2); + EXPECT_EQ(result.shape()[1], 2); + + EXPECT_DOUBLE_EQ(result.at({0, 0}), 5.0); // 1 * 5 + EXPECT_DOUBLE_EQ(result.at({0, 1}), 12.0); // 2 * 6 + EXPECT_DOUBLE_EQ(result.at({1, 0}), 21.0); // 3 * 7 + EXPECT_DOUBLE_EQ(result.at({1, 1}), 32.0); // 4 * 8 +} + 
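// Elementwise +, * and % on cudaqx::tensor act entry by entry and require both
// operands to have identical shapes; a shape mismatch throws
// std::runtime_error (exercised by MismatchedShapeAddition further below).
// The expected values in these tests also pin down the row-major ordering
// that copy() uses when filling the tensor from a flat buffer.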
+TEST(TensorTest, ElementwiseModulo) { + cudaqx::tensor a({2, 2}); + cudaqx::tensor b({2, 2}); + + int data_a[] = {7, 8, 9, 10}; + int data_b[] = {4, 3, 5, 2}; + a.copy(data_a); + b.copy(data_b); + + auto result = a % b; + + EXPECT_EQ(result.rank(), 2); + EXPECT_EQ(result.shape()[0], 2); + EXPECT_EQ(result.shape()[1], 2); + + EXPECT_EQ(result.at({0, 0}), 3); // 7 % 4 + EXPECT_EQ(result.at({0, 1}), 2); // 8 % 3 + EXPECT_EQ(result.at({1, 0}), 4); // 9 % 5 + EXPECT_EQ(result.at({1, 1}), 0); // 10 % 2 +} + +TEST(TensorTest, Any) { + { + cudaqx::tensor a({2, 2}); + + uint8_t data_a[] = {7, 8, 9, 10}; + a.copy(data_a); + + uint8_t result = a.any(); + + EXPECT_TRUE(result); + } + { + cudaqx::tensor a({2, 2}); + + uint8_t data_a[] = {0, 0, 1, 0}; + a.copy(data_a); + + uint8_t result = a.any(); + + EXPECT_TRUE(result); + } + { + cudaqx::tensor a({2, 2}); + + uint8_t data_a[] = {0, 0, 0, 0}; + a.copy(data_a); + + uint8_t result = a.any(); + + EXPECT_FALSE(result); + } +} + +TEST(TensorTest, SumAll) { + { + // test int + cudaqx::tensor a({2, 2}); + + int data_a[] = {7, 8, 9, 10}; + a.copy(data_a); + + int result = a.sum_all(); + + EXPECT_EQ(result, 34); + } + { + // test uint8_t + cudaqx::tensor a({2, 2}); + + uint8_t data_a[] = {7, 8, 9, 10}; + a.copy(data_a); + + uint8_t result = a.sum_all(); + + EXPECT_EQ(result, 34); + } + { + // test uint8_t overflow + cudaqx::tensor a({2, 2}); + + uint8_t data_a[] = {70, 80, 90, 100}; + a.copy(data_a); + + uint8_t result = a.sum_all(); + + EXPECT_NE(result, 340); + EXPECT_EQ(result, (uint8_t)340); + } + { + // test float + cudaqx::tensor a({2, 2}); + + float data_a[] = {7.1, 8.2, 9.1, 10.3}; + a.copy(data_a); + + float result = a.sum_all(); + + float tolerance = 1.e-5; + EXPECT_FLOAT_EQ(result, 34.7); + } +} + +TEST(TensorTest, ScalarModulo) { + { + // test int + cudaqx::tensor a({2, 2}); + + int data_a[] = {7, 8, 9, 10}; + a.copy(data_a); + + auto result = a % 2; + + EXPECT_EQ(result.rank(), 2); + EXPECT_EQ(result.shape()[0], 2); + EXPECT_EQ(result.shape()[1], 2); + + EXPECT_EQ(result.at({0, 0}), 1); // 7 % 2 + EXPECT_EQ(result.at({0, 1}), 0); // 8 % 2 + EXPECT_EQ(result.at({1, 0}), 1); // 9 % 2 + EXPECT_EQ(result.at({1, 1}), 0); // 10 % 2 + } + { + // test uint8_t + cudaqx::tensor a({2, 2}); + + uint8_t data_a[] = {7, 8, 9, 10}; + a.copy(data_a); + + auto result = a % 2; + + EXPECT_EQ(result.rank(), 2); + EXPECT_EQ(result.shape()[0], 2); + EXPECT_EQ(result.shape()[1], 2); + + EXPECT_EQ(result.at({0, 0}), 1); // 7 % 2 + EXPECT_EQ(result.at({0, 1}), 0); // 8 % 2 + EXPECT_EQ(result.at({1, 0}), 1); // 9 % 2 + EXPECT_EQ(result.at({1, 1}), 0); // 10 % 2 + } + { + // test result tensor is input tensor + cudaqx::tensor a({2, 2}); + + uint8_t data_a[] = {7, 8, 9, 10}; + a.copy(data_a); + + a = a % 2; + + EXPECT_EQ(a.rank(), 2); + EXPECT_EQ(a.shape()[0], 2); + EXPECT_EQ(a.shape()[1], 2); + + EXPECT_EQ(a.at({0, 0}), 1); // 7 % 2 + EXPECT_EQ(a.at({0, 1}), 0); // 8 % 2 + EXPECT_EQ(a.at({1, 0}), 1); // 9 % 2 + EXPECT_EQ(a.at({1, 1}), 0); // 10 % 2 + } +} + +TEST(TensorTest, MatrixDotProduct) { + cudaqx::tensor a({2, 3}); + cudaqx::tensor b({3, 2}); + + double data_a[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; + double data_b[] = {7.0, 8.0, 9.0, 10.0, 11.0, 12.0}; + a.copy(data_a); + b.copy(data_b); + + auto result = a.dot(b); + + EXPECT_EQ(result.rank(), 2); + EXPECT_EQ(result.shape()[0], 2); + EXPECT_EQ(result.shape()[1], 2); + + // Matrix multiplication results + EXPECT_DOUBLE_EQ(result.at({0, 0}), 58.0); // 1*7 + 2*9 + 3*11 + EXPECT_DOUBLE_EQ(result.at({0, 1}), 64.0); // 1*8 + 
2*10 + 3*12 + EXPECT_DOUBLE_EQ(result.at({1, 0}), 139.0); // 4*7 + 5*9 + 6*11 + EXPECT_DOUBLE_EQ(result.at({1, 1}), 154.0); // 4*8 + 5*10 + 6*12 +} + +TEST(TensorTest, MatrixVectorProduct) { + cudaqx::tensor a({2, 3}); + cudaqx::tensor v({3}); + + double data_a[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; + double data_v[] = {7.0, 8.0, 9.0}; + a.copy(data_a); + v.copy(data_v); + + auto result = a.dot(v); + + EXPECT_EQ(result.rank(), 1); + EXPECT_EQ(result.shape()[0], 2); + + EXPECT_DOUBLE_EQ(result.at({0}), 50.0); // 1*7 + 2*8 + 3*9 + EXPECT_DOUBLE_EQ(result.at({1}), 122.0); // 4*7 + 5*8 + 6*9 +} + +TEST(TensorTest, MatrixTranspose) { + cudaqx::tensor a({2, 3}); + + double data_a[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; + a.copy(data_a); + + auto result = a.transpose(); + + EXPECT_EQ(a.rank(), 2); + EXPECT_EQ(a.shape()[0], 2); + EXPECT_EQ(a.shape()[1], 3); + + EXPECT_EQ(result.rank(), 2); + EXPECT_EQ(result.shape()[0], 3); + EXPECT_EQ(result.shape()[1], 2); + + EXPECT_DOUBLE_EQ(a.at({0, 0}), 1.0); + EXPECT_DOUBLE_EQ(a.at({0, 1}), 2.0); + EXPECT_DOUBLE_EQ(a.at({0, 2}), 3.0); + EXPECT_DOUBLE_EQ(a.at({1, 0}), 4.0); + EXPECT_DOUBLE_EQ(a.at({1, 1}), 5.0); + EXPECT_DOUBLE_EQ(a.at({1, 2}), 6.0); + + EXPECT_DOUBLE_EQ(result.at({0, 0}), 1.0); + EXPECT_DOUBLE_EQ(result.at({0, 1}), 4.0); + EXPECT_DOUBLE_EQ(result.at({1, 0}), 2.0); + EXPECT_DOUBLE_EQ(result.at({1, 1}), 5.0); + EXPECT_DOUBLE_EQ(result.at({2, 0}), 3.0); + EXPECT_DOUBLE_EQ(result.at({2, 1}), 6.0); +} + +// Test error conditions +TEST(TensorTest, MismatchedShapeAddition) { + cudaqx::tensor a({2, 2}); + cudaqx::tensor b({2, 3}); + + EXPECT_THROW(a + b, std::runtime_error); +} + +TEST(TensorTest, InvalidDotProductDimensions) { + cudaqx::tensor a({2, 3}); + cudaqx::tensor b({2, 2}); + + EXPECT_THROW(a.dot(b), std::runtime_error); +} + +TEST(TensorTest, InvalidMatrixVectorDimensions) { + cudaqx::tensor a({2, 3}); + cudaqx::tensor v({2}); + + EXPECT_THROW(a.dot(v), std::runtime_error); +} + +TEST(TensorTest, ConstructorWithShape) { + std::vector shape = {2, 3, 4}; + cudaqx::tensor t(shape); + + EXPECT_EQ(t.rank(), 3); + EXPECT_EQ(t.size(), 24); + EXPECT_EQ(t.shape(), shape); +} + +TEST(TensorTest, ConstructorWithDataAndShape) { + std::vector shape = {2, 2}; + std::complex *data = new std::complex[4]; + data[0] = {1.0, 0.0}; + data[1] = {0.0, 1.0}; + data[2] = {0.0, -1.0}; + data[3] = {1.0, 0.0}; + + cudaqx::tensor t(data, shape); + + EXPECT_EQ(t.rank(), 2); + EXPECT_EQ(t.size(), 4); + EXPECT_EQ(t.shape(), shape); + + // Check if data is correctly stored + EXPECT_EQ(t.at({0, 0}), std::complex(1.0, 0.0)); + EXPECT_EQ(t.at({0, 1}), std::complex(0.0, 1.0)); + EXPECT_EQ(t.at({1, 0}), std::complex(0.0, -1.0)); + EXPECT_EQ(t.at({1, 1}), std::complex(1.0, 0.0)); +} + +TEST(TensorTest, AccessElements) { + std::vector shape = {2, 3}; + cudaqx::tensor t(shape); + + // Set values + t.at({0, 0}) = {1.0, 0.0}; + t.at({0, 1}) = {0.0, 1.0}; + t.at({1, 2}) = {-1.0, 0.0}; + + // Check values + EXPECT_EQ(t.at({0, 0}), std::complex(1.0, 0.0)); + EXPECT_EQ(t.at({0, 1}), std::complex(0.0, 1.0)); + EXPECT_EQ(t.at({1, 2}), std::complex(-1.0, 0.0)); +} + +TEST(TensorTest, CopyData) { + std::vector shape = {2, 2}; + std::vector> data = { + {1.0, 0.0}, {0.0, 1.0}, {0.0, -1.0}, {1.0, 0.0}}; + cudaqx::tensor t(shape); + + t.copy(data.data(), shape); + + EXPECT_EQ(t.at({0, 0}), std::complex(1.0, 0.0)); + EXPECT_EQ(t.at({0, 1}), std::complex(0.0, 1.0)); + EXPECT_EQ(t.at({1, 0}), std::complex(0.0, -1.0)); + EXPECT_EQ(t.at({1, 1}), std::complex(1.0, 0.0)); +} + +TEST(TensorTest, TakeData) 
{ + std::vector shape = {2, 2}; + auto data = new std::complex[4] { + {1.0, 0.0}, {0.0, 1.0}, {0.0, -1.0}, { 1.0, 0.0 } + }; + cudaqx::tensor t(shape); + + t.take(data, shape); + + EXPECT_EQ(t.at({0, 0}), std::complex(1.0, 0.0)); + EXPECT_EQ(t.at({0, 1}), std::complex(0.0, 1.0)); + EXPECT_EQ(t.at({1, 0}), std::complex(0.0, -1.0)); + EXPECT_EQ(t.at({1, 1}), std::complex(1.0, 0.0)); + + // Note: We don't delete data here as the tensor now owns it +} + +TEST(TensorTest, BorrowData) { + std::vector shape = {2, 2}; + std::vector> data = { + {1.0, 0.0}, {0.0, 1.0}, {0.0, -1.0}, {1.0, 0.0}}; + cudaqx::tensor t(shape); + + t.borrow(data.data(), shape); + + EXPECT_EQ(t.at({0, 0}), std::complex(1.0, 0.0)); + EXPECT_EQ(t.at({0, 1}), std::complex(0.0, 1.0)); + EXPECT_EQ(t.at({1, 0}), std::complex(0.0, -1.0)); + EXPECT_EQ(t.at({1, 1}), std::complex(1.0, 0.0)); +} + +TEST(TensorTest, InvalidAccess) { + std::vector shape = {2, 2}; + cudaqx::tensor t(shape); + + EXPECT_THROW(t.at({2, 0}), std::runtime_error); + EXPECT_THROW(t.at({0, 2}), std::runtime_error); + EXPECT_THROW(t.at({0, 0, 0}), std::runtime_error); +} + +TEST(TensorTest, checkNullaryConstructor) { + std::vector shape = {2, 2}; + std::vector> data = { + {1.0, 0.0}, {0.0, 1.0}, {0.0, -1.0}, {1.0, 0.0}}; + cudaqx::tensor t; + + t.copy(data.data(), shape); +} + +TEST(HeterogeneousMapTest, checkSimple) { + + { + cudaqx::heterogeneous_map m; + m.insert("hello", 2.2); + m.insert("another", 1); + m.insert("string", "string"); + EXPECT_EQ(3, m.size()); + EXPECT_NEAR(2.2, m.get("hello"), 1e-3); + EXPECT_EQ(1, m.get("another")); + // If the value is int-like, can get it as other int-like types + EXPECT_EQ(1, m.get("another")); + // same for float/double + EXPECT_NEAR(2.2, m.get("hello"), 1e-3); + EXPECT_EQ("string", m.get("string")); + EXPECT_EQ("defaulted", m.get("key22", "defaulted")); + } + + { + cudaqx::heterogeneous_map m({{"hello", 2.2}, {"string", "stringVal"}}); + EXPECT_EQ(2, m.size()); + EXPECT_NEAR(2.2, m.get("hello"), 1e-3); + EXPECT_EQ("stringVal", m.get("string")); + } +} + +TEST(HeterogeneousMapTest, InsertAndRetrieve) { + cudaqx::heterogeneous_map map; + map.insert("int_key", 42); + map.insert("string_key", std::string("hello")); + map.insert("double_key", 3.14); + + EXPECT_EQ(map.get("int_key"), 42); + EXPECT_EQ(map.get("string_key"), "hello"); + EXPECT_DOUBLE_EQ(map.get("double_key"), 3.14); +} + +TEST(HeterogeneousMapTest, InsertOverwrite) { + cudaqx::heterogeneous_map map; + map.insert("key", 10); + EXPECT_EQ(map.get("key"), 10); + + map.insert("key", 20); + EXPECT_EQ(map.get("key"), 20); +} + +TEST(HeterogeneousMapTest, GetWithDefault) { + cudaqx::heterogeneous_map map; + EXPECT_EQ(map.get("nonexistent_key", 100), 100); + EXPECT_EQ(map.get("nonexistent_key", std::string("default")), "default"); +} + +TEST(HeterogeneousMapTest, Contains) { + cudaqx::heterogeneous_map map; + map.insert("existing_key", 42); + + EXPECT_TRUE(map.contains("existing_key")); + EXPECT_FALSE(map.contains("nonexistent_key")); +} + +TEST(HeterogeneousMapTest, Size) { + cudaqx::heterogeneous_map map; + EXPECT_EQ(map.size(), 0); + + map.insert("key1", 10); + map.insert("key2", "value"); + + EXPECT_EQ(map.size(), 2); +} + +TEST(HeterogeneousMapTest, Clear) { + cudaqx::heterogeneous_map map; + map.insert("key1", 10); + map.insert("key2", "value"); + + EXPECT_EQ(map.size(), 2); + + map.clear(); + + EXPECT_EQ(map.size(), 0); + EXPECT_FALSE(map.contains("key1")); + EXPECT_FALSE(map.contains("key2")); +} + +TEST(HeterogeneousMapTest, RelatedTypes) { + 
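  // A value inserted as an int can be read back through get() using related
  // integral types; the map performs the int-like conversion internally, as
  // the checkSimple test above also exercises for int-like and float/double
  // values.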
cudaqx::heterogeneous_map map; + map.insert("int_key", 42); + + EXPECT_EQ(map.get("int_key"), 42); + EXPECT_EQ(map.get("int_key"), 42); + EXPECT_EQ(map.get("int_key"), 42); +} + +TEST(HeterogeneousMapTest, CharArrayConversion) { + cudaqx::heterogeneous_map map; + const char *cstr = "Hello"; + map.insert("char_array_key", cstr); + + EXPECT_EQ(map.get("char_array_key"), "Hello"); +} + +TEST(HeterogeneousMapTest, ExceptionHandling) { + cudaqx::heterogeneous_map map; + map.insert("int_key", 42); + + EXPECT_THROW(map.get("int_key"), std::runtime_error); + EXPECT_THROW(map.get("nonexistent_key"), std::runtime_error); +} + +TEST(HeterogeneousMapTest, CopyConstructor) { + cudaqx::heterogeneous_map map; + map.insert("key1", 10); + map.insert("key2", "value"); + + cudaqx::heterogeneous_map copy_map(map); + + EXPECT_EQ(copy_map.size(), 2); + EXPECT_EQ(copy_map.get("key1"), 10); + EXPECT_EQ(copy_map.get("key2"), "value"); +} + +TEST(HeterogeneousMapTest, AssignmentOperator) { + cudaqx::heterogeneous_map map; + map.insert("key1", 10); + map.insert("key2", "value"); + + cudaqx::heterogeneous_map assigned_map; + assigned_map = map; + + EXPECT_EQ(assigned_map.size(), 2); + EXPECT_EQ(assigned_map.get("key1"), 10); + EXPECT_EQ(assigned_map.get("key2"), "value"); +} + +TEST(HeterogeneousMapTest, InitializerListConstructor) { + cudaqx::heterogeneous_map map{{"int_key", 42}, + {"string_key", std::string("hello")}, + {"double_key", 3.14}}; + + EXPECT_EQ(map.size(), 3); + EXPECT_EQ(map.get("int_key"), 42); + EXPECT_EQ(map.get("string_key"), "hello"); + EXPECT_DOUBLE_EQ(map.get("double_key"), 3.14); +} + +TEST(GraphTester, AddEdge) { + cudaqx::graph g; + g.add_edge(1, 2, 1.5); + EXPECT_EQ(g.get_neighbors(1), std::vector{2}); + EXPECT_EQ(g.get_neighbors(2), std::vector{1}); + std::vector> tmp{{2, 1.5}}, tmp2{{1, 1.5}}; + EXPECT_EQ(g.get_weighted_neighbors(1), tmp); + EXPECT_EQ(g.get_weighted_neighbors(2), tmp2); +} + +TEST(GraphTester, AddEdgeDefaultWeight) { + cudaqx::graph g; + g.add_edge(1, 2); // Default weight should be 1.0 + EXPECT_EQ(g.get_neighbors(1), std::vector{2}); + EXPECT_EQ(g.get_neighbors(2), std::vector{1}); + std::vector> tmp{{2, 1.0}}; + EXPECT_EQ(g.get_weighted_neighbors(1), tmp); +} + +TEST(GraphTester, AddNode) { + cudaqx::graph g; + g.add_node(1); + EXPECT_EQ(g.get_nodes(), std::vector{1}); +} + +TEST(GraphTester, GetNeighbors) { + cudaqx::graph g; + g.add_edge(1, 2, 0.5); + g.add_edge(1, 3, 1.5); + g.add_edge(2, 3, 2.0); + std::vector tmp{2, 3}, tmp2{1, 2}, tmp3{1, 3}; + + EXPECT_EQ(g.get_neighbors(1), tmp); + EXPECT_EQ(g.get_neighbors(2), tmp3); + EXPECT_EQ(g.get_neighbors(3), tmp2); +} + +TEST(GraphTester, GetWeightedNeighbors) { + cudaqx::graph g; + g.add_edge(1, 2, 0.5); + g.add_edge(1, 3, 1.5); + g.add_edge(2, 3, 2.0); + + std::vector> expected1 = {{2, 0.5}, {3, 1.5}}; + std::vector> expected2 = {{1, 0.5}, {3, 2.0}}; + std::vector> expected3 = {{1, 1.5}, {2, 2.0}}; + + EXPECT_EQ(g.get_weighted_neighbors(1), expected1); + EXPECT_EQ(g.get_weighted_neighbors(2), expected2); + EXPECT_EQ(g.get_weighted_neighbors(3), expected3); +} + +TEST(GraphTester, GetNodes) { + cudaqx::graph g; + g.add_edge(1, 2); + g.add_edge(2, 3); + g.add_node(4); + std::vector expected_nodes = {1, 2, 3, 4}; + std::vector actual_nodes = g.get_nodes(); + std::sort(actual_nodes.begin(), actual_nodes.end()); + EXPECT_EQ(actual_nodes, expected_nodes); +} + +TEST(GraphTester, GetEdgeWeight) { + cudaqx::graph g; + g.add_edge(1, 2, 1.5); + g.add_edge(2, 3, 2.5); + + EXPECT_DOUBLE_EQ(g.get_edge_weight(1, 2), 1.5); + 
EXPECT_DOUBLE_EQ(g.get_edge_weight(2, 1), 1.5); // Test symmetry + EXPECT_DOUBLE_EQ(g.get_edge_weight(2, 3), 2.5); + EXPECT_DOUBLE_EQ(g.get_edge_weight(1, 3), -1.0); // Non-existent edge +} + +TEST(GraphTester, UpdateEdgeWeight) { + cudaqx::graph g; + g.add_edge(1, 2, 1.5); + + EXPECT_TRUE(g.update_edge_weight(1, 2, 3.0)); + EXPECT_DOUBLE_EQ(g.get_edge_weight(1, 2), 3.0); + EXPECT_DOUBLE_EQ(g.get_edge_weight(2, 1), 3.0); // Test symmetry + + EXPECT_FALSE(g.update_edge_weight(1, 3, 2.0)); // Non-existent edge +} + +TEST(GraphTest, RemoveEdge) { + cudaqx::graph g; + g.add_edge(1, 2, 1.0); + g.add_edge(1, 3, 2.0); + g.add_edge(2, 3, 3.0); + + g.remove_edge(1, 2); + + EXPECT_EQ(g.get_neighbors(1), std::vector{3}); + EXPECT_EQ(g.get_neighbors(2), std::vector{3}); + std::vector tmp{1, 2}; + EXPECT_EQ(g.get_neighbors(3), tmp); + EXPECT_EQ(g.num_edges(), 2); +} + +TEST(GraphTest, RemoveNode) { + cudaqx::graph g; + g.add_edge(1, 2, 1.0); + g.add_edge(1, 3, 2.0); + g.add_edge(2, 3, 3.0); + g.add_edge(3, 4, 1.5); + + g.remove_node(3); + + EXPECT_EQ(g.get_neighbors(1), std::vector{2}); + EXPECT_EQ(g.get_neighbors(2), std::vector{1}); + EXPECT_EQ(g.get_neighbors(4), std::vector{}); + EXPECT_EQ(g.num_nodes(), 3); + EXPECT_EQ(g.num_edges(), 1); +} + +TEST(GraphTest, NumNodes) { + cudaqx::graph g; + g.add_edge(1, 2); + g.add_edge(2, 3); + g.add_node(4); + EXPECT_EQ(g.num_nodes(), 4); +} + +TEST(GraphTest, NumEdges) { + cudaqx::graph g; + g.add_edge(1, 2); + g.add_edge(2, 3); + g.add_edge(1, 3); + EXPECT_EQ(g.num_edges(), 3); +} + +TEST(GraphTest, IsConnected) { + cudaqx::graph g; + EXPECT_TRUE(g.is_connected()); // Empty graph is considered connected + + g.add_node(1); + EXPECT_TRUE(g.is_connected()); // Single node graph is connected + + g.add_edge(1, 2); + g.add_edge(2, 3); + EXPECT_TRUE(g.is_connected()); + + g.add_node(4); + EXPECT_FALSE(g.is_connected()); + + g.add_edge(3, 4); + EXPECT_TRUE(g.is_connected()); +} + +TEST(GraphTest, GetDegree) { + cudaqx::graph g; + g.add_edge(1, 2); + g.add_edge(1, 3); + g.add_edge(1, 4); + g.add_edge(2, 3); + + EXPECT_EQ(g.get_degree(1), 3); + EXPECT_EQ(g.get_degree(2), 2); + EXPECT_EQ(g.get_degree(3), 2); + EXPECT_EQ(g.get_degree(4), 1); + EXPECT_EQ(g.get_degree(5), 0); // Non-existent node +} + +TEST(GraphTest, MultipleWeightedEdges) { + cudaqx::graph g; + g.add_edge(1, 2, 0.5); + g.add_edge(2, 3, 1.5); + g.add_edge(3, 1, 2.0); + + EXPECT_EQ(g.num_edges(), 3); + EXPECT_DOUBLE_EQ(g.get_edge_weight(1, 2), 0.5); + EXPECT_DOUBLE_EQ(g.get_edge_weight(2, 3), 1.5); + EXPECT_DOUBLE_EQ(g.get_edge_weight(3, 1), 2.0); + + // Verify neighbors without weights + std::vector tmp{2, 3}, tmp2{1, 3}, tmp3{1, 2}; + + EXPECT_EQ(g.get_neighbors(1), tmp); + EXPECT_EQ(g.get_neighbors(2), tmp2); + EXPECT_EQ(g.get_neighbors(3), tmp3); +} + +TEST(GraphTest, NegativeWeights) { + cudaqx::graph g; + g.add_edge(1, 2, -1.5); + g.add_edge(2, 3, -0.5); + + EXPECT_DOUBLE_EQ(g.get_edge_weight(1, 2), -1.5); + EXPECT_DOUBLE_EQ(g.get_edge_weight(2, 3), -0.5); + std::vector tmp2{1, 3}; + + // Verify neighbors without weights + EXPECT_EQ(g.get_neighbors(1), std::vector{2}); + EXPECT_EQ(g.get_neighbors(2), tmp2); +} + +TEST(GraphTest, NodeWeights) { + cudaqx::graph g; + + // Add nodes with weights + g.add_node(1, 2.5); + g.add_node(2, 1.5); + g.add_edge(1, 2, 1.0); + + // Test node weights + EXPECT_DOUBLE_EQ(g.get_node_weight(1), 2.5); + EXPECT_DOUBLE_EQ(g.get_node_weight(2), 1.5); + + // Test default weight + g.add_node(3); + EXPECT_DOUBLE_EQ(g.get_node_weight(3), 1.0); + + // Test non-existent node + 
EXPECT_DOUBLE_EQ(g.get_node_weight(4), 0.0); + + // Test weight update + g.set_node_weight(1, 3.0); + EXPECT_DOUBLE_EQ(g.get_node_weight(1), 3.0); + + // Test node removal + g.remove_node(1); + EXPECT_DOUBLE_EQ(g.get_node_weight(1), 0.0); +} + +TEST(GraphTest, NodeWeightsClear) { + cudaqx::graph g; + + g.add_node(1, 2.5); + g.add_node(2, 1.5); + g.clear(); + + EXPECT_DOUBLE_EQ(g.get_node_weight(1), 0.0); + EXPECT_DOUBLE_EQ(g.get_node_weight(2), 0.0); +} + +TEST(GraphTest, NodeWeightsMultiple) { + cudaqx::graph g; + + // Add multiple nodes with different weights + std::vector> nodes = { + {1, 1.5}, {2, 2.5}, {3, 3.5}, {4, 4.5}}; + + for (const auto &node : nodes) { + g.add_node(node.first, node.second); + } + + // Verify all weights + for (const auto &node : nodes) { + EXPECT_DOUBLE_EQ(g.get_node_weight(node.first), node.second); + } +} + +TEST(GraphTest, GetDisconnectedVertices) { + cudaqx::graph g; + + // Add two disconnected components + g.add_edge(1, 2); + g.add_edge(2, 3); + g.add_edge(4, 5); + g.add_edge(5, 6); + + auto disconnected = g.get_disconnected_vertices(); + + std::vector> expected = {{1, 3}, {1, 4}, {1, 5}, {1, 6}, + {2, 4}, {2, 5}, {2, 6}, {3, 4}, + {3, 5}, {3, 6}, {4, 6}}; + + // Sort both vectors to ensure consistent ordering + auto sort_pairs = [](std::vector> &pairs) { + // First ensure each pair has smaller number first + for (auto &p : pairs) { + if (p.first > p.second) { + std::swap(p.first, p.second); + } + } + // Then sort the vector of pairs + std::sort(pairs.begin(), pairs.end()); + }; + + sort_pairs(disconnected); + sort_pairs(expected); + + EXPECT_EQ(disconnected, expected); + + // Test with connected graph + cudaqx::graph g2; + g2.add_edge(1, 2); + g2.add_edge(2, 3); + g2.add_edge(3, 1); + + auto disconnected2 = g2.get_disconnected_vertices(); + EXPECT_TRUE(disconnected2.empty()); +} \ No newline at end of file diff --git a/libs/qec/CMakeLists.txt b/libs/qec/CMakeLists.txt new file mode 100644 index 0000000..68b3ad7 --- /dev/null +++ b/libs/qec/CMakeLists.txt @@ -0,0 +1,113 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# Requering the same version as the others. +cmake_minimum_required(VERSION 3.28 FATAL_ERROR) + +# Project setup +# ============================================================================== + +# Check if core is built as a standalone project. 
+if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) + project(cudaqx-qec) + set(CUDAQX_QEC_STANDALONE_BUILD TRUE) + + set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + + # Add our Modules to the path + list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/../../cmake/Modules") + + # Include custom CUDA-QX modules + include(CUDA-QX) + + # Helper target to collect python modules + add_custom_target(cudaqx-pymodules) +endif() + +# The following must go after `project(...)` +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED TRUE) +set(CMAKE_POSITION_INDEPENDENT_CODE TRUE) + +set(CUDAQX_QEC_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) +set(CUDAQX_QEC_INCLUDE_DIR ${CUDAQX_QEC_SOURCE_DIR}/include) + +# Options +# ============================================================================== + +option(CUDAQX_QEC_INCLUDE_TESTS + "Generate build targets for the CUDA-QX QEC unit tests." + ${CUDAQX_INCLUDE_TESTS}) + +option(CUDAQX_QEC_BINDINGS_PYTHON + "Generate build targets for python bindings." + ${CUDAQX_BINDINGS_PYTHON}) + +option(CUDAQX_QEC_INSTALL_PYTHON + "Install python files alongside the library." + ${CUDAQX_INSTALL_PYTHON}) + +# External Dependencies +# ============================================================================== + +if (CUDAQX_QEC_STANDALONE_BUILD) + # FIXME for now, we only use library mode + set(CUDAQ_LIBRARY_MODE ON) + find_package(CUDAQ REQUIRED) + + # FIXME + add_subdirectory(../core core_build) +endif() + +# Wheel building setup +# ============================================================================== + +if (SKBUILD) + # When building with scikit, i.e., building wheels, we want all the install + # to be on the package directory. + set(CMAKE_INSTALL_BINDIR cudaq_qec/bin) + set(CMAKE_INSTALL_INCLUDEDIR cudaq_qec/include) + set(CMAKE_INSTALL_LIBDIR cudaq_qec/lib) +endif() + +# Directory setup +# ============================================================================== + +add_subdirectory(lib) + +if (CUDAQX_QEC_BINDINGS_PYTHON) + add_subdirectory(python) +endif() + +if (CUDAQX_QEC_INCLUDE_TESTS) + add_custom_target(CUDAQXQECUnitTests) + if (CUDAQX_QEC_STANDALONE_BUILD) + include(CTest) + + add_custom_target(run_tests + COMMAND ${CMAKE_COMMAND} -E env + PYTHONPATH="${CUDAQ_INSTALL_DIR}:${CMAKE_BINARY_DIR}/python" + ${CMAKE_CTEST_COMMAND} --output-on-failure + DEPENDS CUDAQXQECUnitTests + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + if (CUDAQX_QEC_BINDINGS_PYTHON) + add_custom_target(run_python_tests + COMMAND ${CMAKE_COMMAND} -E env + PYTHONPATH="${CUDAQ_INSTALL_DIR}:${CMAKE_BINARY_DIR}/python" + pytest -v ${CUDAQX_QEC_SOURCE_DIR}/python/tests + DEPENDS cudaqx-pymodules + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + endif() + else() + add_dependencies(CUDAQXUnitTests CUDAQXQECUnitTests) + endif() + add_subdirectory(unittests) +endif() diff --git a/libs/qec/README.md b/libs/qec/README.md new file mode 100644 index 0000000..61b6b62 --- /dev/null +++ b/libs/qec/README.md @@ -0,0 +1,31 @@ +# CUDA-Q QEC Library + +CUDA-Q QEC is a high-performance quantum error correction library +that leverages NVIDIA GPUs to accelerate classical decoding and +processing of quantum error correction codes. The library provides optimized +implementations of common QEC tasks including syndrome extraction, +decoding, and logical operation tracking. + +**Note**: CUDA-Q QEC is currently only supported on Linux operating systems using +`x86_64` processors. CUDA-Q QEC does not require a GPU to use, but some +components are GPU-accelerated. 
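The snippet below is a minimal end-to-end sketch built only from the C++
interfaces introduced in this patch (`cudaq::qec::get_code`, `code::get_parity`,
`cudaq::qec::sample_code_capacity`, and `cudaq::qec::get_decoder`). The decoder
name `"single_error_lut"` and the shots-by-syndrome layout of the sampled tensor
are illustrative assumptions, not guarantees about the shipped API surface.

```cpp
#include "cudaq/qec/code.h"
#include "cudaq/qec/decoder.h"
#include "cudaq/qec/experiments.h"

#include <vector>

int main() {
  // Construct a QEC code by name and get its parity check matrix H = (Hx | Hz).
  auto code = cudaq::qec::get_code("steane");
  auto H = code->get_parity();

  // Sample syndromes under code-capacity noise: 1000 shots, 1% bit-flip rate.
  // `data` holds the noisy data-qubit bits (unused in this sketch).
  auto [syndromes, data] = cudaq::qec::sample_code_capacity(*code, 1000, 0.01);

  // "single_error_lut" is an assumed decoder name used purely for illustration;
  // substitute any decoder registered with the library.
  auto decoder = cudaq::qec::get_decoder("single_error_lut", H);

  // Decode shot 0, assuming the syndrome tensor is laid out shots x syndrome_size.
  std::vector<cudaq::qec::float_t> syndrome(decoder->get_syndrome_size());
  for (std::size_t i = 0; i < syndrome.size(); ++i)
    syndrome[i] = syndromes.at({0, i});
  auto result = decoder->decode(syndrome);
  // result.converged and result.result (per-bit soft error probabilities)
  // hold the decoder output.
  return 0;
}
```

Circuit-level noise experiments follow the same pattern through
`cudaq::qec::sample_memory_circuit(...)`, optionally passing a
`cudaq::noise_model`.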
+ +## Features + +- Fast syndrome extraction and processing on GPUs +- Common decoders for surface codes and other topological codes +- Real-time decoding capabilities for quantum feedback +- Integration with CUDA-Q quantum program execution + +## Getting Started + +For detailed documentation, tutorials, and API reference, visit the +[CUDA-Q QEC Documentation](https://nvidia.github.io/cudaqx/components/qec/introduction.html). + +## License + +CUDA-Q QEC is an open source project. The source code is available on +[GitHub][github_link] and licensed under [Apache License +2.0](https://github.com/NVIDIA/cudaqx/blob/main/LICENSE). + +[github_link]: https://github.com/NVIDIA/cudaqx/tree/main/libs/qec \ No newline at end of file diff --git a/libs/qec/include/cudaq/qec/code.h b/libs/qec/include/cudaq/qec/code.h new file mode 100644 index 0000000..25ad2e7 --- /dev/null +++ b/libs/qec/include/cudaq/qec/code.h @@ -0,0 +1,239 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include + +#include "cudaq/qis/qkernel.h" +#include "cudaq/qis/qvector.h" + +#include "cudaq/qec/noise_model.h" +#include "cudaq/qec/patch.h" +#include "cudaq/qec/stabilizer_utils.h" + +#include "cuda-qx/core/extension_point.h" +#include "cuda-qx/core/heterogeneous_map.h" +#include "cuda-qx/core/tensor.h" + +using namespace cudaqx; + +namespace cudaq::qec { + +/// @brief Enum describing all supported logical operations. +enum class operation { + x, ///< Logical X gate + y, ///< Logical Y gate + z, ///< Logical Z gate + h, ///< Logical Hadamard gate + s, ///< Logical S gate + cx, ///< Logical controlled-X gate + cy, ///< Logical controlled-Y gate + cz, ///< Logical controlled-Z gate + stabilizer_round, ///< Stabilizer measurement round + prep0, ///< Prepare logical |0⟩ state + prep1, ///< Prepare logical |1⟩ state + prepp, ///< Prepare logical |+⟩ state + prepm ///< Prepare logical |-⟩ state +}; + +/// @brief Base class for quantum error correcting codes in CUDA-Q. +/// @details +/// This class provides the core interface and functionality for implementing +/// quantum error correcting codes in CUDA-Q. It defines the basic operations +/// that any QEC code must support and provides infrastructure for syndrome +/// measurement and error correction experiments. +/// +/// To implement a new quantum error correcting code: +/// 1. Create a new class that inherits from code +/// 2. Implement the protected virtual methods: +/// - get_num_data_qubits() +/// - get_num_ancilla_qubits() +/// - get_num_ancilla_x_qubits() +/// - get_num_ancilla_z_qubits() +/// 3. Define quantum kernels for each required logical operation (these are +/// the fault tolerant logical operation implementations) +/// 4. Register the operations in your constructor using the +/// operation_encodings map on the base class +/// 5. 
Register your new code type using CUDAQ_REGISTER_TYPE +/// +/// Example implementation: +/// @code{.cpp} +/// __qpu__ void x_kernel(patch p); +/// __qpu__ void z_kernel(patch p); +/// class my_code : public qec::code { +/// protected: +/// std::size_t get_num_data_qubits() const override { return 7; } +/// std::size_t get_num_ancilla_qubits() const override { return 6; } +/// std::size_t get_num_ancilla_x_qubits() const override { return 3; } +/// std::size_t get_num_ancilla_z_qubits() const override { return 3; } +/// +/// public: +/// my_code(const heterogeneous_map& options) : code() { +/// // Can use user-specified options, e.g. auto d = +/// options.get("distance"); +/// operation_encodings.insert(std::make_pair(operation::x, x_kernel)); +/// operation_encodings.insert(std::make_pair(operation::z, z_kernel)); +/// // Register other required operations... +/// +/// // Define the default stabilizers! +/// m_stabilizers = qec::stabilizers({"XXXX", "ZZZZ"}); +/// } +/// +/// CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( +/// my_code, +/// static std::unique_ptr create(const heterogeneous_map +/// &options) { return std::make_unique(options); +/// } +/// ) +/// }; +/// +/// CUDAQ_REGISTER_TYPE(my_code) +/// @endcode +/// @brief Supported quantum operations for error correcting codes +class code : public extension_point { +public: + /// @brief Type alias for single qubit quantum kernels + using one_qubit_encoding = cudaq::qkernel; + + /// @brief Type alias for two qubit quantum kernels + using two_qubit_encoding = cudaq::qkernel; + + /// @brief Type alias for stabilizer measurement kernels + using stabilizer_round = cudaq::qkernel( + patch, const std::vector &, + const std::vector &)>; + + /// @brief Type alias for quantum operation encodings + using encoding = + std::variant; + +protected: + /// @brief Map storing the quantum kernels for each supported operation + std::unordered_map operation_encodings; + + /// @brief Stabilizer generators for the code + std::vector m_stabilizers; + + /// @brief Pauli Logical operators + std::vector m_pauli_observables; + + std::vector + fromPauliWords(const std::vector &words) { + std::vector ops; + for (auto &os : words) + ops.emplace_back(cudaq::spin_op::from_word(os)); + sortStabilizerOps(ops); + return ops; + } + +public: + /// @brief Get the number of physical data qubits needed for the code + /// @return Number of data qubits + virtual std::size_t get_num_data_qubits() const = 0; + + /// @brief Get the total number of ancilla qubits needed + /// @return Total number of ancilla qubits + virtual std::size_t get_num_ancilla_qubits() const = 0; + + /// @brief Get number of ancilla qubits needed for X stabilizer measurements + /// @return Number of X-type ancilla qubits + virtual std::size_t get_num_ancilla_x_qubits() const = 0; + + /// @brief Get number of ancilla qubits needed for Z stabilizer measurements + /// @return Number of Z-type ancilla qubits + virtual std::size_t get_num_ancilla_z_qubits() const = 0; + + code() = default; + virtual ~code() {} + + /// @brief Factory method to create a code instance with specified + /// stabilizers + /// @param name Name of the code to create + /// @param stabilizers Stabilizer generators for the code + /// @param options Optional code-specific configuration options + /// @return Unique pointer to created code instance + static std::unique_ptr + get(const std::string &name, const std::vector &stabilizers, + const heterogeneous_map options = {}); + + /// @brief Factory method to create a code instance + /// @param 
name Name of the code to create + /// @param options Optional code-specific configuration options + /// @return Unique pointer to created code instance + static std::unique_ptr get(const std::string &name, + const heterogeneous_map options = {}); + + /// @brief Get the full parity check matrix H = (Hx | Hz) + /// @return Tensor representing the parity check matrix + cudaqx::tensor get_parity() const; + + /// @brief Get the X component of the parity check matrix + /// @return Tensor representing Hx + cudaqx::tensor get_parity_x() const; + + /// @brief Get the Z component of the parity check matrix + /// @return Tensor representing Hz + cudaqx::tensor get_parity_z() const; + + /// @brief Get Lx stacked on Lz + /// @return Tensor representing pauli observables + cudaqx::tensor get_pauli_observables_matrix() const; + + /// @brief Get the Lx observables + /// @return Tensor representing Lx + cudaqx::tensor get_observables_x() const; + + /// @brief Get the Lz observables + /// @return Tensor representing Lz + cudaqx::tensor get_observables_z() const; + + /// @brief Get the stabilizer generators + /// @return Reference to stabilizers + const std::vector &get_stabilizers() const { + return m_stabilizers; + } + + /// @brief Return true if this code contains the given operation encoding. + bool contains_operation(operation op) const { + return operation_encodings.find(op) != operation_encodings.end(); + } + + // Return the CUDA-Q kernel for the given operation encoding. + // User must provide the qkernel type (stabilizer_round, one_qubit_encoding, + // or two_qubit_encoding) as the template type. + template + auto &&get_operation(operation op) const { + auto iter = operation_encodings.find(op); + if (iter == operation_encodings.end()) + throw std::runtime_error( + "code::get_operation error - could not find operation encoding " + + std::to_string(static_cast(op))); + + return std::get(iter->second); + } +}; + +/// Factory function to create a code instance with specified stabilizers +/// @param name Name of the code +/// @return Unique pointer to the created code instance +std::unique_ptr get_code(const std::string &name, + const heterogeneous_map options = {}); + +/// Factory function to create a code instance with specified stabilizers +/// @param name Name of the code +/// @param stab stabilizers +/// @return Unique pointer to the created code instance +std::unique_ptr get_code(const std::string &name, + const std::vector &stab, + const heterogeneous_map options = {}); + +/// Get a list of available quantum error correcting codes +/// @return Vector of strings containing names of available codes +std::vector get_available_codes(); + +} // namespace cudaq::qec diff --git a/libs/qec/include/cudaq/qec/codes/repetition.h b/libs/qec/include/cudaq/qec/codes/repetition.h new file mode 100644 index 0000000..7ecdc69 --- /dev/null +++ b/libs/qec/include/cudaq/qec/codes/repetition.h @@ -0,0 +1,77 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ +#include "cudaq/qec/code.h" +#include "cudaq/qec/patch.h" + +using namespace cudaqx; + +namespace cudaq::qec::repetition { + +/// \pure_device_kernel +/// +/// @brief Apply Logical X gate to a repetition code patch +/// @param p The patch to apply the X gate to +__qpu__ void x(patch p); + +/// \pure_device_kernel +/// +/// @brief Prepares the given repetition code in the |0⟩ state +/// @param[in,out] p The quantum patch to initialize +__qpu__ void prep0(patch p); + +/// \pure_device_kernel +/// +/// @brief Prepare a repetition code patch in the |1⟩ state +/// @param p The patch to prepare +__qpu__ void prep1(patch p); + +/// @brief Measures the X and Z stabilizers for the repetition code +/// @param[in] p The quantum patch to measure stabilizers on +/// @param[in] x_stabilizers Vector of qubit indices for X stabilizer +/// measurements +/// @param[in] z_stabilizers Vector of qubit indices for Z stabilizer +/// measurements +/// @return Vector containing the measurement results +__qpu__ std::vector +stabilizer(patch p, const std::vector &x_stabilizers, + const std::vector &z_stabilizers); + +/// @brief Implementation of the repetition quantum error correction code +class repetition : public cudaq::qec::code { +protected: + /// @brief The code distance parameter + std::size_t distance; + + /// @brief Gets the number of data qubits in the code + /// @return Number of data qubits + std::size_t get_num_data_qubits() const override; + + /// @brief Gets the total number of ancilla qubits + /// @return Total number of ancilla qubits + std::size_t get_num_ancilla_qubits() const override; + + /// @brief Gets the number of X-basis ancilla qubits + /// @return Number of X ancilla qubits + std::size_t get_num_ancilla_x_qubits() const override; + + /// @brief Gets the number of Z-basis ancilla qubits + /// @return Number of Z ancilla qubits + std::size_t get_num_ancilla_z_qubits() const override; + +public: + /// @brief Constructs a repetition code instance + repetition(const heterogeneous_map &); + + /// @brief Factory function to create repetition code instances + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( + repetition, static std::unique_ptr create( + const cudaqx::heterogeneous_map &options) { + return std::make_unique(options); + }) +}; +} // namespace cudaq::qec::repetition diff --git a/libs/qec/include/cudaq/qec/codes/steane.h b/libs/qec/include/cudaq/qec/codes/steane.h new file mode 100644 index 0000000..e1feb14 --- /dev/null +++ b/libs/qec/include/cudaq/qec/codes/steane.h @@ -0,0 +1,134 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ +#pragma once + +#include "cudaq/qec/code.h" +#include "cudaq/qec/patch.h" + +using namespace cudaqx; + +namespace cudaq::qec::steane { + +/// \pure_device_kernel +/// +/// @brief Apply X gate to a Steane code patch +/// @param p The patch to apply the X gate to +__qpu__ void x(patch p); + +/// \pure_device_kernel +/// +/// @brief Apply Y gate to a Steane code patch +/// @param p The patch to apply the Y gate to +__qpu__ void y(patch p); + +/// \pure_device_kernel +/// +/// @brief Apply Z gate to a Steane code patch +/// @param p The patch to apply the Z gate to +__qpu__ void z(patch p); + +/// \pure_device_kernel +/// +/// @brief Apply Hadamard gate to a Steane code patch +/// @param p The patch to apply the Hadamard gate to +__qpu__ void h(patch p); + +/// \pure_device_kernel +/// +/// @brief Apply S gate to a Steane code patch +/// @param p The patch to apply the S gate to +__qpu__ void s(patch p); + +/// \pure_device_kernel +/// +/// @brief Apply controlled-X gate between two Steane code patches +/// @param control The control patch +/// @param target The target patch +__qpu__ void cx(patch control, patch target); + +/// \pure_device_kernel +/// +/// @brief Apply controlled-Y gate between two Steane code patches +/// @param control The control patch +/// @param target The target patch +__qpu__ void cy(patch control, patch target); + +/// \pure_device_kernel +/// +/// @brief Apply controlled-Z gate between two Steane code patches +/// @param control The control patch +/// @param target The target patch +__qpu__ void cz(patch control, patch target); + +/// \pure_device_kernel +/// +/// @brief Prepare a Steane code patch in the |0⟩ state +/// @param p The patch to prepare +__qpu__ void prep0(patch p); + +/// \pure_device_kernel +/// +/// @brief Prepare a Steane code patch in the |1⟩ state +/// @param p The patch to prepare +__qpu__ void prep1(patch p); + +/// \pure_device_kernel +/// +/// @brief Prepare a Steane code patch in the |+⟩ state +/// @param p The patch to prepare +__qpu__ void prepp(patch p); + +/// \pure_device_kernel +/// +/// @brief Prepare a Steane code patch in the |-⟩ state +/// @param p The patch to prepare +__qpu__ void prepm(patch p); + +/// \pure_device_kernel +/// +/// @brief Perform stabilizer measurements on a Steane code patch +/// @param p The patch to measure +/// @param x_stabilizers Indices of X stabilizers to measure +/// @param z_stabilizers Indices of Z stabilizers to measure +/// @return Vector of measurement results +__qpu__ std::vector +stabilizer(patch p, const std::vector &x_stabilizers, + const std::vector &z_stabilizers); + +/// @brief Steane code implementation +class steane : public cudaq::qec::code { +protected: + /// @brief Get the number of data qubits in the Steane code + /// @return Number of data qubits (7 for Steane code) + std::size_t get_num_data_qubits() const override { return 7; } + + /// @brief Get the number of total ancilla qubits in the Steane code + /// @return Number of data qubits (6 for Steane code) + std::size_t get_num_ancilla_qubits() const override { return 6; } + + /// @brief Get the number of X ancilla qubits in the Steane code + /// @return Number of data qubits (3 for Steane code) + std::size_t get_num_ancilla_x_qubits() const override { return 3; } + + /// @brief Get the number of Z ancilla qubits in the Steane code + /// @return Number of data qubits (3 for Steane code) + std::size_t get_num_ancilla_z_qubits() const override { return 3; 
} + +public: + /// @brief Constructor for the Steane code + steane(const heterogeneous_map &); + + /// @brief Extension creator function for the Steane code + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( + steane, static std::unique_ptr create( + const cudaqx::heterogeneous_map &options) { + return std::make_unique(options); + }) +}; + +} // namespace cudaq::qec::steane diff --git a/libs/qec/include/cudaq/qec/decoder.h b/libs/qec/include/cudaq/qec/decoder.h new file mode 100644 index 0000000..e669459 --- /dev/null +++ b/libs/qec/include/cudaq/qec/decoder.h @@ -0,0 +1,234 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once + +#include "cuda-qx/core/extension_point.h" +#include "cuda-qx/core/heterogeneous_map.h" +#include "cuda-qx/core/tensor.h" +#include +#include + +namespace cudaq::qec { + +#if defined(CUDAQX_QEC_FLOAT_TYPE) +using float_t = CUDAQX_QEC_FLOAT_TYPE; +#else +using float_t = float; +#endif + +/// @brief Decoder results +struct decoder_result { + /// @brief Whether or not the decoder converged + bool converged = false; + + /// @brief Vector of length `block_size` with soft probabilities of errors in + /// each index. + std::vector result; +}; + +/// @brief The `decoder` base class should be subclassed by specific decoder +/// implementations. The `heterogeneous_map` provides a placeholder for +/// arbitrary constructor parameters that can be unique to each specific +/// decoder. +class decoder + : public cudaqx::extension_point &, + const cudaqx::heterogeneous_map &> { +public: + decoder() = delete; + + /// @brief Constructor + /// @param H Decoder's parity check matrix represented as a tensor. The tensor + /// is required be rank 2 and must be of dimensions \p syndrome_size x + /// \p block_size. + /// will use the same \p H. + decoder(const cudaqx::tensor &H); + + /// @brief Decode a single syndrome + /// @param syndrome A vector of syndrome measurements where the floating point + /// value is the probability that the syndrome measurement is a |1>. The + /// length of the syndrome vector should be an integer multiple of the + /// decoder's \p syndrome_size. + /// @returns Vector of length \p block_size with soft probabilities of errors + /// in each index. + virtual decoder_result decode(const std::vector &syndrome) = 0; + + /// @brief Decode a single syndrome + /// @param syndrome An order-1 tensor of syndrome measurements where a 1 bit + /// represents that the syndrome measurement is a |1>. The + /// length of the syndrome vector should be an integer multiple of the + /// decoder's \p syndrome_size. + /// @returns Vector of length \p block_size of errors in each index. + virtual decoder_result decode(const cudaqx::tensor &syndrome); + + /// @brief Decode a single syndrome + /// @param syndrome A vector of syndrome measurements where the floating point + /// value is the probability that the syndrome measurement is a |1>. + /// @returns std::future of a vector of length `block_size` with soft + /// probabilities of errors in each index. 
+ virtual std::future + decode_async(const std::vector &syndrome); + + /// @brief Decode multiple independent syndromes (may be done in serial or + /// parallel depending on the specific implementation) + /// @param syndrome A vector of `N` syndrome measurements where the floating + /// point value is the probability that the syndrome measurement is a |1>. + /// @returns 2-D vector of size `N` x `block_size` with soft probabilities of + /// errors in each index. + virtual std::vector + decode_multi(const std::vector> &syndrome); + + /// @brief This `get` overload supports default values. + static std::unique_ptr + get(const std::string &name, const cudaqx::tensor &H, + const cudaqx::heterogeneous_map ¶m_map = cudaqx::heterogeneous_map()); + + std::size_t get_block_size() { return block_size; } + std::size_t get_syndrome_size() { return syndrome_size; } + + /// @brief Destructor + virtual ~decoder() {} + +protected: + /// @brief For a classical `[n,k]` code, this is `n`. + std::size_t block_size = 0; + + /// @brief For a classical `[n,k]` code, this is `n-k` + std::size_t syndrome_size = 0; + + /// @brief The decoder's parity check matrix + cudaqx::tensor H; +}; + +/// @brief Convert a vector of soft probabilities to a vector of hard +/// probabilities. +/// @param in Soft probability input vector in range [0.0, 1.0] +/// @param out Hard probability output vector containing only 0/false or 1/true. +/// @param thresh Values >= thresh are assigned 1/true and all others are +/// assigned 0/false. +template ::value && + (std::is_integral::value || + std::is_same::value), + int>::type = 0> +inline void convert_vec_soft_to_hard(const std::vector &in, + std::vector &out, + t_soft thresh = 0.5) { + out.clear(); + out.reserve(in.size()); + for (auto x : in) + out.push_back(static_cast(x >= thresh ? 1 : 0)); +} + +/// @brief Convert a vector of soft probabilities to a tensor of hard +/// probabilities. Tensor must be uninitialized, or initialized to a rank-1 +/// tensor for equal dim as the vector. +/// @param in Soft probability input vector in range [0.0, 1.0] +/// @param out Hard probability output tensor containing only 0/false or 1/true. +/// @param thresh Values >= thresh are assigned 1/true and all others are +/// assigned 0/false. +template ::value && + (std::is_integral::value || + std::is_same::value), + int>::type = 0> +inline void convert_vec_soft_to_tensor_hard(const std::vector &in, + cudaqx::tensor &out, + t_soft thresh = 0.5) { + if (out.shape().empty()) + out = cudaqx::tensor({in.size()}); + if (out.rank() != 1) + throw std::runtime_error( + "Vector to tensor conversion requires rank-1 tensor"); + if (out.shape()[0] != in.size()) + throw std::runtime_error( + "Vector to tensor conversion requires tensor dim == vector length"); + for (size_t i = 0; i < in.size(); ++i) + out.at({i}) = static_cast(in[i] >= thresh ? 1 : 0); +} + +/// @brief Convert a vector of hard probabilities to a vector of soft +/// probabilities. +/// @param in Hard probability input vector containing only 0/false or 1/true. 
+/// @param out Soft probability output vector in the range [0.0, 1.0] +/// @param true_val The soft probability value assigned when the input is 1 +/// (default to 1.0) +/// @param false_val The soft probability value assigned when the input is 0 +/// (default to 0.0) +template ::value && + (std::is_integral::value || + std::is_same::value), + int>::type = 0> +inline void convert_vec_hard_to_soft(const std::vector &in, + std::vector &out, + const t_soft true_val = 1.0, + const t_soft false_val = 0.0) { + out.clear(); + out.reserve(in.size()); + for (auto x : in) + out.push_back(static_cast(x ? true_val : false_val)); +} + +/// @brief Convert a 2D vector of soft probabilities to a 2D vector of hard +/// probabilities. +/// @param in Soft probability input vector in range [0.0, 1.0] +/// @param out Hard probability output vector containing only 0/false or 1/true. +/// @param thresh Values >= thresh are assigned 1/true and all others are +/// assigned 0/false. +template ::value && + (std::is_integral::value || + std::is_same::value), + int>::type = 0> +inline void convert_vec_soft_to_hard(const std::vector> &in, + std::vector> &out, + t_soft thresh = 0.5) { + out.clear(); + out.reserve(in.size()); + for (auto &r : in) { + std::vector out_row; + out_row.reserve(r.size()); + for (auto c : r) + out_row.push_back(static_cast(c >= thresh ? 1 : 0)); + out.push_back(std::move(out_row)); + } +} + +/// @brief Convert a 2D vector of hard probabilities to a 2D vector of soft +/// probabilities. +/// @param in Hard probability input vector containing only 0/false or 1/true. +/// @param out Soft probability output vector in the range [0.0, 1.0] +/// @param true_val The soft probability value assigned when the input is 1 +/// (default to 1.0) +/// @param false_val The soft probability value assigned when the input is 0 +/// (default to 0.0) +template ::value && + (std::is_integral::value || + std::is_same::value), + int>::type = 0> +inline void convert_vec_hard_to_soft(const std::vector> &in, + std::vector> &out, + const t_soft true_val = 1.0, + const t_soft false_val = 0.0) { + out.clear(); + out.reserve(in.size()); + for (auto &r : in) { + std::vector out_row; + out_row.reserve(r.size()); + for (auto c : r) + out_row.push_back(static_cast(c ? true_val : false_val)); + out.push_back(std::move(out_row)); + } +} + +std::unique_ptr +get_decoder(const std::string &name, const cudaqx::tensor &H, + const cudaqx::heterogeneous_map options = {}); +} // namespace cudaq::qec diff --git a/libs/qec/include/cudaq/qec/experiments.h b/libs/qec/include/cudaq/qec/experiments.h new file mode 100644 index 0000000..e76d79e --- /dev/null +++ b/libs/qec/include/cudaq/qec/experiments.h @@ -0,0 +1,103 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ +#pragma once + +#include "cudaq/qec/code.h" + +namespace cudaq::qec { + +/// @brief Generate rank-1 tensor of random bit flips +/// @param numBits Number of bits in tensor +/// @param error_probability Probability of bit flip on data +/// @return Tensor of randomly flipped bits +cudaqx::tensor generate_random_bit_flips(size_t numBits, + double error_probability); + +/// @brief Sample syndrome measurements with code capacity noise +/// @param H Parity check matrix of a QEC code +/// @param numShots Number of measurement shots +/// @param error_probability Probability of bit flip on data +/// @return Tuple containing syndrome measurements and data qubit +/// measurements +std::tuple, cudaqx::tensor> +sample_code_capacity(const cudaqx::tensor &H, std::size_t numShots, + double error_probability); + +/// @brief Sample syndrome measurements with code capacity noise +/// @param H Parity check matrix of a QEC code +/// @param numShots Number of measurement shots +/// @param error_probability Probability of bit flip on data +/// @param seed RNG seed for reproducible experiments +/// @return Tuple containing syndrome measurements and data qubit +/// measurements +std::tuple, cudaqx::tensor> +sample_code_capacity(const cudaqx::tensor &H, std::size_t numShots, + double error_probability, unsigned seed); + +/// @brief Sample syndrome measurements with code capacity noise +/// @param code QEC Code to sample +/// @param numShots Number of measurement shots +/// @param error_probability Probability of bit flip on data +/// @param seed RNG seed for reproducible experiments +/// @return Tuple containing syndrome measurements and data qubit +/// measurements +std::tuple, cudaqx::tensor> +sample_code_capacity(const code &code, std::size_t numShots, + double error_probability, unsigned seed); + +/// @brief Sample syndrome measurements with code capacity noise +/// @param code QEC Code to sample +/// @param numShots Number of measurement shots +/// @param error_probability Probability of bit flip on data +/// @return Tuple containing syndrome measurements and data qubit +/// measurements +std::tuple, cudaqx::tensor> +sample_code_capacity(const code &code, std::size_t numShots, + double error_probability); + +/// @brief Sample syndrome measurements with circuit-level noise +/// @param statePrep Initial state preparation operation +/// @param numShots Number of measurement shots +/// @param numRounds Number of stabilizer measurement rounds +/// @param noise Noise model to apply +/// @return Tuple containing syndrome measurements and data qubit +/// measurements (mz for z basis state prep, mx for x basis) +std::tuple, cudaqx::tensor> +sample_memory_circuit(const code &code, operation statePrep, + std::size_t numShots, std::size_t numRounds, + cudaq::noise_model &noise); + +/// @brief Sample syndrome measurements from the memory circuit +/// @param statePrep Initial state preparation operation +/// @param numShots Number of measurement shots +/// @param numRounds Number of stabilizer measurement rounds +/// @return Tuple containing syndrome measurements and data qubit +/// measurements (mz for z basis state prep, mx for x basis) +std::tuple, cudaqx::tensor> +sample_memory_circuit(const code &code, operation statePrep, + std::size_t numShots, std::size_t numRounds = 1); + +/// @brief Sample syndrome measurements starting from |0⟩ state +/// @param numShots Number of measurement shots +/// @param numRounds Number of stabilizer 
measurement rounds +/// @return Tuple containing syndrome measurements and data qubit +/// measurements (mz for z basis state prep, mx for x basis) +std::tuple, cudaqx::tensor> +sample_memory_circuit(const code &code, std::size_t numShots, + std::size_t numRounds = 1); + +/// @brief Sample syndrome measurements from |0⟩ state with noise +/// @param numShots Number of measurement shots +/// @param numRounds Number of stabilizer measurement rounds +/// @param noise Noise model to apply +/// @return Tuple containing syndrome measurements and data qubit +/// measurements (mz for z basis state prep, mx for x basis) +std::tuple, cudaqx::tensor> +sample_memory_circuit(const code &code, std::size_t numShots, + std::size_t numRounds, cudaq::noise_model &noise); +} // namespace cudaq::qec diff --git a/libs/qec/include/cudaq/qec/noise_model.h b/libs/qec/include/cudaq/qec/noise_model.h new file mode 100644 index 0000000..5b07ce5 --- /dev/null +++ b/libs/qec/include/cudaq/qec/noise_model.h @@ -0,0 +1,103 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include "common/NoiseModel.h" + +#include + +namespace cudaq::qec { + +/// @brief Namespace containing utility functions for quantum error correction +namespace details { + +/// @brief Typedef for a matrix wrapper using std::vector +using matrix_wrapper = std::vector; + +/// @brief Compute the Kronecker product of two matrices +/// +/// @param A First matrix +/// @param rowsA Number of rows in matrix A +/// @param colsA Number of columns in matrix A +/// @param B Second matrix +/// @param rowsB Number of rows in matrix B +/// @param colsB Number of columns in matrix B +/// @return matrix_wrapper Result of the Kronecker product +inline matrix_wrapper kron(const matrix_wrapper &A, int rowsA, int colsA, + const matrix_wrapper &B, int rowsB, int colsB) { + matrix_wrapper C((rowsA * rowsB) * (colsA * colsB)); + for (int i = 0; i < rowsA; ++i) { + for (int j = 0; j < colsA; ++j) { + for (int k = 0; k < rowsB; ++k) { + for (int l = 0; l < colsB; ++l) { + C[(i * rowsB + k) * (colsA * colsB) + (j * colsB + l)] = + A[i * colsA + j] * B[k * colsB + l]; + } + } + } + } + return C; +} + +} // namespace details + +/// @brief Two-qubit bit flip channel implementation +class two_qubit_bitflip : public cudaq::kraus_channel { +public: + /// @brief Construct a two qubit kraus channel that applies a bit flip on + /// either qubit independently. 
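A small sanity check for the details::kron helper defined above. This is illustrative only: details is an internal helper namespace, and the element type of matrix_wrapper is not legible in this rendering, so the check compares against matrix_wrapper::value_type rather than a concrete scalar type.

// Illustrative only: X (x) X computed with the flattened, row-major kron
// helper should give the 4x4 anti-diagonal matrix.
#include "cudaq/qec/noise_model.h"

#include <cassert>

int main() {
  using cudaq::qec::details::matrix_wrapper;
  matrix_wrapper X = {0, 1, 1, 0}; // 2x2 Pauli-X, row major
  auto XX = cudaq::qec::details::kron(X, 2, 2, X, 2, 2);
  assert(XX.size() == 16);
  assert(XX[0 * 4 + 3] == matrix_wrapper::value_type(1)); // entry (0, 3)
  assert(XX[3 * 4 + 0] == matrix_wrapper::value_type(1)); // entry (3, 0)
  return 0;
}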
+ /// + /// @param probability The probability of a bit flip occurring + /// + two_qubit_bitflip(const cudaq::real probability) : kraus_channel() { + std::vector K0{std::sqrt(1 - probability), 0, 0, + std::sqrt(1 - probability)}, + K1{0, std::sqrt(probability), std::sqrt(probability), 0}; + auto E0 = details::kron(K0, 2, 2, K0, 2, 2); + auto E1 = details::kron(K0, 2, 2, K1, 2, 2); + auto E2 = details::kron(K1, 2, 2, K0, 2, 2); + auto E3 = details::kron(K1, 2, 2, K1, 2, 2); + + // Set the ops vector to contain only the Kronecker product + ops = {E0, E1, E2, E3}; + this->parameters.push_back(probability); + noise_type = cudaq::noise_model_type::bit_flip_channel; + validateCompleteness(); + } +}; + +class two_qubit_depolarization : public cudaq::kraus_channel { +public: + /// @brief Construct a two qubit kraus channel that applies a depolarization + /// channel on either qubit independently. + /// + /// @param probability The probability of a bit flip occurring + /// + two_qubit_depolarization(const cudaq::real probability) : kraus_channel() { + auto three = static_cast(3.); + auto negOne = static_cast(-1.); + std::vector> singleQubitKraus = { + {std::sqrt(1 - probability), 0, 0, std::sqrt(1 - probability)}, + {0, std::sqrt(probability / three), std::sqrt(probability / three), 0}, + {0, cudaq::complex{0, negOne * std::sqrt(probability / three)}, + cudaq::complex{0, std::sqrt(probability / three)}, 0}, + {std::sqrt(probability / three), 0, 0, + negOne * std::sqrt(probability / three)}}; + + // Generate 2-qubit Kraus operators + for (const auto &k1 : singleQubitKraus) { + for (const auto &k2 : singleQubitKraus) { + ops.push_back(details::kron(k1, 2, 2, k2, 2, 2)); + } + } + this->parameters.push_back(probability); + noise_type = cudaq::noise_model_type::depolarization_channel; + validateCompleteness(); + } +}; +} // namespace cudaq::qec \ No newline at end of file diff --git a/libs/qec/include/cudaq/qec/patch.h b/libs/qec/include/cudaq/qec/patch.h new file mode 100644 index 0000000..fa4ab71 --- /dev/null +++ b/libs/qec/include/cudaq/qec/patch.h @@ -0,0 +1,32 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include "cudaq/qis/qubit_qis.h" + +namespace cudaq::qec { + +/// @brief Represents a logical qubit patch for quantum error correction +/// +/// This type is for CUDA-Q kernel code only. +/// +/// This structure defines a patch of qubits used in quantum error correction +/// codes. It consists of data qubits and ancilla qubits for X and Z stabilizer +/// measurements. 
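A sketch of wiring the two-qubit channels above into a circuit-level experiment. The add_all_qubit_channel call and its numControls argument come from the CUDA-Q noise_model API rather than from this patch, so treat them as assumptions and check the CUDA-Q documentation for the exact overload; the channels and the noisy sample_memory_circuit overload are the ones declared in this patch.

// Illustrative only: apply two-qubit depolarization after every CX gate and
// run a noisy Steane memory-circuit experiment.
#include "cudaq/qec/experiments.h"
#include "cudaq/qec/noise_model.h"

void run_noisy_memory_experiment() {
  auto steane = cudaq::qec::get_code("steane");

  cudaq::noise_model noise;
  // CX is an X with one control; numControls selects the controlled variant.
  noise.add_all_qubit_channel("x", cudaq::qec::two_qubit_depolarization(0.01),
                              /*numControls=*/1);

  auto [syndromes, data] = cudaq::qec::sample_memory_circuit(
      *steane, /*numShots=*/100, /*numRounds=*/4, noise);
  // syndromes holds XOR-ed consecutive rounds; data holds the final readout.
}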
+struct patch { + /// @brief View of the data qubits in the patch + cudaq::qview<> data; + + /// @brief View of the ancilla qubits used for X stabilizer measurements + cudaq::qview<> ancx; + + /// @brief View of the ancilla qubits used for Z stabilizer measurements + cudaq::qview<> ancz; +}; + +} // namespace cudaq::qec diff --git a/libs/qec/include/cudaq/qec/stabilizer_utils.h b/libs/qec/include/cudaq/qec/stabilizer_utils.h new file mode 100644 index 0000000..26143cf --- /dev/null +++ b/libs/qec/include/cudaq/qec/stabilizer_utils.h @@ -0,0 +1,30 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include + +#include "cuda-qx/core/tensor.h" + +#include "cudaq/qis/pauli_word.h" +#include "cudaq/spin_op.h" + +namespace cudaq::qec { +enum class stabilizer_type { XZ, X, Z }; +void sortStabilizerOps(std::vector &ops); + +/// Convert stabilizers to a parity check matrix +/// @return Tensor representing the parity check matrix +cudaqx::tensor +to_parity_matrix(const std::vector &stabilizers, + stabilizer_type type = stabilizer_type::XZ); +cudaqx::tensor +to_parity_matrix(const std::vector &words, + stabilizer_type type = stabilizer_type::XZ); + +} // namespace cudaq::qec \ No newline at end of file diff --git a/libs/qec/lib/CMakeLists.txt b/libs/qec/lib/CMakeLists.txt new file mode 100644 index 0000000..35c845c --- /dev/null +++ b/libs/qec/lib/CMakeLists.txt @@ -0,0 +1,83 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +add_compile_options(-Wno-attributes) + +# FIXME?: This must be a shared library. Trying to build a static one will fail. 
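A usage sketch for the to_parity_matrix overloads declared above. The element types shown here (std::string Pauli words, an unsigned 8-bit tensor) are assumptions, since the template arguments are not legible in this rendering of the patch.

// Illustrative only: build the Steane code's parity-check matrices from its
// stabilizer Pauli words.
#include "cudaq/qec/stabilizer_utils.h"

#include <string>
#include <vector>

void build_steane_parity_checks() {
  std::vector<std::string> words = {"XXXXIII", "IXXIXXI", "IIXXIXX",
                                    "ZZZZIII", "IZZIZZI", "IIZZIZZ"};

  // Combined [H_Z | 0 ; 0 | H_X] form: 6 x 14 for the Steane code.
  auto H = cudaq::qec::to_parity_matrix(words);

  // Individual blocks, each 3 x 7.
  auto Hx = cudaq::qec::to_parity_matrix(words, cudaq::qec::stabilizer_type::X);
  auto Hz = cudaq::qec::to_parity_matrix(words, cudaq::qec::stabilizer_type::Z);
}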
+add_library(cudaq-qec SHARED + code.cpp + stabilizer_utils.cpp + decoder.cpp + experiments.cpp + decoders/single_error_lut.cpp +) + +add_subdirectory(codes) +add_subdirectory(device) + +if (CUDAQX_QEC_USE_DOUBLE) + target_compile_definitions(cudaq-qec PUBLIC -DCUDAQX_QEC_FLOAT_TYPE=double) +endif() + +target_include_directories(cudaq-qec + PUBLIC + $ + $ + $ +) + +target_link_options(cudaq-qec PUBLIC + $<$:-Wl,--no-as-needed> +) + +target_link_libraries(cudaq-qec + PUBLIC + cudaqx-core + cudaq::cudaq + cudaq::cudaq-spin + PRIVATE + cudaq::cudaq-common +) + +set_target_properties(cudaq-qec PROPERTIES + LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) + +# RPATH configuration +# ============================================================================== + +if (NOT SKBUILD) + set_target_properties(cudaq-qec PROPERTIES + BUILD_RPATH "$ORIGIN" + INSTALL_RPATH "$ORIGIN:$ORIGIN/../lib" + ) + + # Let CMake automatically add paths of linked libraries to the RPATH: + set_target_properties(cudaq-qec PROPERTIES + INSTALL_RPATH_USE_LINK_PATH TRUE) +else() + # CUDA-Q install its libraries in site-packages/lib (or dist-packages/lib) + # Thus, we need the $ORIGIN/../lib + set_target_properties(cudaq-qec PROPERTIES + INSTALL_RPATH "$ORIGIN/../../lib" + ) +endif() + +# Install +# ============================================================================== + +install(TARGETS cudaq-qec + COMPONENT qec-lib + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + +install(DIRECTORY ${CUDAQX_QEC_INCLUDE_DIR}/cudaq + COMPONENT qec-headers + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + FILES_MATCHING PATTERN "*.h" +) + diff --git a/libs/qec/lib/code.cpp b/libs/qec/lib/code.cpp new file mode 100644 index 0000000..3ce87ce --- /dev/null +++ b/libs/qec/lib/code.cpp @@ -0,0 +1,76 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ +#include "cudaq/qec/code.h" + +#include "device/memory_circuit.h" + +INSTANTIATE_REGISTRY(cudaq::qec::code, const cudaqx::heterogeneous_map &) + +namespace cudaq::qec { + +std::unique_ptr code::get(const std::string &name, + const std::vector &_stabilizers, + const heterogeneous_map options) { + auto ®istry = get_registry(); + auto iter = registry.find(name); + if (iter == registry.end()) + throw std::runtime_error("invalid qec_code requested: " + name); + auto ret = iter->second(options); + ret->m_stabilizers = _stabilizers; + return ret; +} + +std::unique_ptr code::get(const std::string &name, + const heterogeneous_map options) { + auto ®istry = get_registry(); + auto iter = registry.find(name); + if (iter == registry.end()) + throw std::runtime_error("invalid qec_code requested: " + name); + auto ret = iter->second(options); + return ret; +} + +cudaqx::tensor code::get_parity() const { + return to_parity_matrix(m_stabilizers); +} +cudaqx::tensor code::get_parity_x() const { + return to_parity_matrix(m_stabilizers, stabilizer_type::X); +} + +cudaqx::tensor code::get_parity_z() const { + return to_parity_matrix(m_stabilizers, stabilizer_type::Z); +} + +cudaqx::tensor code::get_pauli_observables_matrix() const { + return to_parity_matrix(m_pauli_observables); +} + +cudaqx::tensor code::get_observables_x() const { + return to_parity_matrix(m_pauli_observables, stabilizer_type::X); +} + +cudaqx::tensor code::get_observables_z() const { + return to_parity_matrix(m_pauli_observables, stabilizer_type::Z); +} + +std::unique_ptr get_code(const std::string &name, + const std::vector &stab, + const heterogeneous_map options) { + return code::get(name, stab, options); +} + +std::unique_ptr get_code(const std::string &name, + const heterogeneous_map options) { + return code::get(name, options); +} + +std::vector get_available_codes() { + return code::get_registered(); +} + +} // namespace cudaq::qec diff --git a/libs/qec/lib/codes/CMakeLists.txt b/libs/qec/lib/codes/CMakeLists.txt new file mode 100644 index 0000000..3cf88f1 --- /dev/null +++ b/libs/qec/lib/codes/CMakeLists.txt @@ -0,0 +1,15 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +cudaqx_add_device_code(cudaq-qec + SOURCES + steane_device.cpp + repetition_device.cpp +) + +target_sources(cudaq-qec PRIVATE steane.cpp repetition.cpp) diff --git a/libs/qec/lib/codes/repetition.cpp b/libs/qec/lib/codes/repetition.cpp new file mode 100644 index 0000000..06b7ff9 --- /dev/null +++ b/libs/qec/lib/codes/repetition.cpp @@ -0,0 +1,52 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
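A usage sketch for the code registry and accessors implemented above. It assumes the options argument of get_code is defaulted in code.h, which is part of this patch but not shown in this excerpt.

// Illustrative only: list the registered codes and inspect the Steane code's
// check matrices and logical observables.
#include "cudaq/qec/code.h"

#include <cstdio>

void inspect_codes() {
  for (const auto &name : cudaq::qec::get_available_codes())
    std::printf("registered code: %s\n", name.c_str());

  auto steane = cudaq::qec::get_code("steane");
  auto H = steane->get_parity();           // [H_Z | 0 ; 0 | H_X]
  auto Hx = steane->get_parity_x();        // X-stabilizer rows
  auto obsZ = steane->get_observables_z(); // logical-Z observable rows
}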
* + ******************************************************************************/ +#include "cudaq/qec/codes/repetition.h" + +namespace cudaq::qec::repetition { + +std::size_t repetition::get_num_data_qubits() const { return distance; } + +std::size_t repetition::get_num_ancilla_qubits() const { return distance - 1; } +std::size_t repetition::get_num_ancilla_x_qubits() const { return 0; } +std::size_t repetition::get_num_ancilla_z_qubits() const { + return get_num_ancilla_qubits(); +} + +repetition::repetition(const heterogeneous_map &options) : code() { + if (!options.contains("distance")) + throw std::runtime_error( + "[repetition] distance not provided. distance must be provided via " + "qec::get_code(..., options) options map."); + distance = options.get("distance"); + + // fill the operations + operation_encodings.insert( + std::make_pair(operation::stabilizer_round, stabilizer)); + operation_encodings.insert(std::make_pair(operation::x, x)); + operation_encodings.insert(std::make_pair(operation::prep0, prep0)); + operation_encodings.insert(std::make_pair(operation::prep1, prep1)); + + // Default Stabilizers should be Zi-1 Zi + for (std::size_t i = 1; i < get_num_data_qubits(); i++) { + m_stabilizers.push_back(cudaq::spin::i(get_num_data_qubits() - 1) * + cudaq::spin::z(i - 1) * cudaq::spin::z(i)); + } + + // Default Logical Observable is ZI...I + // This class is only for Z basis experiments + // so there is no X observable included. + cudaq::spin_op Lz = cudaq::spin::z(0); + Lz = Lz * cudaq::spin::i(get_num_data_qubits() - 1); + + m_pauli_observables.push_back(Lz); +} + +/// @brief Register the repetition code type +CUDAQ_REGISTER_TYPE(repetition) + +} // namespace cudaq::qec::repetition diff --git a/libs/qec/lib/codes/repetition_device.cpp b/libs/qec/lib/codes/repetition_device.cpp new file mode 100644 index 0000000..5a58e69 --- /dev/null +++ b/libs/qec/lib/codes/repetition_device.cpp @@ -0,0 +1,43 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
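A sketch of constructing the repetition code, whose constructor above requires a "distance" entry in its options map. The insert(key, value) member on cudaqx::heterogeneous_map is an assumption; that header is listed in this patch but not shown in this excerpt.

// Illustrative only: build a distance-5 repetition code (5 data qubits,
// 4 Z-type ancillas) by passing the required option.
#include "cudaq/qec/code.h"

#include <cstddef>

void build_repetition_code() {
  cudaqx::heterogeneous_map options;
  options.insert("distance", std::size_t{5}); // assumed API, see note above

  auto rep = cudaq::qec::get_code("repetition", options);
  auto Hz = rep->get_parity_z(); // 4 x 5 parity-check matrix
}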
* + ******************************************************************************/ +#include "cudaq/qec/codes/repetition.h" + +namespace cudaq::qec::repetition { + +__qpu__ void x(patch logicalQubit) { x(logicalQubit.data); } + +__qpu__ void prep0(patch logicalQubit) { + for (std::size_t i = 0; i < logicalQubit.data.size(); i++) + reset(logicalQubit.data[i]); +} + +__qpu__ void prep1(patch logicalQubit) { + prep0(logicalQubit); + x(logicalQubit.data); +} + +__qpu__ std::vector +stabilizer(patch logicalQubit, const std::vector &x_stabilizers, + const std::vector &z_stabilizers) { + + // cnot between every data qubit + for (std::size_t i = 0; i < logicalQubit.ancz.size(); i++) + cudaq::x(logicalQubit.data[i], logicalQubit.ancz[i]); + + for (std::size_t i = 1; i < logicalQubit.data.size(); i++) + cudaq::x(logicalQubit.data[i], logicalQubit.ancz[i - 1]); + + auto results = mz(logicalQubit.ancz); + + for (std::size_t i = 0; i < logicalQubit.ancz.size(); i++) + reset(logicalQubit.ancz[i]); + + return results; +} + +} // namespace cudaq::qec::repetition diff --git a/libs/qec/lib/codes/steane.cpp b/libs/qec/lib/codes/steane.cpp new file mode 100644 index 0000000..8359d31 --- /dev/null +++ b/libs/qec/lib/codes/steane.cpp @@ -0,0 +1,38 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include "cudaq/qec/codes/steane.h" + +using cudaq::qec::operation; + +namespace cudaq::qec::steane { + +steane::steane(const heterogeneous_map &options) : code() { + operation_encodings.insert(std::make_pair(operation::x, x)); + operation_encodings.insert(std::make_pair(operation::y, y)); + operation_encodings.insert(std::make_pair(operation::z, z)); + operation_encodings.insert(std::make_pair(operation::h, h)); + operation_encodings.insert(std::make_pair(operation::s, s)); + operation_encodings.insert(std::make_pair(operation::cx, cx)); + operation_encodings.insert(std::make_pair(operation::cy, cy)); + operation_encodings.insert(std::make_pair(operation::cz, cz)); + operation_encodings.insert( + std::make_pair(operation::stabilizer_round, stabilizer)); + operation_encodings.insert(std::make_pair(operation::prep0, prep0)); + operation_encodings.insert(std::make_pair(operation::prep1, prep1)); + operation_encodings.insert(std::make_pair(operation::prepp, prepp)); + operation_encodings.insert(std::make_pair(operation::prepm, prepm)); + + m_stabilizers = fromPauliWords( + {"XXXXIII", "IXXIXXI", "IIXXIXX", "ZZZZIII", "IZZIZZI", "IIZZIZZ"}); + m_pauli_observables = fromPauliWords({"IIIIXXX", "IIIIZZZ"}); +} + +/// @brief Register the Steane code type +CUDAQ_REGISTER_TYPE(steane) + +} // namespace cudaq::qec::steane diff --git a/libs/qec/lib/codes/steane_device.cpp b/libs/qec/lib/codes/steane_device.cpp new file mode 100644 index 0000000..6428165 --- /dev/null +++ b/libs/qec/lib/codes/steane_device.cpp @@ -0,0 +1,101 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "cudaq.h" +#include "cudaq/qec/patch.h" + +// using qec::patch; + +namespace cudaq::qec::steane { + +__qpu__ void x(patch logicalQubit) { + x(logicalQubit.data[4], logicalQubit.data[5], logicalQubit.data[6]); +} +__qpu__ void y(patch logicalQubit) { y(logicalQubit.data); } +__qpu__ void z(patch logicalQubit) { + z(logicalQubit.data[4], logicalQubit.data[5], logicalQubit.data[6]); +} +__qpu__ void h(patch logicalQubit) { h(logicalQubit.data); } +__qpu__ void s(patch logicalQubit) { s(logicalQubit.data); } + +__qpu__ void cx(patch logicalQubitA, patch logicalQubitB) { + for (std::size_t i = 0; i < 7; i++) { + x(logicalQubitA.data[i], logicalQubitB.data[i]); + } +} + +__qpu__ void cy(patch logicalQubitA, patch logicalQubitB) { + for (std::size_t i = 0; i < 7; i++) { + y(logicalQubitA.data[i], logicalQubitB.data[i]); + } +} + +__qpu__ void cz(patch logicalQubitA, patch logicalQubitB) { + for (std::size_t i = 0; i < 7; i++) { + z(logicalQubitA.data[i], logicalQubitB.data[i]); + } +} + +__qpu__ void prep0(patch logicalQubit) { + h(logicalQubit.data[0], logicalQubit.data[4], logicalQubit.data[6]); + x(logicalQubit.data[0], logicalQubit.data[1]); + x(logicalQubit.data[4], logicalQubit.data[5]); + x(logicalQubit.data[6], logicalQubit.data[3]); + x(logicalQubit.data[6], logicalQubit.data[5]); + x(logicalQubit.data[4], logicalQubit.data[2]); + x(logicalQubit.data[0], logicalQubit.data[3]); + x(logicalQubit.data[4], logicalQubit.data[1]); + x(logicalQubit.data[3], logicalQubit.data[2]); +} + +__qpu__ void prep1(patch logicalQubit) { + prep0(logicalQubit); + x(logicalQubit.data); +} + +__qpu__ void prepp(patch logicalQubit) { + prep0(logicalQubit); + h(logicalQubit.data); +} + +__qpu__ void prepm(patch logicalQubit) { + prep0(logicalQubit); + x(logicalQubit.data); + h(logicalQubit.data); +} + +__qpu__ std::vector +stabilizer(patch logicalQubit, const std::vector &x_stabilizers, + const std::vector &z_stabilizers) { + h(logicalQubit.ancx); + for (std::size_t xi = 0; xi < logicalQubit.ancx.size(); ++xi) + for (std::size_t di = 0; di < logicalQubit.data.size(); ++di) + if (x_stabilizers[xi * logicalQubit.data.size() + di] == 1) + cudaq::x(logicalQubit.ancx[xi], logicalQubit.data[di]); + h(logicalQubit.ancx); + + // Now apply z_stabilizer circuit + for (size_t zi = 0; zi < logicalQubit.ancz.size(); ++zi) + for (size_t di = 0; di < logicalQubit.data.size(); ++di) + if (z_stabilizers[zi * logicalQubit.data.size() + di] == 1) + cudaq::x(logicalQubit.data[di], logicalQubit.ancz[zi]); + + // S = (S_X, S_Z), (x flip syndromes, z flip syndrones). + // x flips are triggered by z-stabilizers (ancz) + // z flips are triggered by x-stabilizers (ancx) + auto results = mz(logicalQubit.ancz, logicalQubit.ancx); + + for (std::size_t i = 0; i < logicalQubit.ancx.size(); i++) + reset(logicalQubit.ancx[i]); + for (std::size_t i = 0; i < logicalQubit.ancz.size(); i++) + reset(logicalQubit.ancz[i]); + + return results; +} + +} // namespace cudaq::qec::steane diff --git a/libs/qec/lib/decoder.cpp b/libs/qec/lib/decoder.cpp new file mode 100644 index 0000000..9da1a07 --- /dev/null +++ b/libs/qec/lib/decoder.cpp @@ -0,0 +1,73 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. 
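The measurement order in the Steane stabilizer round above, mz(ancz, ancx), fixes the column layout of each syndrome row: the first block comes from the Z stabilizers and flags X (bit-flip) errors, the second comes from the X stabilizers and flags Z (phase-flip) errors. Below is a small sketch of splitting one row accordingly; the uint8_t element type is an assumption, and the snippet is illustrative rather than part of the patch.

// Illustrative only: split a 6-entry Steane syndrome row into its Z-stabilizer
// and X-stabilizer halves, matching the mz(ancz, ancx) ordering above.
#include <cstddef>
#include <cstdint>
#include <vector>

void split_steane_syndrome(const std::vector<uint8_t> &row) {
  constexpr std::size_t numAncZ = 3, numAncX = 3;
  std::vector<uint8_t> xFlipSyndrome(row.begin(), row.begin() + numAncZ);
  std::vector<uint8_t> zFlipSyndrome(row.begin() + numAncZ,
                                     row.begin() + numAncZ + numAncX);
  // Decode xFlipSyndrome against H_Z and zFlipSyndrome against H_X.
}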
* + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/qec/decoder.h" +#include +#include + +INSTANTIATE_REGISTRY(cudaq::qec::decoder, const cudaqx::tensor &) +INSTANTIATE_REGISTRY(cudaq::qec::decoder, const cudaqx::tensor &, + const cudaqx::heterogeneous_map &) + +namespace cudaq::qec { + +decoder::decoder(const cudaqx::tensor &H) : H(H) { + const auto H_shape = H.shape(); + assert(H_shape.size() == 2 && "H tensor must be of rank 2"); + syndrome_size = H_shape[0]; + block_size = H_shape[1]; +} + +// Provide a trivial implementation of for tensor decode call. Child +// classes should override this if they never want to pass through floats. +decoder_result decoder::decode(const cudaqx::tensor &syndrome) { + // Check tensor is of order-1 + // If order >1, we could check that other modes are of dim = 1 such that + // n x 1, or 1 x n tensors are still valid. + if (syndrome.rank() != 1) { + throw std::runtime_error("Decode requires rank-1 tensors"); + } + std::vector soft_syndrome(syndrome.shape()[0]); + std::vector vec_cast(syndrome.data(), + syndrome.data() + syndrome.shape()[0]); + convert_vec_hard_to_soft(vec_cast, soft_syndrome); + return decode(soft_syndrome); +} + +// Provide a trivial implementation of the multi-syndrome decoder. Child classes +// should override this if they can do it more efficiently than this. +std::vector +decoder::decode_multi(const std::vector> &syndrome) { + std::vector result; + result.reserve(syndrome.size()); + for (auto &s : syndrome) + result.push_back(decode(s)); + return result; +} + +std::future +decoder::decode_async(const std::vector &syndrome) { + return std::async(std::launch::async, [&] { return this->decode(syndrome); }); +} + +std::unique_ptr +decoder::get(const std::string &name, const cudaqx::tensor &H, + const cudaqx::heterogeneous_map ¶m_map) { + auto ®istry = get_registry(); + auto iter = registry.find(name); + if (iter == registry.end()) + throw std::runtime_error("invalid decoder requested: " + name); + return iter->second(H, param_map); +} + +std::unique_ptr get_decoder(const std::string &name, + const cudaqx::tensor &H, + const cudaqx::heterogeneous_map options) { + return decoder::get(name, H, options); +} +} // namespace cudaq::qec diff --git a/libs/qec/lib/decoders/single_error_lut.cpp b/libs/qec/lib/decoders/single_error_lut.cpp new file mode 100644 index 0000000..9af78e4 --- /dev/null +++ b/libs/qec/lib/decoders/single_error_lut.cpp @@ -0,0 +1,91 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/qec/decoder.h" +#include +#include +#include + +namespace cudaq::qec { + +/// @brief This is a simple LUT (LookUp Table) decoder that demonstrates how to +/// build a simple decoder that can correctly decode errors during a single bit +/// flip in the block. 
+class single_error_lut : public decoder { +private: + std::map single_qubit_err_signatures; + +public: + single_error_lut(const cudaqx::tensor &H, + const cudaqx::heterogeneous_map ¶ms) + : decoder(H) { + // Decoder-specific constructor arguments can be placed in `params`. + + // Build a lookup table for an error on each possible qubit + + // For each qubit with a possible error, calculate an error signature. + for (std::size_t qErr = 0; qErr < block_size; qErr++) { + std::string err_sig(syndrome_size, '0'); + for (std::size_t r = 0; r < syndrome_size; r++) { + bool syndrome = 0; + // Toggle syndrome on every "1" entry in the row. + // Except if there is an error on this qubit (c == qErr). + for (std::size_t c = 0; c < block_size; c++) + syndrome ^= (c != qErr) && H.at({r, c}); + err_sig[r] = syndrome ? '1' : '0'; + } + // printf("Adding err_sig=%s for qErr=%lu\n", err_sig.c_str(), qErr); + single_qubit_err_signatures.insert({err_sig, qErr}); + } + } + + virtual decoder_result decode(const std::vector &syndrome) { + // This is a simple decoder that simply results + decoder_result result{false, std::vector(block_size, 0.0)}; + + // Convert syndrome to a string + std::string syndrome_str(syndrome.size(), '0'); + assert(syndrome_str.length() == syndrome_size); + bool anyErrors = false; + for (std::size_t i = 0; i < syndrome_size; i++) { + if (syndrome[i] >= 0.5) { + syndrome_str[i] = '1'; + anyErrors = true; + } + } + + if (!anyErrors) { + result.converged = true; + return result; + } + + auto it = single_qubit_err_signatures.find(syndrome_str); + if (it != single_qubit_err_signatures.end()) { + assert(it->second < block_size); + result.converged = true; + result.result[it->second] = 1.0; + } else { + // Leave result.converged set to false. + } + + return result; + } + + virtual ~single_error_lut() {} + + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( + single_error_lut, static std::unique_ptr create( + const cudaqx::tensor &H, + const cudaqx::heterogeneous_map ¶ms) { + return std::make_unique(H, params); + }) +}; + +CUDAQ_REGISTER_TYPE(single_error_lut) + +} // namespace cudaq::qec diff --git a/libs/qec/lib/device/CMakeLists.txt b/libs/qec/lib/device/CMakeLists.txt new file mode 100644 index 0000000..2222458 --- /dev/null +++ b/libs/qec/lib/device/CMakeLists.txt @@ -0,0 +1,12 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +cudaqx_add_device_code(cudaq-qec + SOURCES + memory_circuit.cpp +) diff --git a/libs/qec/lib/device/memory_circuit.cpp b/libs/qec/lib/device/memory_circuit.cpp new file mode 100644 index 0000000..1fd57e4 --- /dev/null +++ b/libs/qec/lib/device/memory_circuit.cpp @@ -0,0 +1,120 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
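A usage sketch for the lookup-table decoder registered below, obtained through the decoder registry implemented above. The cudaq::qec::float_t alias for soft syndrome entries is an assumption (the CUDAQX_QEC_USE_DOUBLE CMake option suggests a configurable float type, but the alias name is not legible in this excerpt), as is the uint8_t tensor element type.

// Illustrative only: decode a hand-built single-bit-flip syndrome against the
// Steane code's Z-stabilizer checks.
#include "cudaq/qec/code.h"
#include "cudaq/qec/decoder.h"

#include <cstdint>
#include <vector>

void decode_single_flip() {
  auto steane = cudaq::qec::get_code("steane");
  auto Hz = steane->get_parity_z(); // 3 x 7 for the Steane code

  auto lut = cudaq::qec::get_decoder("single_error_lut", Hz);

  // Soft syndrome; entries >= 0.5 are read as fired checks.
  std::vector<cudaq::qec::float_t> syndrome = {1.0, 0.0, 1.0};

  auto result = lut->decode(syndrome);
  if (result.converged) {
    std::vector<uint8_t> correction;
    cudaq::qec::convert_vec_soft_to_hard(result.result, correction);
    // correction has block_size (7) entries; the 1 marks the flipped qubit.
  }
}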
* + ******************************************************************************/ +#include "memory_circuit.h" + +namespace cudaq::qec { + +static std::unique_ptr> rawAncillaMeasurements; +static std::unique_ptr> rawDataMeasurements; + +void persistDataMeasures(uint8_t *measures, std::size_t size) { + if (!rawDataMeasurements) + rawDataMeasurements = std::make_unique>(); + + auto &store = *rawDataMeasurements; + store.insert(store.end(), measures, measures + size); +} + +void persistAncillaMeasures(uint8_t *measures, std::size_t size) { + if (!rawAncillaMeasurements) + rawAncillaMeasurements = std::make_unique>(); + + auto &store = *rawAncillaMeasurements; + store.insert(store.end(), measures, measures + size); +} + +std::vector &getMemoryCircuitAncillaMeasurements() { + return *rawAncillaMeasurements.get(); +} + +std::vector &getMemoryCircuitDataMeasurements() { + return *rawDataMeasurements.get(); +} + +void clearRawMeasurements() { + (*rawDataMeasurements).clear(); + (*rawAncillaMeasurements).clear(); +} + +__qpu__ void __memory_circuit_stabs( + cudaq::qview<> data, cudaq::qview<> xstab_anc, cudaq::qview<> zstab_anc, + const code::stabilizer_round &stabilizer_round, + const code::one_qubit_encoding &statePrep, std::size_t numRounds, + const std::vector &x_stabilizers, + const std::vector &z_stabilizers) { + // Create the logical patch + patch logical(data, xstab_anc, zstab_anc); + + // Prepare the initial state fault tolerantly + statePrep({data, xstab_anc, zstab_anc}); + + // Generate syndrome data + size_t counter = 0; + std::vector measureInts((xstab_anc.size() + zstab_anc.size()) * + numRounds); + for (std::size_t round = 0; round < numRounds; round++) { + // Run the stabilizer round, generate the syndrome measurements + auto syndrome = stabilizer_round(logical, x_stabilizers, z_stabilizers); + + // Convert to integers for easy passing + for (size_t i = 0; i < syndrome.size(); i++) { + measureInts[counter] = syndrome[i]; + counter++; + } + } + + // Store the ancillas for analysis / decoding + persistAncillaMeasures(measureInts.data(), measureInts.size()); +} + +__qpu__ void memory_circuit_mz(const code::stabilizer_round &stabilizer_round, + const code::one_qubit_encoding &statePrep, + std::size_t numData, std::size_t numAncx, + std::size_t numAncz, std::size_t numRounds, + const std::vector &x_stabilizers, + const std::vector &z_stabilizers) { + + // Allocate the data and ancilla qubits + cudaq::qvector data(numData), xstab_anc(numAncx), zstab_anc(numAncz); + + // Persists ancilla measures + __memory_circuit_stabs(data, xstab_anc, zstab_anc, stabilizer_round, + statePrep, numRounds, x_stabilizers, z_stabilizers); + + auto dataResults = mz(data); + std::vector dataInts(numData); + for (size_t i = 0; i < numData; i++) + dataInts[i] = dataResults[i]; + + persistDataMeasures(dataInts.data(), numData); +} + +__qpu__ void memory_circuit_mx(const code::stabilizer_round &stabilizer_round, + const code::one_qubit_encoding &statePrep, + std::size_t numData, std::size_t numAncx, + std::size_t numAncz, std::size_t numRounds, + const std::vector &x_stabilizers, + const std::vector &z_stabilizers) { + + // Allocate the data and ancilla qubits + cudaq::qvector data(numData), xstab_anc(numAncx), zstab_anc(numAncz); + + // Persists ancilla measures + __memory_circuit_stabs(data, xstab_anc, zstab_anc, stabilizer_round, + statePrep, numRounds, x_stabilizers, z_stabilizers); + + h(data); + auto dataResults = mz(data); + std::vector dataInts(numData); + for (size_t i = 0; i < numData; i++) + 
dataInts[i] = dataResults[i]; + + persistDataMeasures(dataInts.data(), numData); +} + +} // namespace cudaq::qec diff --git a/libs/qec/lib/device/memory_circuit.h b/libs/qec/lib/device/memory_circuit.h new file mode 100644 index 0000000..c197eed --- /dev/null +++ b/libs/qec/lib/device/memory_circuit.h @@ -0,0 +1,66 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once + +#include "cudaq.h" +#include "cudaq/qec/code.h" + +namespace cudaq::qec { + +/// @brief Get a reference to the raw measurements from the memory circuit +/// execution +/// @return Reference to the vector of integers storing syndrome information +std::vector &getMemoryCircuitMeasurements(); + +std::vector &getMemoryCircuitAncillaMeasurements(); + +std::vector &getMemoryCircuitDataMeasurements(); + +void clearRawMeasurements(); + +/// \entry_point_kernel +/// +/// @brief Execute a memory circuit for quantum error correction, mz on data +/// qubits +/// @param stabilizer_round Function pointer to the stabilizer round +/// implementation +/// @param statePrep Function pointer to the state preparation implementation +/// @param numData Number of data qubits in the code +/// @param numAncx Number of ancilla x qubits in the code +/// @param numAncz Number of ancilla z qubits in the code +/// @param numRounds Number of rounds to execute the memory circuit +/// @param x_stabilizers Vector of indices for X stabilizers +/// @param z_stabilizers Vector of indices for Z stabilizers +__qpu__ void memory_circuit_mz(const code::stabilizer_round &stabilizer_round, + const code::one_qubit_encoding &statePrep, + std::size_t numData, std::size_t numAncx, + std::size_t numAncz, std::size_t numRounds, + const std::vector &x_stabilizers, + const std::vector &z_stabilizers); +/// \entry_point_kernel +/// +/// @brief Execute a memory circuit for quantum error correction, mx on data +/// qubits +/// @param stabilizer_round Function pointer to the stabilizer round +/// implementation +/// @param statePrep Function pointer to the state preparation implementation +/// @param numData Number of data qubits in the code +/// @param numAncx Number of ancilla x qubits in the code +/// @param numAncz Number of ancilla z qubits in the code +/// @param numRounds Number of rounds to execute the memory circuit +/// @param x_stabilizers Vector of indices for X stabilizers +/// @param z_stabilizers Vector of indices for Z stabilizers +__qpu__ void memory_circuit_mx(const code::stabilizer_round &stabilizer_round, + const code::one_qubit_encoding &statePrep, + std::size_t numData, std::size_t numAncx, + std::size_t numAncz, std::size_t numRounds, + const std::vector &x_stabilizers, + const std::vector &z_stabilizers); + +} // namespace cudaq::qec diff --git a/libs/qec/lib/experiments.cpp b/libs/qec/lib/experiments.cpp new file mode 100644 index 0000000..8167f1b --- /dev/null +++ b/libs/qec/lib/experiments.cpp @@ -0,0 +1,198 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. 
* + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include "cudaq/qec/experiments.h" + +#include "device/memory_circuit.h" + +using namespace cudaqx; + +namespace cudaq::qec { + +namespace details { +auto __sample_code_capacity(const cudaqx::tensor &H, + std::size_t nShots, double error_probability, + unsigned seed) { + // init RNG + std::mt19937 rng(seed); + std::bernoulli_distribution dist(error_probability); + + // Each row is a shot + // Each row elem is a 1 if error, 0 else. + cudaqx::tensor data({nShots, H.shape()[1]}); + cudaqx::tensor syndromes({nShots, H.shape()[0]}); + + std::vector bits(nShots * H.shape()[1]); + std::generate(bits.begin(), bits.end(), [&]() { return dist(rng); }); + + data.copy(bits.data(), data.shape()); + + // Syn = D * H^T + // [n,s] = [n,d]*[d,s] + syndromes = data.dot(H.transpose()) % 2; + + return std::make_tuple(syndromes, data); +} +} // namespace details + +// Single shot version +cudaqx::tensor generate_random_bit_flips(size_t numBits, + double error_probability) { + // init RNG + std::random_device rd; + std::mt19937 rng(rd()); + std::bernoulli_distribution dist(error_probability); + + // Each row is a shot + // Each row elem is a 1 if error, 0 else. + cudaqx::tensor data({numBits}); + std::vector bits(numBits); + std::generate(bits.begin(), bits.end(), [&]() { return dist(rng); }); + + data.copy(bits.data(), data.shape()); + return data; +} + +std::tuple, cudaqx::tensor> +sample_code_capacity(const cudaqx::tensor &H, std::size_t nShots, + double error_probability, unsigned seed) { + return details::__sample_code_capacity(H, nShots, error_probability, seed); +} + +std::tuple, cudaqx::tensor> +sample_code_capacity(const cudaqx::tensor &H, std::size_t nShots, + double error_probability) { + return details::__sample_code_capacity(H, nShots, error_probability, + std::random_device()()); +} + +std::tuple, cudaqx::tensor> +sample_code_capacity(const code &code, std::size_t nShots, + double error_probability) { + return sample_code_capacity(code.get_parity(), nShots, error_probability); +} + +std::tuple, cudaqx::tensor> +sample_code_capacity(const code &code, std::size_t nShots, + double error_probability, unsigned seed) { + return sample_code_capacity(code.get_parity(), nShots, error_probability, + seed); +} + +std::tuple, cudaqx::tensor> +sample_memory_circuit(const code &code, operation statePrep, + std::size_t numShots, std::size_t numRounds, + cudaq::noise_model &noise) { + if (!code.contains_operation(statePrep)) + throw std::runtime_error( + "sample_memory_circuit_error - requested state prep kernel not found."); + + auto &prep = code.get_operation(statePrep); + + if (!code.contains_operation(operation::stabilizer_round)) + throw std::runtime_error("sample_memory_circuit error - no stabilizer " + "round kernel for this code."); + + auto &stabRound = + code.get_operation(operation::stabilizer_round); + + cudaq::ExecutionContext ctx(""); + ctx.noiseModel = &noise; + auto &platform = cudaq::get_platform(); + + auto parity_x = code.get_parity_x(); + auto parity_z = code.get_parity_z(); + auto numData = code.get_num_data_qubits(); + auto numAncx = code.get_num_ancilla_x_qubits(); + auto numAncz = code.get_num_ancilla_z_qubits(); + + std::vector xVec(parity_x.data(), + parity_x.data() + parity_x.size()); + std::vector zVec(parity_z.data(), + parity_z.data() + 
parity_z.size()); + + std::size_t numRows = numShots * numRounds; + std::size_t numCols = numAncx + numAncz; + + // Allocate the tensor data for the syndromes and data. + cudaqx::tensor syndromeTensor({numShots * (numRounds - 1), numCols}); + cudaqx::tensor dataResults({numShots, numData}); + + // Run the memory circuit experiment + if (statePrep == operation::prep0 || statePrep == operation::prep1) { + // run z basis + for (std::size_t shot = 0; shot < numShots; shot++) { + platform.set_exec_ctx(&ctx); + memory_circuit_mz(stabRound, prep, numData, numAncx, numAncz, numRounds, + xVec, zVec); + platform.reset_exec_ctx(); + } + } else if (statePrep == operation::prepp || statePrep == operation::prepm) { + // run z basis + for (std::size_t shot = 0; shot < numShots; shot++) { + platform.set_exec_ctx(&ctx); + memory_circuit_mx(stabRound, prep, numData, numAncx, numAncz, numRounds, + xVec, zVec); + platform.reset_exec_ctx(); + } + } else { + throw std::runtime_error( + "sample_memory_circuit_error - invalid requested state prep kernel."); + } + + auto &dataMeasures = getMemoryCircuitDataMeasurements(); + dataResults.copy(dataMeasures.data()); + + // Get the raw ancilla measurments + auto &measurements = getMemoryCircuitAncillaMeasurements(); + + std::size_t numMeasRows = numShots * numRounds; + std::size_t numSyndRows = numShots * (numRounds - 1); + cudaqx::tensor measuresTensor({numMeasRows, numCols}); + measuresTensor.borrow(measurements.data()); + + // Convert to Syndromes + + // #pragma omp parallel for collapse(2) + for (std::size_t shot = 0; shot < numShots; ++shot) + for (std::size_t round = 1; round < numRounds; ++round) + for (std::size_t col = 0; col < numCols; ++col) { + std::size_t measIdx = shot * numRounds + round; + std::size_t prevMeasIdx = shot * numRounds + (round - 1); + std::size_t syndIdx = shot * (numRounds - 1) + (round - 1); + syndromeTensor.at({syndIdx, col}) = + measuresTensor.at({measIdx, col}) ^ + measuresTensor.at({prevMeasIdx, col}); + } + + clearRawMeasurements(); + + // Return the data. + return std::make_tuple(syndromeTensor, dataResults); +} + +std::tuple, cudaqx::tensor> +sample_memory_circuit(const code &code, operation op, std::size_t numShots, + std::size_t numRounds) { + cudaq::noise_model noise; + return sample_memory_circuit(code, op, numShots, numRounds, noise); +} + +std::tuple, cudaqx::tensor> +sample_memory_circuit(const code &code, std::size_t numShots, + std::size_t numRounds) { + return sample_memory_circuit(code, operation::prep0, numShots, numRounds); +} + +std::tuple, cudaqx::tensor> +sample_memory_circuit(const code &code, std::size_t numShots, + std::size_t numRounds, cudaq::noise_model &noise) { + return sample_memory_circuit(code, operation::prep0, numShots, numRounds, + noise); +} + +} // namespace cudaq::qec diff --git a/libs/qec/lib/stabilizer_utils.cpp b/libs/qec/lib/stabilizer_utils.cpp new file mode 100644 index 0000000..7f5c3b7 --- /dev/null +++ b/libs/qec/lib/stabilizer_utils.cpp @@ -0,0 +1,135 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
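A sketch showing the shapes produced by the noiseless sample_memory_circuit overloads above: consecutive rounds are XOR-ed, so each shot contributes numRounds - 1 syndrome rows of width numAncX + numAncZ, plus one row of final data-qubit measurements. This is illustrative only.

// Illustrative only: run a noiseless Steane memory experiment and report the
// returned tensor shapes.
#include "cudaq/qec/experiments.h"

#include <cstddef>
#include <cstdio>

void report_memory_circuit_shapes() {
  auto steane = cudaq::qec::get_code("steane");
  std::size_t numShots = 10, numRounds = 4;

  auto [syndromes, data] =
      cudaq::qec::sample_memory_circuit(*steane, numShots, numRounds);

  // Expected for the Steane code: 10 * (4 - 1) x 6 and 10 x 7.
  std::printf("syndromes: %zu x %zu\n", syndromes.shape()[0],
              syndromes.shape()[1]);
  std::printf("data:      %zu x %zu\n", data.shape()[0], data.shape()[1]);
}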
* + ******************************************************************************/ +#include "cudaq/qec/stabilizer_utils.h" + +namespace cudaq::qec { +void sortStabilizerOps(std::vector &ops) { + // Sort the stabilizers, z first, then x + std::sort(ops.begin(), ops.end(), + [](const cudaq::spin_op &a, const cudaq::spin_op &b) { + auto astr = a.to_string(false); + auto bstr = b.to_string(false); + auto zIdxA = astr.find_first_of("Z"); + auto zIdxB = bstr.find_first_of("Z"); + if (zIdxA == std::string::npos) { + if (zIdxB != std::string::npos) + return false; + + // No Z in either, must both contain a X + auto xIdxA = astr.find_first_of("X"); + return xIdxA < bstr.find_first_of("X"); + } + + // First contains a Z + if (zIdxB == std::string::npos) + return true; + + return zIdxA < zIdxB; + }); +} + +// Need to push into the form +// H = [ H_Z | 0 ] +// [ 0 | H_X ] +cudaqx::tensor +to_parity_matrix(const std::vector &stabilizers, + stabilizer_type type) { + if (stabilizers.empty()) + return cudaqx::tensor(); + + sortStabilizerOps(const_cast &>(stabilizers)); + + if (type == stabilizer_type::XZ) { + auto numQubits = stabilizers[0].num_qubits(); + cudaqx::tensor t({stabilizers.size(), 2 * numQubits}); + // Start by counting the number of Z spin_ops + std::size_t numZRows = 0; + for (auto &op : stabilizers) + if (op.to_string(false).find("Z") != std::string::npos) + numZRows++; + else + break; + + // Need to shift Z bits left + for (std::size_t row = 0; row < numZRows; row++) { + for (std::size_t i = numQubits; i < 2 * numQubits; i++) { + if (stabilizers[row].get_raw_data().first[0][i]) + t.at({row, i - numQubits}) = 1; + } + } + + auto numXRows = stabilizers.size() - numZRows; + + for (std::size_t row = 0; row < numXRows; row++) { + for (std::size_t i = 0; i < numQubits; i++) { + if (stabilizers[numZRows + row].get_raw_data().first[0][i]) + t.at({numZRows + row, i + numQubits}) = 1; + } + } + + return t; + } + + if (type == stabilizer_type::Z) { + auto numQubits = stabilizers[0].num_qubits(); + // Start by counting the number of Z spin_ops + std::size_t numZRows = 0; + for (auto &op : stabilizers) + if (op.to_string(false).find("Z") != std::string::npos) + numZRows++; + else + break; + + if (numZRows == 0) + return cudaqx::tensor(); + + cudaqx::tensor ret({numZRows, numQubits}); + for (std::size_t row = 0; row < numZRows; row++) { + for (std::size_t i = numQubits; i < 2 * numQubits; i++) { + if (stabilizers[row].get_raw_data().first[0][i]) + ret.at({row, i - numQubits}) = 1; + } + } + + return ret; + } + + auto numQubits = stabilizers[0].num_qubits(); + // Start by counting the number of Z spin_ops + std::size_t numZRows = 0; + for (auto &op : stabilizers) + if (op.to_string(false).find("Z") != std::string::npos) + numZRows++; + else + break; + + auto numXRows = stabilizers.size() - numZRows; + + if (numXRows == 0) + return cudaqx::tensor(); + + cudaqx::tensor ret({numXRows, numQubits}); + for (std::size_t row = 0; row < numXRows; row++) { + for (std::size_t i = 0; i < numQubits; i++) { + if (stabilizers[numZRows + row].get_raw_data().first[0][i]) + ret.at({row, i}) = 1; + } + } + + return ret; +} + +cudaqx::tensor to_parity_matrix(const std::vector &words, + stabilizer_type type) { + + std::vector ops; + for (auto &os : words) + ops.emplace_back(cudaq::spin_op::from_word(os)); + return to_parity_matrix(ops, type); +} +} // namespace cudaq::qec diff --git a/libs/qec/pyproject.toml b/libs/qec/pyproject.toml new file mode 100644 index 0000000..1c37ac5 --- /dev/null +++ b/libs/qec/pyproject.toml 
@@ -0,0 +1,47 @@ +[build-system] +requires = ["scikit-build-core>=0.10"] +build-backend = "scikit_build_core.build" + +[project] +name = "cudaq-qec" +version = "0.1.0" +description = "Accelerated libraries for Quantum Error Correction built on CUDA-Q" +authors = [{name = "NVIDIA Corporation & Affiliates"}] +maintainers = [{name = "NVIDIA Corporation & Affiliates"}] +requires-python = ">=3.10" +readme = "README.md" +dependencies = [ + 'cuda-quantum-cu12 ~= 0.9.0', +] +classifiers = [ + 'Intended Audience :: Science/Research', + 'Intended Audience :: Developers', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', + "Environment :: GPU :: NVIDIA CUDA", + "Environment :: GPU :: NVIDIA CUDA :: 12", + 'Topic :: Software Development', + 'Topic :: Scientific/Engineering', +] + +[project.urls] +Homepage = "https://nvidia.github.io/cudaqx" +Documentation = "https://nvidia.github.io/cudaqx/components/qec/introduction.html" +Repository = "https://github.com/NVIDIA/cudaqx" + +[tool.scikit-build] +build-dir = "_skbuild" +build.verbose = true +cmake.version = ">=3.28" +cmake.build-type = "Release" +install.components = ["qec-python", "qec-lib"] +wheel.packages = [] +logging.level = "DEBUG" +ninja.version = ">=1.10" + +[tool.scikit-build.cmake.define] +CUDAQX_QEC_INCLUDE_TESTS = false +CUDAQX_QEC_BINDINGS_PYTHON = true diff --git a/libs/qec/python/CMakeLists.txt b/libs/qec/python/CMakeLists.txt new file mode 100644 index 0000000..6927af9 --- /dev/null +++ b/libs/qec/python/CMakeLists.txt @@ -0,0 +1,87 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +# External Dependencies +# ============================================================================== + +FetchContent_Declare( + pybind11 + GIT_REPOSITORY https://github.com/pybind/pybind11 + GIT_TAG v2.13 + EXCLUDE_FROM_ALL +) +FetchContent_MakeAvailable(pybind11) + +find_package(Python COMPONENTS Interpreter) + +# ============================================================================== + +set(MODULE_NAME _pycudaqx_qec_the_suffix_matters_cudaq_qec) + +cudaqx_add_pymodule(${MODULE_NAME} + bindings/cudaqx_qec.cpp + bindings/py_code.cpp + bindings/py_decoder.cpp +) + +target_link_libraries(${MODULE_NAME} + PRIVATE + cudaq-qec + cudaq::cudaq +) + +set_target_properties(${MODULE_NAME} PROPERTIES + LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/python/cudaq_qec") + +if (NOT SKBUILD) + add_custom_target( + copy_qec_python_files ALL + COMMAND ${CMAKE_COMMAND} -E copy_directory + ${CMAKE_CURRENT_SOURCE_DIR}/cudaq_qec + ${CMAKE_BINARY_DIR}/python/cudaq_qec + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/cudaq_qec + COMMENT "Copying Python files to binary directory" + ) + + add_dependencies(${MODULE_NAME} copy_qec_python_files) +endif() + +# RPATH configuration +# ============================================================================== + +if (NOT SKBUILD) + set_target_properties(${MODULE_NAME} PROPERTIES + BUILD_RPATH "$ORIGIN" + INSTALL_RPATH "$ORIGIN/../${CMAKE_INSTALL_LIBDIR}" + ) + + # Let CMake automatically add paths of linked libraries to the RPATH: + set_target_properties(${MODULE_NAME} PROPERTIES + INSTALL_RPATH_USE_LINK_PATH TRUE + ) +else() + # CUDA-Q install its libraries in site-packages/lib (or dist-packages/lib) + # Thus, we need the $ORIGIN/../lib + set_target_properties(${MODULE_NAME} PROPERTIES + INSTALL_RPATH "$ORIGIN/lib:$ORIGIN/../lib" + ) +endif() + +# Install +# ============================================================================== + +install(DIRECTORY cudaq_qec + COMPONENT qec-python + DESTINATION . +) + +install(TARGETS ${MODULE_NAME} + COMPONENT qec-python + DESTINATION cudaq_qec/ +) + diff --git a/libs/qec/python/bindings/cudaqx_qec.cpp b/libs/qec/python/bindings/cudaqx_qec.cpp new file mode 100644 index 0000000..7a2270b --- /dev/null +++ b/libs/qec/python/bindings/cudaqx_qec.cpp @@ -0,0 +1,19 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "py_code.h" +#include "py_decoder.h" + +#include +#include + +PYBIND11_MODULE(_pycudaqx_qec_the_suffix_matters_cudaq_qec, mod) { + mod.doc() = "Python bindings for the CUDA-Q QEC Libraries."; + cudaq::qec::bindCode(mod); + cudaq::qec::bindDecoder(mod); +} diff --git a/libs/qec/python/bindings/py_code.cpp b/libs/qec/python/bindings/py_code.cpp new file mode 100644 index 0000000..200ee2d --- /dev/null +++ b/libs/qec/python/bindings/py_code.cpp @@ -0,0 +1,452 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. 
* + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include +#include +#include +#include +#include + +#include "common/Logger.h" +#include "cudaq/python/PythonCppInterop.h" +#include "cudaq/utils/registry.h" + +#include "cudaq/qec/experiments.h" + +#include "type_casters.h" +#include "utils.h" + +namespace py = pybind11; +using namespace cudaqx; + +namespace cudaq::qec { + +class PyCode : public qec::code { +protected: + // Trampoline methods for pure virtual functions + std::size_t get_num_data_qubits() const override { + PYBIND11_OVERRIDE_PURE(std::size_t, qec::code, get_num_data_qubits); + } + + std::size_t get_num_ancilla_qubits() const override { + PYBIND11_OVERRIDE_PURE(std::size_t, qec::code, get_num_ancilla_qubits); + } + + std::size_t get_num_ancilla_x_qubits() const override { + PYBIND11_OVERRIDE_PURE(std::size_t, qec::code, get_num_ancilla_x_qubits); + } + + std::size_t get_num_ancilla_z_qubits() const override { + PYBIND11_OVERRIDE_PURE(std::size_t, qec::code, get_num_ancilla_z_qubits); + } +}; + +/// @brief A wrapper class that handles Python-defined quantum error correction +/// codes +/// @details This class serves as a bridge between Python-defined QEC codes and +/// the C++ implementation, managing the conversion of Python QEC code +/// definitions to their C++ counterparts. +class PyCodeHandle : public qec::code { +protected: + /// @brief Python object representing the registered QEC code + py::object pyCode; + +public: + /// @brief Constructs a PyCodeHandle from a Python QEC code object + /// @param registeredCode Python object containing the QEC code definition + /// @throw std::runtime_error if the Python code lacks required attributes + /// (stabilizers or operation_encodings) + /// @details Initializes the handle by: + /// - Validating the presence of required attributes + /// - Converting Python stabilizers to C++ representation + /// - Processing operation encodings and registering CUDA-Q kernels + PyCodeHandle(py::object registeredCode) : pyCode(registeredCode) { + if (!py::hasattr(registeredCode, "stabilizers")) + throw std::runtime_error( + "Invalid Python QEC Code. Must have self.stabilizers = " + "qec.Stabilizers(...). Please provide the stabilizers."); + if (!py::hasattr(registeredCode, "operation_encodings")) + throw std::runtime_error( + "Invalid Python QEC Code. Must have self.operation_encodings = " + "{...}. 
Please provide the CUDA-Q kernels for the operation " "encodings."); + + // Get the stabilizers + m_stabilizers = + registeredCode.attr("stabilizers").cast>(); + // Get the CUDA-Q kernels for the operation encodings + auto opsDict = registeredCode.attr("operation_encodings").cast(); + + // For each CUDA-Q kernel, extract the JIT-ed function pointer + for (auto &[opKey, kernelHandle] : opsDict) { + py::object kernel = py::cast(kernelHandle); + auto opKeyEnum = opKey.cast(); + + // Create the kernel interop object + cudaq::python::CppPyKernelDecorator opInterop(kernel); + opInterop.compile(); + + // Get the kernel name + auto baseName = kernelHandle.attr("name").cast(); + std::string kernelName = "__nvqpp__mlirgen__" + baseName; + + // Extract the function pointer and register it with the qkernel system + auto capsule = kernel.attr("extract_c_function_pointer")(kernelName) + .cast(); + void *ptr = capsule; + cudaq::registry::__cudaq_registerLinkableKernel(ptr, baseName.c_str(), + ptr); + + // Make sure we cast the function pointer correctly + if (opKeyEnum == operation::stabilizer_round) { + auto *casted = reinterpret_cast (*)( + patch, const std::vector &, + const std::vector &)>(ptr); + operation_encodings.insert( + {opKeyEnum, cudaq::qkernel( + patch, const std::vector &, + const std::vector &)>(casted)}); + continue; + } + + // FIXME handle other signatures later... this assumes single-patch + // signatures + auto *casted = reinterpret_cast(ptr); + operation_encodings.insert( + {opKeyEnum, cudaq::qkernel(casted)}); + } + } + +protected: + // Trampoline methods for pure virtual functions + std::size_t get_num_data_qubits() const override { + return pyCode.attr("get_num_data_qubits")().cast(); + } + + std::size_t get_num_ancilla_qubits() const override { + return pyCode.attr("get_num_ancilla_qubits")().cast(); + } + + std::size_t get_num_ancilla_x_qubits() const override { + return pyCode.attr("get_num_ancilla_x_qubits")().cast(); + } + + std::size_t get_num_ancilla_z_qubits() const override { + return pyCode.attr("get_num_ancilla_z_qubits")().cast(); + } +}; + +// Registry to store code factory functions +class PyCodeRegistry { +private: + static std::unordered_map> + registry; + +public: + static void register_code(const std::string &name, + std::function factory) { + cudaq::info("Registering Pythonic QEC Code with name {}", name); + registry[name] = factory; + } + + static py::object get_code(const std::string &name, py::kwargs options) { + auto it = registry.find(name); + if (it == registry.end()) { + throw std::runtime_error("Unknown code: " + name); + } + + // Constructs the Python type with kwargs...
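+    // For example, qec.get_code("py-steane-example") lands here: the factory
+    // registered by the @qec.code decorator constructs the Python code class
+    // (built from MySteaneCodeImpl in cudaq_qec/plugins/codes/example.py
+    // later in this patch) with the user-supplied kwargs.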
+ return it->second(options); + } + + static bool contains(const std::string &name) { + return registry.find(name) != registry.end(); + } +}; + +std::unordered_map> + PyCodeRegistry::registry; + +template +auto copyCUDAQXTensorToPyArray(const cudaqx::tensor &tensor) { + auto shape = tensor.shape(); + auto rows = shape[0]; + auto cols = shape[1]; + size_t total_size = rows * cols; + + // Allocate new memory and copy the data + T *data_copy = new T[total_size]; + std::memcpy(data_copy, tensor.data(), total_size * sizeof(T)); + + // Create a NumPy array using the buffer protocol + return py::array_t( + {rows, cols}, // Shape of the array + {cols * sizeof(T), sizeof(T)}, // Strides for row-major layout + data_copy, // Pointer to the data + py::capsule(data_copy, [](void *p) { delete[] static_cast(p); })); +} + +template +auto copy1DCUDAQXTensorToPyArray(const cudaqx::tensor &tensor) { + auto shape = tensor.shape(); + auto rows = shape[0]; + size_t total_size = rows; + + // Allocate new memory and copy the data + T *data_copy = new T[total_size]; + std::memcpy(data_copy, tensor.data(), total_size * sizeof(T)); + + // Create a NumPy array using the buffer protocol + return py::array_t( + {static_cast(rows)}, // Shape of the array + data_copy, // Pointer to the data + py::capsule(data_copy, [](void *p) { delete[] static_cast(p); })); +} + +void bindCode(py::module &mod) { + + auto qecmod = py::hasattr(mod, "qecrt") + ? mod.attr("qecrt").cast() + : mod.def_submodule("qecrt"); + + py::class_( + qecmod, "TwoQubitDepolarization", + R"#(Models the decoherence of the each qubit independently in a two-qubit operation into a mixture " + of the computational basis states, `|0>` and `|1>`.)#") + .def(py::init(), py::arg("probability"), + "Initialize the `TwoQubitDepolarizationChannel` with the provided " + "`probability`."); + + py::class_( + qecmod, "TwoQubitBitFlip", + R"#(Models independent bit flip errors after a two-qubit operation.)#") + .def(py::init(), py::arg("probability"), + "Initialize the `TwoQubitBitFlip` with the provided " + "`probability`."); + + py::enum_( + qecmod, "operation", + "Enumeration of quantum operations for state preparation") + .value("prep0", operation::prep0, "Prepare qubit in |0⟩ state") + .value("prep1", operation::prep1, "Prepare qubit in |1⟩ state") + .value("prepp", operation::prepp, "Prepare qubit in |+⟩ state") + .value("prepm", operation::prepm, "Prepare qubit in |-⟩ state") + .value("x", operation::x, "Apply the logical X operation") + .value("y", operation::y, "Apply the logical Y operation") + .value("z", operation::z, "Apply the logical Z operation") + .value("h", operation::h, "Apply the logical H operation") + .value("s", operation::s, "Apply the logical S operation") + .value("cx", operation::cx, "Apply the logical CX operation") + .value("cy", operation::cy, "Apply the logical CY operation") + .value("cz", operation::cz, "Apply the logical CZ operation") + .value("stabilizer_round", operation::stabilizer_round, + "Apply the stabilizer round operation."); + + qecmod.def( + "get_code", + [](const std::string &name, py::kwargs options) -> std::unique_ptr { + if (PyCodeRegistry::contains(name)) + return std::make_unique( + PyCodeRegistry::get_code(name, options)); + + if (options.contains("stabilizers")) { + auto obj = options["stabilizers"]; + if (!py::isinstance(obj)) + throw std::runtime_error( + "invalid stabilizers passed to get_code, must be a list of " + "string pauli words or list of cudaq.SpinOperator."); + + if (py::isinstance(obj.cast()[0])) { + 
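+            // Stabilizers were given as Pauli-word strings (e.g. "XXXXIII",
+            // "IZZIZZI"); convert each word to a spin_op, sort the resulting
+            // generators, and forward them to the C++ code factory.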
options.attr("pop")("stabilizers"); + auto words = obj.cast>(); + std::vector ops; + for (auto &os : words) + ops.emplace_back(cudaq::spin_op::from_word(os)); + sortStabilizerOps(ops); + return get_code(name, ops, hetMapFromKwargs(options)); + } + + if (py::isinstance(obj[0])) { + options.attr("pop")("stabilizers"); + return get_code(name, obj.cast>(), + hetMapFromKwargs(options)); + } + + throw std::runtime_error( + "get_code error - invalid stabilizers element type."); + } + + return get_code(name, hetMapFromKwargs(options)); + }, + "Retrieve a quantum error correction code by name with optional " + "parameters"); + + qecmod.def("get_available_codes", &get_available_codes, + "Get a list of all available quantum error correction codes"); + + py::class_(qecmod, "Code", + "Represents a quantum error correction code") + .def(py::init<>()) + .def( + "get_parity", + [](code &code) { + return copyCUDAQXTensorToPyArray(code.get_parity()); + }, + "Get the parity check matrix of the code") + .def( + "get_parity_x", + [](code &code) { + return copyCUDAQXTensorToPyArray(code.get_parity_x()); + }, + "Get the X-type parity check matrix of the code") + .def( + "get_parity_z", + [](code &code) { + return copyCUDAQXTensorToPyArray(code.get_parity_z()); + }, + "Get the Z-type parity check matrix of the code") + .def( + "get_pauli_observables_matrix", + [](code &code) { + return copyCUDAQXTensorToPyArray( + code.get_pauli_observables_matrix()); + }, + "Get a matrix of the Pauli observables of the code") + .def( + "get_observables_x", + [](code &code) { + return copyCUDAQXTensorToPyArray(code.get_observables_x()); + }, + "Get the Pauli X observables of the code") + .def( + "get_observables_z", + [](code &code) { + return copyCUDAQXTensorToPyArray(code.get_observables_z()); + }, + "Get the Pauli Z observables of the code") + .def("get_stabilizers", &code::get_stabilizers, + "Get the stabilizer generators of the code"); + + qecmod.def("code", [&](const std::string &name) { + auto cppCodes = qec::get_available_codes(); + if (std::find(cppCodes.begin(), cppCodes.end(), name) != cppCodes.end()) + throw std::runtime_error("Invalid Python QEC Code name. 
" + name + + " is already used in the C++ Code registry."); + + return py::cpp_function([name](py::object code_class) -> py::object { + // Create new class that inherits from both Code and the original + class py::object base_code = py::module::import("cudaq_qec").attr("Code"); + // Create new type using Python's type() function + py::tuple bases = py::make_tuple(base_code); + py::dict namespace_dict = code_class.attr("__dict__"); + + if (!py::hasattr(code_class, "get_num_data_qubits")) + throw std::runtime_error( + "Code class must implement get_num_data_qubits method"); + + if (!py::hasattr(code_class, "get_num_ancilla_qubits")) + throw std::runtime_error( + "Code class must implement get_num_ancilla_qubits method"); + + if (!py::hasattr(code_class, "get_num_ancilla_x_qubits")) + throw std::runtime_error( + "Code class must implement get_num_ancilla_x_qubits method"); + + if (!py::hasattr(code_class, "get_num_ancilla_z_qubits")) + throw std::runtime_error( + "Code class must implement get_num_ancilla_z_qubits method"); + + py::object new_class = + py::reinterpret_steal(PyType_Type.tp_new( + &PyType_Type, + py::make_tuple(code_class.attr("__name__"), bases, namespace_dict) + .ptr(), + nullptr)); + + // Register the new class in the code registry + PyCodeRegistry::register_code(name, [new_class](py::kwargs options) { + py::object instance = new_class(**options); + return instance; + }); + return new_class; + }); + }); + + qecmod.def( + "generate_random_bit_flips", + [](std::size_t numBits, double error_probability) { + auto data = generate_random_bit_flips(numBits, error_probability); + return copy1DCUDAQXTensorToPyArray(data); + }, + "Generate a rank-1 tensor for random bits", py::arg("numBits"), + py::arg("error_probability")); + qecmod.def( + "sample_memory_circuit", + [](code &code, std::size_t numShots, std::size_t numRounds, + std::optional noise = std::nullopt) { + auto [synd, dataRes] = + noise ? sample_memory_circuit(code, numShots, numRounds, *noise) + : sample_memory_circuit(code, numShots, numRounds); + return py::make_tuple(copyCUDAQXTensorToPyArray(synd), + copyCUDAQXTensorToPyArray(dataRes)); + }, + "Sample the memory circuit of the code", py::arg("code"), + py::arg("numShots"), py::arg("numRounds"), + py::arg("noise") = std::nullopt); + qecmod.def( + "sample_memory_circuit", + [](code &code, operation op, std::size_t numShots, std::size_t numRounds, + std::optional noise = std::nullopt) { + auto [synd, dataRes] = + noise ? 
sample_memory_circuit(code, op, numShots, numRounds, *noise) + : sample_memory_circuit(code, op, numShots, numRounds); + return py::make_tuple(copyCUDAQXTensorToPyArray(synd), + copyCUDAQXTensorToPyArray(dataRes)); + }, + "Sample the memory circuit of the code with a specific initial " + "operation", + py::arg("code"), py::arg("op"), py::arg("numShots"), py::arg("numRounds"), + py::arg("noise") = std::nullopt); + + qecmod.def( + "sample_code_capacity", + [](code &code, std::size_t numShots, double errorProb, + std::optional seed = std::nullopt) { + if (seed.has_value()) { + auto [syndromes, dataRes] = + sample_code_capacity(code, numShots, errorProb, seed.value()); + return py::make_tuple(copyCUDAQXTensorToPyArray(syndromes), + copyCUDAQXTensorToPyArray(dataRes)); + } + + auto [syndromes, dataRes] = + sample_code_capacity(code, numShots, errorProb); + return py::make_tuple(copyCUDAQXTensorToPyArray(syndromes), + copyCUDAQXTensorToPyArray(dataRes)); + }, + "Sample syndrome measurements with code capacity noise.", py::arg("code"), + py::arg("numShots"), py::arg("errorProb"), py::arg("seed") = py::none()); + qecmod.def( + "sample_code_capacity", + [](const py::array_t H, std::size_t numShots, double errorProb, + std::optional seed = std::nullopt) { + if (seed.has_value()) { + auto [syndromes, dataRes] = sample_code_capacity( + toTensor(H), numShots, errorProb, seed.value()); + return py::make_tuple(copyCUDAQXTensorToPyArray(syndromes), + copyCUDAQXTensorToPyArray(dataRes)); + } + + auto [syndromes, dataRes] = + sample_code_capacity(toTensor(H), numShots, errorProb); + return py::make_tuple(copyCUDAQXTensorToPyArray(syndromes), + copyCUDAQXTensorToPyArray(dataRes)); + }, + "Sample syndrome measurements with code capacity noise.", py::arg("H"), + py::arg("numShots"), py::arg("errorProb"), py::arg("seed") = py::none()); +} +} // namespace cudaq::qec diff --git a/libs/qec/python/bindings/py_code.h b/libs/qec/python/bindings/py_code.h new file mode 100644 index 0000000..70c6c16 --- /dev/null +++ b/libs/qec/python/bindings/py_code.h @@ -0,0 +1,15 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include + +namespace py = pybind11; + +namespace cudaq::qec { +void bindCode(py::module &mod); +} // namespace cudaq::qec diff --git a/libs/qec/python/bindings/py_decoder.cpp b/libs/qec/python/bindings/py_decoder.cpp new file mode 100644 index 0000000..710f841 --- /dev/null +++ b/libs/qec/python/bindings/py_decoder.cpp @@ -0,0 +1,172 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ +#include +#include +#include +#include +#include + +#include "common/Logger.h" + +#include "cudaq/qec/decoder.h" + +#include "type_casters.h" +#include "utils.h" + +namespace py = pybind11; +using namespace cudaqx; + +namespace cudaq::qec { + +class PyDecoder : public decoder { +public: + PyDecoder(const py::array_t &H) : decoder(toTensor(H)) {} + + decoder_result decode(const std::vector &syndrome) override { + PYBIND11_OVERRIDE_PURE(decoder_result, decoder, decode, syndrome); + } +}; + +// Registry to store decoder factory functions +class PyDecoderRegistry { +private: + static std::unordered_map< + std::string, + std::function &, py::kwargs)>> + registry; + +public: + static void register_decoder( + const std::string &name, + std::function &, py::kwargs)> + factory) { + cudaq::info("Registering Pythonic Decoder with name {}", name); + registry[name] = factory; + } + + static py::object get_decoder(const std::string &name, + const py::array_t &H, + py::kwargs options) { + auto it = registry.find(name); + if (it == registry.end()) { + throw std::runtime_error("Unknown decoder: " + name); + } + + return it->second(H, options); + } + + static bool contains(const std::string &name) { + return registry.find(name) != registry.end(); + } +}; + +std::unordered_map &, py::kwargs)>> + PyDecoderRegistry::registry; + +void bindDecoder(py::module &mod) { + + auto qecmod = py::hasattr(mod, "qecrt") + ? mod.attr("qecrt").cast() + : mod.def_submodule("qecrt"); + py::class_(qecmod, "DecoderResult", R"pbdoc( + A class representing the results of a quantum error correction decoding operation. + + This class encapsulates both the convergence status and the actual decoding result. +)pbdoc") + .def(py::init<>(), R"pbdoc( + Default constructor for DecoderResult. + + Creates a new DecoderResult instance with default values. + )pbdoc") + .def_readwrite("converged", &decoder_result::converged, R"pbdoc( + Boolean flag indicating if the decoder converged to a solution. + + True if the decoder successfully found a valid correction chain, + False if the decoder failed to converge or exceeded iteration limits. + )pbdoc") + .def_readwrite("result", &decoder_result::result, R"pbdoc( + The decoded correction chain or recovery operation. + + Contains the sequence of corrections that should be applied to recover + the original quantum state. The format depends on the specific decoder + implementation. 
+ )pbdoc"); + + py::class_( + qecmod, "Decoder", "Represents a decoder for quantum error correction") + .def(py::init_alias &>()) + .def( + "decode", + [](decoder &decoder, const std::vector &syndrome) { + return decoder.decode(syndrome); + }, + "Decode the given syndrome to determine the error correction", + py::arg("syndrome")) + .def("get_block_size", &decoder::get_block_size, + "Get the size of the code block") + .def("get_syndrome_size", &decoder::get_syndrome_size, + "Get the size of the syndrome"); + + // Expose decorator function that handles inheritance + qecmod.def("decoder", [&](const std::string &name) { + return py::cpp_function([name](py::object decoder_class) -> py::object { + // Create new class that inherits from both Decoder and the original + class py::object base_decoder = + py::module::import("cudaq_qec").attr("Decoder"); + // Create new type using Python's type() function + py::tuple bases = py::make_tuple(base_decoder); + py::dict namespace_dict = decoder_class.attr("__dict__"); + + if (!py::hasattr(decoder_class, "decode")) + throw std::runtime_error("Decoder class must implement decode method"); + + py::object new_class = py::reinterpret_steal( + PyType_Type.tp_new(&PyType_Type, + py::make_tuple(decoder_class.attr("__name__"), + bases, namespace_dict) + .ptr(), + nullptr)); + + // Register the new class in the decoder registry + PyDecoderRegistry::register_decoder( + name, [new_class](const py::array_t &H, py::kwargs options) { + py::object instance = new_class(H, **options); + return instance; + }); + return new_class; + }); + }); + + qecmod.def( + "get_decoder", + [](const std::string &name, const py::array_t H, + const py::kwargs options) + -> std::variant> { + if (PyDecoderRegistry::contains(name)) + return PyDecoderRegistry::get_decoder(name, H, options); + + py::buffer_info buf = H.request(); + + // Create a vector of the array dimensions + std::vector shape; + for (py::ssize_t d : buf.shape) { + shape.push_back(static_cast(d)); + } + + // Create a tensor and borrow the NumPy array data + cudaqx::tensor tensor_H(shape); + tensor_H.borrow(static_cast(buf.ptr), shape); + + return get_decoder(name, tensor_H, hetMapFromKwargs(options)); + }, + "Get a decoder by name with a given parity check matrix" + "and optional decoder-specific parameters"); +} + +} // namespace cudaq::qec diff --git a/libs/qec/python/bindings/py_decoder.h b/libs/qec/python/bindings/py_decoder.h new file mode 100644 index 0000000..0a800f5 --- /dev/null +++ b/libs/qec/python/bindings/py_decoder.h @@ -0,0 +1,15 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include + +namespace py = pybind11; + +namespace cudaq::qec { +void bindDecoder(py::module &mod); +} // namespace cudaq::qec diff --git a/libs/qec/python/bindings/type_casters.h b/libs/qec/python/bindings/type_casters.h new file mode 100644 index 0000000..e4ebde4 --- /dev/null +++ b/libs/qec/python/bindings/type_casters.h @@ -0,0 +1,82 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. 
* + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include "common/ObserveResult.h" +#include "pybind11/pybind11.h" +#include "pybind11/pytypes.h" +namespace py = pybind11; + +namespace pybind11 { +namespace detail { +template <> +struct type_caster { + PYBIND11_TYPE_CASTER(cudaq::spin_op, const_name("SpinOperator")); + + bool load(handle src, bool) { + if (!src) + return false; + auto data = src.attr("serialize")().cast>(); + auto numQubits = src.attr("get_qubit_count")().cast(); + value = cudaq::spin_op(data, numQubits); + return true; + } + + static handle cast(cudaq::spin_op v, return_value_policy /*policy*/, + handle /*parent*/) { + py::object tv_py = py::module::import("cudaq").attr("SpinOperator")( + v.getDataRepresentation(), v.num_qubits()); // Construct new python obj + return tv_py.release(); + } +}; + +template <> +struct type_caster { + PYBIND11_TYPE_CASTER(cudaq::sample_result, const_name("SampleResult")); + + bool load(handle src, bool) { + if (!src) + return false; + + auto data = src.attr("serialize")().cast>(); + value = cudaq::sample_result(); + value.deserialize(data); + return true; + } + + static handle cast(cudaq::sample_result v, return_value_policy /*policy*/, + handle /*parent*/) { + py::object tv_py = py::module::import("cudaq").attr("SampleResult")(); + tv_py.attr("deserialize")(v.serialize()); + return tv_py.release(); + } +}; + +template <> +struct type_caster { + PYBIND11_TYPE_CASTER(cudaq::observe_result, const_name("ObserveResult")); + + bool load(handle src, bool) { + if (!src) + return false; + + auto e = src.attr("expectation")().cast(); + value = cudaq::observe_result(e, cudaq::spin_op()); + // etc. + return true; + } + + static handle cast(cudaq::observe_result v, return_value_policy /*policy*/, + handle /*parent*/) { + py::object tv_py = py::module::import("cudaq").attr("ObserveResult")( + v.expectation(), v.get_spin(), v.raw_data()); + return tv_py.release(); + } +}; +} // namespace detail +} // namespace pybind11 diff --git a/libs/qec/python/bindings/utils.h b/libs/qec/python/bindings/utils.h new file mode 100644 index 0000000..71d5b51 --- /dev/null +++ b/libs/qec/python/bindings/utils.h @@ -0,0 +1,68 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include "pybind11/pybind11.h" + +namespace py = pybind11; + +namespace cudaqx { + +/// @brief Return the value of given type corresponding to the provided +/// key string from the provided options `kwargs` `dict`. Return the `orVal` +/// if the key is not in the `dict`. 
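+///
+/// Illustrative usage (the key name here is just an example):
+///   double p = getValueOr<double>(options, "error_probability", 0.01);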
+template +T getValueOr(py::kwargs &options, const std::string &key, const T &orVal) { + if (options.contains(key)) + for (auto item : options) + if (item.first.cast() == key) + return item.second.cast(); + + return orVal; +} + +inline heterogeneous_map hetMapFromKwargs(const py::kwargs &kwargs) { + cudaqx::heterogeneous_map result; + + for (const auto &item : kwargs) { + std::string key = py::cast(item.first); + auto value = item.second; + + if (py::isinstance(value)) { + result.insert(key, value.cast()); + } else if (py::isinstance(value)) { + result.insert(key, value.cast()); + } else if (py::isinstance(value)) { + result.insert(key, value.cast()); + } else if (py::isinstance(value)) { + result.insert(key, value.cast()); + } else { + throw std::runtime_error( + "Invalid python type for mapping kwargs to a heterogeneous_map."); + } + } + + return result; +} + +template +tensor toTensor(const py::array_t &H) { + py::buffer_info buf = H.request(); + + // Create a vector of the array dimensions + std::vector shape; + for (py::ssize_t d : buf.shape) { + shape.push_back(static_cast(d)); + } + + // Create a tensor and borrow the NumPy array data + cudaqx::tensor tensor_H(shape); + tensor_H.borrow(static_cast(buf.ptr), shape); + return tensor_H; +} +} // namespace cudaqx diff --git a/libs/qec/python/cudaq_qec/__init__.py b/libs/qec/python/cudaq_qec/__init__.py new file mode 100644 index 0000000..26d0672 --- /dev/null +++ b/libs/qec/python/cudaq_qec/__init__.py @@ -0,0 +1,45 @@ + +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +from .patch import patch +from ._pycudaqx_qec_the_suffix_matters_cudaq_qec import * + +code = qecrt.code +Code = qecrt.Code +decoder = qecrt.decoder +Decoder = qecrt.Decoder +TwoQubitDepolarization = qecrt.TwoQubitDepolarization +TwoQubitBitFlip = qecrt.TwoQubitBitFlip +operation = qecrt.operation +get_code = qecrt.get_code +get_available_codes = qecrt.get_available_codes +get_decoder = qecrt.get_decoder +DecoderResult = qecrt.DecoderResult +generate_random_bit_flips = qecrt.generate_random_bit_flips +sample_memory_circuit = qecrt.sample_memory_circuit +sample_code_capacity = qecrt.sample_code_capacity + +from .plugins import decoders, codes +import pkgutil, importlib, traceback + +def iter_namespace(ns_pkg): + return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".") + +for finder, name, ispkg in iter_namespace(plugins.decoders): + try: + importlib.import_module(name) + except ModuleNotFoundError: + pass + +for finder, name, ispkg in iter_namespace(plugins.codes): + try: + importlib.import_module(name) + except ModuleNotFoundError as e: + pass + diff --git a/libs/qec/python/cudaq_qec/patch.py b/libs/qec/python/cudaq_qec/patch.py new file mode 100644 index 0000000..c3e40a8 --- /dev/null +++ b/libs/qec/python/cudaq_qec/patch.py @@ -0,0 +1,49 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # +from cudaq import qvector +from dataclasses import dataclass + + +@dataclass +class patch: + """A logical qubit patch representation for surface code quantum error correction. + + This class represents a logical qubit encoded in a 2D patch, containing + data qubits and both X and Z ancilla qubits arranged in a 2D lattice pattern. + The patch structure is fundamental to implementing quantum error correction + and fault-tolerant quantum computation in CUDA-Q. + + Attributes + ---------- + data : qvector + The collection of data qubits that encode the logical qubit state. + These qubits store the actual quantum information being protected. + + ancx : qvector + The X-basis ancilla qubits used for syndrome measurement. + These qubits are used to detect and correct bit-flip (X) errors + on the data qubits through stabilizer measurements. + + ancz : qvector + The Z-basis ancilla qubits used for syndrome measurement. + These qubits are used to detect and correct phase-flip (Z) errors + on the data qubits through stabilizer measurements. + + Notes + ----- + The patch layout follows the standard surface code arrangement where: + - Data qubits are placed at the vertices + - X ancillas are placed on horizontal edges + - Z ancillas are placed on vertical edges + + This structure enables the implementation of weight-4 stabilizer + measurements required for surface code error correction. + """ + data: qvector + ancx: qvector + ancz: qvector diff --git a/libs/qec/python/cudaq_qec/plugins/codes/example.py b/libs/qec/python/cudaq_qec/plugins/codes/example.py new file mode 100644 index 0000000..eb3efb4 --- /dev/null +++ b/libs/qec/python/cudaq_qec/plugins/codes/example.py @@ -0,0 +1,70 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +import cudaq_qec as qec +import cudaq +from cudaq_qec import patch + +@cudaq.kernel +def prep0(logicalQubit: patch): + h(logicalQubit.data[0], logicalQubit.data[4], logicalQubit.data[6]) + x.ctrl(logicalQubit.data[0], logicalQubit.data[1]) + x.ctrl(logicalQubit.data[4], logicalQubit.data[5]) + x.ctrl(logicalQubit.data[6], logicalQubit.data[3]) + x.ctrl(logicalQubit.data[6], logicalQubit.data[5]) + x.ctrl(logicalQubit.data[4], logicalQubit.data[2]) + x.ctrl(logicalQubit.data[0], logicalQubit.data[3]) + x.ctrl(logicalQubit.data[4], logicalQubit.data[1]) + x.ctrl(logicalQubit.data[3], logicalQubit.data[2]) + + +@cudaq.kernel +def stabilizer(logicalQubit: patch, x_stabilizers: list[int], + z_stabilizers: list[int]) -> list[bool]: + h(logicalQubit.ancx) + for xi in range(len(logicalQubit.ancx)): + for di in range(len(logicalQubit.data)): + if x_stabilizers[xi * len(logicalQubit.data) + di] == 1: + x.ctrl(logicalQubit.ancx[xi], logicalQubit.data[di]) + + h(logicalQubit.ancx) + for zi in range(len(logicalQubit.ancx)): + for di in range(len(logicalQubit.data)): + if z_stabilizers[zi * len(logicalQubit.data) + di] == 1: + x.ctrl(logicalQubit.data[di], logicalQubit.ancz[zi]) + + results = mz(logicalQubit.ancx, logicalQubit.ancz) + + reset(logicalQubit.ancx) + reset(logicalQubit.ancz) + return results + + +@qec.code('py-steane-example') +class MySteaneCodeImpl: + + def __init__(self, **kwargs): + qec.Code.__init__(self, **kwargs) + self.stabilizers = [cudaq.SpinOperator.from_word(word) for word in + ["XXXXIII", "IXXIXXI", "IIXXIXX", "ZZZZIII", "IZZIZZI", "IIZZIZZ"]] + self.operation_encodings = { + qec.operation.prep0: prep0, + qec.operation.stabilizer_round: stabilizer + } + + def get_num_data_qubits(self): + return 7 + + def get_num_ancilla_x_qubits(self): + return 3 + + def get_num_ancilla_z_qubits(self): + return 3 + + def get_num_ancilla_qubits(self): + return 6 diff --git a/libs/qec/python/cudaq_qec/plugins/decoders/example.py b/libs/qec/python/cudaq_qec/plugins/decoders/example.py new file mode 100644 index 0000000..20e0533 --- /dev/null +++ b/libs/qec/python/cudaq_qec/plugins/decoders/example.py @@ -0,0 +1,23 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # +import cudaq_qec as qec +import numpy as np + +@qec.decoder("example_byod") +class ExampleDecoder: + def __init__(self, H, **kwargs): + qec.Decoder.__init__(self, H) + self.H = H + if 'weights' in kwargs: + print(kwargs['weights']) + + def decode(self, syndrome): + res = qec.DecoderResult() + res.converged = True + res.result = np.random.random(len(syndrome)).tolist() + return res diff --git a/libs/qec/python/tests/test_code.py b/libs/qec/python/tests/test_code.py new file mode 100644 index 0000000..edb972c --- /dev/null +++ b/libs/qec/python/tests/test_code.py @@ -0,0 +1,227 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # +import pytest +import numpy as np +import cudaq +import cudaq_qec as qec + + +def test_get_code(): + steane = qec.get_code("steane") + assert isinstance(steane, qec.Code) + + +def test_get_available_codes(): + codes = qec.get_available_codes() + assert isinstance(codes, list) + assert "steane" in codes + + +def test_code_parity_matrices(): + steane = qec.get_code("steane") + + parity = steane.get_parity() + assert isinstance(parity, np.ndarray) + assert parity.shape == (6, 14) + + parity_x = steane.get_parity_x() + assert isinstance(parity, np.ndarray) + assert parity_x.shape == (3, 7) + + parity_z = steane.get_parity_z() + assert isinstance(parity, np.ndarray) + assert parity_z.shape == (3, 7) + + +def test_code_stabilizers(): + steane = qec.get_code("steane") + stabilizers = steane.get_stabilizers() + assert isinstance(stabilizers, list) + assert len(stabilizers) == 6 + assert all(isinstance(stab, cudaq.Operator) for stab in stabilizers) + stabStrings = [s.to_string(False) for s in stabilizers] + expected = [ + "ZZZZIII", "XXXXIII", "IXXIXXI", "IIXXIXX", "IZZIZZI", "IIZZIZZ" + ] + assert set(expected) == set(stabStrings) + + +def test_sample_memory_circuit(): + steane = qec.get_code("steane") + + syndromes, dataResults = qec.sample_memory_circuit(steane, + numShots=10, + numRounds=4) + assert isinstance(syndromes, np.ndarray) + assert syndromes.shape == (30, 6) + print(syndromes) + + syndromes_with_op, dataResults = qec.sample_memory_circuit( + steane, qec.operation.prep1, 10, 4) + assert isinstance(syndromes_with_op, np.ndarray) + print(syndromes_with_op) + assert syndromes_with_op.shape == (30, 6) + + +def test_custom_steane_code(): + ops = ["ZZZZIII", "XXXXIII", "IXXIXXI", "IIXXIXX", "IZZIZZI", "IIZZIZZ"] + custom_steane = qec.get_code("steane", stabilizers=ops) + assert isinstance(custom_steane, qec.Code) + + parity = custom_steane.get_parity() + assert parity.shape == (6, 14) + + expected_parity = np.array([ + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1 + ]) + print(parity) + np.testing.assert_array_equal(parity, expected_parity.reshape(6, 14)) + + +def test_noisy_simulation(): + cudaq.set_target('stim') + + noise = cudaq.NoiseModel() + noise.add_all_qubit_channel('x', + qec.TwoQubitDepolarization(.1), + num_controls=1) + steane = qec.get_code("steane") + syndromes, dataResults = qec.sample_memory_circuit(steane, + numShots=10, + numRounds=4, + noise=noise) + assert isinstance(syndromes, np.ndarray) + assert syndromes.shape == (30, 6) + print(syndromes) + assert np.any(syndromes) + cudaq.reset_target() + + +def test_python_code(): + steane = qec.get_code("py-steane-example") + syndromes, dataResults = qec.sample_memory_circuit(steane, + numShots=10, + numRounds=4) + assert isinstance(syndromes, np.ndarray) + assert syndromes.shape == (30, 6) + print(syndromes) + assert not np.any(syndromes) + + +def test_invalid_code(): + with pytest.raises(RuntimeError): + qec.get_code("invalid_code_name") + + +def test_invalid_operation(): + steane = qec.get_code("steane") + with pytest.raises(TypeError): + qec.sample_memory_circuit(steane, "invalid_op", 10, 4) + + +def test_generate_random_bit_flips(): + # Test case 1: error_prob = 0 + nBits = 10 + error_prob = 0 + + data = 
qec.generate_random_bit_flips(nBits, error_prob) + print(f"data shape: {data.shape}") + + assert len(data.shape) == 1 + assert data.shape[0] == 10 + assert np.all(data == 0) + +def test_steane_code_capacity(): + # Test case 1: error_prob = 0 + steane = qec.get_code("steane") + Hz = steane.get_parity_z() + n_shots = 10 + error_prob = 0 + + syndromes, data = qec.sample_code_capacity(Hz, n_shots, error_prob) + + assert len(Hz.shape) == 2 + assert Hz.shape[0] == 3 + assert Hz.shape[1] == 7 + assert syndromes.shape[0] == n_shots + assert syndromes.shape[1] == Hz.shape[0] + assert data.shape[0] == n_shots + assert data.shape[1] == Hz.shape[1] + + # Error prob = 0 should be all zeros + assert np.all(data == 0) + assert np.all(syndromes == 0) + + # Test case 2: error_prob = 0.15 + error_prob = 0.15 + seed = 1337 + + syndromes, data = qec.sample_code_capacity(Hz, + n_shots, + error_prob, + seed=seed) + + assert len(Hz.shape) == 2 + assert Hz.shape[0] == 3 + assert Hz.shape[1] == 7 + assert syndromes.shape[0] == n_shots + assert syndromes.shape[1] == Hz.shape[0] + assert data.shape[0] == n_shots + assert data.shape[1] == Hz.shape[1] + + # Known seeded data for error_prob = 0.15 + seeded_data = np.array([[0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0]]) + + checked_syndromes = np.array([[1, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 0], + [0, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], + [0, 1, 1], [0, 0, 0]]) + + assert np.array_equal(data, seeded_data) + assert np.array_equal(syndromes, checked_syndromes) + + # Test case 3: error_prob = 0.25 + error_prob = 0.25 + seed = 1337 + + syndromes, data = qec.sample_code_capacity(Hz, + n_shots, + error_prob, + seed=seed) + + assert len(Hz.shape) == 2 + assert Hz.shape[0] == 3 + assert Hz.shape[1] == 7 + assert syndromes.shape[0] == n_shots + assert syndromes.shape[1] == Hz.shape[0] + assert data.shape[0] == n_shots + assert data.shape[1] == Hz.shape[1] + + # Known seeded data for error_prob = 0.25 + seeded_data = np.array([[0, 1, 0, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 1], + [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0]]) + + checked_syndromes = np.array([[0, 0, 1], [1, 0, 0], [1, 1, 1], [0, 1, 0], + [0, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], + [0, 1, 1], [0, 0, 0]]) + + assert np.array_equal(data, seeded_data) + assert np.array_equal(syndromes, checked_syndromes) + + +if __name__ == "__main__": + pytest.main() diff --git a/libs/qec/python/tests/test_decoder.py b/libs/qec/python/tests/test_decoder.py new file mode 100644 index 0000000..a79bc0b --- /dev/null +++ b/libs/qec/python/tests/test_decoder.py @@ -0,0 +1,98 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # +import pytest +import numpy as np +import cudaq_qec as qec + +def create_test_matrix(): + np.random.seed(42) + return np.random.randint(0, 2, (10, 20)).astype(np.uint8) + +def create_test_syndrome(): + np.random.seed(42) + return np.random.random(10).tolist() + +H = create_test_matrix() + +def test_decoder_initialization(): + decoder = qec.get_decoder('example_byod', H) + assert decoder is not None + assert hasattr(decoder, 'decode') + +def test_decoder_result_structure(): + decoder = qec.get_decoder('example_byod', H) + result = decoder.decode(create_test_syndrome()) + + assert hasattr(result, 'converged') + assert hasattr(result, 'result') + assert isinstance(result.converged, bool) + assert isinstance(result.result, list) + assert len(result.result) == 10 + +def test_decoder_result_values(): + decoder = qec.get_decoder('example_byod', H) + result = decoder.decode(create_test_syndrome()) + + assert result.converged is True + assert all(isinstance(x, float) for x in result.result) + assert all(0 <= x <= 1 for x in result.result) + + +@pytest.mark.parametrize("matrix_shape,syndrome_size", [ + ((5, 10), 5), + ((15, 30), 15), + ((20, 40), 20) +]) +def test_decoder_different_matrix_sizes(matrix_shape, syndrome_size): + np.random.seed(42) + H = np.random.randint(0, 2, matrix_shape).astype(np.uint8) + syndrome = np.random.random(syndrome_size).tolist() + + decoder = qec.get_decoder('example_byod', H) + result = decoder.decode(syndrome) + + assert len(result.result) == syndrome_size + assert result.converged is True + assert all(isinstance(x, float) for x in result.result) + assert all(0 <= x <= 1 for x in result.result) + +# FIXME add this back +# def test_decoder_error_handling(): +# H = Tensor(create_test_matrix()) +# decoder = qec.get_decoder('example_byod', H) + +# # Test with incorrect syndrome size +# with pytest.raises(ValueError): +# wrong_syndrome = np.random.random(15).tolist() # Wrong size +# decoder.decode(wrong_syndrome) + +# # Test with invalid syndrome type +# with pytest.raises(TypeError): +# wrong_type_syndrome = "invalid" +# decoder.decode(wrong_type_syndrome) + +def test_decoder_reproducibility(): + decoder = qec.get_decoder('example_byod', H) + + np.random.seed(42) + result1 = decoder.decode(create_test_syndrome()) + + np.random.seed(42) + result2 = decoder.decode(create_test_syndrome()) + + assert result1.result == result2.result + assert result1.converged == result2.converged + +def test_pass_weights(): + error_probability = 0.1 + weights = np.ones(H.shape[1]) * np.log((1-error_probability)/error_probability) + decoder = qec.get_decoder('example_byod', H, weights=weights) + # Test is that no error is thrown + +if __name__ == "__main__": + pytest.main() diff --git a/libs/qec/unittests/CMakeLists.txt b/libs/qec/unittests/CMakeLists.txt new file mode 100644 index 0000000..d9638c5 --- /dev/null +++ b/libs/qec/unittests/CMakeLists.txt @@ -0,0 +1,46 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +# External Dependencies +# ============================================================================== + +FetchContent_Declare( + googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG v1.15.2 + EXCLUDE_FROM_ALL +) +FetchContent_MakeAvailable(googletest) + +set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + +# Bug in GCC 12 leads to spurious warnings (-Wrestrict) +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105329 +if (CMAKE_COMPILER_IS_GNUCXX + AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 12.0.0 + AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 13.0.0) + target_compile_options(gtest PUBLIC --param=evrp-mode=legacy) +endif() +include(GoogleTest) + +# ============================================================================== + +add_executable(test_decoders test_decoders.cpp decoders/sample_decoder.cpp) +target_link_libraries(test_decoders PRIVATE GTest::gtest_main cudaq-qec) +add_dependencies(CUDAQXQECUnitTests test_decoders) +gtest_discover_tests(test_decoders) + +add_executable(test_qec test_qec.cpp) +target_link_libraries(test_qec PRIVATE GTest::gtest_main cudaq-qec) +add_dependencies(CUDAQXQECUnitTests test_qec) +gtest_discover_tests(test_qec) + +# ============================================================================== + +add_subdirectory(backend-specific) + diff --git a/libs/qec/unittests/backend-specific/CMakeLists.txt b/libs/qec/unittests/backend-specific/CMakeLists.txt new file mode 100644 index 0000000..ea6d5dd --- /dev/null +++ b/libs/qec/unittests/backend-specific/CMakeLists.txt @@ -0,0 +1,9 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +add_subdirectory(stim) diff --git a/libs/qec/unittests/backend-specific/stim/CMakeLists.txt b/libs/qec/unittests/backend-specific/stim/CMakeLists.txt new file mode 100644 index 0000000..6714d49 --- /dev/null +++ b/libs/qec/unittests/backend-specific/stim/CMakeLists.txt @@ -0,0 +1,26 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# FIXME This needs to be in CUDA-Q. 
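+# Import the Stim simulator backend as a pre-built shared library so the QEC
+# unit tests below can link against it (this assumes the CUDA-Q installation
+# provides libnvqir-stim in ${CUDAQ_LIBRARY_DIR}).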
+add_library(cudaq::cudaq-stim-target SHARED IMPORTED) +set_target_properties(cudaq::cudaq-stim-target PROPERTIES + IMPORTED_LOCATION "${CUDAQ_LIBRARY_DIR}/libnvqir-stim${CMAKE_SHARED_LIBRARY_SUFFIX}" + IMPORTED_SONAME "libnvqir-stim${CMAKE_SHARED_LIBRARY_SUFFIX}" + IMPORTED_LINK_INTERFACE_LIBRARIES "cudaq::cudaq-platform-default;cudaq::cudaq-em-default") + +cudaqx_set_target(stim) + +add_executable(test_qec_stim test_qec_stim.cpp) +target_link_libraries(test_qec_stim + PRIVATE + GTest::gtest_main + cudaq::cudaq_stim + cudaq-qec +) +add_dependencies(CUDAQXQECUnitTests test_qec_stim) +gtest_discover_tests(test_qec_stim) diff --git a/libs/qec/unittests/backend-specific/stim/test_qec_stim.cpp b/libs/qec/unittests/backend-specific/stim/test_qec_stim.cpp new file mode 100644 index 0000000..79a4dac --- /dev/null +++ b/libs/qec/unittests/backend-specific/stim/test_qec_stim.cpp @@ -0,0 +1,537 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include +#include + +#include "cudaq.h" + +#include "cudaq/qec/decoder.h" +#include "cudaq/qec/experiments.h" + +TEST(QECCodeTester, checkRepetitionNoiseStim) { + + auto repetition = cudaq::qec::get_code("repetition", {{"distance", 9}}); + { + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("x", cudaq::qec::two_qubit_bitflip(0.1), + /*num_controls=*/1); + + auto [syndromes, d] = + cudaq::qec::sample_memory_circuit(*repetition, 2, 2, noise); + printf("syndrome\n"); + syndromes.dump(); + printf("data\n"); + d.dump(); + + // Should have some 1s since it's noisy + int sum = 0; + for (std::size_t i = 0; i < 2; i++) + for (std::size_t j = 0; j < 8; j++) + sum += syndromes.at({i, j}); + + EXPECT_TRUE(sum > 0); + } + { + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("x", cudaq::qec::two_qubit_depolarization(0.1), + /*num_controls=*/1); + + auto [syndromes, d] = + cudaq::qec::sample_memory_circuit(*repetition, 2, 2, noise); + printf("syndrome\n"); + syndromes.dump(); + printf("data\n"); + d.dump(); + + // Should have some 1s since it's noisy + int sum = 0; + for (std::size_t i = 0; i < 2; i++) + for (std::size_t j = 0; j < 8; j++) + sum += syndromes.at({i, j}); + + EXPECT_TRUE(sum > 0); + } +} + +TEST(QECCodeTester, checkSteaneNoiseStim) { + + auto repetition = cudaq::qec::get_code("steane"); + int nShots = 10; + int nRounds = 3; + { + // two qubit bitflip + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("x", cudaq::qec::two_qubit_bitflip(0.1), 1); + + auto [syndromes, d] = + cudaq::qec::sample_memory_circuit(*repetition, nShots, nRounds, noise); + printf("syndrome\n"); + syndromes.dump(); + printf("data\n"); + d.dump(); + + // Should have some 1s since it's noisy + // bitflip should only trigger x syndromes + int x_sum = 0; + int z_sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) { + for (std::size_t j_x = 0; j_x < syndromes.shape()[1] / 2; j_x++) { + x_sum += syndromes.at({i, j_x}); + } + for (std::size_t j_z = syndromes.shape()[1] / 2; + j_z < syndromes.shape()[1]; j_z++) { + z_sum += syndromes.at({i, j_z}); + } + } + EXPECT_TRUE(x_sum > 0); + EXPECT_TRUE(z_sum == 0); + } 
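+  // The remaining blocks repeat the same Steane memory experiment with
+  // depolarizing and single-qubit noise channels and check which half of the
+  // syndrome register (X vs. Z ancillas) is triggered.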
+ { + // two qubit depol + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("x", cudaq::qec::two_qubit_depolarization(0.1), + 1); + + auto [syndromes, d] = + cudaq::qec::sample_memory_circuit(*repetition, nShots, nRounds, noise); + printf("syndrome\n"); + syndromes.dump(); + printf("data\n"); + d.dump(); + + // Should have some 1s since it's noisy + // depolarizing triggers x and z syndromes + int x_sum = 0; + int z_sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) { + for (std::size_t j_x = 0; j_x < syndromes.shape()[1] / 2; j_x++) { + x_sum += syndromes.at({i, j_x}); + } + for (std::size_t j_z = syndromes.shape()[1] / 2; + j_z < syndromes.shape()[1]; j_z++) { + z_sum += syndromes.at({i, j_z}); + } + } + EXPECT_TRUE(x_sum > 0); + EXPECT_TRUE(z_sum > 0); + } + { + // one qubit bitflip + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("h", cudaq::bit_flip_channel(0.1)); + + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *repetition, cudaq::qec::operation::prepp, nShots, nRounds, noise); + printf("syndrome\n"); + syndromes.dump(); + printf("data\n"); + d.dump(); + + // Should have some 1s since it's noisy + // only getting detectible errors on s_z ancillas + int x_sum = 0; + int z_sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) { + for (std::size_t j_x = 0; j_x < syndromes.shape()[1] / 2; j_x++) { + x_sum += syndromes.at({i, j_x}); + } + for (std::size_t j_z = syndromes.shape()[1] / 2; + j_z < syndromes.shape()[1]; j_z++) { + z_sum += syndromes.at({i, j_z}); + } + } + EXPECT_TRUE(x_sum == 0); + EXPECT_TRUE(z_sum > 0); + } + { + // one qubit phase + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("h", cudaq::phase_flip_channel(0.1)); + + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *repetition, cudaq::qec::operation::prepp, nShots, nRounds, noise); + printf("syndrome\n"); + syndromes.dump(); + printf("data\n"); + d.dump(); + + // Should have some 1s since it's noisy + // only getting detectible errors on s_z ancillas + int x_sum = 0; + int z_sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) { + for (std::size_t j_x = 0; j_x < syndromes.shape()[1] / 2; j_x++) { + x_sum += syndromes.at({i, j_x}); + } + for (std::size_t j_z = syndromes.shape()[1] / 2; + j_z < syndromes.shape()[1]; j_z++) { + z_sum += syndromes.at({i, j_z}); + } + } + EXPECT_TRUE(x_sum == 0); + EXPECT_TRUE(z_sum > 0); + } + { + // one qubit depol + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("h", cudaq::depolarization_channel(0.1)); + + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *repetition, cudaq::qec::operation::prepp, nShots, nRounds, noise); + printf("syndrome\n"); + syndromes.dump(); + printf("data\n"); + d.dump(); + + // Should have some 1s since it's noisy + // only getting detectible errors on s_z ancillas + int x_sum = 0; + int z_sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) { + for (std::size_t j_x = 0; j_x < syndromes.shape()[1] / 2; j_x++) { + x_sum += syndromes.at({i, j_x}); + } + for (std::size_t j_z = syndromes.shape()[1] / 2; + j_z < syndromes.shape()[1]; j_z++) { + z_sum += syndromes.at({i, j_z}); + } + } + EXPECT_TRUE(x_sum == 0); + EXPECT_TRUE(z_sum > 0); + } +} + +TEST(QECCodeTester, checkSampleMemoryCircuit) { + { + // Steane tests + auto steane = cudaq::qec::get_code("steane"); + cudaqx::tensor observables = + 
steane->get_pauli_observables_matrix(); + cudaqx::tensor Lx = steane->get_observables_x(); + cudaqx::tensor Lz = steane->get_observables_z(); + + int nShots = 10; + int nRounds = 4; + { + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prep0, nShots, nRounds); + syndromes.dump(); + + // No noise here, should be all zeros + int sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) + for (std::size_t j = 0; j < syndromes.shape()[1]; j++) + sum += syndromes.at({i, j}); + EXPECT_TRUE(sum == 0); + + // Prep0, should measure out logical 0 each shot + printf("data:\n"); + d.dump(); + printf("Lz:\n"); + Lz.dump(); + cudaqx::tensor logical_mz = Lz.dot(d.transpose()) % 2; + printf("logical_mz:\n"); + logical_mz.dump(); + EXPECT_FALSE(logical_mz.any()); + } + { + // Prep1, should measure out logical 1 each shot + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prep1, nShots, nRounds); + printf("data:\n"); + d.dump(); + printf("Lz:\n"); + Lz.dump(); + cudaqx::tensor logical_mz = Lz.dot(d.transpose()) % 2; + printf("logical_mz:\n"); + logical_mz.dump(); + EXPECT_EQ(nShots, logical_mz.sum_all()); + } + { + // Prepp, should measure out logical + each shot + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prepp, nShots, nRounds); + printf("data:\n"); + d.dump(); + printf("Lx:\n"); + Lx.dump(); + cudaqx::tensor logical_mx = Lx.dot(d.transpose()) % 2; + printf("logical_mx:\n"); + logical_mx.dump(); + EXPECT_FALSE(logical_mx.any()); + } + { + // Prepm, should measure out logical - each shot + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prepm, nShots, nRounds); + printf("data:\n"); + d.dump(); + printf("Lx:\n"); + Lx.dump(); + cudaqx::tensor logical_mx = Lx.dot(d.transpose()) % 2; + printf("logical_mx:\n"); + logical_mx.dump(); + EXPECT_EQ(nShots, logical_mx.sum_all()); + } + { + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("x", cudaq::qec::two_qubit_bitflip(0.1), 1); + + nShots = 10; + nRounds = 4; + + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prep0, nShots, nRounds, noise); + printf("syndromes:\n"); + syndromes.dump(); + + // Noise here, expect a nonzero + int sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) + for (std::size_t j = 0; j < syndromes.shape()[1]; j++) + sum += syndromes.at({i, j}); + EXPECT_TRUE(sum > 0); + + // With noise, Lz will sometimes be flipped + printf("data:\n"); + d.dump(); + printf("Lz:\n"); + Lz.dump(); + cudaqx::tensor logical_mz = Lz.dot(d.transpose()) % 2; + printf("logical_mz:\n"); + logical_mz.dump(); + EXPECT_TRUE(logical_mz.any()); + } + } +} + +TEST(QECCodeTester, checkTwoQubitBitflip) { + // This circuit should read out |00> with and without bitflip noise + struct null1 { + void operator()() __qpu__ { + cudaq::qvector q(2); + h(q); + x(q[0], q[1]); + h(q); + } + }; + + // This circuit should read out |00> without bitflip noise, and random values + // with + struct null2 { + void operator()() __qpu__ { + cudaq::qvector q(2); + x(q[0], q[1]); + } + }; + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("x", cudaq::qec::two_qubit_bitflip(0.1), 1); + cudaq::set_noise(noise); + + auto counts1 = cudaq::sample(100, null1{}); + EXPECT_FLOAT_EQ(1.0, counts1.probability("00")); + + auto counts2 = cudaq::sample(100, null2{}); + 
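+  // null2 applies a bare CX, so the injected two-qubit bitflip channel should
+  // pull the |00> population noticeably below 1.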
EXPECT_TRUE(counts2.probability("00") < 0.9); + cudaq::unset_noise(); +} + +TEST(QECCodeTester, checkBitflip) { + // This circuit should read out |0> when noiseless + struct null1 { + void operator()() __qpu__ { + cudaq::qubit q; + h(q); + h(q); + } + }; + + auto counts1 = cudaq::sample(100, null1{}); + EXPECT_FLOAT_EQ(1.0, counts1.probability("0")); + + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("h", cudaq::bit_flip_channel(0.5)); + cudaq::set_noise(noise); + auto counts2 = cudaq::sample(100, null1{}); + cudaq::unset_noise(); + EXPECT_TRUE(counts2.probability("0") < 0.9); +} + +TEST(QECCodeTester, checkNoisySampleMemoryCircuitAndDecode) { + { + // Steane tests + auto steane = cudaq::qec::get_code("steane"); + cudaqx::tensor H = steane->get_parity(); + cudaqx::tensor observables = + steane->get_pauli_observables_matrix(); + cudaqx::tensor Lx = steane->get_observables_x(); + cudaqx::tensor Lz = steane->get_observables_z(); + + int nShots = 1; + int nRounds = 10; + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("x", cudaq::qec::two_qubit_depolarization(0.01), + 1); + + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prep0, nShots, nRounds, noise); + printf("syndromes:\n"); + syndromes.dump(); + + // Noise here, expect a nonzero + int sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) + for (std::size_t j = 0; j < syndromes.shape()[1]; j++) + sum += syndromes.at({i, j}); + EXPECT_TRUE(sum > 0); + + // With noise, Lz will sometimes be flipped + printf("data:\n"); + d.dump(); + printf("Lz:\n"); + Lz.dump(); + cudaqx::tensor logical_mz = Lz.dot(d.transpose()) % 2; + printf("logical_mz:\n"); + logical_mz.dump(); + + // s = (sx | sz) + // sx = Hz . ex + // sz = Hx . ez + + printf("Obs:\n"); + observables.dump(); + auto decoder = cudaq::qec::get_decoder("single_error_lut", H); + printf("Hz:\n"); + H.dump(); + printf("end\n"); + size_t numLerrors = 0; + size_t stride = syndromes.shape()[1]; + cudaqx::tensor pauli_frame({observables.shape()[0]}); + for (size_t i = 0; i < nRounds - 1; ++i) { + cudaqx::tensor syndrome({stride}); + syndrome.borrow(syndromes.data() + i * stride); + printf("syndrome:\n"); + syndrome.dump(); + auto [converged, v_result] = decoder->decode(syndrome); + cudaqx::tensor result_tensor; + cudaq::qec::convert_vec_soft_to_tensor_hard(v_result, result_tensor); + printf("decode result:\n"); + result_tensor.dump(); + cudaqx::tensor decoded_observables = + observables.dot(result_tensor); + printf("decoded observable:\n"); + decoded_observables.dump(); + pauli_frame = (pauli_frame + decoded_observables) % 2; + printf("pauli frame:\n"); + pauli_frame.dump(); + } + // prep0 means this is a z-basis experiment + // Check if Lz + pauli_frame[0] = 0? 
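+    // A logical error is recorded whenever the accumulated Pauli frame fails
+    // to account for the measured logical Z value.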
+ printf("Lz: %d, xFlips: %d\n", Lz.at({0, 0}), pauli_frame.at({0})); + if (Lz.at({0, 0}) != pauli_frame.at({0})) + numLerrors++; + // No logicals errors for this seed + EXPECT_EQ(0, numLerrors); + } + { + // Test x-basis and x-flips + auto steane = cudaq::qec::get_code("steane"); + cudaqx::tensor H = steane->get_parity(); + cudaqx::tensor Hx = steane->get_parity_x(); + cudaqx::tensor Hz = steane->get_parity_z(); + cudaqx::tensor observables = + steane->get_pauli_observables_matrix(); + cudaqx::tensor Lx = steane->get_observables_x(); + cudaqx::tensor Lz = steane->get_observables_z(); + + int nShots = 10; + int nRounds = 4; + cudaq::set_random_seed(13); + cudaq::noise_model noise; + noise.add_all_qubit_channel("x", cudaq::qec::two_qubit_bitflip(0.05), 1); + + // Bitflip is X-type error, detected by Z stabilizers (Hz) + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prepp, nShots, nRounds, noise); + printf("syndromes:\n"); + syndromes.dump(); + + // With noise, Lx will sometimes be flipped + printf("data:\n"); + d.dump(); + printf("Lx:\n"); + Lx.dump(); + cudaqx::tensor logical_mx = Lx.dot(d.transpose()) % 2; + // Can make a column vector + printf("logical_mx:\n"); + logical_mx.dump(); + // bit flip errors trigger Z-type stabilizers (ZZIII) + // these will be extracted into the ancx syndrome registers + // (s_x | s_z ) = ( X flip syndromes, Z Flip syndromes) + + cudaqx::tensor final_sx = Hz.dot(d.transpose()) % 2; + // If x basis experiment, this would be final sx + printf("final sx:\n"); + final_sx.dump(); + + printf("Obs:\n"); + observables.dump(); + auto decoder = cudaq::qec::get_decoder("single_error_lut", H); + printf("end\n"); + size_t numLerrors = 0; + size_t stride = syndromes.shape()[1]; + for (size_t shot = 0; shot < nShots; ++shot) { + cudaqx::tensor pauli_frame({observables.shape()[0]}); + for (size_t i = 0; i < nRounds - 1; ++i) { + size_t count = shot * (nRounds - 1) + i; + printf("shot: %zu, round: %zu, count: %zu\n", shot, i, count); + cudaqx::tensor syndrome({stride}); + syndrome.borrow(syndromes.data() + stride * count); + printf("syndrome:\n"); + syndrome.dump(); + auto [converged, v_result] = decoder->decode(syndrome); + cudaqx::tensor result_tensor; + cudaq::qec::convert_vec_soft_to_tensor_hard(v_result, result_tensor); + + printf("decode result:\n"); + result_tensor.dump(); + cudaqx::tensor decoded_observables = + observables.dot(result_tensor); + printf("decoded observable:\n"); + decoded_observables.dump(); + pauli_frame = (pauli_frame + decoded_observables) % 2; + printf("pauli frame:\n"); + pauli_frame.dump(); + } + // prepp means this is a x-basis experiment + // does LMx + pauli_frame[1] = |+>? (+ is read out as 0 after rotation) + + printf("Obs_x: %d, pfZ: %d\n", logical_mx.at({0, shot}), + pauli_frame.at({1})); + uint8_t corrected_obs = + (logical_mx.at({0, shot}) + pauli_frame.at({1})) % 2; + std::cout << "corrected_obs: " << +corrected_obs << "\n"; + if (corrected_obs != 0) + numLerrors++; + } + printf("numLerrors: %zu\n", numLerrors); + EXPECT_TRUE(numLerrors > 0); + } +} diff --git a/libs/qec/unittests/decoders/sample_decoder.cpp b/libs/qec/unittests/decoders/sample_decoder.cpp new file mode 100644 index 0000000..bd7ae7b --- /dev/null +++ b/libs/qec/unittests/decoders/sample_decoder.cpp @@ -0,0 +1,46 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. 
* + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/qec/decoder.h" +#include + +using namespace cudaqx; + +namespace cudaq::qec { + +/// @brief This is a sample (dummy) decoder that demonstrates how to build a +/// bare bones custom decoder based on the `cudaqx::qec::decoder` interface. +class sample_decoder : public decoder { +public: + sample_decoder(const cudaqx::tensor &H, + const cudaqx::heterogeneous_map ¶ms) + : decoder(H) { + // Decoder-specific constructor arguments can be placed in `params`. + } + + virtual decoder_result decode(const std::vector &syndrome) { + // This is a simple decoder that simply results + decoder_result result; + result.converged = true; + result.result = std::vector(block_size, 0.0f); + return result; + } + + virtual ~sample_decoder() {} + + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( + sample_decoder, static std::unique_ptr create( + const cudaqx::tensor &H, + const cudaqx::heterogeneous_map ¶ms) { + return std::make_unique(H, params); + }) +}; + +CUDAQ_REGISTER_TYPE(sample_decoder) + +} // namespace cudaq::qec diff --git a/libs/qec/unittests/test_decoders.cpp b/libs/qec/unittests/test_decoders.cpp new file mode 100644 index 0000000..1b0af72 --- /dev/null +++ b/libs/qec/unittests/test_decoders.cpp @@ -0,0 +1,149 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "cudaq/qec/decoder.h" +#include +#include + +TEST(DecoderUtils, CovertHardToSoft) { + std::vector in = {1, 0, 1, 1}; + std::vector out; + std::vector expected_out = {1.0, 0.0, 1.0, 1.0}; + + cudaq::qec::convert_vec_hard_to_soft(in, out); + ASSERT_EQ(out.size(), expected_out.size()); + for (int i = 0; i < out.size(); i++) + ASSERT_EQ(out[i], expected_out[i]); + + expected_out = {0.9, 0.1, 0.9, 0.9}; + cudaq::qec::convert_vec_hard_to_soft(in, out, 0.9f, 0.1f); + ASSERT_EQ(out.size(), expected_out.size()); + for (int i = 0; i < out.size(); i++) + ASSERT_EQ(out[i], expected_out[i]); + + std::vector> in2 = {{1, 0}, {0, 1}}; + std::vector> out2; + std::vector> expected_out2 = {{0.9, 0.1}, {0.1, 0.9}}; + cudaq::qec::convert_vec_hard_to_soft(in2, out2, 0.9, 0.1); + for (int r = 0; r < out2.size(); r++) { + ASSERT_EQ(out2.size(), expected_out2.size()); + for (int c = 0; c < out2.size(); c++) + ASSERT_EQ(out2[r][c], expected_out2[r][c]); + } +} + +TEST(DecoderUtils, CovertSoftToHard) { + std::vector in = {0.6, 0.4, 0.7, 0.8}; + std::vector out; + std::vector expected_out = {true, false, true, true}; + + cudaq::qec::convert_vec_soft_to_hard(in, out); + ASSERT_EQ(out.size(), expected_out.size()); + for (int i = 0; i < out.size(); i++) + ASSERT_EQ(out[i], expected_out[i]); + + expected_out = {true, true, true, true}; + cudaq::qec::convert_vec_soft_to_hard(in, out, 0.4f); + ASSERT_EQ(out.size(), expected_out.size()); + for (int i = 0; i < out.size(); i++) + ASSERT_EQ(out[i], expected_out[i]); + + std::vector> in2 = {{0.6, 0.4}, {0.7, 0.8}}; + std::vector> out2; + std::vector> expected_out2 = {{1, 0}, {1, 1}}; + cudaq::qec::convert_vec_soft_to_hard(in2, out2); + for (int r = 0; r < out2.size(); r++) { + ASSERT_EQ(out2.size(), expected_out2.size()); + for (int c = 0; c < out2.size(); c++) + ASSERT_EQ(out2[r][c], expected_out2[r][c]); + } +} + +TEST(SampleDecoder, checkAPI) { + using cudaq::qec::float_t; + + std::size_t block_size = 10; + std::size_t syndrome_size = 4; + cudaqx::tensor H({syndrome_size, block_size}); + auto d = cudaq::qec::decoder::get("sample_decoder", H); + std::vector syndromes(syndrome_size); + auto dec_result = d->decode(syndromes); + ASSERT_EQ(dec_result.result.size(), block_size); + for (auto x : dec_result.result) + ASSERT_EQ(x, 0.0f); + + // Async test + dec_result = d->decode_async(syndromes).get(); + ASSERT_EQ(dec_result.result.size(), block_size); + for (auto x : dec_result.result) + ASSERT_EQ(x, 0.0f); + + // Multi test + auto dec_results = d->decode_multi({syndromes, syndromes}); + ASSERT_EQ(dec_results.size(), 2); + for (auto &m : dec_results) + for (auto x : m.result) + ASSERT_EQ(x, 0.0f); +} + +TEST(SteaneLutDecoder, checkAPI) { + using cudaq::qec::float_t; + + // Use Hx from the [7,1,3] Steane code from + // https://en.wikipedia.org/wiki/Steane_code. + std::size_t block_size = 7; + std::size_t syndrome_size = 3; + cudaqx::heterogeneous_map custom_args; + + std::vector H_vec = {0, 0, 0, 1, 1, 1, 1, // IIIXXXX + 0, 1, 1, 0, 0, 1, 1, // IXXIIXX + 1, 0, 1, 0, 1, 0, 1}; // XIXIXIX + cudaqx::tensor H; + H.copy(H_vec.data(), {syndrome_size, block_size}); + auto d = cudaq::qec::decoder::get("single_error_lut", H, custom_args); + + // Run decoding on all possible syndromes. + const std::size_t num_syndromes_to_check = 1 << syndrome_size; + bool convergeTrueFound = false; + bool convergeFalseFound = false; + assert(syndrome_size <= 64); // Assert due to "1 << bit" below. 
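+  // Each syn_idx enumerates one syndrome bit pattern: bit b of syn_idx sets
+  // syndrome[b]. For example, syn_idx = 5 (0b101) yields the syndrome
+  // {1.0, 0.0, 1.0}, which equals column 4 of H, i.e. a single error on data
+  // qubit 4.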
+ for (std::size_t syn_idx = 0; syn_idx < num_syndromes_to_check; syn_idx++) { + // Construct a syndrome. + std::vector syndrome(syndrome_size, 0.0); + for (int bit = 0; bit < syndrome_size; bit++) + if (syn_idx & (1 << bit)) + syndrome[bit] = 1.0; + + // Perform decoding. + auto dec_result = d->decode(syndrome); + + // Check results. + ASSERT_EQ(dec_result.result.size(), block_size); + const auto printResults = true; + if (printResults) { + std::string syndrome_str(syndrome_size, '0'); + for (std::size_t j = 0; j < syndrome_size; j++) + if (syndrome[j] >= 0.5) + syndrome_str[j] = '1'; + std::cout << "Syndrome " << syndrome_str + << " returned: {converged: " << dec_result.converged + << ", result: {"; + for (std::size_t j = 0; j < block_size; j++) { + std::cout << dec_result.result[j]; + if (j < block_size - 1) + std::cout << ","; + else + std::cout << "}}\n"; + } + } + convergeTrueFound |= dec_result.converged; + convergeFalseFound |= !dec_result.converged; + } + ASSERT_TRUE(convergeTrueFound); + ASSERT_FALSE(convergeFalseFound); +} diff --git a/libs/qec/unittests/test_qec.cpp b/libs/qec/unittests/test_qec.cpp new file mode 100644 index 0000000..936ba5a --- /dev/null +++ b/libs/qec/unittests/test_qec.cpp @@ -0,0 +1,584 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include +#include + +#include "cudaq.h" + +#include "cudaq/qec/experiments.h" + +TEST(StabilizerTester, checkConstructFromSpinOps) { + { + // Constructor will always auto sort + std::vector stab{cudaq::spin_op::from_word("ZZZZIII"), + cudaq::spin_op::from_word("XXXXIII"), + cudaq::spin_op::from_word("IXXIXXI"), + cudaq::spin_op::from_word("IIXXIXX"), + cudaq::spin_op::from_word("IZZIZZI"), + cudaq::spin_op::from_word("IIZZIZZ")}; + EXPECT_EQ(stab.size(), 6); + auto parity = cudaq::qec::to_parity_matrix(stab); + parity.dump(); + EXPECT_EQ(parity.rank(), 2); + std::vector expected_shape{6, 14}; + EXPECT_EQ(parity.shape(), expected_shape); + + { + std::vector data = { + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* row 0 */ + 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, /* row 1 */ + 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* row 2 */ + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, /* row 3 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, /* row 4 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1}; /* row 5 */ + + cudaqx::tensor t(expected_shape); + t.borrow(data.data(), expected_shape); + for (std::size_t i = 0; i < 6; i++) + for (std::size_t j = 0; j < 14; j++) + EXPECT_EQ(t.at({i, j}), parity.at({i, j})); + } + { + auto parity_x = + cudaq::qec::to_parity_matrix(stab, cudaq::qec::stabilizer_type::X); + printf("Hx:\n"); + parity_x.dump(); + EXPECT_EQ(parity_x.rank(), 2); + std::vector expected_shape{3, 7}; + EXPECT_EQ(parity_x.shape(), expected_shape); + std::vector data = {1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 1, 1, 0, 1, 1}; + cudaqx::tensor t(expected_shape); + t.borrow(data.data(), expected_shape); + for (std::size_t i = 0; i < 3; i++) + for (std::size_t j = 0; j < 7; j++) + EXPECT_EQ(t.at({i, j}), parity_x.at({i, j})); + } + } + { + + // Note testing here also that constructor sorts them + std::vector stab{"ZZZZIII", "XXXXIII", "IXXIXXI", + "IIXXIXX", 
"IZZIZZI", "IIZZIZZ"}; + EXPECT_EQ(stab.size(), 6); + auto parity = cudaq::qec::to_parity_matrix(stab); + parity.dump(); + EXPECT_EQ(parity.rank(), 2); + std::vector expected_shape{6, 14}; + EXPECT_EQ(parity.shape(), expected_shape); + { + std::vector data = { + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* row 0 */ + 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, /* row 1 */ + 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* row 2 */ + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, /* row 3 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, /* row 4 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1}; /* row 5 */ + + cudaqx::tensor t(expected_shape); + t.borrow(data.data(), expected_shape); + for (std::size_t i = 0; i < 6; i++) + for (std::size_t j = 0; j < 14; j++) + EXPECT_EQ(t.at({i, j}), parity.at({i, j})); + } + { + auto parity_z = + cudaq::qec::to_parity_matrix(stab, cudaq::qec::stabilizer_type::Z); + parity_z.dump(); + EXPECT_EQ(parity_z.rank(), 2); + std::vector expected_shape{3, 7}; + EXPECT_EQ(parity_z.shape(), expected_shape); + std::vector data = {1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 1, 1, 0, 1, 1}; + cudaqx::tensor t(expected_shape); + t.borrow(data.data(), expected_shape); + for (std::size_t i = 0; i < 3; i++) + for (std::size_t j = 0; j < 7; j++) + EXPECT_EQ(t.at({i, j}), parity_z.at({i, j})); + } + } +} + +TEST(QECCodeTester, checkSampleMemoryCircuit) { + { + // Steane tests + auto steane = cudaq::qec::get_code("steane"); + cudaqx::tensor parity = steane->get_parity(); + cudaqx::tensor observables = + steane->get_pauli_observables_matrix(); + cudaqx::tensor Lx = steane->get_observables_x(); + cudaqx::tensor Lz = steane->get_observables_z(); + + int nShots = 10; + int nRounds = 4; + { + // Prep0 experiment. Prep all data qubits in Z basis. + // Measure all data qubits in the Z basis. + // To correct it, find out how many times it flipped. + // X errors flip the Z observable. + // So when we get the predicted error data string E = E_X | E_Z + // from the decoder, we apply E_X to our L_mz to correct it. 
+ auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prep0, nShots, nRounds); + syndromes.dump(); + + // No noise here, should be all zeros + int sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) + for (std::size_t j = 0; j < syndromes.shape()[1]; j++) + sum += syndromes.at({i, j}); + EXPECT_TRUE(sum == 0); + + // Prep0, should measure out logical |0> each shot + printf("data:\n"); + d.dump(); + printf("Lz:\n"); + Lz.dump(); + cudaqx::tensor logical_mz = Lz.dot(d.transpose()) % 2; + printf("logical_mz:\n"); + logical_mz.dump(); + EXPECT_FALSE(logical_mz.any()); + } + { + // Prep1, should measure out logical |1> each shot + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prep1, nShots, nRounds); + printf("data:\n"); + d.dump(); + printf("Lz:\n"); + Lz.dump(); + cudaqx::tensor logical_mz = Lz.dot(d.transpose()) % 2; + printf("logical_mz:\n"); + logical_mz.dump(); + EXPECT_EQ(nShots, logical_mz.sum_all()); + } + { + // Prepp, should measure out logical |+> each shot + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prepp, nShots, nRounds); + printf("data:\n"); + d.dump(); + printf("Lx:\n"); + Lx.dump(); + cudaqx::tensor logical_mx = Lx.dot(d.transpose()) % 2; + printf("logical_mx:\n"); + logical_mx.dump(); + EXPECT_FALSE(logical_mx.any()); + } + { + // Prepm, should measure out logical |-> each shot + auto [syndromes, d] = cudaq::qec::sample_memory_circuit( + *steane, cudaq::qec::operation::prepm, nShots, nRounds); + printf("data:\n"); + d.dump(); + printf("Lx:\n"); + Lx.dump(); + cudaqx::tensor logical_mx = Lx.dot(d.transpose()) % 2; + printf("logical_mx:\n"); + logical_mx.dump(); + EXPECT_EQ(nShots, logical_mx.sum_all()); + } + } +} + +TEST(QECCodeTester, checkSteane) { + { + // with default stabilizers + auto steane = cudaq::qec::get_code("steane"); + cudaqx::tensor parity = steane->get_parity(); + cudaqx::tensor observables = + steane->get_pauli_observables_matrix(); + cudaqx::tensor Lx = steane->get_observables_x(); + cudaqx::tensor Lz = steane->get_observables_z(); + { + std::vector data = { + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* row 0 */ + 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, /* row 1 */ + 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* row 2 */ + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, /* row 3 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, /* row 4 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1}; /* row 5 */ + + std::vector expected_shape{6, 14}; + cudaqx::tensor t(expected_shape); + t.borrow(data.data()); + for (std::size_t i = 0; i < 6; i++) + for (std::size_t j = 0; j < 14; j++) + EXPECT_EQ(t.at({i, j}), parity.at({i, j})); + } + EXPECT_EQ(2, observables.rank()); + EXPECT_EQ(2, observables.shape()[0]); + EXPECT_EQ(14, observables.shape()[1]); + EXPECT_EQ(2, Lx.rank()); + EXPECT_EQ(1, Lx.shape()[0]); + EXPECT_EQ(7, Lx.shape()[1]); + EXPECT_EQ(2, Lz.rank()); + EXPECT_EQ(1, Lz.shape()[0]); + EXPECT_EQ(7, Lz.shape()[1]); + { + std::vector> true_observables = { + {0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1}}; + std::vector> true_Lx = {{0, 0, 0, 0, 1, 1, 1}}; + std::vector> true_Lz = {{0, 0, 0, 0, 1, 1, 1}}; + for (std::size_t i = 0; i < observables.shape()[0]; ++i) + for (std::size_t j = 0; j < observables.shape()[1]; ++j) + EXPECT_EQ(true_observables[i][j], observables.at({i, j})); + + for (std::size_t i = 0; i < Lx.shape()[0]; ++i) + for (std::size_t j = 0; j < Lx.shape()[1]; ++j) + 
EXPECT_EQ(true_Lx[i][j], Lx.at({i, j})); + + for (std::size_t i = 0; i < Lz.shape()[0]; ++i) + for (std::size_t j = 0; j < Lz.shape()[1]; ++j) + EXPECT_EQ(true_Lz[i][j], Lz.at({i, j})); + } + + auto [syndromes, d] = cudaq::qec::sample_memory_circuit(*steane, 10, 4); + syndromes.dump(); + + // No noise here, should be all zeros + int sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) + for (std::size_t j = 0; j < syndromes.shape()[1]; j++) + sum += syndromes.at({i, j}); + + EXPECT_TRUE(sum == 0); + } + { + // From Stabilizers + std::vector words{"ZZZZIII", "XXXXIII", "IXXIXXI", + "IIXXIXX", "IZZIZZI", "IIZZIZZ"}; + std::vector ops; + for (auto &os : words) + ops.emplace_back(cudaq::spin_op::from_word(os)); + cudaq::qec::sortStabilizerOps(ops); + auto steane = cudaq::qec::get_code("steane", ops); + auto parity = steane->get_parity(); + { + std::vector data = { + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* row 0 */ + 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, /* row 1 */ + 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* row 2 */ + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, /* row 3 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, /* row 4 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1}; /* row 5 */ + std::vector expected_shape{6, 14}; + cudaqx::tensor t(expected_shape); + t.borrow(data.data()); + for (std::size_t i = 0; i < 6; i++) + for (std::size_t j = 0; j < 14; j++) + EXPECT_EQ(t.at({i, j}), parity.at({i, j})); + } + + auto [syndromes, d] = cudaq::qec::sample_memory_circuit(*steane, 10, 4); + syndromes.dump(); + // No noise here, should be all zeros + int sum = 0; + for (std::size_t i = 0; i < syndromes.shape()[0]; i++) + for (std::size_t j = 0; j < syndromes.shape()[1]; j++) + sum += syndromes.at({i, j}); + + EXPECT_TRUE(sum == 0); + } +} + +TEST(QECCodeTester, checkCodeCapacity) { + { + auto steane = cudaq::qec::get_code("steane"); + auto Hz = steane->get_parity_z(); + int nShots = 10; + double error_prob = 0; + + auto [syndromes, data] = + cudaq::qec::sample_code_capacity(Hz, nShots, error_prob); + EXPECT_EQ(2, Hz.rank()); + EXPECT_EQ(3, Hz.shape()[0]); + EXPECT_EQ(7, Hz.shape()[1]); + EXPECT_EQ(nShots, syndromes.shape()[0]); + EXPECT_EQ(Hz.shape()[0], syndromes.shape()[1]); + EXPECT_EQ(nShots, data.shape()[0]); + EXPECT_EQ(Hz.shape()[1], data.shape()[1]); + + // Error prob = 0 should be all zeros + for (size_t i = 0; i < nShots; ++i) { + for (size_t j = 0; j < Hz.shape()[1]; ++j) { + EXPECT_EQ(0, data.at({i, j})); + } + } + + for (size_t i = 0; i < nShots; ++i) { + for (size_t j = 0; j < Hz.shape()[0]; ++j) { + EXPECT_EQ(0, syndromes.at({i, j})); + } + } + } + { + auto steane = cudaq::qec::get_code("steane"); + auto Hz = steane->get_parity_z(); + int nShots = 10; + double error_prob = 0.15; + unsigned seed = 1337; + + auto [syndromes, data] = + cudaq::qec::sample_code_capacity(Hz, nShots, error_prob, seed); + EXPECT_EQ(2, Hz.rank()); + EXPECT_EQ(3, Hz.shape()[0]); + EXPECT_EQ(7, Hz.shape()[1]); + EXPECT_EQ(nShots, syndromes.shape()[0]); + EXPECT_EQ(Hz.shape()[0], syndromes.shape()[1]); + EXPECT_EQ(nShots, data.shape()[0]); + EXPECT_EQ(Hz.shape()[1], data.shape()[1]); + // seed = 1337, error_prob = 0.15, nShots = 10 + // produces this data set: + // This seed happens to only have weight 0 or 1 errors, + // which are easy to check by hand. 
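+    // Worked hand-check for the first shot below: its only error is on data
+    // qubit 3, so the syndrome is column 3 of Hz, namely (1, 0, 1) -- the
+    // first row of checked_syndromes.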
+ std::vector> seeded_data = { + {0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 1, 0, 0, 0}, {0, 0, 1, 0, 0, 0, 0}, + {0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 1, 0}, + {0, 0, 0, 0, 0, 0, 0}}; + EXPECT_EQ(nShots, seeded_data.size()); + EXPECT_EQ(Hz.shape()[1], seeded_data[0].size()); + for (size_t i = 0; i < nShots; ++i) { + for (size_t j = 0; j < Hz.shape()[1]; ++j) { + EXPECT_EQ(seeded_data[i][j], data.at({i, j})); + } + } + + // Hand-checked syndromes + std::vector> checked_syndromes = { + {1, 0, 1}, {1, 0, 1}, {1, 1, 1}, {0, 1, 0}, {0, 0, 0}, + {0, 0, 0}, {0, 1, 0}, {0, 0, 0}, {0, 1, 1}, {0, 0, 0}}; + EXPECT_EQ(nShots, checked_syndromes.size()); + EXPECT_EQ(Hz.shape()[0], checked_syndromes[0].size()); + for (size_t i = 0; i < nShots; ++i) { + for (size_t j = 0; j < Hz.shape()[0]; ++j) { + EXPECT_EQ(checked_syndromes[i][j], syndromes.at({i, j})); + } + } + } + { + auto steane = cudaq::qec::get_code("steane"); + auto Hz = steane->get_parity_z(); + int nShots = 10; + double error_prob = 0.25; + unsigned seed = 1337; + + auto [syndromes, data] = + cudaq::qec::sample_code_capacity(Hz, nShots, error_prob, seed); + EXPECT_EQ(2, Hz.rank()); + EXPECT_EQ(3, Hz.shape()[0]); + EXPECT_EQ(7, Hz.shape()[1]); + EXPECT_EQ(nShots, syndromes.shape()[0]); + EXPECT_EQ(Hz.shape()[0], syndromes.shape()[1]); + EXPECT_EQ(nShots, data.shape()[0]); + EXPECT_EQ(Hz.shape()[1], data.shape()[1]); + // seed = 1337, error_prob = 0.25, nShots = 10 + // produces this data set: + // This seed has some higher weight errors which + // where checked by hand + std::vector> seeded_data = { + {0, 1, 0, 1, 1, 0, 0}, {0, 0, 0, 1, 0, 0, 1}, {0, 0, 1, 0, 0, 0, 0}, + {0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 1, 0}, + {0, 0, 0, 0, 0, 0, 0}}; + EXPECT_EQ(nShots, seeded_data.size()); + EXPECT_EQ(Hz.shape()[1], seeded_data[0].size()); + for (size_t i = 0; i < nShots; ++i) { + for (size_t j = 0; j < Hz.shape()[1]; ++j) { + EXPECT_EQ(seeded_data[i][j], data.at({i, j})); + } + } + + // Hand-checked syndromes + std::vector> checked_syndromes = { + {0, 0, 1}, {1, 0, 0}, {1, 1, 1}, {0, 1, 0}, {0, 0, 0}, + {0, 0, 0}, {0, 1, 0}, {0, 0, 0}, {0, 1, 1}, {0, 0, 0}}; + + EXPECT_EQ(nShots, checked_syndromes.size()); + EXPECT_EQ(Hz.shape()[0], checked_syndromes[0].size()); + for (size_t i = 0; i < nShots; ++i) { + for (size_t j = 0; j < Hz.shape()[0]; ++j) { + EXPECT_EQ(checked_syndromes[i][j], syndromes.at({i, j})); + } + } + } +} + +TEST(QECCodeTester, checkRepetition) { + { + // must provide distance + EXPECT_THROW(cudaq::qec::get_code("repetition"), std::runtime_error); + } + auto repetition = cudaq::qec::get_code("repetition", {{"distance", 9}}); + + { + auto stabilizers = repetition->get_stabilizers(); + + std::vector actual_stabs; + for (auto &s : stabilizers) + actual_stabs.push_back(s.to_string(false)); + + std::vector expected_strings = { + "ZZIIIIIII", "IZZIIIIII", "IIZZIIIII", "IIIZZIIII", + "IIIIZZIII", "IIIIIZZII", "IIIIIIZZI", "IIIIIIIZZ"}; + + EXPECT_EQ(actual_stabs, expected_strings); + auto parity = repetition->get_parity(); + auto Hx = repetition->get_parity_x(); + auto Hz = repetition->get_parity_z(); + EXPECT_EQ(0, Hx.rank()); + EXPECT_EQ(2, Hz.rank()); + parity.dump(); + std::vector data = { + 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* row 0 */ + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* row 1 */ + 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, /* row 2 */ + 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* row 3 */ + 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* row 4 */ + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* row 5 */ + 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* row 6 */ + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}; /* row 7 */ + std::vector expected_shape{8, 18}; + cudaqx::tensor t(expected_shape); + t.borrow(data.data()); + for (std::size_t i = 0; i < t.shape()[0]; i++) + for (std::size_t j = 0; j < t.shape()[1]; j++) + EXPECT_EQ(t.at({i, j}), parity.at({i, j})); + } + { + cudaqx::tensor observables = + repetition->get_pauli_observables_matrix(); + cudaqx::tensor Lx = repetition->get_observables_x(); + cudaqx::tensor Lz = repetition->get_observables_z(); + + EXPECT_EQ(2, observables.rank()); + EXPECT_EQ(1, observables.shape()[0]); + EXPECT_EQ(18, observables.shape()[1]); + EXPECT_EQ(0, Lx.rank()); + EXPECT_EQ(2, Lz.rank()); + EXPECT_EQ(1, Lz.shape()[0]); + EXPECT_EQ(9, Lz.shape()[1]); + { + std::vector> true_observables = { + {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}; + std::vector> true_Lz = {{1, 0, 0, 0, 0, 0, 0, 0, 0}}; + for (std::size_t i = 0; i < observables.shape()[0]; ++i) + for (std::size_t j = 0; j < observables.shape()[1]; ++j) + EXPECT_EQ(true_observables[i][j], observables.at({i, j})); + + for (std::size_t i = 0; i < Lz.shape()[0]; ++i) + for (std::size_t j = 0; j < Lz.shape()[1]; ++j) + EXPECT_EQ(true_Lz[i][j], Lz.at({i, j})); + } + } + { + auto parity = repetition->get_parity(); + auto parity_z = repetition->get_parity_z(); + int nShots = 10; + int nRounds = 4; + auto [syndromes, data_mz] = + cudaq::qec::sample_memory_circuit(*repetition, nShots, nRounds); + syndromes.dump(); + data_mz.dump(); + EXPECT_EQ(nShots * (nRounds - 1), syndromes.shape()[0]); + EXPECT_EQ(parity.shape()[0], syndromes.shape()[1]); + EXPECT_EQ(nShots, data_mz.shape()[0]); + EXPECT_EQ(parity_z.shape()[1], data_mz.shape()[1]); + // No noise here, should be all zeros + int sum = 0; + for (std::size_t i = 0; i < nShots - 1; i++) + for (std::size_t j = 0; j < parity.shape()[0]; j++) + sum += syndromes.at({i, j}); + + EXPECT_TRUE(sum == 0); + } +} + +// expect |0>, |+> to measure out 0 in respective bases +// expect |1>, |-> to measure out 1 in respective bases +bool noiseless_logical_SPAM_test(const cudaq::qec::code &code, + cudaq::qec::operation statePrep, + uint8_t expected_result) { + cudaqx::tensor Lx = code.get_observables_x(); + cudaqx::tensor Lz = code.get_observables_z(); + + // We measure Z observable in prep0, prep1 experiments + cudaqx::tensor measured_observable(Lz.shape()); + measured_observable.borrow(Lz.data()); + // We measure X observable in prepp, prepm experiments + if (statePrep == cudaq::qec::operation::prepp || + statePrep == cudaq::qec::operation::prepm) { + measured_observable = cudaqx::tensor(Lx.shape()); + measured_observable.borrow(Lx.data()); + } + + int nShots = 10; + // Number of rounds does not matter for noiseless, all should be zero. 
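+  // e.g. a noiseless prep0 run leaves the data readout in the codespace with
+  // logical value 0, so Lz . d^T (mod 2) vanishes for every shot.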
+ int nRounds = 4; + auto [syndromes, d] = + cudaq::qec::sample_memory_circuit(code, statePrep, nShots, nRounds); + syndromes.dump(); + + printf("data:\n"); + d.dump(); + printf("Obs:\n"); + measured_observable.dump(); + cudaqx::tensor logical_measurement = + measured_observable.dot(d.transpose()) % 2; + printf("logical_measurement:\n"); + logical_measurement.dump(); + + // With no noise, each shot should measure out the expected value + for (size_t shot = 0; shot < nShots; ++shot) { + // All codes have only 1 logical qubit for now + for (size_t lQ = 0; lQ < 1; ++lQ) { + if (logical_measurement.at({lQ, shot}) != expected_result) { + printf("shot: %zu, lQ: %zu\n", shot, lQ); + std::cout << +logical_measurement.at({lQ, shot}) << "\n"; + return false; + } + } + } + return true; +} + +TEST(QECCodeTester, checkSteaneSPAM) { + auto steane = cudaq::qec::get_code("steane"); + EXPECT_TRUE( + noiseless_logical_SPAM_test(*steane, cudaq::qec::operation::prep0, 0)); + EXPECT_TRUE( + noiseless_logical_SPAM_test(*steane, cudaq::qec::operation::prep1, 1)); + EXPECT_TRUE( + noiseless_logical_SPAM_test(*steane, cudaq::qec::operation::prepp, 0)); + EXPECT_TRUE( + noiseless_logical_SPAM_test(*steane, cudaq::qec::operation::prepm, 1)); + EXPECT_FALSE( + noiseless_logical_SPAM_test(*steane, cudaq::qec::operation::prep0, 1)); + EXPECT_FALSE( + noiseless_logical_SPAM_test(*steane, cudaq::qec::operation::prep1, 0)); + EXPECT_FALSE( + noiseless_logical_SPAM_test(*steane, cudaq::qec::operation::prepp, 1)); + EXPECT_FALSE( + noiseless_logical_SPAM_test(*steane, cudaq::qec::operation::prepm, 0)); +} + +TEST(QECCodeTester, checkRepetitionSPAM) { + // only Z basis for repetition + auto repetition = cudaq::qec::get_code("repetition", {{"distance", 9}}); + EXPECT_TRUE(noiseless_logical_SPAM_test(*repetition, + cudaq::qec::operation::prep0, 0)); + EXPECT_TRUE(noiseless_logical_SPAM_test(*repetition, + cudaq::qec::operation::prep1, 1)); + EXPECT_FALSE(noiseless_logical_SPAM_test(*repetition, + cudaq::qec::operation::prep0, 1)); + EXPECT_FALSE(noiseless_logical_SPAM_test(*repetition, + cudaq::qec::operation::prep1, 0)); +} diff --git a/libs/solvers/CMakeLists.txt b/libs/solvers/CMakeLists.txt new file mode 100644 index 0000000..4d265bc --- /dev/null +++ b/libs/solvers/CMakeLists.txt @@ -0,0 +1,145 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# We need 3.28 because of the `EXCLUDE_FROM_ALL` in FetchContent_Declare +cmake_minimum_required(VERSION 3.28 FATAL_ERROR) + +# This policy was added in version 3.30 in which calling `FetchContent_Populate()` +# with a single argument (the name of a declared dependency) is deprecated. +# We use the functionality to import the solvers libraries. +if(POLICY CMP0169) + cmake_policy(SET CMP0169 OLD) +endif() + +# Project setup +# ============================================================================== + +add_compile_options(-Wno-attributes) + +# Check if core is built as a standalone project. 
+if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) + project(cudaq-solvers) + set(CUDAQX_SOLVERS_STANDALONE_BUILD TRUE) + + set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + + # Add our Modules to the path + list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/../../cmake/Modules") + + # Include custom CUDA-QX modules + include(CUDA-QX) + + # Helper target to collect python modules + add_custom_target(cudaqx-pymodules) +endif() + +enable_language(Fortran) + +# The following must go after `project(...)` +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED TRUE) +set(CMAKE_POSITION_INDEPENDENT_CODE TRUE) + +set(CUDAQX_SOLVERS_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) +set(CUDAQX_SOLVERS_INCLUDE_DIR ${CUDAQX_SOLVERS_SOURCE_DIR}/include) + +# Options +# ============================================================================== + +option(CUDAQX_SOLVERS_INCLUDE_TESTS + "Generate build targets for the CUDA-QX Solvers unit tests." + ${CUDAQX_INCLUDE_TESTS}) + +option(CUDAQX_SOLVERS_BINDINGS_PYTHON + "Generate build targets for python bindings." + ${CUDAQX_BINDINGS_PYTHON}) + +option(CUDAQX_SOLVERS_INSTALL_PYTHON + "Install python files alongside the library." + ${CUDAQX_INSTALL_PYTHON}) + +# External Dependencies +# ============================================================================== + +include(FetchContent) + +FetchContent_Declare( + cppitertools + GIT_REPOSITORY https://github.com/ryanhaining/cppitertools.git + GIT_TAG master + EXCLUDE_FROM_ALL +) + +# We need version 3.11.1 because that is what CUDA-Q uses. If CUDA-Q updates, +# then we need to remember to update. +FetchContent_Declare( + json + GIT_REPOSITORY https://github.com/nlohmann/json + GIT_TAG v3.11.1 + EXCLUDE_FROM_ALL +) + +FetchContent_MakeAvailable(cppitertools json) + +if (CUDAQX_SOLVERS_STANDALONE_BUILD) + # FIXME for now, we only use library mode + set(CUDAQ_LIBRARY_MODE ON) + find_package(CUDAQ REQUIRED) + + # FIXME + add_subdirectory(../core core_build) +endif() + +# Wheel building setup +# ============================================================================== + +if (SKBUILD) + # When building with scikit, i.e., building wheels, we want all the install + # to be on the package directory. 
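+  # For example, with the settings below headers land under
+  # <wheel>/cudaq_solvers/include and libraries under <wheel>/cudaq_solvers/lib.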
+ set(CMAKE_INSTALL_BINDIR cudaq_solvers/bin) + set(CMAKE_INSTALL_INCLUDEDIR cudaq_solvers/include) + set(CMAKE_INSTALL_LIBDIR cudaq_solvers/lib) +endif() + +# Directory setup +# ============================================================================== + +add_subdirectory(lib) +add_subdirectory(tools) + +if (CUDAQX_SOLVERS_BINDINGS_PYTHON) + add_subdirectory(python) +endif() + +if (CUDAQX_SOLVERS_INCLUDE_TESTS) + add_custom_target(CUDAQXSolversUnitTests) + if (CUDAQX_SOLVERS_STANDALONE_BUILD) + include(CTest) + + add_custom_target(run_tests + COMMAND ${CMAKE_COMMAND} -E env + PYTHONPATH="${CUDAQ_INSTALL_DIR}:${CMAKE_BINARY_DIR}/python" + ${CMAKE_CTEST_COMMAND} --output-on-failure + DEPENDS CUDAQXSolversUnitTests + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + if (CUDAQX_SOLVERS_BINDINGS_PYTHON) + add_custom_target(run_python_tests + COMMAND ${CMAKE_COMMAND} -E env + PYTHONPATH="${CUDAQ_INSTALL_DIR}:${CMAKE_BINARY_DIR}/python" + pytest -v ${CUDAQX_SOLVERS_SOURCE_DIR}/python/tests + DEPENDS cudaqx-pymodules + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + endif() + else() + add_dependencies(CUDAQXUnitTests CUDAQXSolversUnitTests) + endif() + add_subdirectory(unittests) +endif() diff --git a/libs/solvers/README.md b/libs/solvers/README.md new file mode 100644 index 0000000..4df9229 --- /dev/null +++ b/libs/solvers/README.md @@ -0,0 +1,32 @@ +# CUDA-Q Solvers Library + +CUDA-Q Solvers provides GPU-accelerated implementations of common +quantum-classical hybrid algorithms and numerical routines frequently +used in quantum computing applications. The library is designed to +work seamlessly with CUDA-Q quantum programs. + +**Note**: CUDA-Q Solvers is currently only supported on Linux operating systems using +`x86_64` processors. CUDA-Q Solvers does not require a GPU to use, but some +components are GPU-accelerated. + +**Note**: CUDA-Q Solvers will require the presence of `libgfortran`, which is not distributed with the Python wheel, for provided classical optimizers. If `libgfortran` is not installed, you will need to install it via your distribution's package manager. On debian based systems, you can install this with `apt-get install gfortran`. + +## Features + +- Variational quantum eigensolvers (VQE) +- ADAPT-VQE +- Quantum approximate optimization algorithm (QAOA) +- Hamiltonian simulation routines + +## Getting Started + +For detailed documentation, tutorials, and API reference, +visit the [CUDA-Q Solvers Documentation](https://nvidia.github.io/cudaqx/components/solvers/introduction.html). + +## License + +CUDA-Q Solvers is an open source project. The source code is available on +[GitHub][github_link] and licensed under [Apache License +2.0](https://github.com/NVIDIA/cudaqx/blob/main/LICENSE). + +[github_link]: https://github.com/NVIDIA/cudaqx/tree/main/libs/solvers \ No newline at end of file diff --git a/libs/solvers/include/cudaq/solvers/adapt.h b/libs/solvers/include/cudaq/solvers/adapt.h new file mode 100644 index 0000000..4fc5866 --- /dev/null +++ b/libs/solvers/include/cudaq/solvers/adapt.h @@ -0,0 +1,141 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#pragma once + +#include "cudaq/qis/qubit_qis.h" +#include "cudaq/solvers/operators/operator_pool.h" +#include "cudaq/solvers/vqe.h" + +#include + +using namespace cudaqx; + +/** + * @file + * @brief Implementation of the ADAPT-VQE algorithm + * + * This file contains the implementation of the Adaptive Derivative-Assembled + * Pseudo-Trotter Variational Quantum Eigensolver (ADAPT-VQE) algorithm. + * + * @details + * ADAPT-VQE is an advanced quantum algorithm designed to improve upon the + * standard Variational Quantum Eigensolver (VQE) approach for solving quantum + * chemistry problems. It addresses key challenges faced by traditional VQE + * methods by dynamically constructing a problem-specific ansatz, offering + * several advantages: + * + * - Faster convergence: Adaptively selects the most impactful operators, + * potentially achieving convergence more quickly than fixed-ansatz VQE + * methods. + * - Enhanced efficiency: Builds a compact ansatz tailored to the specific + * problem, potentially reducing overall circuit depth. + * - Increased accuracy: Has demonstrated the ability to outperform standard + * VQE approaches in terms of accuracy for certain molecular systems. + * - Adaptability: Automatically adjusts to different molecular systems without + * requiring significant user intervention or prior knowledge of the system's + * electronic structure. + * + * The ADAPT-VQE algorithm works by iteratively growing the quantum circuit + * ansatz, selecting operators from a predefined pool based on their gradient + * magnitudes. This adaptive approach allows the algorithm to focus + * computational resources on the most relevant aspects of the problem, + * potentially leading to more efficient and accurate simulations of molecular + * systems on quantum computers. 
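+ *
+ * A minimal usage sketch of the adapt_vqe entry point declared below
+ * (illustrative only): it assumes a Hamiltonian `H` of type cudaq::spin_op,
+ * an operator pool `pool` (std::vector of cudaq::spin_op) generated
+ * elsewhere, and a CUDA-Q kernel `initState` that prepares the reference
+ * state on a qvector.
+ *
+ * @code{.cpp}
+ * // Runs ADAPT-VQE with the default optimizer; the overloads below also
+ * // accept a custom optimizer and a gradient strategy.
+ * auto [energy, thetas, selectedOps] =
+ *     cudaq::solvers::adapt_vqe(initState, H, pool);
+ * @endcode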
+ */ + +namespace cudaq::solvers { + +namespace adapt { + +/// Result type for ADAPT-VQE algorithm +/// @return Tuple containing: +/// - Final energy (double) +/// - Optimized parameters (vector of doubles) +/// - Selected operators (vector of cudaq::spin_op) +using result = + std::tuple, std::vector>; + +/// Abstract base class for ADAPT-VQE implementation +class adapt_impl : public extension_point { +public: + /// Run the ADAPT-VQE algorithm + /// @param initState Initial state preparation quantum kernel + /// @param H Hamiltonian operator + /// @param pool Pool of operators + /// @param optimizer Optimization algorithm + /// @param gradient Gradient calculation method + /// @param options Additional options for the algorithm + /// @return Result of the ADAPT-VQE algorithm + virtual result run(const cudaq::qkernel &)> &initState, + const spin_op &H, const std::vector &pool, + const optim::optimizer &optimizer, + const std::string &gradient, + const heterogeneous_map options) = 0; + + /// Virtual destructor + virtual ~adapt_impl() {} +}; + +} // namespace adapt + +/// @brief Run ADAPT-VQE algorithm with default optimizer +/// @param initialState Initial state preparation quantum kernel +/// @param H Hamiltonian operator +/// @param poolList Pool of operators +/// @param options Additional options for the algorithm +/// @return Result of the ADAPT-VQE algorithm +static inline adapt::result +adapt_vqe(const cudaq::qkernel &)> &initialState, + const spin_op &H, const std::vector &poolList, + const heterogeneous_map options = heterogeneous_map()) { + auto &platform = cudaq::get_platform(); + auto impl = + adapt::adapt_impl::get(platform.is_simulator() ? "simulator" : "remote"); + auto opt = optim::optimizer::get("cobyla"); + return impl->run(initialState, H, poolList, *opt, "", options); +} + +/// @brief Run ADAPT-VQE algorithm with custom optimizer +/// @param initialState Initial state preparation quantum kernel +/// @param H Hamiltonian operator +/// @param poolList Pool of operators +/// @param optimizer Custom optimization algorithm +/// @param options Additional options for the algorithm +/// @return Result of the ADAPT-VQE algorithm +static inline adapt::result +adapt_vqe(const cudaq::qkernel &)> &initialState, + const spin_op &H, const std::vector &poolList, + const optim::optimizer &optimizer, + const heterogeneous_map options = heterogeneous_map()) { + auto &platform = cudaq::get_platform(); + auto impl = + adapt::adapt_impl::get(platform.is_simulator() ? "simulator" : "remote"); + return impl->run(initialState, H, poolList, optimizer, "", options); +} + +/// @brief Run ADAPT-VQE algorithm with custom optimizer and gradient method +/// @param initialState Initial state preparation quantum kernel +/// @param H Hamiltonian operator +/// @param poolList Pool of operators +/// @param optimizer Custom optimization algorithm +/// @param gradient Gradient calculation method +/// @param options Additional options for the algorithm +/// @return Result of the ADAPT-VQE algorithm +static inline adapt::result +adapt_vqe(const cudaq::qkernel &)> &initialState, + const spin_op &H, const std::vector &poolList, + const optim::optimizer &optimizer, const std::string &gradient, + const heterogeneous_map options = heterogeneous_map()) { + auto &platform = cudaq::get_platform(); + auto impl = + adapt::adapt_impl::get(platform.is_simulator() ? 
"simulator" : "remote"); + return impl->run(initialState, H, poolList, optimizer, gradient, options); +} + +} // namespace cudaq::solvers \ No newline at end of file diff --git a/libs/solvers/include/cudaq/solvers/adapt/adapt_simulator.h b/libs/solvers/include/cudaq/solvers/adapt/adapt_simulator.h new file mode 100644 index 0000000..1e1f403 --- /dev/null +++ b/libs/solvers/include/cudaq/solvers/adapt/adapt_simulator.h @@ -0,0 +1,49 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once + +#include "../adapt.h" + +using namespace cudaqx; +namespace cudaq::solvers::adapt { + +/// @brief Simulator implementation of the ADAPT-VQE algorithm +/// +/// This class provides a simulator-specific implementation of the ADAPT-VQE +/// algorithm. It is designed to run the algorithm on quantum simulators rather +/// than actual quantum hardware. It attempts to distribute the work with MPI +/// if possible. +class simulator : public adapt_impl { +public: + /// @brief Run the ADAPT-VQE algorithm on a simulator + /// @param initState Function to initialize the quantum state + /// @param H Hamiltonian operator + /// @param pool Pool of operators + /// @param optimizer Optimization algorithm (unused in this implementation) + /// @param gradient Gradient calculation method (optional) + /// @param options Additional options for the algorithm + /// @return Energy value obtained from the ADAPT-VQE algorithm + /// @note This implementation is specific to quantum simulators + result run(const cudaq::qkernel &)> &initState, + const spin_op &H, const std::vector &pool, + const optim::optimizer &optimizer, const std::string &gradient, + const heterogeneous_map options) override; + + /// @brief Creator function for the simulator implementation + /// @details This function is used by the extension point mechanism to create + /// instances of the simulator class. + CUDAQ_EXTENSION_CREATOR_FUNCTION(adapt_impl, simulator); + + virtual ~simulator() {} +}; + +/// @brief Register the simulator type with the CUDA-Q framework +CUDAQ_REGISTER_TYPE(simulator) + +} // namespace cudaq::solvers::adapt \ No newline at end of file diff --git a/libs/solvers/include/cudaq/solvers/observe_gradient.h b/libs/solvers/include/cudaq/solvers/observe_gradient.h new file mode 100644 index 0000000..9edc2d9 --- /dev/null +++ b/libs/solvers/include/cudaq/solvers/observe_gradient.h @@ -0,0 +1,149 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#pragma once + +#include "cudaq/algorithms/observe.h" +#include "optimizer.h" + +using namespace cudaq; +using namespace cudaqx; + +namespace cudaq { + +/// Parameterized kernels for observe optimization must take on this +/// signature - take a vector of double and return void. 
+using ParameterizedKernel = std::function)>; + +/// @brief Observe executions can be used for function +/// evaluation or gradient evaluation. This enumeration +/// allows us to distinguish these execution types. +enum class observe_execution_type { function, gradient }; + +/// @brief Storage type for a single observe iteration. Keeps +/// track of the parameters evaluated at, the result of the +/// observation (shots data and expectations), and the type of +/// the execution. +struct observe_iteration { + std::vector parameters; + cudaq::observe_result result; + observe_execution_type type; +}; + +/// @brief The observe_gradient provides an extension point +/// for developers to inject custom gradient strategies to be +/// used in global optimization of expectation values in +/// typical quantum variational tasks. +class observe_gradient + : public extension_point { +protected: + /// The spin operator used in computing expectation values + /// via `cudaq::observe` + spin_op op; + + /// The parameterized CUDA Quantum kernel function. + ParameterizedKernel quantumFunction; + + /// The current batch expectation value to be computed + std::size_t batchIdx = 0; + + /// The total number of expectation values required + std::size_t numRequiredExpectations = 0; + + /// @brief The number of shots for expectation value computation + int shots = -1; + + /// @brief Compute the expectation value at the given parameters. + double expectation(std::vector &x) { + auto &platform = cudaq::get_platform(); + std::string kernelName = + "auto_gradient_kernel_calc_" + std::to_string(batchIdx); + auto result = cudaq::details::runObservation( + [&]() { quantumFunction(x); }, const_cast(op), platform, + shots, kernelName, 0, nullptr, batchIdx, numRequiredExpectations); + data.emplace_back(x, result.value(), observe_execution_type::gradient); + batchIdx++; + return result.value().expectation(); + } + + /// @brief Compute the gradient at the given multi-dimensional point. + /// The gradient vector is provided as a non-const reference, subtypes + /// therefore should update the vector in place. + virtual void calculateGradient(const std::vector &x, + std::vector &dx, + double expectationAtX) = 0; + + /// @brief Return the number of expectation computations required to + /// compute the gradient, e.g. 2 for a single parameter parameter-shift rule. + virtual std::size_t + getRequiredNumExpectationComputations(const std::vector &x) = 0; + +public: + observe_gradient() = default; + + /// The constructor + observe_gradient(const ParameterizedKernel &functor, const spin_op &op) + : op(op), quantumFunction(functor) {} + + /// @brief Storage for all data produced during gradient computation. 
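+  /// Each entry is an observe_iteration recording the parameters that were
+  /// evaluated, the resulting cudaq::observe_result, and whether it came from
+  /// a function or a gradient evaluation. Illustrative inspection (assuming
+  /// an observe_gradient instance named `grad`):
+  /// @code{.cpp}
+  /// for (auto &it : grad.data)
+  ///   if (it.type == observe_execution_type::gradient)
+  ///     printf("E = %lf\n", it.result.expectation());
+  /// @endcode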
+ std::vector data; + + template + static std::unique_ptr + get(const std::string &name, NonStdKernel &&kernel, const spin_op &op, + ArgTranslator &&translator) { + auto ®istry = get_registry(); + auto iter = registry.find(name); + if (iter == registry.end()) + throw std::runtime_error("Cannot find extension with name = " + name); + + return iter->second( + [&](std::vector x) { + std::apply([&](auto &&...arg) { return kernel(arg...); }, + translator(x)); + }, + op); + } + + static std::unique_ptr + get(const std::string &name, const ParameterizedKernel &kernel, + const spin_op &op) { + return extension_point::get(name, kernel, op); + } + + void set_spin_op(const spin_op in_op) { op = in_op; } + void set_parameterized_kernel(const ParameterizedKernel kernel) { + quantumFunction = kernel; + } + + /// @brief Compute the gradient at the given multi-dimensional point. + /// The gradient vector is provided as a non-const reference, subtypes + /// therefore should update the vector in place. This delegates to specific + /// subtype implementations. It tracks the number of expectations that + /// need to be computed, and executes them as a batch (e.g. allocates the + /// state once, zeros the state between each iteration instead of + /// deallocating). + void compute(const std::vector &x, std::vector &dx, + double expectationAtX, int inShots = -1) { + if (!quantumFunction) + throw std::runtime_error("[observe_gradient] kernel function not set."); + + shots = inShots; + numRequiredExpectations = getRequiredNumExpectationComputations(x); + calculateGradient(x, dx, expectationAtX); + // reset + numRequiredExpectations = 0; + batchIdx = 0; + } + + virtual ~observe_gradient() {} +}; + +} // namespace cudaq \ No newline at end of file diff --git a/libs/solvers/include/cudaq/solvers/observe_gradients/central_difference.h b/libs/solvers/include/cudaq/solvers/observe_gradients/central_difference.h new file mode 100644 index 0000000..fd471a0 --- /dev/null +++ b/libs/solvers/include/cudaq/solvers/observe_gradients/central_difference.h @@ -0,0 +1,37 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#pragma once + +#include "cudaq/solvers/observe_gradient.h" + +namespace cudaq { + +class central_difference : public observe_gradient { +protected: + std::size_t + getRequiredNumExpectationComputations(const std::vector &x) override; + +public: + double step = 1e-4; + using observe_gradient::observe_gradient; + + void calculateGradient(const std::vector &x, std::vector &dx, + double exp_h) override; + + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( + central_difference, + static std::unique_ptr create( + const ParameterizedKernel &functor, const spin_op &op) { + return std::make_unique(functor, op); + }) +}; + +CUDAQ_REGISTER_TYPE(central_difference) + +} // namespace cudaq \ No newline at end of file diff --git a/libs/solvers/include/cudaq/solvers/observe_gradients/forward_difference.h b/libs/solvers/include/cudaq/solvers/observe_gradients/forward_difference.h new file mode 100644 index 0000000..f8845ce --- /dev/null +++ b/libs/solvers/include/cudaq/solvers/observe_gradients/forward_difference.h @@ -0,0 +1,34 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once + +#include "cudaq/solvers/observe_gradient.h" + +namespace cudaq { + +class forward_difference : public observe_gradient { +protected: + std::size_t + getRequiredNumExpectationComputations(const std::vector &x) override; + +public: + double step = 1e-4; + using observe_gradient::observe_gradient; + + void calculateGradient(const std::vector &x, std::vector &dx, + double exp_h) override; + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( + forward_difference, + static std::unique_ptr create( + const ParameterizedKernel &functor, const spin_op &op) { + return std::make_unique(functor, op); + }) +}; +CUDAQ_REGISTER_TYPE(forward_difference) +} // namespace cudaq \ No newline at end of file diff --git a/libs/solvers/include/cudaq/solvers/observe_gradients/parameter_shift.h b/libs/solvers/include/cudaq/solvers/observe_gradients/parameter_shift.h new file mode 100644 index 0000000..2ca25a0 --- /dev/null +++ b/libs/solvers/include/cudaq/solvers/observe_gradients/parameter_shift.h @@ -0,0 +1,34 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#pragma once + +#include "cudaq/solvers/observe_gradient.h" + +namespace cudaq { + +class parameter_shift : public observe_gradient { +protected: + std::size_t + getRequiredNumExpectationComputations(const std::vector &x) override; + +public: + double shiftScalar = 0.5; + using observe_gradient::observe_gradient; + + void calculateGradient(const std::vector &x, std::vector &dx, + double exp_h) override; + CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( + parameter_shift, + static std::unique_ptr create( + const ParameterizedKernel &functor, const spin_op &op) { + return std::make_unique(functor, op); + }) +}; +CUDAQ_REGISTER_TYPE(parameter_shift) +} // namespace cudaq \ No newline at end of file diff --git a/libs/solvers/include/cudaq/solvers/operators.h b/libs/solvers/include/cudaq/solvers/operators.h new file mode 100644 index 0000000..0f8c26d --- /dev/null +++ b/libs/solvers/include/cudaq/solvers/operators.h @@ -0,0 +1,14 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once + +#include "operators/graph/clique.h" +#include "operators/graph/max_cut.h" +#include "operators/molecule.h" +#include "operators/operator_pool.h" diff --git a/libs/solvers/include/cudaq/solvers/operators/graph/clique.h b/libs/solvers/include/cudaq/solvers/operators/graph/clique.h new file mode 100644 index 0000000..420a5fc --- /dev/null +++ b/libs/solvers/include/cudaq/solvers/operators/graph/clique.h @@ -0,0 +1,43 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once + +#include "cuda-qx/core/graph.h" +#include "cudaq/spin_op.h" + +namespace cudaq::solvers { + +/// @brief Generates a quantum Hamiltonian for the Maximum Clique problem +/// +/// This function constructs a spin Hamiltonian whose ground state corresponds +/// to the maximum clique in the input graph. The Hamiltonian consists of two +/// terms: +/// 1. A node term that rewards including nodes in the clique +/// 2. 
A penalty term that enforces the clique constraint (all selected nodes +/// must be connected) +/// +/// The Hamiltonian takes the form: +/// H = Σ_i w_i/2(Z_i - I) + p/4 Σ_{(i,j) ∉ E} (Z_iZ_j - Z_i - Z_j + I) +/// where: +/// - w_i is the weight of node i +/// - p is the penalty strength +/// - E is the set of edges in the graph +/// +/// @param graph The input graph to find the maximum clique in +/// @param penalty The penalty strength for violating clique constraints +/// (default: 4.0) +/// @return cudaq::spin_op The quantum Hamiltonian for the Maximum Clique +/// problem +/// +/// @note The penalty parameter should be chosen large enough to ensure that +/// invalid +/// solutions (non-cliques) have higher energy than valid solutions +cudaq::spin_op get_clique_hamiltonian(const cudaqx::graph &graph, + double penalty = 4.0); +} // namespace cudaq::solvers \ No newline at end of file diff --git a/libs/solvers/include/cudaq/solvers/operators/graph/max_cut.h b/libs/solvers/include/cudaq/solvers/operators/graph/max_cut.h new file mode 100644 index 0000000..78ca952 --- /dev/null +++ b/libs/solvers/include/cudaq/solvers/operators/graph/max_cut.h @@ -0,0 +1,48 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once + +#include "cuda-qx/core/graph.h" +#include "cudaq/spin_op.h" + +namespace cudaq::solvers { + +/// @brief Generates a quantum Hamiltonian for the Maximum Cut problem +/// +/// This function constructs a spin Hamiltonian whose ground state corresponds +/// to the maximum cut in the input graph. The Hamiltonian is constructed such +/// that its ground state represents a partition of the graph's vertices into +/// two sets that maximizes the sum of weights of edges crossing between the +/// sets. 
+///
+/// The Hamiltonian takes the form:
+/// H = Σ_{(i,j)∈E} w_{ij}/2(Z_iZ_j - I)
+/// where:
+/// - E is the set of edges in the graph
+/// - w_{ij} is the weight of edge (i,j)
+/// - Z_i is the Pauli Z operator on qubit i
+/// - I is the identity operator
+///
+/// For an unweighted graph, all w_{ij} = 1.0
+///
+/// The resulting Hamiltonian has the following properties:
+/// - Each qubit represents a vertex in the graph
+/// - Z_i = +1 assigns vertex i to one partition
+/// - Z_i = -1 assigns vertex i to the other partition
+/// - The ground state energy corresponds to the negative of the maximum cut
+///   value
+///
+/// @param graph The input graph to find the maximum cut in
+/// @return cudaq::spin_op The quantum Hamiltonian for the MaxCut problem
+///
+/// @note The Hamiltonian is constructed to be symmetric under global spin flip,
+///       reflecting the symmetry of the MaxCut problem under swapping the
+///       partitions
+cudaq::spin_op get_maxcut_hamiltonian(const cudaqx::graph &graph);
+} // namespace cudaq::solvers
\ No newline at end of file
diff --git a/libs/solvers/include/cudaq/solvers/operators/molecule.h b/libs/solvers/include/cudaq/solvers/operators/molecule.h
new file mode 100644
index 0000000..a1b3967
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/operators/molecule.h
@@ -0,0 +1,203 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+
+#pragma once
+
+#include "cudaq/spin_op.h"
+
+#include "cuda-qx/core/tensor.h"
+
+#include <optional>
+#include <unordered_map>
+
+using namespace cudaqx;
+
+namespace cudaq::solvers {
+
+/// @struct atom
+/// @brief Represents an atom with a name and 3D coordinates
+struct atom {
+  const std::string name;
+  const double coordinates[3];
+};
+
+/// @class molecular_geometry
+/// @brief Represents the geometry of a molecule as a collection of atoms
+class molecular_geometry {
+private:
+  std::vector<atom> atoms;
+
+public:
+  /// @brief Constructor using initializer list
+  /// @param args Initializer list of atoms
+  molecular_geometry(std::initializer_list<atom> &&args)
+      : atoms(args.begin(), args.end()) {}
+
+  /// @brief Constructor using vector of atoms
+  /// @param args Vector of atoms
+  molecular_geometry(const std::vector<atom> &args) : atoms(args) {}
+
+  /// @brief Get the number of atoms in the molecule
+  /// @return Size of the molecule
+  std::size_t size() const { return atoms.size(); }
+
+  /// @brief Get iterator to the beginning of atoms
+  /// @return Iterator to the beginning
+  auto begin() { return atoms.begin(); }
+
+  /// @brief Get iterator to the end of atoms
+  /// @return Iterator to the end
+  auto end() { return atoms.end(); }
+
+  /// @brief Get const iterator to the beginning of atoms
+  /// @return Const iterator to the beginning
+  auto begin() const { return atoms.cbegin(); };
+
+  /// @brief Get const iterator to the end of atoms
+  /// @return Const iterator to the end
+  auto end() const { return atoms.cend(); }
+
+  /// @brief Get the name of the molecule
+  /// @return Name of the molecule
+  std::string name() const;
+
+  /// @brief Create a molecular geometry from an XYZ file
+  /// @param xyzFile Path to the XYZ file
+  /// @return Molecular geometry object
+  static molecular_geometry from_xyz(const std::string &xyzFile);
+};
+
+/// @struct molecular_hamiltonian
+/// @brief Represents a molecular Hamiltonian in both spin and fermionic forms
+struct molecular_hamiltonian {
+  /// @brief The molecular Hamiltonian represented as a spin operator
+  cudaq::spin_op hamiltonian;
+
+  /// @brief One-electron integrals tensor
+  /// @details Represents the one-body terms in the second quantized Hamiltonian
+  cudaqx::tensor<> hpq;
+
+  /// @brief Two-electron integrals tensor
+  /// @details Represents the two-body terms in the second quantized Hamiltonian
+  cudaqx::tensor<> hpqrs;
+
+  /// @brief Number of electrons in the molecule
+  std::size_t n_electrons;
+
+  /// @brief Number of orbitals (or spatial orbitals) in the basis set
+  std::size_t n_orbitals;
+
+  /// @brief Map of various energy contributions
+  /// @details Keys may include "nuclear_repulsion", "hf", "mp2", "ccsd", etc.
+  std::unordered_map<std::string, double> energies;
+};
+
+/// @struct molecule_options
+/// @brief Options for molecule creation and calculation
+struct molecule_options {
+  /// @brief Driver for the quantum chemistry calculations
+  /// default "RESTPySCFDriver"
+  std::string driver = "RESTPySCFDriver";
+
+  /// @brief Method for mapping fermionic operators to qubit operators
+  /// default "jordan_wigner"
+  std::string fermion_to_spin = "jordan_wigner";
+
+  /// @brief Type of molecular system
+  /// default "gas_phase"
+  std::string type = "gas_phase";
+
+  /// @brief Whether to use symmetry in calculations
+  /// default false
+  bool symmetry = false;
+
+  /// @brief Amount of memory to allocate for calculations (in MB)
+  /// default 4000.0
+  double memory = 4000.;
+
+  /// @brief Maximum number of SCF cycles
+  /// default 100
+  std::size_t cycles = 100;
+
+  /// @brief Initial guess method for SCF calculations
+  /// default "minao"
+  std::string initguess = "minao";
+
+  /// @brief Whether to use unrestricted calculations
+  /// default false
+  bool UR = false;
+
+  /// @brief Number of electrons in the active space for CAS calculations
+  /// default std::nullopt (not set)
+  std::optional<std::size_t> nele_cas = std::nullopt;
+
+  /// @brief Number of orbitals in the active space for CAS calculations
+  /// default std::nullopt (not set)
+  std::optional<std::size_t> norb_cas = std::nullopt;
+
+  /// @brief Whether to perform MP2 calculations
+  /// default false
+  bool MP2 = false;
+
+  /// @brief Whether to use natural orbitals
+  /// default false
+  bool natorb = false;
+
+  /// @brief Whether to perform CASCI calculations
+  /// default false
+  bool casci = false;
+
+  /// @brief Whether to perform CCSD calculations
+  /// default false
+  bool ccsd = false;
+
+  /// @brief Whether to perform CASSCF calculations
+  /// default false
+  bool casscf = false;
+
+  /// @brief Whether to use natural orbitals for integrals
+  /// default false
+  bool integrals_natorb = false;
+
+  /// @brief Whether to use CASSCF orbitals for integrals
+  /// default false
+  bool integrals_casscf = false;
+
+  /// @brief Path to the potential file (if applicable)
+  /// default std::nullopt (not set)
+  std::optional<std::string> potfile = std::nullopt;
+
+  /// @brief Whether to enable verbose output
+  /// default false
+  bool verbose = false;
+
+  /// @brief Dump the options to output
+  void dump();
+};
+
+/// @brief Create a molecular Hamiltonian
+/// @param geometry Molecular geometry
+/// @param basis Basis set
+/// @param spin Spin of the molecule
+/// @param charge Charge of the molecule
+/// @param options Molecule options
+/// @return Molecular Hamiltonian
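+///
+/// A minimal illustrative sketch (the geometry, basis set, and options shown
+/// here are placeholder values, and an external chemistry driver such as the
+/// bundled PySCF REST driver must be reachable at runtime):
+/// @code{.cpp}
+/// cudaq::solvers::molecular_geometry geometry{{"H", {0., 0., 0.}},
+///                                             {"H", {0., 0., 0.7474}}};
+/// auto molecule = cudaq::solvers::create_molecule(
+///     geometry, "sto-3g", /*spin=*/0, /*charge=*/0, {.verbose = true});
+/// cudaq::spin_op H = molecule.hamiltonian;
+/// @endcode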
+molecular_hamiltonian
+create_molecule(const molecular_geometry &geometry, const std::string &basis,
+                int spin, int charge,
+                molecule_options options = molecule_options());
+
+/// @brief Create a one-particle operator
+/// @param numQubits Number of qubits
+/// @param p First orbital index
+/// @param q Second orbital index
+/// @param fermionCompiler Fermion-to-qubit mapping method
+/// @return One-particle operator as a spin operator
+cudaq::spin_op
+one_particle_op(std::size_t numQubits, std::size_t p, std::size_t q,
+                const std::string fermionCompiler = "jordan_wigner");
+} // namespace cudaq::solvers
\ No newline at end of file
diff --git a/libs/solvers/include/cudaq/solvers/operators/molecule/fermion_compiler.h b/libs/solvers/include/cudaq/solvers/operators/molecule/fermion_compiler.h
new file mode 100644
index 0000000..337f157
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/operators/molecule/fermion_compiler.h
@@ -0,0 +1,32 @@
+/*******************************************************************************
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+
+#pragma once
+
+#include "cuda-qx/core/extension_point.h"
+#include "cuda-qx/core/tensor.h"
+
+#include "cudaq/spin_op.h"
+
+namespace cudaq::solvers {
+
+/// @brief The `fermion_compiler` type serves as a base class defining
+/// an interface for clients to map fermionic molecular operators to
+/// `cudaq::spin_op` instances. The fermionic operator is represented
+/// via its one body and two body electron overlap integrals.
+class fermion_compiler : public cudaqx::extension_point<fermion_compiler> {
+public:
+  /// @brief Given a fermionic representation of an operator
+  /// generate an equivalent operator on spins.
+  virtual cudaq::spin_op generate(const double constant,
+                                  const cudaqx::tensor<> &hpq,
+                                  const cudaqx::tensor<> &hpqrs) = 0;
+  virtual ~fermion_compiler() {}
+};
+
+} // namespace cudaq::solvers
\ No newline at end of file
diff --git a/libs/solvers/include/cudaq/solvers/operators/molecule/fermion_compilers/jordan_wigner.h b/libs/solvers/include/cudaq/solvers/operators/molecule/fermion_compilers/jordan_wigner.h
new file mode 100644
index 0000000..3288785
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/operators/molecule/fermion_compilers/jordan_wigner.h
@@ -0,0 +1,23 @@
+/*******************************************************************************
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+#pragma once
+
+#include "cudaq/solvers/operators/molecule/fermion_compiler.h"
+
+namespace cudaq::solvers {
+/// @brief Map fermionic operators to spin operators via the
+/// Jordan-Wigner transformation.
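+///
+/// A minimal illustrative sketch of retrieving the compiler through the
+/// extension-point machinery (the integral tensors here are empty
+/// placeholders that a real caller would fill from a chemistry driver):
+/// @code{.cpp}
+/// cudaqx::tensor<> hpq, hpqrs;
+/// auto mapper = cudaq::solvers::fermion_compiler::get("jordan_wigner");
+/// cudaq::spin_op spinH = mapper->generate(/*constant=*/0.0, hpq, hpqrs);
+/// @endcode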
+class jordan_wigner : public fermion_compiler {
+public:
+  cudaq::spin_op generate(const double constant, const cudaqx::tensor<> &hpq,
+                          const cudaqx::tensor<> &hpqrs) override;
+
+  CUDAQ_EXTENSION_CREATOR_FUNCTION(fermion_compiler, jordan_wigner)
+};
+CUDAQ_REGISTER_TYPE(jordan_wigner)
+} // namespace cudaq::solvers
diff --git a/libs/solvers/include/cudaq/solvers/operators/molecule/molecule_package_driver.h b/libs/solvers/include/cudaq/solvers/operators/molecule/molecule_package_driver.h
new file mode 100644
index 0000000..116dbfb
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/operators/molecule/molecule_package_driver.h
@@ -0,0 +1,43 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+
+#pragma once
+
+#include "cuda-qx/core/extension_point.h"
+#include "cuda-qx/core/tear_down.h"
+
+#include "cudaq/solvers/operators/molecule.h"
+
+namespace cudaq::solvers {
+
+/// @brief MoleculePackageDriver provides an extensible interface for
+/// generating molecular Hamiltonians and associated metadata.
+class MoleculePackageDriver : public extension_point<MoleculePackageDriver> {
+public:
+  /// @brief Return a `molecular_hamiltonian` described by the given
+  /// geometry, basis set, spin, and charge. Optionally
+  /// restrict the active space.
+  virtual molecular_hamiltonian
+  createMolecule(const molecular_geometry &geometry, const std::string &basis,
+                 int spin, int charge,
+                 molecule_options options = molecule_options()) = 0;
+
+  /// @brief Return true if this driver is available.
+  virtual bool is_available() const { return true; }
+
+  /// @brief In the case that this service is not available,
+  /// make it available and return any required application shutdown
+  /// routines as a new tear_down instance.
+  virtual std::unique_ptr<tear_down> make_available() const = 0;
+
+  /// Virtual destructor needed when deleting an instance of a derived class
+  /// via a pointer to the base class.
+  virtual ~MoleculePackageDriver(){};
+};
+
+} // namespace cudaq::solvers
\ No newline at end of file
diff --git a/libs/solvers/include/cudaq/solvers/operators/operator_pool.h b/libs/solvers/include/cudaq/solvers/operators/operator_pool.h
new file mode 100644
index 0000000..4d067a5
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/operators/operator_pool.h
@@ -0,0 +1,45 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "cuda-qx/core/extension_point.h"
+#include "cuda-qx/core/heterogeneous_map.h"
+#include "cudaq/spin_op.h"
+
+using namespace cudaqx;
+
+namespace cudaq::solvers {
+
+/// @brief Interface for generating quantum operator pools used in quantum
+/// algorithms.
+/// @details This class extends the extension_point template, allowing for
+/// runtime extensibility.
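+///
+/// A minimal illustrative sketch (the pool name "uccsd" is registered
+/// elsewhere in this library; the configuration keys "num-qubits" and
+/// "num-electrons" are examples and depend on the concrete pool
+/// implementation):
+/// @code{.cpp}
+/// cudaqx::heterogeneous_map config;
+/// config.insert("num-qubits", 4);
+/// config.insert("num-electrons", 2);
+/// auto pool = cudaq::solvers::operator_pool::get("uccsd");
+/// std::vector<cudaq::spin_op> ops = pool->generate(config);
+/// @endcode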
+class operator_pool : public extension_point<operator_pool> {
+public:
+  /// @brief Default constructor.
+  operator_pool() = default;
+
+  /// @brief Virtual destructor to ensure proper cleanup of derived classes.
+  virtual ~operator_pool() {}
+
+  /// @brief Generate a vector of spin operators based on the provided
+  /// configuration.
+  /// @param config A heterogeneous map containing configuration parameters for
+  /// operator generation.
+  /// @return A vector of cudaq::spin_op objects representing the generated
+  /// operator pool.
+  virtual std::vector<cudaq::spin_op>
+  generate(const heterogeneous_map &config) const = 0;
+};
+
+} // namespace cudaq::solvers
\ No newline at end of file
diff --git a/libs/solvers/include/cudaq/solvers/operators/operator_pools/qaoa_operator_pool.h b/libs/solvers/include/cudaq/solvers/operators/operator_pools/qaoa_operator_pool.h
new file mode 100644
index 0000000..6722438
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/operators/operator_pools/qaoa_operator_pool.h
@@ -0,0 +1,27 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+
+#pragma once
+
+#include "../operator_pool.h"
+
+namespace cudaq::solvers {
+
+/// @brief Operator pool of QAOA-style mixer operators, registered under the
+/// name "qaoa".
+class qaoa_pool : public operator_pool {
+public:
+  std::vector<cudaq::spin_op>
+  generate(const heterogeneous_map &config) const override;
+  CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION_WITH_NAME(
+      qaoa_pool, "qaoa", static std::unique_ptr<operator_pool> create() {
+        return std::make_unique<qaoa_pool>();
+      })
+};
+CUDAQ_REGISTER_TYPE(qaoa_pool)
+
+} // namespace cudaq::solvers
diff --git a/libs/solvers/include/cudaq/solvers/operators/operator_pools/spin_complement_gsd.h b/libs/solvers/include/cudaq/solvers/operators/operator_pools/spin_complement_gsd.h
new file mode 100644
index 0000000..1d2c091
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/operators/operator_pools/spin_complement_gsd.h
@@ -0,0 +1,27 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+
+#pragma once
+
+#include "../operator_pool.h"
+
+namespace cudaq::solvers {
+
+// adapted from
+// https://github.com/mayhallgroup/adapt-vqe/blob/master/src/operator_pools.py
+
+/// @brief Spin-complemented generalized singles and doubles (GSD) excitation
+/// operator pool.
+class spin_complement_gsd : public operator_pool {
+
+public:
+  std::vector<cudaq::spin_op>
+  generate(const heterogeneous_map &config) const override;
+  CUDAQ_EXTENSION_CREATOR_FUNCTION(operator_pool, spin_complement_gsd)
+};
+CUDAQ_REGISTER_TYPE(spin_complement_gsd)
+} // namespace cudaq::solvers
\ No newline at end of file
diff --git a/libs/solvers/include/cudaq/solvers/operators/operator_pools/uccsd_operator_pool.h b/libs/solvers/include/cudaq/solvers/operators/operator_pools/uccsd_operator_pool.h
new file mode 100644
index 0000000..02387b1
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/operators/operator_pools/uccsd_operator_pool.h
@@ -0,0 +1,25 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+
+#pragma once
+
+#include "../operator_pool.h"
+
+namespace cudaq::solvers {
+
+/// @brief UCCSD excitation operator pool.
+class uccsd : public operator_pool {
+
+public:
+  std::vector<cudaq::spin_op>
+  generate(const heterogeneous_map &config) const override;
+  CUDAQ_EXTENSION_CREATOR_FUNCTION(operator_pool, uccsd)
+};
+CUDAQ_REGISTER_TYPE(uccsd)
+
+} // namespace cudaq::solvers
diff --git a/libs/solvers/include/cudaq/solvers/optimizer.h b/libs/solvers/include/cudaq/solvers/optimizer.h
new file mode 100644
index 0000000..f174a1c
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/optimizer.h
@@ -0,0 +1,129 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+
+#pragma once
+
+#include <functional>
+#include <vector>
+
+#include "cuda-qx/core/extension_point.h"
+#include "cuda-qx/core/heterogeneous_map.h"
+
+using namespace cudaqx;
+
+namespace cudaq::optim {
+
+/// Typedef modeling the result of an optimization strategy,
+/// a double representing the optimal value and the corresponding
+/// optimal parameters.
+using optimization_result = std::tuple<double, std::vector<double>>;
+
+/// An optimizable_function wraps a user-provided objective function
+/// to be optimized.
+class optimizable_function {
+private:
+  // Useful typedefs
+  using NoGradientSignature =
+      std::function<double(const std::vector<double> &)>;
+  using GradientSignature =
+      std::function<double(const std::vector<double> &, std::vector<double> &)>;
+
+  // The function we are optimizing
+  GradientSignature _opt_func;
+  bool _providesGradients = true;
+
+public:
+  optimizable_function() = default;
+  optimizable_function &operator=(const optimizable_function &other) = default;
+
+  template <typename Callable>
+  optimizable_function(const Callable &callable) {
+    static_assert(
+        std::is_invocable_v<Callable, std::vector<double>> ||
+            std::is_invocable_v<Callable, std::vector<double>,
+                                std::vector<double> &>,
+        "Invalid optimization function. Must have signature double(const "
+        "std::vector<double>&) or double(const std::vector<double>&, "
+        "std::vector<double>&) for gradient-free or gradient-based "
+        "optimizations, respectively.");
+
+    if constexpr (std::is_invocable_v<Callable, std::vector<double>>) {
+      _opt_func = [c = std::move(callable)](const std::vector<double> &x,
+                                            std::vector<double> &) {
+        return c(x);
+      };
+      _providesGradients = false;
+    } else {
+      _opt_func = std::move(callable);
+    }
+  }
+
+  bool providesGradients() const { return _providesGradients; }
+  double operator()(const std::vector<double> &x,
+                    std::vector<double> &dx) const {
+    return _opt_func(x, dx);
+  }
+};
+
+///
+/// The optimizer provides a high-level interface for general
+/// optimization of user-specified objective functions. This is meant
+/// to serve as an interface for clients working with concrete
+/// subtypes providing specific optimization algorithms possibly delegating
+/// to third party libraries. This interface provides an optimize(...) method
+/// that takes the number of objective function input parameters
+/// (the dimension), and a user-specified objective function that takes the
+/// function input parameters as an immutable (const) vector reference
+/// and a mutable vector reference modeling the current iteration
+/// gradient vector (df / dx_i, for all i parameters). This function
+/// must return a scalar double, the value of this function at the
+/// current input parameters. The optimizer also
+/// exposes a method for querying whether the current optimization strategy
+/// requires gradients or not. Parameterizing optimization strategies
+/// is left as a task for sub-types (things like initial parameters, max
+/// function evaluations, etc.).
+class optimizer : public extension_point<optimizer> {
+public:
+  virtual ~optimizer() = default;
+
+  /// Returns true if this optimization strategy requires
+  /// gradients to achieve its optimization goals.
+  virtual bool requiresGradients() const = 0;
+
+  /// Run the optimization strategy defined by concrete sub-type
+  /// implementations. Takes the number of variational parameters,
+  /// and a custom objective function that takes the
+  /// function input parameters as an immutable (`const`) `vector`
+  /// reference and a mutable `vector` reference modeling the current
+  /// iteration gradient vector (`df / dx_i`, for all `i` parameters). This
+  /// function must return a scalar double, the value of this function at the
+  /// current input parameters.
+  virtual optimization_result
+  optimize(std::size_t dim, const optimizable_function &opt_function) {
+    return optimize(dim, opt_function, heterogeneous_map());
+  }
+
+  /// Run the optimization strategy defined by concrete sub-type
+  /// implementations. Takes the number of variational parameters,
+  /// and a custom objective function that takes the
+  /// function input parameters as an immutable (`const`) `vector`
+  /// reference and a mutable `vector` reference modeling the current
+  /// iteration gradient vector (`df / dx_i`, for all `i` parameters). This
+  /// function must return a scalar double, the value of this function at the
+  /// current input parameters. Optionally provide optimizer options.
+  virtual optimization_result optimize(std::size_t dim,
+                                       const optimizable_function &opt_function,
+                                       const heterogeneous_map &options) = 0;
+
+  /// @brief Return the optimization history, e.g.
+  /// the value and parameters found for each iteration of
+  /// the optimization.
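+  ///
+  /// A minimal illustrative sketch (assuming an optimizer registered under
+  /// the name "cobyla" and a simple two-parameter objective):
+  /// @code{.cpp}
+  /// auto opt = cudaq::optim::optimizer::get("cobyla");
+  /// auto [value, params] = opt->optimize(
+  ///     2, [](const std::vector<double> &x, std::vector<double> &dx) {
+  ///       return x[0] * x[0] + x[1] * x[1];
+  ///     });
+  /// for (auto &[fval, fparams] : opt->history) {
+  ///   // inspect each iteration here
+  /// }
+  /// @endcode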
+  std::vector<optimization_result> history;
+};
+
+} // namespace cudaq::optim
\ No newline at end of file
diff --git a/libs/solvers/include/cudaq/solvers/optimizers/cobyla.h b/libs/solvers/include/cudaq/solvers/optimizers/cobyla.h
new file mode 100644
index 0000000..5db3be2
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/optimizers/cobyla.h
@@ -0,0 +1,37 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+
+#pragma once
+
+#include "cudaq/solvers/optimizer.h"
+
+using namespace cudaqx;
+
+namespace cudaq::optim {
+
+/// @brief The COBYLA (Constrained Optimization BY Linear Approximations)
+/// derivative-free optimizer.
+class cobyla : public optimizer {
+public:
+  using optimizer::optimize;
+
+  /// @brief Return false indicating this optimizer does not require an
+  /// optimization functor that produces gradients.
+  bool requiresGradients() const override { return false; }
+
+  /// @brief Optimize the provided function according to the
+  /// COBYLA algorithm.
+  optimization_result optimize(std::size_t dim,
+                               const optimizable_function &opt_function,
+                               const heterogeneous_map &options) override;
+
+  CUDAQ_EXTENSION_CREATOR_FUNCTION(optimizer, cobyla);
+};
+
+CUDAQ_REGISTER_TYPE(cobyla)
+
+} // namespace cudaq::optim
\ No newline at end of file
diff --git a/libs/solvers/include/cudaq/solvers/optimizers/lbfgs.h b/libs/solvers/include/cudaq/solvers/optimizers/lbfgs.h
new file mode 100644
index 0000000..5b000a4
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/optimizers/lbfgs.h
@@ -0,0 +1,36 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+
+#pragma once
+
+#include "cudaq/solvers/optimizer.h"
+
+using namespace cudaqx;
+
+namespace cudaq::optim {
+
+/// @brief The limited-memory Broyden-Fletcher-Goldfarb-Shanno
+/// gradient-based black-box function optimizer.
+class lbfgs : public optimizer {
+public:
+  using optimizer::optimize;
+
+  /// @brief Return true indicating this optimizer requires an
+  /// optimization functor that produces gradients.
+  bool requiresGradients() const override { return true; }
+
+  /// @brief Optimize the provided function according to the
+  /// LBFGS algorithm.
+  optimization_result
+  optimize(std::size_t dim, const optimizable_function &opt_function,
+           const cudaqx::heterogeneous_map &options) override;
+
+  CUDAQ_EXTENSION_CREATOR_FUNCTION(optimizer, lbfgs)
+};
+CUDAQ_REGISTER_TYPE(lbfgs)
+} // namespace cudaq::optim
\ No newline at end of file
diff --git a/libs/solvers/include/cudaq/solvers/qaoa.h b/libs/solvers/include/cudaq/solvers/qaoa.h
new file mode 100644
index 0000000..f1dc2a2
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/qaoa.h
@@ -0,0 +1,160 @@
+/*******************************************************************************
+ * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+#pragma once
+
+#include "common/MeasureCounts.h"
+#include "cudaq/spin_op.h"
+
+#include "cuda-qx/core/graph.h"
+#include "optimizer.h"
+
+namespace cudaq::solvers {
+
+/// @brief Result structure for QAOA optimization
+struct qaoa_result {
+  /// @brief The optimal value found by the QAOA algorithm
+  double optimal_value = 0.0;
+
+  /// @brief The optimal variational parameters that achieved the optimal value
+  std::vector<double> optimal_parameters;
+
+  /// @brief The measurement results for the optimal circuit configuration
+  cudaq::sample_result optimal_config;
+};
+
+/// @brief Execute the Quantum Approximate Optimization Algorithm (QAOA) with
+/// custom mixing Hamiltonian
+/// @param problemHamiltonian The cost Hamiltonian encoding the optimization
+/// problem
+/// @param referenceHamiltonian The mixing Hamiltonian for the QAOA evolution
+/// (typically X-rotation terms)
+/// @param optimizer The classical optimizer to use for parameter optimization
+/// @param numLayers The number of QAOA layers (p-value)
+/// @param initialParameters Initial guess for the variational parameters
+/// @param options Additional algorithm options passed as key-value pairs
+///
+/// @note User can provide the following options - {"counterdiabatic",
+/// true/false} to run Digitized-Counterdiabatic QAOA (adds Ry rotations after
+/// QAOA single layer)
+///
+/// @return qaoa_result containing the optimization results
+qaoa_result qaoa(const cudaq::spin_op &problemHamiltonian,
+                 const cudaq::spin_op &referenceHamiltonian,
+                 const optim::optimizer &optimizer, std::size_t numLayers,
+                 const std::vector<double> &initialParameters,
+                 const heterogeneous_map options = {});
+
+/// @brief Execute QAOA with default transverse field mixing Hamiltonian
+/// @param problemHamiltonian The cost Hamiltonian encoding the optimization
+/// problem
+/// @param optimizer The classical optimizer to use for parameter optimization
+/// @param numLayers The number of QAOA layers (p-value)
+/// @param initialParameters Initial guess for the variational parameters
+/// @param options Additional algorithm options passed as key-value pairs
+///
+/// @note User can provide the following options - {"counterdiabatic",
+/// true/false} to run Digitized-Counterdiabatic QAOA (adds Ry rotations after
+/// QAOA single layer)
+///
+/// @return qaoa_result containing the optimization results
+qaoa_result qaoa(const cudaq::spin_op &problemHamiltonian,
+                 const optim::optimizer &optimizer, std::size_t numLayers,
+                 const std::vector<double> &initialParameters,
+                 const heterogeneous_map options = {});
+
+/// @brief Execute QAOA with default optimizer and mixing Hamiltonian
+/// @param problemHamiltonian The cost Hamiltonian encoding the optimization
+/// problem
+/// @param numLayers The number of QAOA layers (p-value)
+/// @param initialParameters Initial guess for the variational parameters (must
+/// be size 2*numLayers)
+/// @param options Additional algorithm options passed as key-value pairs
+///
+/// @note User can provide the following options - {"counterdiabatic",
+/// true/false} to run Digitized-Counterdiabatic QAOA (adds Ry rotations after
+/// QAOA single layer)
+///
+/// @return qaoa_result containing the optimization results
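+///
+/// A minimal illustrative sketch pairing this overload with the MaxCut
+/// Hamiltonian generator declared in operators/graph/max_cut.h (the graph
+/// construction here is schematic):
+/// @code{.cpp}
+/// cudaqx::graph g; // populate g with weighted edges ...
+/// auto H = cudaq::solvers::get_maxcut_hamiltonian(g);
+/// std::size_t numLayers = 2;
+/// std::vector<double> init(2 * numLayers, 0.1);
+/// auto result = cudaq::solvers::qaoa(H, numLayers, init);
+/// printf("optimal value = %lf\n", result.optimal_value);
+/// @endcode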
+qaoa_result qaoa(const cudaq::spin_op &problemHamiltonian,
+                 std::size_t numLayers,
+                 const std::vector<double> &initialParameters,
+                 const heterogeneous_map options = {});
+
+/// @brief Execute the Quantum Approximate Optimization Algorithm (QAOA) with
+/// custom mixing Hamiltonian
+/// @param problemHamiltonian The cost Hamiltonian encoding the optimization
+/// problem
+/// @param referenceHamiltonian The mixing Hamiltonian for the QAOA evolution
+/// (typically X-rotation terms)
+/// @param numLayers The number of QAOA layers (p-value)
+/// @param initialParameters Initial guess for the variational parameters
+/// @param options Additional algorithm options passed as key-value pairs
+///
+/// @note User can provide the following options - {"counterdiabatic",
+/// true/false} to run Digitized-Counterdiabatic QAOA (adds Ry rotations after
+/// QAOA single layer)
+///
+/// @return qaoa_result containing the optimization results
+qaoa_result qaoa(const cudaq::spin_op &problemHamiltonian,
+                 const cudaq::spin_op &referenceHamiltonian,
+                 std::size_t numLayers,
+                 const std::vector<double> &initialParameters,
+                 const heterogeneous_map options = {});
+
+/// @brief Calculate the number of variational parameters needed for QAOA with
+/// custom mixing Hamiltonian
+///
+/// @details This function determines the total number of variational parameters
+/// required for QAOA execution based on the problem setup and options. When
+/// full_parameterization is true, an angle will be used for every term in both
+/// the problem and reference Hamiltonians. Otherwise, one angle per layer is
+/// used for each Hamiltonian.
+///
+/// @param problemHamiltonian The cost Hamiltonian encoding the optimization
+/// problem
+/// @param referenceHamiltonian The mixing Hamiltonian for the QAOA evolution
+/// @param numLayers The number of QAOA layers (p-value)
+/// @param options Additional algorithm options:
+///        - "full_parameterization": bool - Use individual angles for
+///          each Hamiltonian term
+///        - "counterdiabatic": bool - Enable counterdiabatic QAOA
+///          variant, adds an Ry to every qubit in the system with its own
+///          angle to optimize.
+///
+/// @return The total number of variational parameters needed
+///
+std::size_t get_num_qaoa_parameters(const cudaq::spin_op &problemHamiltonian,
+                                    const cudaq::spin_op &referenceHamiltonian,
+                                    std::size_t numLayers,
+                                    const heterogeneous_map options = {});
+
+/// @brief Calculate the number of variational parameters needed for QAOA with
+/// default mixing Hamiltonian
+///
+/// @details This function determines the total number of variational parameters
+/// required for QAOA execution using the default transverse field mixing
+/// Hamiltonian. When full_parameterization is true, an angle will be used for
+/// every term in both the problem and reference Hamiltonians. Otherwise, one
+/// angle per layer is used for each Hamiltonian.
+///
+/// @param problemHamiltonian The cost Hamiltonian encoding the optimization
+/// problem
+/// @param numLayers The number of QAOA layers (p-value)
+/// @param options Additional algorithm options:
+///        - "full_parameterization": bool - Use individual angles for
+///          each Hamiltonian term
+///        - "counterdiabatic": bool - Enable counterdiabatic QAOA
+///          variant, adds an Ry to every qubit in the system with its own
+///          angle to optimize.
+///
+/// @return The total number of variational parameters needed
+///
+std::size_t get_num_qaoa_parameters(const cudaq::spin_op &problemHamiltonian,
+                                    std::size_t numLayers,
+                                    const heterogeneous_map options = {});
+} // namespace cudaq::solvers
diff --git a/libs/solvers/include/cudaq/solvers/qaoa/qaoa_device.h b/libs/solvers/include/cudaq/solvers/qaoa/qaoa_device.h
new file mode 100644
index 0000000..fcc2157
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/qaoa/qaoa_device.h
@@ -0,0 +1,51 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+#pragma once
+
+#include "cudaq.h"
+
+namespace cudaq::solvers {
+
+/// @brief QAOA quantum kernel implementation for quantum approximate
+/// optimization
+/// @details This kernel implements the Quantum Approximate Optimization
+/// Algorithm (QAOA) circuit structure, which alternates between problem and
+/// mixing Hamiltonians for a specified number of layers. The circuit begins
+/// with an equal superposition state and applies parameterized evolution under
+/// both Hamiltonians.
+///
+/// The circuit structure is:
+/// 1. Initialize all qubits in superposition with Hadamard gates
+/// 2. For each layer:
+///    - Apply problem Hamiltonian evolution with gamma parameters
+///    - Apply mixing Hamiltonian evolution with beta parameters
+///
+/// @param numQubits Number of qubits in the QAOA circuit
+/// @param numLayers Number of QAOA layers (p-value) to apply
+/// @param gamma_beta Vector of alternating gamma/beta variational parameters
+/// @param problemHCoeffs Coefficients for each term in the problem Hamiltonian
+/// @param problemH Pauli string operators representing the problem Hamiltonian
+/// terms
+/// @param referenceHCoeffs Coefficients for each term in the mixing Hamiltonian
+/// @param referenceH Pauli string operators representing the mixing Hamiltonian
+/// terms
+/// @param full_parameterization if true, use a parameter for every term in both
+/// problem and reference Hamiltonians.
+/// @param counterdiabatic if true, add ry rotations to every qubit after
+/// reference Hamiltonian.
+///
+/// @see qaoa_result For the structure containing optimization results
+/// @see exp_pauli For the primitive implementing parameterized Pauli evolution
+__qpu__ void qaoa_kernel(std::size_t numQubits, std::size_t numLayers,
+                         const std::vector<double> &gamma_beta,
+                         const std::vector<double> &problemHCoeffs,
+                         const std::vector<cudaq::pauli_word> &problemH,
+                         const std::vector<double> &referenceHCoeffs,
+                         const std::vector<cudaq::pauli_word> &referenceH,
+                         bool full_parameterization, bool counterdiabatic);
+} // namespace cudaq::solvers
diff --git a/libs/solvers/include/cudaq/solvers/stateprep/uccsd.h b/libs/solvers/include/cudaq/solvers/stateprep/uccsd.h
new file mode 100644
index 0000000..a25181d
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/stateprep/uccsd.h
@@ -0,0 +1,80 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+#pragma once
+
+#include "cudaq.h"
+#include <tuple>
+#include <vector>
+
+namespace cudaq::solvers::stateprep {
+
+/// @brief Represents a list of excitations
+using excitation_list = std::vector<std::vector<std::size_t>>;
+
+/// @brief Get UCCSD excitations for a given system
+/// @param numElectrons Number of electrons in the system
+/// @param numQubits Number of qubits in the system
+/// @param spin Spin of the system
+/// @return Tuple containing five excitation lists
+std::tuple<excitation_list, excitation_list, excitation_list, excitation_list,
+           excitation_list>
+get_uccsd_excitations(std::size_t numElectrons, std::size_t numQubits,
+                      std::size_t spin = 0);
+
+/// @brief Calculate the number of UCCSD parameters
+/// @param numElectrons Number of electrons in the system
+/// @param numQubits Number of qubits in the system
+/// @param spin Spin of the system (default 0)
+/// @return Number of UCCSD parameters
+std::size_t get_num_uccsd_parameters(std::size_t numElectrons,
+                                     std::size_t numQubits,
+                                     std::size_t spin = 0);
+
+/// \pure_device_kernel
+///
+/// @brief Perform a single excitation operation
+/// @param qubits Qubit register
+/// @param theta Rotation angle
+/// @param p_occ Occupied orbital index
+/// @param q_virt Virtual orbital index
+__qpu__ void single_excitation(cudaq::qview<> qubits, double theta,
+                               std::size_t p_occ, std::size_t q_virt);
+
+/// \pure_device_kernel
+///
+/// @brief Perform a double excitation operation
+/// @param qubits Qubit register
+/// @param theta Rotation angle
+/// @param p_occ First occupied orbital index
+/// @param q_occ Second occupied orbital index
+/// @param r_virt First virtual orbital index
+/// @param s_virt Second virtual orbital index
+__qpu__ void double_excitation(cudaq::qview<> qubits, double theta,
+                               std::size_t p_occ, std::size_t q_occ,
+                               std::size_t r_virt, std::size_t s_virt);
+
+/// \pure_device_kernel
+///
+/// @brief Apply UCCSD ansatz to a qubit register
+/// @param qubits Qubit register
+/// @param thetas Vector of rotation angles
+/// @param numElectrons Number of electrons in the system
+/// @param spin Spin of the system
+__qpu__ void uccsd(cudaq::qview<> qubits, const std::vector<double> &thetas,
+                   std::size_t numElectrons, std::size_t spin);
+
+/// \pure_device_kernel
+///
+/// @brief Apply UCCSD ansatz to a qubit vector
+/// @param qubits Qubit vector
+/// @param thetas Vector of rotation angles
+/// @param numElectrons Number of electrons in the system
+__qpu__ void uccsd(cudaq::qview<> qubits, const std::vector<double> &thetas,
+                   std::size_t numElectrons);
+
+} // namespace cudaq::solvers::stateprep
\ No newline at end of file
diff --git a/libs/solvers/include/cudaq/solvers/vqe.h b/libs/solvers/include/cudaq/solvers/vqe.h
new file mode 100644
index 0000000..a0c3074
--- /dev/null
+++ b/libs/solvers/include/cudaq/solvers/vqe.h
@@ -0,0 +1,404 @@
+/****************************************************************-*- C++ -*-****
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates. *
+ * All rights reserved. *
+ * *
+ * This source code and the accompanying materials are made available under *
+ * the terms of the Apache License 2.0 which accompanies this distribution. *
+ ******************************************************************************/
+#pragma once
+
+#include "observe_gradient.h"
+#include "optimizer.h"
+
+using namespace cudaqx;
+
+namespace cudaq::solvers {
+
+/// @brief A vqe_result encapsulates all the data produced
+/// by a standard variational quantum eigensolver execution. It
+/// provides the programmer with the optimal energy and parameters
+/// as well as a list of all execution data at each iteration.
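+///
+/// A minimal illustrative sketch (the two-qubit ansatz below is a placeholder;
+/// any kernel callable as void(std::vector<double>) works):
+/// @code{.cpp}
+/// auto ansatz = [](std::vector<double> theta) __qpu__ {
+///   cudaq::qvector q(2);
+///   x(q[0]);
+///   ry(theta[0], q[1]);
+///   x<cudaq::ctrl>(q[1], q[0]);
+/// };
+/// cudaq::spin_op H = /* hermitian spin_op of interest */;
+/// auto result = cudaq::solvers::vqe(ansatz, H, {0.0});
+/// printf("min energy = %.12lf\n", result.energy);
+/// @endcode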
+struct vqe_result {
+  double energy;
+  std::vector<double> optimal_parameters;
+  std::vector<observe_iteration> iteration_data;
+  operator double() { return energy; }
+
+  // FIXME add to/from file functionality
+};
+
+/// @brief Compute the minimal eigenvalue of the given Hamiltonian with VQE.
+/// @details Given a quantum kernel of signature `void(std::vector<double>)`,
+/// run the variational quantum eigensolver routine to compute
+/// the minimum eigenvalue of the specified hermitian `spin_op`.
+/// @tparam QuantumKernel Type of the quantum kernel
+/// @param kernel Quantum kernel to be optimized
+/// @param hamiltonian Spin operator representing the Hamiltonian
+/// @param optimizer Optimization algorithm to use
+/// @param gradient Gradient computation method
+/// @param initial_parameters Initial parameters for the optimization
+/// @param options Additional options for the VQE algorithm
+/// @return VQE result containing optimal energy, parameters, and iteration data
+template <typename QuantumKernel>
+  requires std::invocable<QuantumKernel, std::vector<double>>
+static inline vqe_result vqe(QuantumKernel &&kernel, const spin_op &hamiltonian,
+                             optim::optimizer &optimizer,
+                             observe_gradient &gradient,
+                             const std::vector<double> &initial_parameters,
+                             heterogeneous_map options = heterogeneous_map()) {
+  if (!optimizer.requiresGradients())
+    throw std::runtime_error("[vqe] provided optimizer does not require "
+                             "gradients, yet gradient instance provided.");
+
+  options.insert("initial_parameters", initial_parameters);
+  std::vector<observe_iteration> data;
+
+  /// Run the optimization
+  auto [groundEnergy, optParams] = optimizer.optimize(
+      initial_parameters.size(),
+      [&](const std::vector<double> &x, std::vector<double> &dx) {
+        auto res =
+            cudaq::observe(options.get("shots", -1), kernel, hamiltonian, x);
+        if (options.get("verbose", false))
+          printf("<H> = %.12lf\n", res.expectation());
+        data.emplace_back(x, res, observe_execution_type::function);
+        gradient.compute(x, dx, res.expectation(), options.get("shots", -1));
+        for (auto datum : gradient.data)
+          data.push_back(datum);
+
+        return res.expectation();
+      },
+      options);
+
+  return {groundEnergy, optParams, data};
+}
+
+/// @brief Overloaded VQE function using string-based optimizer and gradient
+/// selection
+template <typename QuantumKernel>
+  requires std::invocable<QuantumKernel, std::vector<double>>
+static inline vqe_result vqe(QuantumKernel &&kernel, const spin_op &hamiltonian,
+                             const std::string &optName,
+                             const std::string &gradName,
+                             const std::vector<double> &initial_parameters,
+                             heterogeneous_map options = heterogeneous_map()) {
+
+  if (!cudaq::optim::optimizer::is_registered(optName))
+    throw std::runtime_error("provided optimizer is not valid.");
+
+  if (!cudaq::observe_gradient::is_registered(gradName))
+    throw std::runtime_error("provided gradient is not valid.");
+
+  auto optimizer = cudaq::optim::optimizer::get(optName);
+  auto gradient = cudaq::observe_gradient::get(gradName, kernel, hamiltonian);
+
+  if (!optimizer->requiresGradients())
+    throw std::runtime_error("[vqe] provided optimizer does not require "
+                             "gradients, yet gradient instance provided.");
+
+  options.insert("initial_parameters", initial_parameters);
+  std::vector<observe_iteration> data;
+
+  /// Run the optimization
+  auto [groundEnergy, optParams] = optimizer->optimize(
+      initial_parameters.size(),
+      [&](const std::vector<double> &x, std::vector<double> &dx) {
+        auto res =
+            cudaq::observe(options.get("shots", -1), kernel, hamiltonian, x);
+        if (options.get("verbose", false))
+          printf("<H> = %.12lf\n", res.expectation());
+        data.emplace_back(x, res, observe_execution_type::function);
+        gradient->compute(x, dx, res.expectation(), options.get("shots", -1));
+        for (auto datum : gradient->data)
+          data.push_back(datum);
+
+        return res.expectation();
+      },
+      options);
+
+  return {groundEnergy, optParams, data};
+}
+
+/// @brief Overloaded VQE function using string-based optimizer selection
+/// without gradient
+template <typename QuantumKernel>
+  requires std::invocable<QuantumKernel, std::vector<double>>
+static inline vqe_result vqe(QuantumKernel &&kernel, const spin_op &hamiltonian,
+                             const std::string &optName,
+                             const std::vector<double> &initial_parameters,
+                             heterogeneous_map options = heterogeneous_map()) {
+
+  if (!cudaq::optim::optimizer::is_registered(optName))
+    throw std::runtime_error("provided optimizer is not valid.");
+
+  auto optimizer = cudaq::optim::optimizer::get(optName);
+
+  if (optimizer->requiresGradients())
+    throw std::runtime_error("[vqe] provided optimizer requires "
+                             "gradients, yet no gradient instance provided.");
+
+  options.insert("initial_parameters", initial_parameters);
+  std::vector<observe_iteration> data;
+
+  /// Run the optimization
+  auto [groundEnergy, optParams] = optimizer->optimize(
+      initial_parameters.size(),
+      [&](const std::vector<double> &x, std::vector<double> &dx) {
+        auto res =
+            cudaq::observe(options.get("shots", -1), kernel, hamiltonian, x);
+        if (options.get("verbose", false))
+          printf("<H> = %.12lf\n", res.expectation());
+        data.emplace_back(x, res, observe_execution_type::function);
+        return res.expectation();
+      },
+      options);
+
+  return {groundEnergy, optParams, data};
+}
+
+/// @brief Overloaded VQE function using string-based optimizer and provided
+/// gradient object
+template <typename QuantumKernel>
+  requires std::invocable<QuantumKernel, std::vector<double>>
+static inline vqe_result vqe(QuantumKernel &&kernel, const spin_op &hamiltonian,
+                             const std::string &optName,
+                             observe_gradient &gradient,
+                             const std::vector<double> &initial_parameters,
+                             heterogeneous_map options = heterogeneous_map()) {
+
+  if (!cudaq::optim::optimizer::is_registered(optName))
+    throw std::runtime_error("provided optimizer is not valid.");
+
+  auto optimizer = cudaq::optim::optimizer::get(optName);
+  if (!optimizer->requiresGradients())
+    throw std::runtime_error("[vqe] provided optimizer does not require "
+                             "gradients, yet gradient instance provided.");
+
+  options.insert("initial_parameters", initial_parameters);
+  std::vector<observe_iteration> data;
+
+  /// Run the optimization
+  auto [groundEnergy, optParams] = optimizer->optimize(
+      initial_parameters.size(),
+      [&](const std::vector<double> &x, std::vector<double> &dx) {
+        auto res =
+            cudaq::observe(options.get("shots", -1), kernel, hamiltonian, x);
+        if (options.get("verbose", false))
+          printf("<H> = %.12lf\n", res.expectation());
+        data.emplace_back(x, res, observe_execution_type::function);
+        gradient.compute(x, dx, res.expectation(), options.get("shots", -1));
+        for (auto datum : gradient.data)
+          data.push_back(datum);
+
+        return res.expectation();
+      },
+      options);
+
+  return {groundEnergy, optParams, data};
+}
+
+/// @brief Overloaded VQE function using provided optimizer and string-based
+/// gradient selection
+template <typename QuantumKernel>
+  requires std::invocable<QuantumKernel, std::vector<double>>
+static inline vqe_result vqe(QuantumKernel &&kernel, const spin_op &hamiltonian,
+                             optim::optimizer &optimizer,
+                             const std::string &gradName,
+                             const std::vector<double> &initial_parameters,
+                             heterogeneous_map options = heterogeneous_map()) {
+
+  if (!cudaq::observe_gradient::is_registered(gradName))
+    throw std::runtime_error("provided gradient is not valid.");
+
+  auto gradient = cudaq::observe_gradient::get(gradName, kernel, hamiltonian);
+
+  if (!optimizer.requiresGradients())
+    throw std::runtime_error("[vqe] provided optimizer does not require "
+                             "gradients, yet gradient instance provided.");
+
+  options.insert("initial_parameters", initial_parameters);
+  std::vector<observe_iteration> data;
+
+  /// Run the optimization
+  auto [groundEnergy, optParams] = optimizer.optimize(
+      initial_parameters.size(),
+      [&](const std::vector<double> &x, std::vector<double> &dx) {
+        auto res =
+            cudaq::observe(options.get("shots", -1), kernel, hamiltonian, x);
+        if (options.get("verbose", false))
+          printf("<H> = %.12lf\n", res.expectation());
+        data.emplace_back(x, res, observe_execution_type::function);
+        gradient->compute(x, dx, res.expectation(), options.get("shots", -1));
+        for (auto datum : gradient->data)
+          data.push_back(datum);
+
+        return res.expectation();
+      },
+      options);
+
+  return {groundEnergy, optParams, data};
+}
+
+/// @brief Overloaded VQE function using provided optimizer without gradient
+template <typename QuantumKernel>
+  requires std::invocable<QuantumKernel, std::vector<double>>
+static inline vqe_result vqe(QuantumKernel &&kernel, const spin_op &hamiltonian,
+                             optim::optimizer &optimizer,
+                             const std::vector<double> &initial_parameters,
+                             heterogeneous_map options = heterogeneous_map()) {
+
+  if (optimizer.requiresGradients())
+    throw std::runtime_error("[vqe] provided optimizer requires "
+                             "gradients, yet no gradient instance provided.");
+
+  options.insert("initial_parameters", initial_parameters);
+
+  std::vector<observe_iteration> data;
+
+  /// Run the optimization
+  auto [groundEnergy, optParams] = optimizer.optimize(
+      initial_parameters.size(),
+      [&](const std::vector<double> &x, std::vector<double> &dx) {
+        auto res =
+            cudaq::observe(options.get("shots", -1), kernel, hamiltonian, x);
+        if (options.get("verbose", false))
+          printf("<H> = %.12lf\n", res.expectation());
+        data.emplace_back(x, res, observe_execution_type::function);
+
+        return res.expectation();
+      },
+      options);
+
+  return {groundEnergy, optParams, data};
+}
+
+template <typename QuantumKernel>
+  requires std::invocable<QuantumKernel, std::vector<double>>
+static inline vqe_result vqe(QuantumKernel &&kernel, const spin_op &hamiltonian,
+                             const std::vector<double> &initial_parameters,
+                             heterogeneous_map options = heterogeneous_map()) {
+
+  auto optimizer = optim::optimizer::get("cobyla");
+  options.insert("initial_parameters", initial_parameters);
+
+  std::vector<observe_iteration> data;
+
+  /// Run the optimization
+  auto [groundEnergy, optParams] = optimizer->optimize(
+      initial_parameters.size(),
+      [&](const std::vector<double> &x, std::vector<double> &dx) {
+        auto res =
+            cudaq::observe(options.get("shots", -1), kernel, hamiltonian, x);
+        if (options.get("verbose", false))
+          printf("<H> = %.12lf\n", res.expectation());
+        data.emplace_back(x, res, observe_execution_type::function);
+
+        return res.expectation();
+      },
+      options);
+
+  return {groundEnergy, optParams, data};
+}
+
+template <typename QuantumKernel, typename ArgTranslator>
+static inline vqe_result vqe(QuantumKernel &&kernel, const spin_op &hamiltonian,
+                             optim::optimizer &optimizer,
+                             const std::vector<double> &initial_parameters,
+                             ArgTranslator &&translator,
+                             heterogeneous_map options = heterogeneous_map()) {
+
+  if (optimizer.requiresGradients())
+    throw std::runtime_error("[vqe] provided optimizer requires "
+                             "gradients, yet gradient instance not provided.");
+
+  options.insert("initial_parameters", initial_parameters);
+
+  std::vector<observe_iteration> data;
+
+  /// Run the optimization
+  auto [groundEnergy, optParams] = optimizer.optimize(
+      initial_parameters.size(),
+      [&](const std::vector<double> &x, std::vector<double> &dx) {
+        auto res = std::apply(
+            [&](auto &&...arg) {
+              return cudaq::observe(kernel, hamiltonian, arg...);
+            },
+            translator(x));
+        if (options.get("verbose", false))
+          printf("<H> = %.12lf\n", res.expectation());
+        data.emplace_back(x, res, observe_execution_type::function);
+
+        return res.expectation();
+      },
+      options);
+
+  return {groundEnergy, optParams, data};
+}
+
+template <typename QuantumKernel, typename ArgTranslator>
+static inline vqe_result vqe(QuantumKernel &&kernel, const spin_op &hamiltonian,
+                             const std::vector<double> &initial_parameters,
+                             ArgTranslator &&translator,
+                             heterogeneous_map options = heterogeneous_map()) {
+
+  auto optimizer = optim::optimizer::get("cobyla");
+  options.insert("initial_parameters", initial_parameters);
+
+  std::vector<observe_iteration> data;
+
+  /// Run the optimization
+  auto [groundEnergy, optParams] = optimizer->optimize(
+      initial_parameters.size(),
+      [&](const std::vector<double> &x, std::vector<double> &dx) {
+        auto res = std::apply(
+            [&](auto &&...arg) {
+              return cudaq::observe(kernel, hamiltonian, arg...);
+            },
+            translator(x));
+        if (options.get("verbose", false))
+          printf("<H> = %.12lf\n", res.expectation());
+        data.emplace_back(x, res, observe_execution_type::function);
+
+        return res.expectation();
+      },
+      options);
+
+  return {groundEnergy, optParams, data};
+}
+
+template <typename QuantumKernel, typename ArgTranslator>
+static inline vqe_result
+vqe(QuantumKernel &&kernel, const spin_op &hamiltonian,
+    optim::optimizer &optimizer, observe_gradient &gradient,
+    const std::vector<double> &initial_parameters, ArgTranslator &&translator,
+    heterogeneous_map options = heterogeneous_map()) {
+
+  if (!optimizer.requiresGradients())
+    throw std::runtime_error("[vqe] provided optimizer does not require "
+                             "gradients, yet gradient instance provided.");
+
+  options.insert("initial_parameters", initial_parameters);
+
+  std::vector<observe_iteration> data;
+
+  /// Run the optimization
+  auto [groundEnergy, optParams] = optimizer.optimize(
+      initial_parameters.size(),
+      [&](const std::vector<double> &x, std::vector<double> &dx) {
+        auto res = std::apply(
+            [&](auto &&...arg) {
+              return cudaq::observe(kernel, hamiltonian, arg...);
+            },
+            translator(x));
+        if (options.get("verbose", false))
+          printf("<H> = %.12lf\n", res.expectation());
+        data.emplace_back(x, res, observe_execution_type::function);
+        gradient.compute(x, dx, res.expectation(), options.get("shots", -1));
+        return res.expectation();
+      },
+      options);
+
+  return {groundEnergy, optParams, data};
+}
+
+} // namespace cudaq::solvers
\ No newline at end of file
diff --git a/libs/solvers/lib/CMakeLists.txt b/libs/solvers/lib/CMakeLists.txt
new file mode 100644
index 0000000..2fcb44e
--- /dev/null
+++ b/libs/solvers/lib/CMakeLists.txt
@@ -0,0 +1,91 @@
+# ============================================================================ #
+# Copyright (c) 2024 NVIDIA Corporation & Affiliates. #
+# All rights reserved. #
+# #
+# This source code and the accompanying materials are made available under #
+# the terms of the Apache License 2.0 which accompanies this distribution.
# +# ============================================================================ # + +add_library(cudaq-solvers SHARED + observe_gradients/central_difference.cpp + observe_gradients/forward_difference.cpp + observe_gradients/observe_gradient.cpp + observe_gradients/parameter_shift.cpp + operators/molecule/drivers/process.cpp + operators/molecule/drivers/pyscf_driver.cpp + operators/molecule/fermion_compilers/fermion_compiler.cpp + operators/molecule/fermion_compilers/jordan_wigner.cpp + operators/molecule/molecule.cpp + operators/graph/max_cut.cpp + operators/graph/clique.cpp + operators/operator_pools/operator_pool.cpp + operators/operator_pools/spin_complement_gsd.cpp + operators/operator_pools/uccsd_operator_pool.cpp + operators/operator_pools/qaoa_operator_pool.cpp + +) + +add_subdirectory(adapt) +add_subdirectory(optimizers) +add_subdirectory(stateprep) +add_subdirectory(qaoa) + +target_include_directories(cudaq-solvers + PUBLIC + $ + $ + $ +) + +target_link_options(cudaq-solvers PUBLIC + $<$:-Wl,--no-as-needed> +) + +target_link_libraries(cudaq-solvers + PUBLIC + cudaqx-core + cudaq::cudaq + cudaq::cudaq-spin + PRIVATE + cudaq::cudaq-common + nlohmann_json::nlohmann_json + cppitertools::cppitertools +) + +set_target_properties(cudaq-solvers PROPERTIES + LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) + +# RPATH configuration +# ============================================================================== + +if (NOT SKBUILD) + set_target_properties(cudaq-solvers PROPERTIES + BUILD_RPATH "$ORIGIN" + INSTALL_RPATH "$ORIGIN:$ORIGIN/../lib" + ) + + # Let CMake automatically add paths of linked libraries to the RPATH: + set_target_properties(cudaq-solvers PROPERTIES + INSTALL_RPATH_USE_LINK_PATH TRUE) +else() + # CUDA-Q install its libraries in site-packages/lib (or dist-packages/lib) + # Thus, we need the $ORIGIN/../lib + set_target_properties(cudaq-solvers PROPERTIES + INSTALL_RPATH "$ORIGIN/../../lib" + ) +endif() + +# Install +# ============================================================================== + +install(TARGETS cudaq-solvers + COMPONENT solvers-lib + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + +install(DIRECTORY ${CUDAQX_SOLVERS_INCLUDE_DIR}/cudaq + COMPONENT solvers-headers + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + FILES_MATCHING PATTERN "*.h" +) + diff --git a/libs/solvers/lib/adapt/CMakeLists.txt b/libs/solvers/lib/adapt/CMakeLists.txt new file mode 100644 index 0000000..8b5a2f3 --- /dev/null +++ b/libs/solvers/lib/adapt/CMakeLists.txt @@ -0,0 +1,11 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +add_subdirectory(device) + +target_sources(cudaq-solvers PRIVATE adapt.cpp adapt_simulator.cpp) diff --git a/libs/solvers/lib/adapt/adapt.cpp b/libs/solvers/lib/adapt/adapt.cpp new file mode 100644 index 0000000..92b5454 --- /dev/null +++ b/libs/solvers/lib/adapt/adapt.cpp @@ -0,0 +1,14 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. 
* + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "common/Logger.h" +#include "cudaq.h" + +#include "cudaq/solvers/adapt.h" + +INSTANTIATE_REGISTRY_NO_ARGS(cudaq::solvers::adapt::adapt_impl) diff --git a/libs/solvers/lib/adapt/adapt_simulator.cpp b/libs/solvers/lib/adapt/adapt_simulator.cpp new file mode 100644 index 0000000..5ec07a0 --- /dev/null +++ b/libs/solvers/lib/adapt/adapt_simulator.cpp @@ -0,0 +1,219 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "common/Logger.h" +#include "cudaq.h" + +#include "cudaq/solvers/adapt/adapt_simulator.h" +#include "cudaq/solvers/vqe.h" +#include "device/adapt.h" +#include "device/prepare_state.h" + +#include +#include + +namespace cudaq::solvers::adapt { + +result +simulator::run(const cudaq::qkernel &)> &initialState, + const spin_op &H, const std::vector &pool, + const optim::optimizer &optimizer, const std::string &gradient, + const heterogeneous_map options) { + + if (pool.empty()) + throw std::runtime_error("Invalid adapt input, operator pool is empty."); + + std::vector pauliWords; + std::vector thetas, coefficients; + std::vector chosenOps; + auto tol = options.get("grad_norm_tolerance", 1e-5); + auto numQubits = H.num_qubits(); + // Assumes each rank can see numQpus, models a distributed + // architecture where each rank is a compute node, and each node + // has numQpus GPUs available. Each GPU is indexed 0, 1, 2, .. + std::size_t numQpus = cudaq::get_platform().num_qpus(); + std::size_t numRanks = + cudaq::mpi::is_initialized() ? cudaq::mpi::num_ranks() : 1; + std::size_t rank = cudaq::mpi::is_initialized() ? cudaq::mpi::rank() : 0; + double energy = 0.0, lastNorm = std::numeric_limits::max(); + + // poolList is split into numRanks chunks, and each chunk can be + // further parallelized across numQpus. + // Compute the [H,Oi] + std::vector commutators; + std::size_t total_elements = pool.size(); + std::size_t elements_per_rank = total_elements / numRanks; + std::size_t remainder = total_elements % numRanks; + std::size_t start = rank * elements_per_rank + std::min(rank, remainder); + std::size_t end = start + elements_per_rank + (rank < remainder ? 
1 : 0); + for (int i = start; i < end; i++) { + auto op = pool[i]; + commutators.emplace_back(H * op - op * H); + } + + nlohmann::json initInfo = {{"num-qpus", numQpus}, + {"numRanks", numRanks}, + {"num-pool-elements", pool.size()}, + {"num-elements-per-rank", end - start}}; + if (rank == 0) + cudaq::info("[adapt] init info: {}", initInfo.dump(4)); + + // We'll need to know the local to global index map + std::vector localToGlobalMap(end - start); + for (int i = 0; i < end - start; i++) + localToGlobalMap[i] = start + i; + + // Start of with the initial |psi_n> + cudaq::state state = get_state(adapt_kernel, numQubits, initialState, thetas, + coefficients, pauliWords); + std::size_t count = 0; + while (true) { + + // Step 1 - compute vector + std::vector gradients; + double gradNorm = 0.0; + std::vector resultHandles; + for (std::size_t i = 0, qpuCounter = 0; i < commutators.size(); i++) { + if (rank == 0) + cudaq::info("Compute commutator {}", i); + if (qpuCounter % numQpus == 0) + qpuCounter = 0; + + resultHandles.emplace_back( + observe_async(qpuCounter++, prepare_state, commutators[i], state)); + } + + std::vector results; + for (auto &handle : resultHandles) + results.emplace_back(handle.get()); + + // Get the gradient results + std::transform(results.begin(), results.end(), + std::back_inserter(gradients), + [](auto &&el) { return std::fabs(el.expectation()); }); + + // Compute the local gradient norm + double norm = 0.0; + for (auto &g : gradients) + norm += g * g; + + // All ranks have a norm, need to reduce that across all + if (mpi::is_initialized()) + norm = cudaq::mpi::all_reduce(norm, std::plus()); + + // All ranks have a max gradient and index + auto iter = std::max_element(gradients.begin(), gradients.end()); + double maxGrad = *iter; + auto maxOpIdx = std::distance(gradients.begin(), iter); + if (mpi::is_initialized()) { + std::vector allMaxOpIndices(numRanks); + std::vector allMaxGrads(numRanks); + // Distribute the max gradient from this rank to others + cudaq::mpi::all_gather(allMaxGrads, {*iter}); + // Distribute the corresponding idx from this rank to others, + // make sure we map back to global indices + cudaq::mpi::all_gather(allMaxOpIndices, + {static_cast(localToGlobalMap[maxOpIdx])}); + + // Everyone has the indices, loop over and pick out the + // max from all calculations + std::size_t cachedIdx = 0; + double cachedGrad = 0.0; + for (std::size_t i = 0; i < allMaxGrads.size(); i++) + if (allMaxGrads[i] > cachedGrad) { + cachedGrad = allMaxGrads[i]; + cachedIdx = allMaxOpIndices[i]; + } + + maxOpIdx = cachedIdx; + } + + if (rank == 0) { + cudaq::info("[adapt] index of element with max gradient is {}", maxOpIdx); + cudaq::info("current norm is {} and last iteration norm is {}", norm, + lastNorm); + } + + // Convergence is reached if gradient values are small + if (std::sqrt(std::fabs(norm)) < tol || std::fabs(lastNorm - norm) < tol) + break; + + // Use the operator from the pool + auto op = pool[maxOpIdx]; + chosenOps.push_back(op); + thetas.push_back(0.0); + for (auto o : op) { + pauliWords.emplace_back(o.to_string(false)); + coefficients.push_back(o.get_coefficient().imag()); + } + + optim::optimizable_function objective; + std::unique_ptr defaultGradient; + // If we don't need gradients, objective is simple + if (!optimizer.requiresGradients()) { + objective = [&, thetas, coefficients](const std::vector &x, + std::vector &dx) mutable { + auto res = cudaq::observe(adapt_kernel, H, numQubits, initialState, x, + coefficients, pauliWords); + if 
(options.get("verbose", false)) + printf(" = %.12lf\n", res.expectation()); + // data.emplace_back(x, res, observe_execution_type::function); + // for (auto datum : gradient->data) + // data.push_back(datum); + + return res.expectation(); + }; + } else { + auto localGradientName = gradient; + if (gradient.empty()) + localGradientName = "parameter_shift"; + + defaultGradient = observe_gradient::get( + localGradientName, + [&, thetas, coefficients, pauliWords](const std::vector xx) { + std::apply([&](auto &&...new_args) { adapt_kernel(new_args...); }, + std::forward_as_tuple(numQubits, initialState, xx, + coefficients, pauliWords)); + }, + H); + objective = [&, thetas, coefficients](const std::vector &x, + std::vector &dx) mutable { + // FIXME get shots in here... + auto res = cudaq::observe(adapt_kernel, H, numQubits, initialState, x, + coefficients, pauliWords); + if (options.get("verbose", false)) + printf(" = %.12lf\n", res.expectation()); + defaultGradient->compute(x, dx, res.expectation(), + options.get("shots", -1)); + + // data.emplace_back(x, res, observe_execution_type::function); + // for (auto datum : gradient->data) + // data.push_back(datum); + + return res.expectation(); + }; + } + + // FIXME fix the const_cast + auto [groundEnergy, optParams] = + const_cast(optimizer).optimize(thetas.size(), + objective, options); + // Set the new optimzal parameters + thetas = optParams; + energy = groundEnergy; + + // Set the norm for the next iteration's check + lastNorm = norm; + state = get_state(adapt_kernel, numQubits, initialState, thetas, + coefficients, pauliWords); + } + + return std::make_tuple(energy, thetas, chosenOps); +} + +} // namespace cudaq::solvers::adapt \ No newline at end of file diff --git a/libs/solvers/lib/adapt/device/CMakeLists.txt b/libs/solvers/lib/adapt/device/CMakeLists.txt new file mode 100644 index 0000000..94c5304 --- /dev/null +++ b/libs/solvers/lib/adapt/device/CMakeLists.txt @@ -0,0 +1,13 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +cudaqx_add_device_code(cudaq-solvers + SOURCES + adapt.cpp + prepare_state.cpp +) diff --git a/libs/solvers/lib/adapt/device/adapt.cpp b/libs/solvers/lib/adapt/device/adapt.cpp new file mode 100644 index 0000000..c589585 --- /dev/null +++ b/libs/solvers/lib/adapt/device/adapt.cpp @@ -0,0 +1,27 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
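Usage note (not part of the patch): the commutator pool in adapt_simulator.cpp above is split across MPI ranks with a block partition, spreading the remainder over the lowest ranks. The same index arithmetic in a standalone sketch, for a ten-operator pool over three ranks:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  std::size_t total = 10, numRanks = 3;
  for (std::size_t rank = 0; rank < numRanks; rank++) {
    std::size_t perRank = total / numRanks, remainder = total % numRanks;
    // Same formulas used above for start/end of each rank's chunk.
    std::size_t start = rank * perRank + std::min(rank, remainder);
    std::size_t end = start + perRank + (rank < remainder ? 1 : 0);
    std::printf("rank %zu -> pool indices [%zu, %zu)\n", rank, start, end);
  }
  return 0; // prints [0,4), [4,7), [7,10)
}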
* + ******************************************************************************/ + +#include "cudaq.h" + +namespace cudaq { + +__qpu__ void +adapt_kernel(std::size_t numQubits, + const cudaq::qkernel &)> &statePrep, + const std::vector &thetas, + const std::vector &coefficients, + const std::vector &trotterOpList) { + cudaq::qvector q(numQubits); + statePrep(q); + for (std::size_t i = 0; i < thetas.size(); i++) + for (std::size_t j = 0; j < trotterOpList.size(); j++) { + auto &term = trotterOpList[j]; + exp_pauli(thetas[i] * coefficients[j], q, term); + } +} +} // namespace cudaq diff --git a/libs/solvers/lib/adapt/device/adapt.h b/libs/solvers/lib/adapt/device/adapt.h new file mode 100644 index 0000000..2363b75 --- /dev/null +++ b/libs/solvers/lib/adapt/device/adapt.h @@ -0,0 +1,42 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include "cudaq/qis/pauli_word.h" +#include "cudaq/qis/qkernel.h" +#include "cudaq/qis/qubit_qis.h" +#include "cudaq/qis/qview.h" +#include + +namespace cudaq { + +/// @brief CUDA-Q quantum kernel function for the ADAPT-VQE algorithm +/// +/// This function represents a quantum kernel that implements the core +/// quantum operations of the ADAPT-VQE (Adaptive Derivative-Assembled +/// Pseudo-Trotter Variational Quantum Eigensolver) algorithm. +/// +/// @param numQubits The number of qubits in the quantum system +/// @param statePrep A function to prepare the initial quantum state +/// @param thetas Vector of rotation angles for the variational circuit +/// @param coefficients Vector of coefficients for the Hamiltonian terms +/// @param trotterOpList Vector of Pauli words representing the Trotter +/// operators +/// +/// @note This is a CUDA-Q quantum kernel function and should be executed within +/// the CUDA-Q framework. It applies the ADAPT-VQE circuit construction based on +/// the provided parameters and operators. +/// +/// @see ADAPT-VQE algorithm for more details on the method +void adapt_kernel(std::size_t numQubits, + const cudaq::qkernel &)> &statePrep, + const std::vector &thetas, + const std::vector &coefficients, + const std::vector &trotterOpList); + +} // namespace cudaq diff --git a/libs/solvers/lib/adapt/device/prepare_state.cpp b/libs/solvers/lib/adapt/device/prepare_state.cpp new file mode 100644 index 0000000..2ac494b --- /dev/null +++ b/libs/solvers/lib/adapt/device/prepare_state.cpp @@ -0,0 +1,12 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
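Usage note (not part of the patch): adapt_kernel above builds the ADAPT ansatz entirely from exp_pauli rotations applied after the user's state-prep kernel. A minimal, self-contained sketch of that primitive with illustrative values:

#include "cudaq.h"

// One Pauli-word exponential, the same primitive adapt_kernel loops over
// for every (theta, coefficient, pauli_word) triple.
__qpu__ void tiny_trotter_step(double theta) {
  cudaq::qvector q(2);
  x(q[0]);                    // simple reference state
  exp_pauli(theta, q, "XY");  // rotation generated by the Pauli word XY
}

int main() {
  auto counts = cudaq::sample(tiny_trotter_step, 0.25);
  counts.dump();
  return 0;
}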
* + ******************************************************************************/ +#include "cudaq.h" + +namespace cudaq { +__qpu__ void prepare_state(cudaq::state &state) { cudaq::qvector q{state}; } +} // namespace cudaq \ No newline at end of file diff --git a/libs/solvers/lib/adapt/device/prepare_state.h b/libs/solvers/lib/adapt/device/prepare_state.h new file mode 100644 index 0000000..d4dc65d --- /dev/null +++ b/libs/solvers/lib/adapt/device/prepare_state.h @@ -0,0 +1,19 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include "cudaq/qis/state.h" + +namespace cudaq { + +/// @brief prepare_state is an entry-point kernel that +/// simply prepares a know state provided as input. This is +/// useful for sampling or observation on a known state vector. +/// @param state +void prepare_state(cudaq::state &state); +} // namespace cudaq \ No newline at end of file diff --git a/libs/solvers/lib/observe_gradients/central_difference.cpp b/libs/solvers/lib/observe_gradients/central_difference.cpp new file mode 100644 index 0000000..73b612d --- /dev/null +++ b/libs/solvers/lib/observe_gradients/central_difference.cpp @@ -0,0 +1,32 @@ + +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/solvers/observe_gradients/central_difference.h" + +namespace cudaq { +std::size_t central_difference::getRequiredNumExpectationComputations( + const std::vector &x) { + return 2 * x.size(); +} + +void central_difference::calculateGradient(const std::vector &x, + std::vector &dx, + double exp_h) { + auto tmpX = x; + for (std::size_t i = 0; i < x.size(); i++) { + // increase value to x_i + dx_i + tmpX[i] += step; + auto px = expectation(tmpX); + // decrease the value to x_i - dx_i + tmpX[i] -= 2 * step; + auto mx = expectation(tmpX); + dx[i] = (px - mx) / (2. * step); + } +} +} // namespace cudaq \ No newline at end of file diff --git a/libs/solvers/lib/observe_gradients/forward_difference.cpp b/libs/solvers/lib/observe_gradients/forward_difference.cpp new file mode 100644 index 0000000..c19c6b2 --- /dev/null +++ b/libs/solvers/lib/observe_gradients/forward_difference.cpp @@ -0,0 +1,28 @@ + +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "cudaq/solvers/observe_gradients/forward_difference.h" + +namespace cudaq { +std::size_t forward_difference::getRequiredNumExpectationComputations( + const std::vector &x) { + return x.size(); +} +void forward_difference::calculateGradient(const std::vector &x, + std::vector &dx, + double exp_h) { + auto tmpX = x; + for (std::size_t i = 0; i < x.size(); i++) { + // increase value to x_i + dx_i + tmpX[i] += step; + auto px = expectation(tmpX); + dx[i] = (px - exp_h) / step; + } +} +} // namespace cudaq \ No newline at end of file diff --git a/libs/solvers/lib/observe_gradients/observe_gradient.cpp b/libs/solvers/lib/observe_gradients/observe_gradient.cpp new file mode 100644 index 0000000..1376e2f --- /dev/null +++ b/libs/solvers/lib/observe_gradients/observe_gradient.cpp @@ -0,0 +1,14 @@ + +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/solvers/observe_gradient.h" + +INSTANTIATE_REGISTRY(cudaq::observe_gradient, + std::function)> const &, + cudaq::spin_op const &) diff --git a/libs/solvers/lib/observe_gradients/parameter_shift.cpp b/libs/solvers/lib/observe_gradients/parameter_shift.cpp new file mode 100644 index 0000000..49d738f --- /dev/null +++ b/libs/solvers/lib/observe_gradients/parameter_shift.cpp @@ -0,0 +1,31 @@ + +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/solvers/observe_gradients/parameter_shift.h" + +namespace cudaq { +std::size_t parameter_shift::getRequiredNumExpectationComputations( + const std::vector &x) { + return 2 * x.size(); +} + +void parameter_shift::calculateGradient(const std::vector &x, + std::vector &dx, double exp_h) { + auto tmpX = x; + for (std::size_t i = 0; i < x.size(); i++) { + // increase value to x_i + (shiftScalar * pi) + tmpX[i] += shiftScalar * M_PI; + auto px = expectation(tmpX); + // decrease value to x_i - (shiftScalar * pi) + tmpX[i] -= 2 * shiftScalar * M_PI; + auto mx = expectation(tmpX); + dx[i] = (px - mx) / 2.; + } +} +} // namespace cudaq \ No newline at end of file diff --git a/libs/solvers/lib/operators/graph/clique.cpp b/libs/solvers/lib/operators/graph/clique.cpp new file mode 100644 index 0000000..eb16fa1 --- /dev/null +++ b/libs/solvers/lib/operators/graph/clique.cpp @@ -0,0 +1,51 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
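Usage note (not part of the patch): a hedged sketch of driving these gradient strategies through the observe_gradient registry. The "parameter_shift" registry name is the ADAPT default used above and the header path matches this patch; the half-pi default shift is an assumption about the parameter_shift header, which is not shown here.

#include "cudaq.h"
#include "cudaq/solvers/observe_gradient.h"
#include <cstdio>
#include <vector>

__qpu__ void rot(std::vector<double> theta) {
  cudaq::qvector q(1);
  ry(theta[0], q[0]);
}

int main() {
  cudaq::spin_op h = cudaq::spin::z(0);
  // Functor matching the registry signature: a parameterized ansatz plus
  // the operator whose expectation is differentiated.
  auto grad = cudaq::observe_gradient::get(
      "parameter_shift", [](std::vector<double> x) { rot(x); }, h);
  std::vector<double> x{0.5}, dx(1);
  double exp_h = cudaq::observe(rot, h, x).expectation();
  grad->compute(x, dx, exp_h, /*shots=*/-1);
  // <Z> = cos(theta) for this ansatz, so the analytic derivative at 0.5
  // is -sin(0.5).
  std::printf("d<Z>/dtheta at 0.5 = %.6f\n", dx[0]);
  return 0;
}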
* + ******************************************************************************/ + +#include "cudaq/solvers/operators/graph/clique.h" + +namespace cudaq::solvers { + +cudaq::spin_op get_clique_hamiltonian(const cudaqx::graph &graph, + double penalty) { + // Get all nodes + auto nodes = graph.get_nodes(); + if (nodes.empty()) + return cudaq::spin_op(); + + // Initialize empty spin operator + cudaq::spin_op hamiltonian(nodes.size()); + + // First term: Sum over all nodes + for (const auto &node : nodes) { + // Get node degree for weight calculation + double weight = graph.get_node_weight(node); + + // Add 0.5 * weight * (Z_i - I) + hamiltonian += 0.5 * weight * + (cudaq::spin::z(node) - cudaq::spin::i(nodes.size() - 1)); + } + + // Second term: Sum over non-edges + // Get disconnected vertex pairs (non-edges) + auto non_edges = graph.get_disconnected_vertices(); + + // Add penalty terms for non-edges + for (const auto &non_edge : non_edges) { + int u = non_edge.first; + int v = non_edge.second; + + // Add penalty/4 * (Z_u Z_v - Z_u - Z_v + I) + hamiltonian += penalty / 4.0 * + (cudaq::spin::z(u) * cudaq::spin::z(v) - cudaq::spin::z(u) - + cudaq::spin::z(v) + cudaq::spin::i(nodes.size() - 1)); + } + + return hamiltonian - cudaq::spin_op(nodes.size() - 1); +} + +} // namespace cudaq::solvers \ No newline at end of file diff --git a/libs/solvers/lib/operators/graph/max_cut.cpp b/libs/solvers/lib/operators/graph/max_cut.cpp new file mode 100644 index 0000000..c464565 --- /dev/null +++ b/libs/solvers/lib/operators/graph/max_cut.cpp @@ -0,0 +1,45 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/solvers/operators/graph/max_cut.h" + +namespace cudaq::solvers { + +cudaq::spin_op get_maxcut_hamiltonian(const cudaqx::graph &graph) { + // Get all nodes to iterate through edges + auto nodes = graph.get_nodes(); + if (nodes.empty()) + return cudaq::spin_op(); + + // Initialize empty spin operator + cudaq::spin_op hamiltonian(nodes.size()); + + // Iterate through all nodes + for (const auto &u : nodes) { + // Get neighbors of current node + auto neighbors = graph.get_neighbors(u); + + // For each neighbor v where v > u to avoid counting edges twice + for (const auto &v : neighbors) { + if (v > u) { + // Get the weight for this edge + double weight = graph.get_edge_weight(u, v); + + // For each weighted edge (u,v), add w/2(Z_u Z_v - I) to the Hamiltonian + // This matches the mathematical form: H = Σ w_ij/2(Z_i Z_j - I) + hamiltonian += weight * 0.5 * + (cudaq::spin::z(u) * cudaq::spin::z(v) - + cudaq::spin::i(nodes.size() - 1)); + } + } + } + + return hamiltonian - cudaq::spin_op(nodes.size() - 1); +} + +} // namespace cudaq::solvers \ No newline at end of file diff --git a/libs/solvers/lib/operators/molecule/drivers/library_utils.h b/libs/solvers/lib/operators/molecule/drivers/library_utils.h new file mode 100644 index 0000000..54da7bb --- /dev/null +++ b/libs/solvers/lib/operators/molecule/drivers/library_utils.h @@ -0,0 +1,74 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. 
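Usage note (not part of the patch): a sketch of building a graph and requesting the max-cut Hamiltonian above; get_clique_hamiltonian takes the same graph plus a penalty weight. The header paths follow this patch, but the add_edge call is an assumption about cudaqx::graph, which is not shown in this excerpt.

#include "cuda-qx/core/graph.h"
#include "cudaq/solvers/operators/graph/max_cut.h"

int main() {
  cudaqx::graph g;
  g.add_edge(0, 1, 1.0); // assumed API: weighted undirected edge
  g.add_edge(1, 2, 1.0);
  g.add_edge(0, 2, 1.0); // triangle graph
  auto maxcut = cudaq::solvers::get_maxcut_hamiltonian(g);
  maxcut.dump();
  return 0;
}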
* + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once +/// @file library_utils.h +/// @brief Provides functionality to retrieve the path of the CUDAQX library. + +#include + +#if defined(__APPLE__) && defined(__MACH__) +#include +#else +#include +#endif + +namespace cudaqx::__internal__ { + +/// @brief Structure to hold CUDAQX library data. +struct CUDAQXLibraryData { + std::string path; ///< The path to the CUDAQX library. +}; + +#if defined(__APPLE__) && defined(__MACH__) +/// @brief Retrieves the CUDAQX library path on macOS systems. +/// @param data Pointer to CUDAQXLibraryData structure to store the library +/// path. +inline static void getCUDAQXLibraryPath(CUDAQXLibraryData *data) { + auto nLibs = _dyld_image_count(); + for (uint32_t i = 0; i < nLibs; i++) { + auto ptr = _dyld_get_image_name(i); + std::string libName(ptr); + if (libName.find("cudaq-core") != std::string::npos) { + auto casted = static_cast(data); + casted->path = std::string(ptr); + } + } +} +#else +/// @brief Callback function for dl_iterate_phdr to find CUDAQX library path on +/// non-macOS systems. +/// @param info Pointer to dl_phdr_info structure containing shared object +/// information. +/// @param size Size of the structure. +/// @param data Pointer to user-provided data (CUDAQXLibraryData in this case). +/// @return Always returns 0 to continue iteration. +inline static int getCUDAQXLibraryPath(struct dl_phdr_info *info, size_t size, + void *data) { + std::string libraryName(info->dlpi_name); + if (libraryName.find("cudaq-solvers") != std::string::npos) { + auto casted = static_cast(data); + casted->path = std::string(info->dlpi_name); + } + return 0; +} +#endif + +/// @brief Retrieves the path of the CUDAQX library. +/// @return A string containing the path to the CUDAQX library. +inline static std::string getCUDAQXLibraryPath() { + __internal__::CUDAQXLibraryData data; +#if defined(__APPLE__) && defined(__MACH__) + getCUDAQXLibraryPath(&data); +#else + dl_iterate_phdr(__internal__::getCUDAQXLibraryPath, &data); +#endif + return data.path; +} + +} // namespace cudaqx::__internal__ diff --git a/libs/solvers/lib/operators/molecule/drivers/process.cpp b/libs/solvers/lib/operators/molecule/drivers/process.cpp new file mode 100644 index 0000000..9fa9ab7 --- /dev/null +++ b/libs/solvers/lib/operators/molecule/drivers/process.cpp @@ -0,0 +1,80 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "process.h" + +#include +#include +#include + +namespace cudaqx { + +std::pair launchProcess(const char *command) { + // Create temporary files for storing stdout and stderr + char tempStdout[] = "/tmp/stdout_XXXXXX"; + char tempStderr[] = "/tmp/stderr_XXXXXX"; + + int fdOut = mkstemp(tempStdout); + int fdErr = mkstemp(tempStderr); + + if (fdOut == -1 || fdErr == -1) { + throw std::runtime_error("Failed to create temporary files"); + } + + // Construct command to redirect both stdout and stderr to temporary files + std::string argString = std::string(command) + " 1>" + tempStdout + " 2>" + + tempStderr + " & echo $!"; + + // Launch the process + FILE *pipe = popen(argString.c_str(), "r"); + if (!pipe) { + close(fdOut); + close(fdErr); + unlink(tempStdout); + unlink(tempStderr); + throw std::runtime_error("Error launching process: " + + std::string(command)); + } + + // Read PID + char buffer[128]; + std::string pidStr = ""; + while (!feof(pipe)) { + if (fgets(buffer, 128, pipe) != nullptr) + pidStr += buffer; + } + pclose(pipe); + + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + // Read any error output + std::string errorOutput; + FILE *errorFile = fopen(tempStderr, "r"); + if (errorFile) { + while (fgets(buffer, 128, errorFile) != nullptr) { + errorOutput += buffer; + } + fclose(errorFile); + } + + // Clean up temporary files + close(fdOut); + close(fdErr); + unlink(tempStdout); + unlink(tempStderr); + + // Convert PID string to integer + pid_t pid = 0; + try { + pid = std::stoi(pidStr); + } catch (const std::exception &e) { + throw std::runtime_error("Failed to get process ID: " + errorOutput); + } + + return std::make_pair(pid, errorOutput); +} +} // namespace cudaqx diff --git a/libs/solvers/lib/operators/molecule/drivers/process.h b/libs/solvers/lib/operators/molecule/drivers/process.h new file mode 100644 index 0000000..e1d254e --- /dev/null +++ b/libs/solvers/lib/operators/molecule/drivers/process.h @@ -0,0 +1,33 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace cudaqx { +/// @brief Launches a new process and returns its process ID. +/// +/// This function creates a new process with the specified name. +/// It then returns the process ID of the newly created process. +/// +/// @param processName The name of the process to launch. This should be a +/// null-terminated string containing the path to the +/// executable or a command name that can be found in the +/// system's PATH. +/// +/// @return The process ID (pid_t) of the newly created process on success, +/// or -1 on failure. Also returns any potential error message from the +/// launched process. 
+std::pair launchProcess(const char *processName); +} // namespace cudaqx \ No newline at end of file diff --git a/libs/solvers/lib/operators/molecule/drivers/pyscf_driver.cpp b/libs/solvers/lib/operators/molecule/drivers/pyscf_driver.cpp new file mode 100644 index 0000000..45cc022 --- /dev/null +++ b/libs/solvers/lib/operators/molecule/drivers/pyscf_driver.cpp @@ -0,0 +1,192 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "nlohmann/json.hpp" + +#include "cuda-qx/core/tensor.h" +#include "cudaq/solvers/operators/molecule/fermion_compiler.h" +#include "cudaq/solvers/operators/molecule/molecule_package_driver.h" +#include "library_utils.h" +#include "process.h" + +#include "common/Logger.h" +#include "common/RestClient.h" + +#include +#include +#include + +using namespace cudaqx; + +namespace cudaq::solvers { + +// Create a tear down service +class PySCFTearDown : public tear_down { +private: + pid_t pid; + +public: + PySCFTearDown(pid_t p) : pid(p) {} + void runTearDown() const { + // shut down the web server + [[maybe_unused]] auto success = ::kill(pid, SIGTERM); + using namespace std::chrono_literals; + std::this_thread::sleep_for(100ms); + } +}; + +class RESTPySCFDriver : public MoleculePackageDriver { + +public: + CUDAQ_EXTENSION_CREATOR_FUNCTION(MoleculePackageDriver, RESTPySCFDriver) + + bool is_available() const override { + cudaq::RestClient client; + std::map headers; + try { + auto res = client.get("localhost:8000/", "status", headers); + if (res.contains("status") && + res["status"].get() == "available") + return true; + } catch (std::exception &e) { + return false; + } + return true; + } + + std::unique_ptr make_available() const override { + + // Start up the web service, if failed, return nullptr + std::filesystem::path libPath{cudaqx::__internal__::getCUDAQXLibraryPath()}; + auto cudaqLibPath = libPath.parent_path(); + auto cudaqPySCFTool = cudaqLibPath.parent_path() / "bin" / "cudaq-pyscf"; + auto argString = cudaqPySCFTool.string() + " --server-mode"; + int a0, a1; + auto [ret, msg] = cudaqx::launchProcess(argString.c_str()); + if (ret == -1) + return nullptr; + + if (!msg.empty()) { + cudaq::info("pyscf error: {}", libPath.parent_path().string()); + cudaq::info("pyscf error: {}", msg); + cudaq::info("advice - check `lsof -n -i :8000` for dead pyscf process. " + "kill it."); + throw std::runtime_error( + "error encountered when launching pyscf molecule generation server."); + } + + cudaq::RestClient client; + using namespace std::chrono_literals; + std::size_t ticker = 0; + std::map headers{ + {"Content-Type", "application/json"}}; + while (true) { + std::this_thread::sleep_for(100ms); + + nlohmann::json metadata; + try { + metadata = client.get("localhost:8000/", "status", headers); + if (metadata.count("status")) + break; + } catch (...) 
{ + continue; + } + + if (ticker > 5000) + return nullptr; + + ticker += 100; + } + + return std::make_unique(ret); + } + + /// @brief Create the molecular hamiltonian + molecular_hamiltonian createMolecule(const molecular_geometry &geometry, + const std::string &basis, int spin, + int charge, + molecule_options options) override { + std::string xyzFileStr = ""; + // Convert the geometry to an XYZ string + for (auto &atom : geometry) + xyzFileStr += + fmt::format("{} {:f} {:f} {:f}; ", atom.name, atom.coordinates[0], + atom.coordinates[1], atom.coordinates[2]); + + cudaq::RestClient client; + nlohmann::json payload = {{"xyz", xyzFileStr}, + {"basis", basis}, + {"spin", spin}, + {"charge", charge}, + {"type", "gas_phase"}, + {"symmetry", false}, + {"cycles", options.cycles}, + {"initguess", options.initguess}, + {"UR", options.UR}, + {"MP2", options.MP2}, + {"natorb", options.natorb}, + {"casci", options.casci}, + {"ccsd", options.ccsd}, + {"casscf", options.casscf}, + {"integrals_natorb", options.integrals_natorb}, + {"integrals_casscf", options.integrals_casscf}, + {"verbose", options.verbose}}; + if (options.nele_cas.has_value()) + payload["nele_cas"] = options.nele_cas.value(); + if (options.norb_cas.has_value()) + payload["norb_cas"] = options.norb_cas.value(); + if (options.potfile.has_value()) + payload["potfile"] = options.potfile.value(); + + std::map headers{ + {"Content-Type", "application/json"}}; + auto metadata = client.post("localhost:8000/", "create_molecule", payload, + headers, true); + + // Get the energy, num orbitals, and num qubits + std::unordered_map energies; + for (auto &[energyName, E] : metadata["energies"].items()) + energies.insert({energyName, E}); + + double energy = 0.0; + if (energies.contains("nuclear_energy")) + energy = energies["nuclear_energy"]; + else if (energies.contains("core_energy")) + energy = energies["core_energy"]; + + auto numOrb = metadata["num_orbitals"].get(); + auto numQubits = 2 * numOrb; + auto num_electrons = metadata["num_electrons"].get(); + + // Get the operators + auto hpqElements = metadata["hpq"]["data"]; + auto hpqrsElements = metadata["hpqrs"]["data"]; + std::vector> hpqValues, hpqrsValues; + for (auto &element : hpqElements) + hpqValues.push_back({element[0].get(), element[1].get()}); + for (auto &element : hpqrsElements) + hpqrsValues.push_back( + {element[0].get(), element[1].get()}); + + tensor hpq, hpqrs; + hpq.copy(hpqValues.data(), {numQubits, numQubits}); + hpqrs.copy(hpqrsValues.data(), + {numQubits, numQubits, numQubits, numQubits}); + + // Transform to a spin operator + auto transform = fermion_compiler::get(options.fermion_to_spin); + auto spinHamiltonian = transform->generate(energy, hpq, hpqrs); + + // Return the molecular hamiltonian + return molecular_hamiltonian{spinHamiltonian, hpq, hpqrs, + num_electrons, numOrb, energies}; + } +}; +CUDAQ_REGISTER_TYPE(RESTPySCFDriver) + +} // namespace cudaq::solvers diff --git a/libs/solvers/lib/operators/molecule/fermion_compilers/fermion_compiler.cpp b/libs/solvers/lib/operators/molecule/fermion_compilers/fermion_compiler.cpp new file mode 100644 index 0000000..e4f03e2 --- /dev/null +++ b/libs/solvers/lib/operators/molecule/fermion_compilers/fermion_compiler.cpp @@ -0,0 +1,10 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. 
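Usage note (not part of the patch): a heavily hedged sketch of requesting a molecular Hamiltonian through the PySCF REST driver above via the create_molecule entry point (defined later in this patch in molecule.cpp). It assumes the cudaq-pyscf tool from this patch is installed so the server can start, an aggregate molecule.h header, the cudaq::solvers::atom qualification, a usable default-constructed molecule_options, and a hamiltonian field name on the returned struct; none of those are confirmed by this excerpt.

#include "cudaq/solvers/operators/molecule.h" // assumed aggregate header
#include <vector>

int main() {
  // Same atom{name, {x, y, z}} form that from_xyz builds when parsing a file.
  std::vector<cudaq::solvers::atom> atoms{{"H", {0.0, 0.0, 0.0}},
                                          {"H", {0.0, 0.0, 0.7474}}};
  cudaq::solvers::molecular_geometry geometry(atoms);
  auto molecule = cudaq::solvers::create_molecule(
      geometry, "sto-3g", /*spin=*/0, /*charge=*/0,
      cudaq::solvers::molecule_options());
  molecule.hamiltonian.dump(); // assumed field name for the spin_op member
  return 0;
}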
* + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include "cudaq/solvers/operators/molecule/fermion_compiler.h" + +INSTANTIATE_REGISTRY_NO_ARGS(cudaq::solvers::fermion_compiler) diff --git a/libs/solvers/lib/operators/molecule/fermion_compilers/jordan_wigner.cpp b/libs/solvers/lib/operators/molecule/fermion_compilers/jordan_wigner.cpp new file mode 100644 index 0000000..f3c1176 --- /dev/null +++ b/libs/solvers/lib/operators/molecule/fermion_compilers/jordan_wigner.cpp @@ -0,0 +1,443 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include "cudaq/solvers/operators/molecule/fermion_compilers/jordan_wigner.h" + +#include +#include +#include +#include + +using namespace cudaqx; + +namespace cudaq::solvers { + +cudaq::spin_op one_body(std::size_t p, std::size_t q, + std::complex coeff) { + if (p == q) + return 0.5 * coeff * cudaq::spin::i(p) - 0.5 * coeff * cudaq::spin::z(p); + + if (p > q) { + std::swap(p, q); + coeff = std::conj(coeff); + } + + std::vector zIndices; + for (std::size_t i = p + 1; i < q; i++) + zIndices.push_back(i); + + cudaq::spin_op parity = 1.; + for (auto i : zIndices) + parity *= cudaq::spin::z(i); + + auto spin_hamiltonian = + 0.5 * coeff.real() * cudaq::spin::x(p) * parity * cudaq::spin::x(q); + spin_hamiltonian += + 0.5 * coeff.real() * cudaq::spin::y(p) * parity * cudaq::spin::y(q); + spin_hamiltonian += + 0.5 * coeff.imag() * cudaq::spin::y(p) * parity * cudaq::spin::x(q); + spin_hamiltonian -= + 0.5 * coeff.imag() * cudaq::spin::x(p) * parity * cudaq::spin::y(q); + + return spin_hamiltonian; +} + +cudaq::spin_op two_body(std::size_t p, std::size_t q, std::size_t r, + std::size_t s, std::complex coef) { + std::set tmp{p, q, r, s}; + if (tmp.size() == 2) { + auto spin_hamiltonian = + -0.25 * coef * cudaq::spin::i(p) * cudaq::spin::i(q); + if (p == r) { + spin_hamiltonian += 0.25 * coef * cudaq::spin::i(p) * cudaq::spin::z(q); + spin_hamiltonian += 0.25 * coef * cudaq::spin::z(p) * cudaq::spin::i(q); + spin_hamiltonian -= 0.25 * coef * cudaq::spin::z(p) * cudaq::spin::z(q); + } else if (q == r) { + spin_hamiltonian *= -1.; + spin_hamiltonian -= 0.25 * coef * cudaq::spin::i(p) * cudaq::spin::z(q); + spin_hamiltonian -= 0.25 * coef * cudaq::spin::z(p) * cudaq::spin::i(q); + spin_hamiltonian += 0.25 * coef * cudaq::spin::z(p) * cudaq::spin::z(q); + } + return spin_hamiltonian; + } + + if (tmp.size() == 3) { + std::size_t a, b, c, d; + if (q == r) { + if (p > r) { + // a,b=s,p + a = s; + b = p; + coef = std::conj(coef); + } else { + // a,b=p,s + a = p; + b = s; + } + c = q; + } else if (q == s) { + if (p > r) { + // a,b=r,p + a = r; + b = p; + coef = -1.0 * std::conj(coef); + } else { + // a,b=p,r + a = p; + b = r; + coef *= -1.0; + } + c = q; + } else if (p == r) { + if (q > s) { + // a,b=s,q + a = s; + b = q; + coef = -1.0 * std::conj(coef); + } else { + // a,b=q,s + a = q; + b = s; + coef = -1.0 * coef; + } + c = p; + } else if (p == s) { + if (q > r) { + // a,b=r,q + a = r; + b = q; + coef = std::conj(coef); + 
} else { + // a,b=q,r + a = q; + b = r; + } + c = p; + } + + std::vector zIndices; + for (std::size_t i = a + 1; i < b; i++) + zIndices.push_back(i); + + cudaq::spin_op parity = 1.; + for (auto i : zIndices) + parity *= cudaq::spin::z(i); + + auto spin_hamiltonian = 0.25 * coef.real() * cudaq::spin::x(a) * parity * + cudaq::spin::x(b) * cudaq::spin::i(c); + spin_hamiltonian += 0.25 * coef.real() * cudaq::spin::y(a) * parity * + cudaq::spin::y(b) * cudaq::spin::i(c); + spin_hamiltonian += 0.25 * coef.imag() * cudaq::spin::y(a) * parity * + cudaq::spin::x(b) * cudaq::spin::i(c); + spin_hamiltonian -= 0.25 * coef.imag() * cudaq::spin::x(a) * parity * + cudaq::spin::y(b) * cudaq::spin::i(c); + + spin_hamiltonian -= 0.25 * coef.real() * cudaq::spin::x(a) * parity * + cudaq::spin::x(b) * cudaq::spin::z(c); + spin_hamiltonian -= 0.25 * coef.real() * cudaq::spin::y(a) * parity * + cudaq::spin::y(b) * cudaq::spin::z(c); + spin_hamiltonian -= 0.25 * coef.imag() * cudaq::spin::y(a) * parity * + cudaq::spin::x(b) * cudaq::spin::z(c); + spin_hamiltonian += 0.25 * coef.imag() * cudaq::spin::x(a) * parity * + cudaq::spin::y(b) * cudaq::spin::z(c); + return spin_hamiltonian; + } + + if ((p > q) ^ (r > s)) + coef *= -1.0; + + if (p < q && q < r && r < s) { + // a,b,c,d=p,q,r,s + auto a = p; + auto b = q; + auto c = r; + auto d = s; + + std::vector zIndices; + for (std::size_t i = a + 1; i < b; i++) + zIndices.push_back(i); + + cudaq::spin_op parityA = 1.; + for (auto i : zIndices) + parityA *= cudaq::spin::z(i); + + zIndices.clear(); + for (std::size_t i = c + 1; i < d; i++) + zIndices.push_back(i); + + cudaq::spin_op parityB = 1.; + for (auto i : zIndices) + parityB *= cudaq::spin::z(i); + + auto spin_hamiltonian = -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian -= -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian -= -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::y(d); + + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * 
+ cudaq::spin::x(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::x(d); + return spin_hamiltonian; + } + + if (p < r && r < q && q < s) { + // a,b,c,d=p,r,q,s + auto a = p; + auto b = r; + auto c = q; + auto d = s; + + std::vector zIndices; + for (std::size_t i = a + 1; i < b; i++) + zIndices.push_back(i); + + cudaq::spin_op parityA = 1.; + for (auto i : zIndices) + parityA *= cudaq::spin::z(i); + + zIndices.clear(); + for (std::size_t i = c + 1; i < d; i++) + zIndices.push_back(i); + + cudaq::spin_op parityB = 1.; + for (auto i : zIndices) + parityB *= cudaq::spin::z(i); + + auto spin_hamiltonian = -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityA * + cudaq::spin::x(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityA * + cudaq::spin::y(d); + spin_hamiltonian -= -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityA * + cudaq::spin::y(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityA * + cudaq::spin::x(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityA * + cudaq::spin::y(d); + spin_hamiltonian -= -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityA * + cudaq::spin::x(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityA * + cudaq::spin::x(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityA * + cudaq::spin::y(d); + + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityA * + cudaq::spin::y(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityA * + cudaq::spin::x(d); + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityA * + cudaq::spin::x(d); + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityA * + cudaq::spin::y(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityA * + cudaq::spin::x(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityA * + cudaq::spin::y(d); + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityA * + cudaq::spin::y(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityA * + cudaq::spin::x(d); + return spin_hamiltonian; + } + + if (p < r && r < s && s < q) { + // a,b,c,d=p,r,s,q + auto a = p; + auto b = r; + auto c = s; + auto d = q; + + std::vector 
zIndices; + for (std::size_t i = a + 1; i < b; i++) + zIndices.push_back(i); + + cudaq::spin_op parityA = 1.; + for (auto i : zIndices) + parityA *= cudaq::spin::z(i); + + zIndices.clear(); + for (std::size_t i = c + 1; i < d; i++) + zIndices.push_back(i); + + cudaq::spin_op parityB = 1.; + for (auto i : zIndices) + parityB *= cudaq::spin::z(i); + + auto spin_hamiltonian = -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian -= -0.125 * coef.real() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian -= -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian += -0.125 * coef.real() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::y(d); + + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::x(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::x(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::x(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian -= 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::x(c) * parityB * + cudaq::spin::y(d); + spin_hamiltonian += 0.125 * coef.imag() * cudaq::spin::y(a) * parityA * + cudaq::spin::y(b) * cudaq::spin::y(c) * parityB * + cudaq::spin::x(d); + + return spin_hamiltonian; + } + + throw std::runtime_error( + "Invalid condition in two_body jordan wigner function."); +} + +cudaq::spin_op jordan_wigner::generate(const double constant, + const tensor<> &hpq, + const tensor<> &hpqrs) { + assert(hpq.rank() == 2 && "hpq must be a rank-2 tensor"); + assert(hpqrs.rank() == 4 && "hpqrs must be a rank-4 tensor"); + auto spin_hamiltonian = constant * cudaq::spin_op(); + std::size_t nqubit = hpq.shape()[0]; + double tolerance = 1e-15; + for (auto p : cudaq::range(nqubit)) { + auto coef = hpq.at({p, p}); + if (std::fabs(coef) > tolerance) + spin_hamiltonian += one_body(p, p, coef); + } + + std::vector> next; + for (auto &&combo : iter::combinations(cudaq::range(nqubit), 2)) { + auto p = combo[0]; + auto q = combo[1]; + next.push_back({p, q}); + auto coef = 0.5 * (hpq.at({p, q}) + 
std::conj(hpq.at({q, p}))); + if (std::fabs(coef) > tolerance) + spin_hamiltonian += one_body(p, q, coef); + + coef = hpqrs.at({p, q, p, q}) + hpqrs.at({q, p, q, p}); + if (std::fabs(coef) > tolerance) + spin_hamiltonian += two_body(p, q, p, q, coef); + + coef = hpqrs.at({p, q, q, p}) + hpqrs.at({q, p, p, q}); + if (std::fabs(coef) > tolerance) + spin_hamiltonian += two_body(p, q, q, p, coef); + } + + for (auto combo : iter::combinations(next, 2)) { + auto p = combo[0][0]; + auto q = combo[0][1]; + auto r = combo[1][0]; + auto s = combo[1][1]; + auto coef = + 0.5 * (hpqrs.at({p, q, r, s}) + std::conj(hpqrs.at({s, r, q, p})) - + hpqrs.at({q, p, r, s}) - std::conj(hpqrs.at({s, r, p, q})) - + hpqrs.at({p, q, s, r}) - std::conj(hpqrs.at({r, s, q, p})) + + hpqrs.at({q, p, s, r}) + std::conj(hpqrs.at({r, s, p, q}))); + + if (std::fabs(coef) > tolerance) + spin_hamiltonian += two_body(p, q, r, s, coef); + } + + // Remove terms with 0.0 coefficient + std::vector nonZeros; + for (auto term : spin_hamiltonian) { + auto coeff = term.get_coefficient(); + if (std::fabs(coeff) > tolerance) + nonZeros.push_back(term); + } + auto op = nonZeros[0]; + for (std::size_t i = 1; i < nonZeros.size(); i++) + op += nonZeros[i]; + + return op; +} +} // namespace cudaq::solvers \ No newline at end of file diff --git a/libs/solvers/lib/operators/molecule/molecule.cpp b/libs/solvers/lib/operators/molecule/molecule.cpp new file mode 100644 index 0000000..3ef5fd8 --- /dev/null +++ b/libs/solvers/lib/operators/molecule/molecule.cpp @@ -0,0 +1,144 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
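Usage note (not part of the patch): a small sketch exercising the Jordan-Wigner transform above on hand-built integral tensors. The rank-2/rank-4 shapes mirror the asserts and the copy() call mirrors the PySCF driver; the tensor values and the default-constructible jordan_wigner are illustrative assumptions.

#include "cuda-qx/core/tensor.h"
#include "cudaq/solvers/operators/molecule/fermion_compilers/jordan_wigner.h"
#include <complex>
#include <vector>

int main() {
  std::size_t n = 2; // two spin orbitals
  std::vector<std::complex<double>> hpqValues(n * n, 0.0),
      hpqrsValues(n * n * n * n, 0.0);
  hpqValues[0 * n + 0] = -1.0; // h_00
  hpqValues[1 * n + 1] = -1.0; // h_11

  cudaqx::tensor<> hpq, hpqrs;
  hpq.copy(hpqValues.data(), {n, n});
  hpqrs.copy(hpqrsValues.data(), {n, n, n, n});

  cudaq::solvers::jordan_wigner jw; // assumed default-constructible
  auto h = jw.generate(/*constant=*/0.5, hpq, hpqrs);
  // For these inputs the code above should collect to
  // -0.5*I + 0.5*Z0 + 0.5*Z1.
  h.dump();
  return 0;
}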
* + ******************************************************************************/ +#include "cudaq/solvers/operators/molecule/molecule_package_driver.h" + +#include +#include + +INSTANTIATE_REGISTRY_NO_ARGS(cudaq::solvers::MoleculePackageDriver) + +namespace cudaq::solvers { + +std::string molecular_geometry::name() const { + std::string ret = ""; + for (auto &a : atoms) + ret += a.name; + + return ret; +} + +molecular_geometry molecular_geometry::from_xyz(const std::string &xyzFile) { + + std::ifstream stream(xyzFile); + std::string contents((std::istreambuf_iterator(stream)), + std::istreambuf_iterator()); + + if (contents.empty()) + throw std::runtime_error("could not extract file contents for " + xyzFile); + auto lines = cudaq::split(contents, '\n'); + char *endptr; + int result = std::strtol(lines[0].c_str(), &endptr, 10); + + std::vector atoms; + for (std::size_t i = 0; auto &line : lines) { + if (i++ == 0) + continue; + + cudaq::trim(line); + if (line.empty()) + continue; + + bool seenFirstSpace = false; + std::vector components(/*atom+3coord*/ 4); + for (std::size_t k = 0, componentCounter = 0; k < line.length(); k++) { + if (line[k] == ' ') { + if (!seenFirstSpace) { + seenFirstSpace = true; + components[componentCounter] += " "; + componentCounter++; + } + continue; + } + + seenFirstSpace = false; + components[componentCounter] += line[k]; + } + + std::vector coords; + for (std::size_t ii = 1; ii < 4; ii++) + coords.push_back(std::stod(components[ii])); + atoms.push_back(atom{components[0], {coords[0], coords[1], coords[2]}}); + } + + return molecular_geometry(atoms); +} + +molecular_hamiltonian create_molecule(const molecular_geometry &geometry, + const std::string &basis, int spin, + int charge, molecule_options options) { + if (!MoleculePackageDriver::is_registered(options.driver)) + throw std::runtime_error("invalid molecule package driver (" + + options.driver + ")"); + auto driver = MoleculePackageDriver::get(options.driver); + if (!driver->is_available()) { + auto tearDownRoutine = driver->make_available(); + if (!tearDownRoutine) + throw std::runtime_error("invalid molecule generator."); + + cudaqx::scheduleTearDown(std::move(tearDownRoutine)); + } + + return driver->createMolecule(geometry, basis, spin, charge, options); +} + +void molecule_options::dump() { + std::cout << "\tmolecule_options dump() [\n"; + std::cout << "\tfermion_to_spin: " << fermion_to_spin << "\n"; + std::cout << "\ttype: " << type << "\n"; + std::cout << "\tsymmetry: " << symmetry << "\n"; + std::cout << "\tcycles: " << cycles << "\n"; + std::cout << "\tinitguess: " << initguess << "\n"; + std::cout << "\tnele_cas: " << (nele_cas.has_value() ? nele_cas.value() : -1) + << "\n"; + std::cout << "\tnorb_cas: " << (norb_cas.has_value() ? 
norb_cas.value() : -1) + << "\n"; + std::cout << "\tUR: " << std::boolalpha << UR << "\n"; + std::cout << "\tMP2: " << std::boolalpha << MP2 << "\n"; + std::cout << "\tnatorb: " << std::boolalpha << natorb << "\n"; + std::cout << "\tcasci: " << std::boolalpha << casci << "\n"; + std::cout << "\tccsd: " << std::boolalpha << ccsd << "\n"; + std::cout << "\tcasscf: " << std::boolalpha << casscf << "\n"; + std::cout << "\tintegrals_natorb: " << std::boolalpha << integrals_natorb + << "\n"; + std::cout << "\tintegrals_casscf: " << std::boolalpha << integrals_casscf + << "\n"; +} + +cudaq::spin_op one_particle_op(std::size_t numQubits, std::size_t p, + std::size_t q, + const std::string fermionCompiler) { + using namespace cudaq; + + if (p == q) + return 0.5 * spin::i(numQubits - 1) * spin::i(p) - 0.5 * spin::z(p); + + std::complex coeff(0., 1.); + double m = -.25; + if (p > q) { + std::swap(p, q); + coeff = std::conj(coeff); + } + + std::vector z_indices; + for (auto i : cudaq::range((long)p + 1, (long)q)) + z_indices.push_back(i); + + auto parity = spin::z(z_indices.front()); + for (std::size_t i = 1; i < z_indices.size(); i++) { + parity *= spin::z(i); + } + + auto ret = m * spin::x(p) * parity * spin::x(q); + + ret += m * spin::y(p) * parity * spin::y(q); + ret -= coeff * m * spin::y(p) * parity * spin::x(q); + ret += coeff * m * spin::x(p) * parity * spin::y(q); + return spin::i(numQubits - 1) * ret; +} + +} // namespace cudaq::solvers \ No newline at end of file diff --git a/libs/solvers/lib/operators/operator_pools/operator_pool.cpp b/libs/solvers/lib/operators/operator_pools/operator_pool.cpp new file mode 100644 index 0000000..f0b203d --- /dev/null +++ b/libs/solvers/lib/operators/operator_pools/operator_pool.cpp @@ -0,0 +1,11 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/solvers/operators/operator_pool.h" + +INSTANTIATE_REGISTRY_NO_ARGS(cudaq::solvers::operator_pool) diff --git a/libs/solvers/lib/operators/operator_pools/qaoa_operator_pool.cpp b/libs/solvers/lib/operators/operator_pools/qaoa_operator_pool.cpp new file mode 100644 index 0000000..40b28a6 --- /dev/null +++ b/libs/solvers/lib/operators/operator_pools/qaoa_operator_pool.cpp @@ -0,0 +1,94 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
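Usage note (not part of the patch): the molecular_geometry::from_xyz parser above expects an atom count on the first line, an empty comment line, and then one single-space-separated "Symbol x y z" record per line. A small round-trip sketch; the molecule.h-style header is an assumption.

#include "cudaq/solvers/operators/molecule.h" // assumed header for molecular_geometry
#include <cstdio>
#include <fstream>

int main() {
  // Atom count, blank comment line, then "Symbol x y z" records.
  std::ofstream xyz("h2.xyz");
  xyz << "2\n\nH 0.0 0.0 0.0\nH 0.0 0.0 0.7474\n";
  xyz.close();

  auto geometry = cudaq::solvers::molecular_geometry::from_xyz("h2.xyz");
  // name() concatenates the parsed atom symbols.
  std::printf("parsed geometry: %s\n", geometry.name().c_str());
  return 0;
}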
* + ******************************************************************************/ + +#include "cudaq/solvers/operators/operator_pools/qaoa_operator_pool.h" + +namespace cudaq::solvers { + +std::vector +qaoa_pool::generate(const heterogeneous_map &config) const { + if (!config.contains({"num-qubits", "num_qubits", "n-qubits", "n_qubits"})) + throw std::runtime_error( + "must provide num-qubits when constructing the qaoa operator pool."); + auto qubits_num = config.get( + {"num-qubits", "n-qubits", "num_qubits", "n_qubits"}); + if (qubits_num == 0) + return {}; + + std::vector op; + + // Single qubit X terms + for (std::size_t i = 0; i < qubits_num; ++i) { + op.push_back(cudaq::spin::x(i)); + } + + // Single qubit Y terms + for (std::size_t i = 0; i < qubits_num; ++i) { + op.push_back(cudaq::spin::y(i)); + } + + // XX terms + for (std::size_t i = 0; i < qubits_num - 1; ++i) { + for (std::size_t j = i + 1; j < qubits_num; ++j) { + op.push_back(cudaq::spin::x(i) * cudaq::spin::x(j)); + } + } + + // YY terms + for (std::size_t i = 0; i < qubits_num - 1; ++i) { + for (std::size_t j = i + 1; j < qubits_num; ++j) { + op.push_back(cudaq::spin::y(i) * cudaq::spin::y(j)); + } + } + + // YZ terms + for (std::size_t i = 0; i < qubits_num - 1; ++i) { + for (std::size_t j = i + 1; j < qubits_num; ++j) { + op.push_back(cudaq::spin::y(i) * cudaq::spin::z(j)); + } + } + + // ZY terms + for (std::size_t i = 0; i < qubits_num - 1; ++i) { + for (std::size_t j = i + 1; j < qubits_num; ++j) { + op.push_back(cudaq::spin::z(i) * cudaq::spin::y(j)); + } + } + + // XY terms + for (std::size_t i = 0; i < qubits_num - 1; ++i) { + for (std::size_t j = i + 1; j < qubits_num; ++j) { + op.push_back(cudaq::spin::x(i) * cudaq::spin::y(j)); + } + } + + // YX terms + for (std::size_t i = 0; i < qubits_num - 1; ++i) { + for (std::size_t j = i + 1; j < qubits_num; ++j) { + op.push_back(cudaq::spin::y(i) * cudaq::spin::x(j)); + } + } + + // XZ terms + for (std::size_t i = 0; i < qubits_num - 1; ++i) { + for (std::size_t j = i + 1; j < qubits_num; ++j) { + op.push_back(cudaq::spin::x(i) * cudaq::spin::z(j)); + } + } + + // ZX terms + for (std::size_t i = 0; i < qubits_num - 1; ++i) { + for (std::size_t j = i + 1; j < qubits_num; ++j) { + op.push_back(cudaq::spin::z(i) * cudaq::spin::x(j)); + } + } + + return op; +} + +} // namespace cudaq::solvers \ No newline at end of file diff --git a/libs/solvers/lib/operators/operator_pools/spin_complement_gsd.cpp b/libs/solvers/lib/operators/operator_pools/spin_complement_gsd.cpp new file mode 100644 index 0000000..30541c2 --- /dev/null +++ b/libs/solvers/lib/operators/operator_pools/spin_complement_gsd.cpp @@ -0,0 +1,160 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "cudaq/solvers/operators/operator_pools/spin_complement_gsd.h" + +using namespace cudaqx; + +namespace cudaq::solvers { + +inline cudaq::spin_op adag(std::size_t numQubits, std::size_t j) { + cudaq::spin_op zprod(numQubits); + for (std::size_t k = 0; k < j; k++) + zprod *= cudaq::spin::z(k); + return 0.5 * zprod * + (cudaq::spin::x(j) - std::complex{0, 1} * cudaq::spin::y(j)); +} + +inline cudaq::spin_op a(std::size_t numQubits, std::size_t j) { + cudaq::spin_op zprod(numQubits); + for (std::size_t k = 0; k < j; k++) + zprod *= cudaq::spin::z(k); + return 0.5 * zprod * + (cudaq::spin::x(j) + std::complex{0, 1} * cudaq::spin::y(j)); +} + +std::vector +spin_complement_gsd::generate(const heterogeneous_map &config) const { + auto numOrbitals = config.get({"num-orbitals", "num_orbitals"}); + + std::vector pool; + auto numQubits = 2 * numOrbitals; + std::vector alphaOrbs, betaOrbs; + for (auto i : cudaq::range(numOrbitals)) { + alphaOrbs.push_back(2 * i); + betaOrbs.push_back(alphaOrbs.back() + 1); + } + + for (auto p : alphaOrbs) { + for (auto q : alphaOrbs) { + if (p >= q) + continue; + auto oneElectron = adag(numQubits, q) * a(numQubits, p) - + adag(numQubits, p) * a(numQubits, q); + oneElectron += adag(numQubits, q + 1) * a(numQubits, p + 1) - + adag(numQubits, p + 1) * a(numQubits, q + 1); + + std::unordered_map> + terms; + oneElectron.for_each_term([&](cudaq::spin_op &term) { + auto coeff = term.get_coefficient(); + if (std::fabs(coeff.real()) < 1e-12 && std::fabs(coeff.imag()) < 1e-12) + return; + + if (std::fabs(coeff.real()) < 1e-12) + terms.insert({term.get_raw_data().first[0], + std::complex{0., coeff.imag()}}); + }); + + if (!terms.empty()) + pool.emplace_back(terms); + } + } + + int pq = 0; + for (auto p : alphaOrbs) { + for (auto q : alphaOrbs) { + if (p > q) + continue; + + int rs = 0; + for (auto r : alphaOrbs) { + for (auto s : alphaOrbs) { + if (r > s) + continue; + if (pq < rs) + continue; + + auto twoElectron = adag(numQubits, r) * a(numQubits, p) * + adag(numQubits, s) * a(numQubits, q) - + adag(numQubits, q) * a(numQubits, s) * + adag(numQubits, p) * a(numQubits, r); + twoElectron += adag(numQubits, r + 1) * a(numQubits, p + 1) * + adag(numQubits, s + 1) * a(numQubits, q + 1) - + adag(numQubits, q + 1) * a(numQubits, s + 1) * + adag(numQubits, p + 1) * a(numQubits, r + 1); + + std::unordered_map> + terms; + twoElectron.for_each_term([&](cudaq::spin_op &term) { + auto coeff = term.get_coefficient(); + if (std::fabs(coeff.real()) < 1e-12 && + std::fabs(coeff.imag()) < 1e-12) + return; + + if (std::fabs(coeff.real()) < 1e-12) + terms.insert({term.get_raw_data().first[0], + std::complex{0., coeff.imag()}}); + }); + + if (!terms.empty()) + pool.push_back(terms); + rs++; + } + } + pq++; + } + } + + pq = 0; + for (auto p : alphaOrbs) { + for (auto q : betaOrbs) { + + int rs = 0; + for (auto r : alphaOrbs) { + for (auto s : betaOrbs) { + + if (pq < rs) + continue; + + auto twoElectron = adag(numQubits, r) * a(numQubits, p) * + adag(numQubits, s) * a(numQubits, q) - + adag(numQubits, q) * a(numQubits, s) * + adag(numQubits, p) * a(numQubits, r); + if (p > q) + continue; + + twoElectron += adag(numQubits, s - 1) * a(numQubits, q - 1) * + adag(numQubits, r + 1) * a(numQubits, p + 1) - + adag(numQubits, p + 1) * a(numQubits, r + 1) * + adag(numQubits, q - 1) * a(numQubits, s - 1); + std::unordered_map> + terms; + twoElectron.for_each_term([&](cudaq::spin_op &term) { + auto coeff = 
term.get_coefficient(); + if (std::fabs(coeff.real()) < 1e-12 && + std::fabs(coeff.imag()) < 1e-12) + return; + + if (std::fabs(coeff.real()) < 1e-12) + terms.insert({term.get_raw_data().first[0], + std::complex{0., coeff.imag()}}); + }); + if (!terms.empty()) + pool.push_back(terms); + rs++; + } + } + pq++; + } + } + + return pool; +} + +} // namespace cudaq::solvers diff --git a/libs/solvers/lib/operators/operator_pools/uccsd_operator_pool.cpp b/libs/solvers/lib/operators/operator_pools/uccsd_operator_pool.cpp new file mode 100644 index 0000000..ca7dd90 --- /dev/null +++ b/libs/solvers/lib/operators/operator_pools/uccsd_operator_pool.cpp @@ -0,0 +1,122 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/solvers/operators/operator_pools/uccsd_operator_pool.h" +#include "cudaq/solvers/stateprep/uccsd.h" + +using namespace cudaqx; + +namespace cudaq::solvers { + +using excitation_list = std::vector>; + +std::vector +uccsd::generate(const heterogeneous_map &config) const { + + auto numQubits = config.get({"num-qubits", "num_qubits"}); + auto numElectrons = config.get({"num-electrons", "num_electrons"}); + std::size_t spin = 0; + if (config.contains("spin")) + spin = config.get("spin"); + + auto [singlesAlpha, singlesBeta, doublesMixed, doublesAlpha, doublesBeta] = + cudaq::solvers::stateprep::get_uccsd_excitations(numElectrons, numQubits, + spin); + + std::vector ops; + + auto addSinglesExcitation = [numQubits](std::vector &ops, + std::size_t p, std::size_t q) { + double parity = 1.0; + + cudaq::spin_op o(numQubits); + for (std::size_t i = p + 1; i < q; i++) + o *= cudaq::spin::z(i); + + ops.emplace_back(cudaq::spin::y(p) * o * cudaq::spin::x(q)); + ops.emplace_back(cudaq::spin::x(p) * o * cudaq::spin::y(q)); + }; + + auto addDoublesExcitation = [numQubits](std::vector &ops, + std::size_t p, std::size_t q, + std::size_t r, std::size_t s) { + cudaq::spin_op parity_a(numQubits), parity_b(numQubits); + std::size_t i_occ = 0, j_occ = 0, a_virt = 0, b_virt = 0; + if (p < q && r < s) { + i_occ = p; + j_occ = q; + a_virt = r; + b_virt = s; + } + + else if (p > q && r > s) { + i_occ = q; + j_occ = p; + a_virt = s; + b_virt = r; + } else if (p < q && r > s) { + i_occ = p; + j_occ = q; + a_virt = s; + b_virt = r; + } else if + + (p > q && r < s) { + i_occ = q; + j_occ = p; + a_virt = r; + b_virt = s; + } + for (std::size_t i = i_occ + 1; i < j_occ; i++) + parity_a *= cudaq::spin::z(i); + + for (std::size_t i = a_virt + 1; i < b_virt; i++) + parity_b *= cudaq::spin::z(i); + + ops.emplace_back(cudaq::spin::x(i_occ) * parity_a * cudaq::spin::x(j_occ) * + cudaq::spin::x(a_virt) * parity_b * + cudaq::spin::y(b_virt)); + ops.emplace_back(cudaq::spin::x(i_occ) * parity_a * cudaq::spin::x(j_occ) * + cudaq::spin::y(a_virt) * parity_b * + cudaq::spin::x(b_virt)); + ops.emplace_back(cudaq::spin::x(i_occ) * parity_a * cudaq::spin::y(j_occ) * + cudaq::spin::y(a_virt) * parity_b * + cudaq::spin::y(b_virt)); + ops.emplace_back(cudaq::spin::y(i_occ) * parity_a * cudaq::spin::x(j_occ) * + cudaq::spin::y(a_virt) * parity_b * + cudaq::spin::y(b_virt)); + ops.emplace_back(cudaq::spin::x(i_occ) * parity_a * cudaq::spin::y(j_occ) * + 
cudaq::spin::x(a_virt) * parity_b * + cudaq::spin::x(b_virt)); + ops.emplace_back(cudaq::spin::y(i_occ) * parity_a * cudaq::spin::x(j_occ) * + cudaq::spin::x(a_virt) * parity_b * + cudaq::spin::x(b_virt)); + ops.emplace_back(cudaq::spin::y(i_occ) * parity_a * cudaq::spin::y(j_occ) * + cudaq::spin::x(a_virt) * parity_b * + cudaq::spin::y(b_virt)); + ops.emplace_back(cudaq::spin::y(i_occ) * parity_a * cudaq::spin::y(j_occ) * + cudaq::spin::y(a_virt) * parity_b * + cudaq::spin::x(b_virt)); + }; + + for (auto &sa : singlesAlpha) + addSinglesExcitation(ops, sa[0], sa[1]); + for (auto &sa : singlesBeta) + addSinglesExcitation(ops, sa[0], sa[1]); + + for (auto &d : doublesMixed) + addDoublesExcitation(ops, d[0], d[1], d[2], d[3]); + for (auto &d : doublesAlpha) + addDoublesExcitation(ops, d[0], d[1], d[2], d[3]); + for (auto &d : doublesBeta) + addDoublesExcitation(ops, d[0], d[1], d[2], d[3]); + + return ops; +} + +} // namespace cudaq::solvers \ No newline at end of file diff --git a/libs/solvers/lib/optimizers/CMakeLists.txt b/libs/solvers/lib/optimizers/CMakeLists.txt new file mode 100644 index 0000000..f791e94 --- /dev/null +++ b/libs/solvers/lib/optimizers/CMakeLists.txt @@ -0,0 +1,11 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +add_subdirectory(prima) +add_subdirectory(lbfgs) +target_sources(cudaq-solvers PRIVATE optimizer.cpp) diff --git a/libs/solvers/lib/optimizers/lbfgs/CMakeLists.txt b/libs/solvers/lib/optimizers/lbfgs/CMakeLists.txt new file mode 100644 index 0000000..eccfd36 --- /dev/null +++ b/libs/solvers/lib/optimizers/lbfgs/CMakeLists.txt @@ -0,0 +1,25 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +FetchContent_Declare(liblbfgs + GIT_REPOSITORY https://github.com/chokkan/liblbfgs.git + GIT_TAG master +) + +if(NOT liblbfgs_POPULATED) + FetchContent_Populate(liblbfgs) + set(LBFGS_USE_SSE ON) + set(LBFGS_lib_TARGET_NAME liblbfgs) + set(LBFGS_BUILD_SHARED_LIBS OFF) + add_subdirectory(${liblbfgs_SOURCE_DIR} ${liblbfgs_BINARY_DIR} EXCLUDE_FROM_ALL) +endif() + +# ============================================================================== + +target_sources(cudaq-solvers PRIVATE lbfgs.cpp LBFGSObjective.cpp) +target_link_libraries(cudaq-solvers PRIVATE LBFGS::lib) diff --git a/libs/solvers/lib/optimizers/lbfgs/LBFGSObjective.cpp b/libs/solvers/lib/optimizers/lbfgs/LBFGSObjective.cpp new file mode 100644 index 0000000..c075db2 --- /dev/null +++ b/libs/solvers/lib/optimizers/lbfgs/LBFGSObjective.cpp @@ -0,0 +1,25 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "LBFGSObjective.h" + +namespace cudaq::optim { +optimization_result LBFGSObjective::run(int N) { + double value; + std::vector parameters = initialParameters; + + /* + Start the L-BFGS optimization; this will invoke the callback functions + evaluate() and progress() when necessary. + */ + int ret = + lbfgs(N, parameters.data(), &value, _evaluate, _progress, this, NULL); + + return std::make_tuple(value, parameters); +} +} // namespace cudaq::optim \ No newline at end of file diff --git a/libs/solvers/lib/optimizers/lbfgs/LBFGSObjective.h b/libs/solvers/lib/optimizers/lbfgs/LBFGSObjective.h new file mode 100644 index 0000000..389cfcd --- /dev/null +++ b/libs/solvers/lib/optimizers/lbfgs/LBFGSObjective.h @@ -0,0 +1,119 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once +#include +#include +#include + +#include "cudaq/solvers/optimizer.h" +#include + +namespace cudaq::optim { + +/// @brief +class LBFGSObjective { +protected: + /// @brief The function to optimize + const optimizable_function &function; + + /// @brief verbose printout + bool verbose; + + /// @brief The initial parameters + std::vector initialParameters; + + /// @brief Vector reference to record history of the + /// optimization + std::vector &history; + + /// @brief The function tolerance. + double functionTolerance; + + /// @brief The function value from the last iteration + // double lastFunctionVal = std::numeric_limits::max(); + std::size_t maxIterations; + +public: + LBFGSObjective(const optimizable_function &func, + const std::vector &init, double functionTol, + std::size_t maxIter, + std::vector &in_history, bool verbose) + : function(func), initialParameters(init), functionTolerance(functionTol), + maxIterations(maxIter), history(in_history), verbose(verbose) {} + + /// @brief Run the optimization + optimization_result run(int N); + +protected: + /// @brief Required hook into liblbfgs lbfgs() evaluation. Will delegate to + /// non-static method on this class + static lbfgsfloatval_t _evaluate(void *instance, const lbfgsfloatval_t *x, + lbfgsfloatval_t *g, const int n, + const lbfgsfloatval_t step) { + return reinterpret_cast(instance)->evaluate(x, g, n, + step); + } + + lbfgsfloatval_t evaluate(const lbfgsfloatval_t *x, lbfgsfloatval_t *g, + const int n, const lbfgsfloatval_t step) { + std::vector params(x, x + n), grad(g, g + n); + // evaluate the function + auto val = function(params, grad); + // set the grad pointer + for (int i = 0; i < n; i++) + g[i] = grad[i]; + return val; + } + + /// @brief Required hook into liblbfgs lbfgs() evaluation. Will delegate to + /// non-static method on this class + static int _progress(void *instance, const lbfgsfloatval_t *x, + const lbfgsfloatval_t *g, const lbfgsfloatval_t fx, + const lbfgsfloatval_t xnorm, const lbfgsfloatval_t gnorm, + const lbfgsfloatval_t step, int n, int k, int ls) { + return reinterpret_cast(instance)->progress( + x, g, fx, xnorm, gnorm, step, n, k, ls); + } + + /// @brief Check the progress of the optimization. 
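+  /// @details liblbfgs stops the optimization when this callback returns a
+  /// non-zero value. The implementation below appends (fx, x) to `history`,
+  /// halts once the iteration count `k` reaches `maxIterations`, and
+  /// otherwise halts when the change in the objective between successive
+  /// iterations drops below `functionTolerance`.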
+ int progress(const lbfgsfloatval_t *x, const lbfgsfloatval_t *g, + const lbfgsfloatval_t fx, const lbfgsfloatval_t xnorm, + const lbfgsfloatval_t gnorm, const lbfgsfloatval_t step, int n, + int k, int ls) { + if (verbose) { + printf("Iteration %d:\n", k); + printf(" fx = %f\n ", fx); + for (int i = 0; i < n; i++) + if (i > 3) + printf("..."); + else + printf("x[%d] = %lf, ", i, x[i]); + + printf("\n xnorm = %f, gnorm = %f, step = %f\n", xnorm, gnorm, step); + printf("\n"); + } + + // append to the history + history.push_back(std::make_tuple(fx, std::vector(x, x + n))); + + if (k >= maxIterations) { + return 1; + } + + // If this is the first evaluation, then we don't have a lastFunctionVal, + // if its not, then get the second to last element of the history + auto lastFunctionVal = k == 1 ? std::numeric_limits::max() + : std::get<0>(history.rbegin()[1]); + + if (std::fabs(fx - lastFunctionVal) < functionTolerance) + return 1; + + return 0; + } +}; +} // namespace cudaq::optim \ No newline at end of file diff --git a/libs/solvers/lib/optimizers/lbfgs/lbfgs.cpp b/libs/solvers/lib/optimizers/lbfgs/lbfgs.cpp new file mode 100644 index 0000000..52bca68 --- /dev/null +++ b/libs/solvers/lib/optimizers/lbfgs/lbfgs.cpp @@ -0,0 +1,25 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/solvers/optimizers/lbfgs.h" +#include "LBFGSObjective.h" + +namespace cudaq::optim { +optimization_result lbfgs::optimize(std::size_t dim, + const optimizable_function &opt_function, + const cudaqx::heterogeneous_map &options) { + history.clear(); + cudaq::optim::LBFGSObjective f( + opt_function, options.get("initial_parameters", std::vector(dim)), + options.get("function_tolerance", 1e-12), + options.get("max_iterations", std::numeric_limits::max()), + history, options.get("verbose", false)); + return f.run(dim); +} + +} // namespace cudaq::optim \ No newline at end of file diff --git a/libs/solvers/lib/optimizers/optimizer.cpp b/libs/solvers/lib/optimizers/optimizer.cpp new file mode 100644 index 0000000..b14f880 --- /dev/null +++ b/libs/solvers/lib/optimizers/optimizer.cpp @@ -0,0 +1,11 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/solvers/optimizer.h" + +INSTANTIATE_REGISTRY_NO_ARGS(cudaq::optim::optimizer) diff --git a/libs/solvers/lib/optimizers/prima/CMakeLists.txt b/libs/solvers/lib/optimizers/prima/CMakeLists.txt new file mode 100644 index 0000000..5ee27b7 --- /dev/null +++ b/libs/solvers/lib/optimizers/prima/CMakeLists.txt @@ -0,0 +1,38 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. # +# All rights reserved. 
# +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +FetchContent_Declare( + prima + GIT_REPOSITORY http://github.com/libprima/prima + GIT_TAG v0.7.2 + GIT_SUBMODULES "" +) +if(NOT prima_POPULATED) + FetchContent_Populate(prima) + # Prima doesn't honor its PRIMA_ENABLE_TESTING flag. Regardless of its setting + # test will be added. The workarround here is to define `add_test` to an empty + # macro. + macro(add_test) + endmacro() + set(BUILD_SHARED_LIBS OFF) + add_subdirectory(${prima_SOURCE_DIR} ${prima_BINARY_DIR} EXCLUDE_FROM_ALL) + macro(add_test) + _add_test(${ARGN}) + endmacro() +endif() + +# ============================================================================== + +target_sources(cudaq-solvers PRIVATE cobyla.cpp) + +target_include_directories(cudaq-solvers + PUBLIC + $ +) + +target_link_libraries(cudaq-solvers PUBLIC primac gfortran) diff --git a/libs/solvers/lib/optimizers/prima/cobyla.cpp b/libs/solvers/lib/optimizers/prima/cobyla.cpp new file mode 100644 index 0000000..76cd8d1 --- /dev/null +++ b/libs/solvers/lib/optimizers/prima/cobyla.cpp @@ -0,0 +1,70 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include +#include +#include +#include + +#include "cudaq/solvers/optimizers/cobyla.h" + +#include "prima/prima.h" + +namespace cudaq::optim { + +struct PrimaContainer { + const optimizable_function &function; + std::size_t dim = 0; +}; + +static void evaluator(const double x[], double *retVal, double constr[], + const void *data) { + const auto *container = reinterpret_cast(data); + std::vector parameters(x, x + container->dim), dummy; + *retVal = container->function(parameters, dummy); +} + +optimization_result cobyla::optimize(std::size_t dim, + const optimizable_function &opt_function, + const heterogeneous_map &options) { + history.clear(); + + // Create a container type for our objective function + // and pass as the forwarded void* data pointer to prima_cobyla + PrimaContainer container{opt_function, dim}; + void *data = reinterpret_cast(&container); + + // Get the initial parameters + auto initParams = options.get>( + "initial_parameters", std::vector(dim, 0.)); + + // Set the upper and lower bounds + std::vector xupper(dim, M_PI), xlower(dim, -M_PI); + + // Set the max iterations / max function calls + const int maxfun = options.get("max_iterations", dim * 200); + + // Default values needed for coobyla + double value = 0.0; + double cstrv = 0.0; + const double rhobeg = 1.0; + const double rhoend = 1e-4; + const double ftarget = -INFINITY; + const int iprint = PRIMA_MSG_NONE; + int nf = 0; + + // Run the optimization + int rc = prima_cobyla(0, &evaluator, data, dim, initParams.data(), &value, + &cstrv, nullptr, 0, nullptr, nullptr, 0, nullptr, + nullptr, xlower.data(), xupper.data(), &nf, rhobeg, + rhoend, ftarget, maxfun, iprint); + + return std::make_tuple(value, initParams); +} + +} // namespace cudaq::optim \ No newline at end of file diff --git a/libs/solvers/lib/qaoa/CMakeLists.txt 
b/libs/solvers/lib/qaoa/CMakeLists.txt new file mode 100644 index 0000000..2a9f679 --- /dev/null +++ b/libs/solvers/lib/qaoa/CMakeLists.txt @@ -0,0 +1,11 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +add_subdirectory(device) + +target_sources(cudaq-solvers PRIVATE qaoa.cpp) diff --git a/libs/solvers/lib/qaoa/device/CMakeLists.txt b/libs/solvers/lib/qaoa/device/CMakeLists.txt new file mode 100644 index 0000000..4193cc4 --- /dev/null +++ b/libs/solvers/lib/qaoa/device/CMakeLists.txt @@ -0,0 +1,10 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + + +cudaqx_add_device_code(cudaq-solvers SOURCES qaoa_device.cpp) diff --git a/libs/solvers/lib/qaoa/device/qaoa_device.cpp b/libs/solvers/lib/qaoa/device/qaoa_device.cpp new file mode 100644 index 0000000..eb76cec --- /dev/null +++ b/libs/solvers/lib/qaoa/device/qaoa_device.cpp @@ -0,0 +1,45 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/solvers/qaoa/qaoa_device.h" + +namespace cudaq::solvers { +__qpu__ void qaoa_kernel(std::size_t numQubits, std::size_t numLayers, + const std::vector &gamma_beta, + const std::vector &problemHCoeffs, + const std::vector &problemH, + const std::vector &referenceHCoeffs, + const std::vector &referenceH, + bool full_param, bool counterdiabatic) { + cudaq::qvector q(numQubits); + h(q); + for (std::size_t angleCounter = 0, layer = 0; layer < numLayers; layer++) { + for (std::size_t i = 0; i < problemHCoeffs.size(); i++) { + exp_pauli(gamma_beta[angleCounter] * problemHCoeffs[i], q, problemH[i]); + if (full_param) + angleCounter++; + } + + if (!full_param) + angleCounter++; + + for (std::size_t i = 0; i < referenceHCoeffs.size(); i++) { + exp_pauli(gamma_beta[angleCounter] * referenceHCoeffs[i], q, + referenceH[i]); + if (full_param) + angleCounter++; + } + if (!full_param) + angleCounter++; + + if (counterdiabatic) + for (std::size_t i = 0; i < numQubits; i++) + ry(gamma_beta[angleCounter++], q[i]); + } +} +} // namespace cudaq::solvers diff --git a/libs/solvers/lib/qaoa/qaoa.cpp b/libs/solvers/lib/qaoa/qaoa.cpp new file mode 100644 index 0000000..369b819 --- /dev/null +++ b/libs/solvers/lib/qaoa/qaoa.cpp @@ -0,0 +1,158 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. 
* + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/solvers/qaoa/qaoa_device.h" +#include "cudaq/solvers/vqe.h" + +#include "cudaq/qis/pauli_word.h" +#include "cudaq/solvers/qaoa.h" + +namespace cudaq::solvers { +cudaq::spin_op +getDefaultReferenceHamiltonian(const cudaq::spin_op &problemHamiltonian) { + cudaq::spin_op referenceHamiltonian; + auto numQubits = problemHamiltonian.num_qubits(); + + // Add X terms for each qubit as the default mixer + for (std::size_t i = 0; i < numQubits; i++) { + referenceHamiltonian += cudaq::spin::i(numQubits - 1) * cudaq::spin::x(i); + } + + referenceHamiltonian -= cudaq::spin::i(numQubits - 1); + return referenceHamiltonian; +} + +std::size_t get_num_qaoa_parameters(const cudaq::spin_op &problemHamiltonian, + const cudaq::spin_op &referenceHamiltonian, + std::size_t numLayers, + const heterogeneous_map options) { + auto counterdiabatic = options.get("counterdiabatic", false); + auto full_parameterization = + options.get(std::vector{"full-parameterization", + "full_parameterization"}, + false); + + // Compute the expected number of parameters + std::size_t expectedNumParams = 0; + if (full_parameterization) { + auto nonIdTerms = 0; + referenceHamiltonian.for_each_term([&](cudaq::spin_op &term) { + if (!term.is_identity()) + nonIdTerms++; + }); + expectedNumParams = + numLayers * (problemHamiltonian.num_terms() + nonIdTerms); + } else { + expectedNumParams = 2 * numLayers; + } + + if (counterdiabatic) + expectedNumParams += numLayers * problemHamiltonian.num_qubits(); + + return expectedNumParams; +} + +std::size_t get_num_qaoa_parameters(const cudaq::spin_op &problemHamiltonian, + std::size_t numLayers, + const heterogeneous_map options) { + return get_num_qaoa_parameters( + problemHamiltonian, getDefaultReferenceHamiltonian(problemHamiltonian), + numLayers, options); +} + +qaoa_result qaoa(const cudaq::spin_op &problemHamiltonian, + const cudaq::spin_op &referenceHamiltonian, + const optim::optimizer &optimizer, std::size_t numLayers, + const std::vector &initialParameters, + const heterogeneous_map options) { + auto expectedNumParams = get_num_qaoa_parameters( + problemHamiltonian, referenceHamiltonian, numLayers, options); + + if (initialParameters.size() != expectedNumParams) + throw std::runtime_error( + "qaoa error - invalid number of initial parameters. 
" + + std::to_string(expectedNumParams) + " parameters are required, but " + + std::to_string(initialParameters.size()) + " provided."); + + auto counterdiabatic = options.get("counterdiabatic", false); + auto full_parameterization = + options.get(std::vector{"full-parameterization", + "full_parameterization"}, + false); + std::vector probHCoeffs, refHCoeffs; + std::vector probHWords, refHWords; + for (const auto &o : problemHamiltonian) { + probHWords.emplace_back(o.to_string(false)); + probHCoeffs.push_back(o.get_coefficient().real()); + } + + for (const auto &o : referenceHamiltonian) { + refHWords.emplace_back(o.to_string(false)); + refHCoeffs.push_back(o.get_coefficient().real()); + } + + auto numQubits = problemHamiltonian.num_qubits(); + auto argsTranslator = [&](std::vector x) { + return std::make_tuple(numQubits, numLayers, x, probHCoeffs, probHWords, + refHCoeffs, refHWords, full_parameterization, + counterdiabatic); + }; + + auto [optVal, optParams, data] = + vqe(qaoa_kernel, problemHamiltonian, initialParameters, argsTranslator, + options); + auto counts = cudaq::sample(qaoa_kernel, numQubits, numLayers, optParams, + probHCoeffs, probHWords, refHCoeffs, refHWords, + full_parameterization, counterdiabatic); + return qaoa_result{optVal, optParams, counts}; +} + +qaoa_result qaoa(const cudaq::spin_op &problemHamiltonian, + const optim::optimizer &optimizer, std::size_t numLayers, + const std::vector &initialParameters, + const heterogeneous_map options) { + // Create default transverse field mixing Hamiltonian + cudaq::spin_op referenceHamiltonian = + getDefaultReferenceHamiltonian(problemHamiltonian); + + // Delegate to the full implementation + return qaoa(problemHamiltonian, referenceHamiltonian, optimizer, numLayers, + initialParameters, options); +} + +qaoa_result qaoa(const cudaq::spin_op &problemHamiltonian, + std::size_t numLayers, + const std::vector &initialParameters, + const heterogeneous_map options) { + // Validate inputs + if (initialParameters.empty()) + throw std::invalid_argument("Initial parameters cannot be empty"); + + // Create default COBYLA optimizer + auto defaultOptimizer = optim::optimizer::get("cobyla"); + + // Delegate to the version with explicit optimizer + return qaoa(problemHamiltonian, *defaultOptimizer, numLayers, + initialParameters, options); +} + +qaoa_result qaoa(const cudaq::spin_op &problemHamiltonian, + const cudaq::spin_op &referenceHamiltonian, + std::size_t numLayers, + const std::vector &initialParameters, + const heterogeneous_map options) { + // Create default transverse field mixing Hamiltonian + auto numQubits = problemHamiltonian.num_qubits(); + auto defaultOptimizer = optim::optimizer::get("cobyla"); + + // Delegate to the full implementation + return qaoa(problemHamiltonian, referenceHamiltonian, *defaultOptimizer, + numLayers, initialParameters, options); +} + +} // namespace cudaq::solvers diff --git a/libs/solvers/lib/stateprep/CMakeLists.txt b/libs/solvers/lib/stateprep/CMakeLists.txt new file mode 100644 index 0000000..041fd24 --- /dev/null +++ b/libs/solvers/lib/stateprep/CMakeLists.txt @@ -0,0 +1,12 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +cudaqx_add_device_code(cudaq-solvers + SOURCES + uccsd.cpp +) diff --git a/libs/solvers/lib/stateprep/uccsd.cpp b/libs/solvers/lib/stateprep/uccsd.cpp new file mode 100644 index 0000000..a2cfc97 --- /dev/null +++ b/libs/solvers/lib/stateprep/uccsd.cpp @@ -0,0 +1,584 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include "cudaq/solvers/stateprep/uccsd.h" +#include "cudaq.h" + +namespace cudaq::solvers::stateprep { + +std::tuple +get_uccsd_excitations(std::size_t numElectrons, std::size_t numQubits, + std::size_t spin) { + if (numQubits % 2 != 0) + throw std::runtime_error("The total number of qubits should be even."); + + auto numSpatialOrbs = numQubits / 2; + std::vector occupiedAlpha, virtualAlpha, occupiedBeta, + virtualBeta; + if (spin > 0) { + auto n_occupied_beta = + static_cast(std::floor((float)(numElectrons - spin) / 2)); + auto n_occupied_alpha = numElectrons - n_occupied_beta; + auto n_virtual_alpha = numSpatialOrbs - n_occupied_alpha; + auto n_virtual_beta = numSpatialOrbs - n_occupied_beta; + + for (auto i : cudaq::range(n_occupied_alpha)) + occupiedAlpha.push_back(i * 2); + + for (auto i : cudaq::range(n_virtual_alpha)) + virtualAlpha.push_back(i * 2 + numElectrons + 1); + + for (auto i : cudaq::range(n_occupied_beta)) + occupiedBeta.push_back(i * 2 + 1); + + for (auto i : cudaq::range(n_virtual_beta)) + virtualBeta.push_back(i * 2 + numElectrons - 1); + } else if (numElectrons % 2 == 0 && spin == 0) { + auto numOccupied = + static_cast(std::floor((float)numElectrons / 2)); + auto numVirtual = numSpatialOrbs - numOccupied; + + for (auto i : cudaq::range(numOccupied)) + occupiedAlpha.push_back(i * 2); + + for (auto i : cudaq::range(numVirtual)) + virtualAlpha.push_back(i * 2 + numElectrons); + + for (auto i : cudaq::range(numOccupied)) + occupiedBeta.push_back(i * 2 + 1); + + for (auto i : cudaq::range(numVirtual)) + virtualBeta.push_back(i * 2 + numElectrons + 1); + + } else + throw std::runtime_error("Incorrect spin multiplicity. 
Number of electrons " + "is odd but spin is 0 " + + std::to_string(numElectrons) + ", " + + std::to_string(spin)); + + excitation_list singlesAlpha, singlesBeta, doublesMixed, doublesAlpha, + doublesBeta; + + for (auto p : occupiedAlpha) + for (auto q : virtualAlpha) + singlesAlpha.push_back({p, q}); + + for (auto p : occupiedBeta) + for (auto q : virtualBeta) + singlesBeta.push_back({p, q}); + + for (auto p : occupiedAlpha) + for (auto q : occupiedBeta) + for (auto r : virtualBeta) + for (auto s : virtualAlpha) + doublesMixed.push_back({p, q, r, s}); + + auto numOccAlpha = occupiedAlpha.size(); + auto numOccBeta = occupiedBeta.size(); + auto numVirtAlpha = virtualAlpha.size(); + auto numVirtBeta = virtualBeta.size(); + + for (auto p : cudaq::range(numOccAlpha - 1)) + for (std::size_t q = p + 1; q < numOccAlpha; q++) + for (auto r : cudaq::range(numVirtAlpha - 1)) + for (std::size_t s = r + 1; s < numVirtAlpha; s++) + doublesAlpha.push_back({occupiedAlpha[p], occupiedAlpha[q], + virtualAlpha[r], virtualAlpha[s]}); + + for (auto p : cudaq::range(numOccBeta - 1)) + for (std::size_t q = p + 1; q < numOccBeta; q++) + for (auto r : cudaq::range(numVirtBeta - 1)) + for (std::size_t s = r + 1; s < numVirtBeta; s++) + doublesBeta.push_back({occupiedBeta[p], occupiedBeta[q], + virtualBeta[r], virtualBeta[s]}); + + return std::make_tuple(singlesAlpha, singlesBeta, doublesMixed, doublesAlpha, + doublesBeta); +} + +__qpu__ void single_excitation(cudaq::qview<> qubits, double theta, + std::size_t p_occ, std::size_t q_virt) { + // Y_p X_q + rx(M_PI_2, qubits[p_occ]); + h(qubits[q_virt]); + + for (std::size_t i = p_occ; i < q_virt; i++) + cx(qubits[i], qubits[i + 1]); + + rz(0.5 * theta, qubits[q_virt]); + + for (std::size_t i = q_virt; i > p_occ; i--) + cx(qubits[i - 1], qubits[i]); + + h(qubits[q_virt]); + rx(-M_PI_2, qubits[p_occ]); + + // -X_p Y_q + h(qubits[p_occ]); + rx(M_PI_2, qubits[q_virt]); + + for (std::size_t i = p_occ; i < q_virt; i++) + cx(qubits[i], qubits[i + 1]); + + rz(-0.5 * theta, qubits[q_virt]); + + for (std::size_t i = q_virt; i > p_occ; i--) + cx(qubits[i - 1], qubits[i]); + + rx(-M_PI_2, qubits[q_virt]); + h(qubits[p_occ]); +} + +__qpu__ void double_excitation(cudaq::qview<> qubits, double theta, + std::size_t pOcc, std::size_t qOcc, + std::size_t rVirt, std::size_t sVirt) { + std::size_t iOcc = 0, jOcc = 0, aVirt = 0, bVirt = 0; + if ((pOcc < qOcc) && (rVirt < sVirt)) { + iOcc = pOcc; + jOcc = qOcc; + aVirt = rVirt; + bVirt = sVirt; + } else if ((pOcc > qOcc) && (rVirt > sVirt)) { + iOcc = qOcc; + jOcc = pOcc; + aVirt = sVirt; + bVirt = rVirt; + } else if ((pOcc < qOcc) && (rVirt > sVirt)) { + iOcc = pOcc; + jOcc = qOcc; + aVirt = sVirt; + bVirt = rVirt; + theta *= -1.; + } else if ((pOcc > qOcc) && (rVirt < sVirt)) { + iOcc = qOcc; + jOcc = pOcc; + aVirt = rVirt; + bVirt = sVirt; + theta *= -1.; + } + + h(qubits[iOcc]); + h(qubits[jOcc]); + h(qubits[aVirt]); + rx(M_PI_2, qubits[bVirt]); + + for (std::size_t i = iOcc; i < jOcc; i++) + cx(qubits[i], qubits[i + 1]); + + cx(qubits[jOcc], qubits[aVirt]); + + for (std::size_t i = aVirt; i < bVirt; i++) + cx(qubits[i], qubits[i + 1]); + + rz(0.125 * theta, qubits[bVirt]); + + for (std::size_t i = bVirt; i > aVirt; i--) + cx(qubits[i - 1], qubits[i]); + + cx(qubits[jOcc], qubits[aVirt]); + + rx(-M_PI_2, qubits[bVirt]); + h(qubits[aVirt]); + + rx(M_PI_2, qubits[aVirt]); + h(qubits[bVirt]); + + cx(qubits[jOcc], qubits[aVirt]); + for (std::size_t i = aVirt; i < bVirt; i++) + cx(qubits[i], qubits[i + 1]); + + rz(0.125 * theta, 
qubits[bVirt]); + + for (std::size_t i = bVirt; i > aVirt; i--) + cx(qubits[i - 1], qubits[i]); + cx(qubits[jOcc], qubits[aVirt]); + + for (std::size_t i = jOcc; i > iOcc; i--) + cx(qubits[i - 1], qubits[i]); + + rx(-M_PI_2, qubits[aVirt]); + h(qubits[jOcc]); + + rx(M_PI_2, qubits[jOcc]); + h(qubits[aVirt]); + + for (std::size_t i = iOcc; i < jOcc; i++) + cx(qubits[i], qubits[i + 1]); + cx(qubits[jOcc], qubits[aVirt]); + + for (std::size_t i = aVirt; i < bVirt; i++) + cx(qubits[i], qubits[i + 1]); + + rz(-0.125 * theta, qubits[bVirt]); + + for (std::size_t i = bVirt; i > aVirt; i--) + cx(qubits[i - 1], qubits[i]); + cx(qubits[jOcc], qubits[aVirt]); + + h(qubits[bVirt]); + h(qubits[aVirt]); + + rx(M_PI_2, qubits[aVirt]); + rx(M_PI_2, qubits[bVirt]); + + cx(qubits[jOcc], qubits[aVirt]); + for (std::size_t i = aVirt; i < bVirt; i++) + cx(qubits[i], qubits[i + 1]); + + rz(0.125 * theta, qubits[bVirt]); + + for (std::size_t i = bVirt; i > aVirt; i--) + cx(qubits[i - 1], qubits[i]); + + cx(qubits[jOcc], qubits[aVirt]); + + for (std::size_t i = jOcc; i > iOcc; i--) + cx(qubits[i - 1], qubits[i]); + + rx(-M_PI_2, qubits[jOcc]); + h(qubits[iOcc]); + + rx(M_PI_2, qubits[iOcc]); + h(qubits[jOcc]); + + for (std::size_t i = iOcc; i < jOcc; i++) + cx(qubits[i], qubits[i + 1]); + cx(qubits[jOcc], qubits[aVirt]); + + for (std::size_t i = aVirt; i < bVirt; i++) + cx(qubits[i], qubits[i + 1]); + + rz(0.125 * theta, qubits[bVirt]); + + for (std::size_t i = bVirt; i > aVirt; i--) + cx(qubits[i - 1], qubits[i]); + cx(qubits[jOcc], qubits[aVirt]); + + rx(-M_PI_2, qubits[bVirt]); + rx(-M_PI_2, qubits[aVirt]); + + h(qubits[aVirt]); + h(qubits[bVirt]); + + cx(qubits[jOcc], qubits[aVirt]); + for (std::size_t i = aVirt; i < bVirt; i++) + cx(qubits[i], qubits[i + 1]); + + rz(-0.125 * theta, qubits[bVirt]); + + for (std::size_t i = bVirt; i > aVirt; i--) + cx(qubits[i - 1], qubits[i]); + cx(qubits[jOcc], qubits[aVirt]); + + for (std::size_t i = jOcc; i > iOcc; i--) + cx(qubits[i - 1], qubits[i]); + + h(qubits[bVirt]); + h(qubits[jOcc]); + + rx(M_PI_2, qubits[jOcc]); + rx(M_PI_2, qubits[bVirt]); + + for (std::size_t i = iOcc; i < jOcc; i++) + cx(qubits[i], qubits[i + 1]); + + cx(qubits[jOcc], qubits[aVirt]); + + for (std::size_t i = aVirt; i < bVirt; i++) + cx(qubits[i], qubits[i + 1]); + + rz(-0.125 * theta, qubits[bVirt]); + + for (std::size_t i = bVirt; i > aVirt; i--) + cx(qubits[i - 1], qubits[i]); + cx(qubits[jOcc], qubits[aVirt]); + + rx(-M_PI_2, qubits[bVirt]); + h(qubits[aVirt]); + + rx(M_PI_2, qubits[aVirt]); + h(qubits[bVirt]); + + cx(qubits[jOcc], qubits[aVirt]); + for (std::size_t i = aVirt; i < bVirt; i++) + cx(qubits[i], qubits[i + 1]); + + rz(-0.125 * theta, qubits[bVirt]); + + for (std::size_t i = bVirt; i > aVirt; i--) + cx(qubits[i - 1], qubits[i]); + cx(qubits[jOcc], qubits[aVirt]); + + for (std::size_t i = jOcc; i > iOcc; i--) + cx(qubits[i - 1], qubits[i]); + + h(qubits[bVirt]); + rx(-M_PI_2, qubits[aVirt]); + rx(-M_PI_2, qubits[jOcc]); + rx(-M_PI_2, qubits[iOcc]); +} + +/// @brief Return the number of UCCSD ansatz parameters. 
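+/// @details One variational angle is consumed per excitation, so the count
+/// is the sum of the sizes of the five lists produced by
+/// get_uccsd_excitations: alpha and beta singles, mixed-spin doubles, and
+/// same-spin alpha and beta doubles.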
+std::size_t get_num_uccsd_parameters(std::size_t numElectrons, + std::size_t numQubits, std::size_t spin) { + auto [singlesAlpha, singlesBeta, doublesMixed, doublesAlpha, doublesBeta] = + get_uccsd_excitations(numElectrons, numQubits, spin); + return singlesAlpha.size() + singlesBeta.size() + doublesMixed.size() + + doublesAlpha.size() + doublesBeta.size(); +} + +__qpu__ float positive_floor(float x) { + int integer_part = (int)x; + return (float)integer_part; +} + +__qpu__ std::size_t getNumOccupiedAlpha(std::size_t numElectrons, + std::size_t spin, + std::size_t numQubits) { + auto numSpatialOrbs = numQubits / 2; + if (spin > 0) { + auto n_occupied_beta = static_cast( + positive_floor((float)(numElectrons - spin) / 2)); + auto n_occupied_alpha = numElectrons - n_occupied_beta; + return n_occupied_alpha; + } + + auto n_occupied_alpha = + static_cast(positive_floor((float)numElectrons / 2)); + return n_occupied_alpha; +} + +__qpu__ std::size_t getNumOccupiedBeta(std::size_t numElectrons, + std::size_t spin, + std::size_t numQubits) { + + auto numSpatialOrbs = numQubits / 2; + if (spin > 0) { + auto n_occupied_beta = static_cast( + positive_floor((float)(numElectrons - spin) / 2)); + return n_occupied_beta; + } + + auto n_occupied_alpha = + static_cast(positive_floor((float)numElectrons / 2)); + return n_occupied_alpha; +} + +__qpu__ std::size_t getNumVirtualAlpha(std::size_t numElectrons, + std::size_t spin, + std::size_t numQubits) { + + auto numSpatialOrbs = numQubits / 2; + if (spin > 0) { + auto n_occupied_beta = static_cast( + positive_floor((float)(numElectrons - spin) / 2)); + auto n_occupied_alpha = numElectrons - n_occupied_beta; + auto n_virtual_alpha = numSpatialOrbs - n_occupied_alpha; + return n_virtual_alpha; + } + auto n_occupied_alpha = + static_cast(positive_floor((float)numElectrons / 2)); + auto n_virtual_alpha = numSpatialOrbs - n_occupied_alpha; + return n_virtual_alpha; +} + +__qpu__ std::size_t getNumVirtualBeta(std::size_t numElectrons, + std::size_t spin, std::size_t numQubits) { + + auto numSpatialOrbs = numQubits / 2; + if (spin > 0) { + auto n_occupied_beta = static_cast( + positive_floor((float)(numElectrons - spin) / 2)); + auto n_virtual_beta = numSpatialOrbs - n_occupied_beta; + return n_virtual_beta; + } + + auto n_occupied_alpha = + static_cast(positive_floor((float)numElectrons / 2)); + auto n_virtual_beta = numSpatialOrbs - n_occupied_alpha; + return n_virtual_beta; +} + +__qpu__ void uccsd(cudaq::qview<> qubits, const std::vector &thetas, + std::size_t numElectrons, std::size_t spin) { + + auto numOccAlpha = getNumOccupiedAlpha(numElectrons, spin, qubits.size()); + auto numOccBeta = getNumOccupiedBeta(numElectrons, spin, qubits.size()); + auto numVirtAlpha = getNumVirtualAlpha(numElectrons, spin, qubits.size()); + auto numVirtBeta = getNumVirtualBeta(numElectrons, spin, qubits.size()); + std::vector occupiedAlpha(numOccAlpha), + virtualAlpha(numVirtAlpha), occupiedBeta(numOccBeta), + virtualBeta(numVirtBeta); + if (spin > 0) { + + int counter = 0; + for (std::size_t i = 0; i < numOccAlpha; i++) { + occupiedAlpha[counter] = i * 2; + counter++; + } + counter = 0; + + for (std::size_t i = 0; i < numVirtAlpha; i++) { + virtualAlpha[counter] = i * 2 + numElectrons + 1; + counter++; + } + + counter = 0; + for (std::size_t i = 0; i < numOccBeta; i++) { + occupiedBeta[counter] = i * 2 + 1; + counter++; + } + counter = 0; + + for (std::size_t i = 0; i < numVirtBeta; i++) { + virtualBeta[counter] = i * 2 + numElectrons - 1; + } + + } else { + auto numOccupied = 
numOccAlpha; + auto numVirtual = numVirtAlpha; + + int counter = 0; + for (std::size_t i = 0; i < numOccupied; i++) { + occupiedAlpha[counter] = i * 2; + counter++; + } + counter = 0; + for (std::size_t i = 0; i < numVirtual; i++) { + virtualAlpha[counter] = i * 2 + numElectrons; + counter++; + } + counter = 0; + + for (std::size_t i = 0; i < numOccupied; i++) { + occupiedBeta[counter] = i * 2 + 1; + counter++; + } + counter = 0; + for (std::size_t i = 0; i < numVirtual; i++) { + virtualBeta[counter] = i * 2 + numElectrons + 1; + counter++; + } + } + + std::size_t counter = 0; + std::vector singlesAlpha(2 * occupiedAlpha.size() * + virtualAlpha.size()); + for (auto p : occupiedAlpha) + for (auto q : virtualAlpha) { + singlesAlpha[counter] = p; + counter++; + singlesAlpha[counter] = q; + counter++; + } + + counter = 0; + std::vector singlesBeta(2 * occupiedBeta.size() * + virtualBeta.size()); + for (auto p : occupiedBeta) + for (auto q : virtualBeta) { + singlesBeta[counter] = p; + counter++; + singlesBeta[counter] = q; + counter++; + } + + counter = 0; + std::vector doublesMixed( + 4 * occupiedAlpha.size() * virtualAlpha.size() * occupiedBeta.size() * + virtualBeta.size()); + for (auto p : occupiedAlpha) + for (auto q : occupiedBeta) + for (auto r : virtualBeta) + for (auto s : virtualAlpha) { + doublesMixed[counter] = p; + counter++; + doublesMixed[counter] = q; + counter++; + doublesMixed[counter] = r; + counter++; + doublesMixed[counter] = s; + counter++; + } + + counter = 0; + for (std::size_t p = 0; p < numOccAlpha - 1; p++) + for (std::size_t q = p + 1; q < numOccAlpha; q++) + for (std::size_t r = 0; r < numVirtAlpha - 1; r++) + for (std::size_t s = r + 1; s < numVirtAlpha; s++) + counter++; + + std::vector doublesAlpha(4 * counter); + counter = 0; + for (std::size_t p = 0; p < numOccAlpha - 1; p++) + for (std::size_t q = p + 1; q < numOccAlpha; q++) + for (std::size_t r = 0; r < numVirtAlpha - 1; r++) + for (std::size_t s = r + 1; s < numVirtAlpha; s++) { + doublesAlpha[counter] = occupiedAlpha[p]; + counter++; + doublesAlpha[counter] = occupiedAlpha[q]; + counter++; + doublesAlpha[counter] = virtualAlpha[r]; + counter++; + doublesAlpha[counter] = virtualAlpha[s]; + counter++; + } + + counter = 0; + for (std::size_t p = 0; p < numOccBeta - 1; p++) + for (std::size_t q = p + 1; q < numOccBeta; q++) + for (std::size_t r = 0; r < numVirtBeta - 1; r++) + for (std::size_t s = r + 1; s < numVirtBeta; s++) + counter++; + std::vector doublesBeta(4 * counter); + counter = 0; + for (std::size_t p = 0; p < numOccBeta - 1; p++) + for (std::size_t q = p + 1; q < numOccBeta; q++) + for (std::size_t r = 0; r < numVirtBeta - 1; r++) + for (std::size_t s = r + 1; s < numVirtBeta; s++) { + doublesBeta[counter] = occupiedBeta[p]; + counter++; + doublesBeta[counter] = occupiedBeta[q]; + counter++; + doublesBeta[counter] = virtualBeta[r]; + counter++; + doublesBeta[counter] = virtualBeta[s]; + counter++; + } + + std::size_t thetaCounter = 0; + for (std::size_t i = 0; i < singlesAlpha.size(); i += 2) + single_excitation(qubits, thetas[thetaCounter++], singlesAlpha[i], + singlesAlpha[i + 1]); + + for (std::size_t i = 0; i < singlesBeta.size(); i += 2) + single_excitation(qubits, thetas[thetaCounter++], singlesBeta[i], + singlesBeta[i + 1]); + + for (std::size_t i = 0; i < doublesMixed.size(); i += 4) + double_excitation(qubits, thetas[thetaCounter++], doublesMixed[i], + doublesMixed[i + 1], doublesMixed[i + 2], + doublesMixed[i + 3]); + + for (std::size_t i = 0; i < doublesAlpha.size(); i += 4) + 
double_excitation(qubits, thetas[thetaCounter++], doublesAlpha[i], + doublesAlpha[i + 1], doublesAlpha[i + 2], + doublesAlpha[i + 3]); + + for (std::size_t i = 0; i < doublesBeta.size(); i += 4) + double_excitation(qubits, thetas[thetaCounter++], doublesBeta[i], + doublesBeta[i + 1], doublesBeta[i + 2], + doublesBeta[i + 3]); +} + +__qpu__ void uccsd(cudaq::qview<> qubits, const std::vector &thetas, + std::size_t numElectrons) { + uccsd(qubits, thetas, numElectrons, 0); +} + +} // namespace cudaq::solvers::stateprep \ No newline at end of file diff --git a/libs/solvers/pyproject.toml b/libs/solvers/pyproject.toml new file mode 100644 index 0000000..ebe26a3 --- /dev/null +++ b/libs/solvers/pyproject.toml @@ -0,0 +1,52 @@ +[build-system] +requires = ["scikit-build-core>=0.10"] +build-backend = "scikit_build_core.build" + +[project] +name = "cudaq-solvers" +version = "0.1.0" +description = "Accelerated libraries for quantum-classical solvers built on CUDA-Q" +authors = [{name = "NVIDIA Corporation & Affiliates"}] +maintainers = [{name = "NVIDIA Corporation & Affiliates"}] +requires-python = ">=3.10" +readme = "README.md" +dependencies = [ + 'cuda-quantum-cu12 ~= 0.9.0', + 'fastapi', + 'networkx', + 'pyscf', + 'scipy', + 'uvicorn', +] +classifiers = [ + 'Intended Audience :: Science/Research', + 'Intended Audience :: Developers', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', + "Environment :: GPU :: NVIDIA CUDA", + "Environment :: GPU :: NVIDIA CUDA :: 12", + 'Topic :: Software Development', + 'Topic :: Scientific/Engineering', +] + +[project.urls] +Homepage = "https://nvidia.github.io/cudaqx" +Documentation = "https://nvidia.github.io/cudaqx/components/solvers/introduction.html" +Repository = "https://github.com/NVIDIA/cudaqx" + +[tool.scikit-build] +build-dir = "_skbuild" +build.verbose = true +cmake.version = ">=3.28" +cmake.build-type = "Release" +install.components = ["solvers-python", "solvers-lib", "solvers-tools"] +wheel.packages = [] +logging.level = "DEBUG" +ninja.version = ">=1.10" + +[tool.scikit-build.cmake.define] +CUDAQX_SOLVERS_INCLUDE_TESTS = false +CUDAQX_SOLVERS_BINDINGS_PYTHON = true diff --git a/libs/solvers/python/CMakeLists.txt b/libs/solvers/python/CMakeLists.txt new file mode 100644 index 0000000..5f45806 --- /dev/null +++ b/libs/solvers/python/CMakeLists.txt @@ -0,0 +1,90 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +# External Dependencies +# ============================================================================== + +FetchContent_Declare( + pybind11 + GIT_REPOSITORY https://github.com/pybind/pybind11 + GIT_TAG v2.13 + EXCLUDE_FROM_ALL +) +FetchContent_MakeAvailable(pybind11) + +find_package(Python COMPONENTS Interpreter REQUIRED) + +# ============================================================================== + +set(MODULE_NAME _pycudaqx_solvers_the_suffix_matters_cudaq_solvers) + +cudaqx_add_pymodule(${MODULE_NAME} + bindings/cudaqx_solvers.cpp + bindings/solvers/py_optim.cpp + bindings/solvers/py_solvers.cpp +) + +target_include_directories(${MODULE_NAME} PRIVATE .) + +target_link_libraries(${MODULE_NAME} + PRIVATE + cudaq-solvers + cudaq::cudaq + cudaq::cudaq-python-interop +) + +set_target_properties(${MODULE_NAME} PROPERTIES + LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/python/cudaq_solvers") + +if (NOT SKBUILD) + add_custom_target( + copy_solvers_python_files ALL + COMMAND ${CMAKE_COMMAND} -E copy_directory + ${CMAKE_CURRENT_SOURCE_DIR}/cudaq_solvers + ${CMAKE_BINARY_DIR}/python/cudaq_solvers + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/cudaq_solvers + COMMENT "Copying Python files to binary directory" + ) + + add_dependencies(${MODULE_NAME} copy_solvers_python_files) +endif() + +# RPATH configuration +# ============================================================================== + +if (NOT SKBUILD) + set_target_properties(${MODULE_NAME} PROPERTIES + BUILD_RPATH "$ORIGIN" + INSTALL_RPATH "$ORIGIN/../lib" + ) + + # Let CMake automatically add paths of linked libraries to the RPATH: + set_target_properties(${MODULE_NAME} PROPERTIES + INSTALL_RPATH_USE_LINK_PATH TRUE + ) +else() + # CUDA-Q install its libraries in site-packages/lib (or dist-packages/lib) + # Thus, we need the $ORIGIN/../lib + set_target_properties(${MODULE_NAME} PROPERTIES + INSTALL_RPATH "$ORIGIN/lib:$ORIGIN/../lib" + ) +endif() + +# Install +# ============================================================================== + +install(DIRECTORY cudaq_solvers + COMPONENT solvers-python + DESTINATION . +) + +install(TARGETS ${MODULE_NAME} + COMPONENT solvers-python + DESTINATION cudaq_solvers/ +) + diff --git a/libs/solvers/python/bindings/cudaqx_solvers.cpp b/libs/solvers/python/bindings/cudaqx_solvers.cpp new file mode 100644 index 0000000..6be50c7 --- /dev/null +++ b/libs/solvers/python/bindings/cudaqx_solvers.cpp @@ -0,0 +1,19 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "solvers/py_optim.h" +#include "solvers/py_solvers.h" + +#include +#include + +PYBIND11_MODULE(_pycudaqx_solvers_the_suffix_matters_cudaq_solvers, mod) { + mod.doc() = "Python bindings for the CUDA-Q Solver Libraries."; + cudaq::optim::bindOptim(mod); + cudaq::solvers::bindSolvers(mod); +} diff --git a/libs/solvers/python/bindings/solvers/py_optim.cpp b/libs/solvers/python/bindings/solvers/py_optim.cpp new file mode 100644 index 0000000..3666612 --- /dev/null +++ b/libs/solvers/python/bindings/solvers/py_optim.cpp @@ -0,0 +1,179 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include +#include +#include +#include + +#include "cudaq/solvers/observe_gradient.h" +#include "cudaq/solvers/optimizer.h" + +#include "bindings/utils/kwargs_utils.h" +#include "bindings/utils/type_casters.h" + +namespace py = pybind11; + +namespace cudaq::optim { + +void bindOptim(py::module &mod) { + + auto optim = mod.def_submodule("optim"); + py::class_(optim, "OptimizableFunction") + .def(py::init<>(), R"docstring( + Default constructor for OptimizableFunction. + )docstring") + .def(py::init(), R"docstring( + Copy constructor for OptimizableFunction. + + Args: + other (OptimizableFunction): The OptimizableFunction to copy. + )docstring") + .def( + "__call__", + [](const optimizable_function &self, const std::vector &x) { + std::vector dx; + return self(x, dx); + }, + R"docstring( + Evaluate the function without returning gradients. + + Args: + x (List[float]): Input vector. + + Returns: + float: The function value at x. + )docstring") + .def( + "__call__", + [](const optimizable_function &self, const std::vector &x, + std::vector &dx) { return self(x, dx); }, + R"docstring( + Evaluate the function and compute gradients. + + Args: + x (List[float]): Input vector. + dx (List[float]): Output vector to store gradients. + + Returns: + float: The function value at x. + )docstring") + .def("provides_gradients", &optimizable_function::providesGradients, + R"docstring( + Check if the function provides gradient information. + + Returns: + bool: True if the function can compute gradients, False otherwise. + )docstring"); + optim.def( + "optimize", + [](const py::function &function, std::vector xInit, + std::string method, py::kwargs options) { + heterogeneous_map optOptions; + optOptions.insert("initial_parameters", xInit); + + if (!cudaq::optim::optimizer::is_registered(method)) + throw std::runtime_error( + method + " is not a valid, registered cudaq-x optimizer."); + + auto opt = cudaq::optim::optimizer::get(method); + auto result = opt->optimize( + xInit.size(), + [&](std::vector x, std::vector &grad) { + // Call the function. + auto ret = function(x); + // Does it return a tuple? + auto isTupleReturn = py::isinstance(ret); + // If we don't need gradients, and it does, just grab the value + // and return. + if (!opt->requiresGradients() && isTupleReturn) + return ret.cast()[0].cast(); + // If we dont need gradients and it doesn't return tuple, then + // just pass what we got. 
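+                // Protocol summary: gradient-free optimizers accept a bare
+                // float return value, while gradient-based optimizers expect
+                // a (float, list[float]) tuple of (value, gradient), as
+                // documented in the docstring below.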
+ if (!opt->requiresGradients() && !isTupleReturn) + return ret.cast(); + + // Throw an error if we need gradients and they weren't provided. + if (opt->requiresGradients() && !isTupleReturn) + throw std::runtime_error( + "Invalid return type on objective function, must return " + "(float,list[float]) for gradient-based optimizers"); + + // If here, we require gradients, and the signature is right. + auto tuple = ret.cast(); + auto val = tuple[0]; + auto gradIn = tuple[1].cast(); + for (std::size_t i = 0; i < gradIn.size(); i++) + grad[i] = gradIn[i].cast(); + + return val.cast(); + }, + optOptions); + + return result; + }, + py::arg("function"), py::arg("initial_parameters"), + py::arg("method") = "cobyla", R"#( +Optimize a given objective function using various optimization methods. + +This function performs optimization on a user-provided objective function +using the specified optimization method. It supports both gradient-based +and gradient-free optimization algorithms. + +Parameters: +----------- +function : callable + The objective function to be minimized. It should take a list of parameters + as input and return either: + - A single float value (for gradient-free methods) + - A tuple (float, list[float]) where the first element is the function value and the second is the gradient (for gradient-based methods) +initial_parameters : list[float] + Initial guess for the parameters to be optimized. +method : str, optional + The optimization method to use. Default is 'cobyla'. + Must be a valid, registered cudaq-x optimizer. +options : dict + Additional options for the optimizer. These are method-specific. + +Returns: +-------- +OptimizationResult + An object containing the results of the optimization process. + +Raises: +------- +RuntimeError + If an invalid optimization method is specified or if the objective function + returns an incorrect format for gradient-based optimizers. + +Examples: +--------- +>>> def objective(x): +... return sum([xi**2 for xi in x]), [2*xi for xi in x] +>>> result = optimize(objective, [1.0, 2.0, 3.0], method='l-bfgs-b') +>>> print(result.optimal_parameters) +[0.0, 0.0, 0.0] + +>>> def simple_objective(x): +... return sum([xi**2 for xi in x]) +>>> result = optimize(simple_objective, [1.0, 2.0, 3.0], method='cobyla') +>>> print(result.optimal_value) +0.0 + +Notes: +------ +- The function automatically detects whether the optimization method requires + gradients and expects the objective function to return the appropriate format. +- For gradient-based methods, the objective function must return a tuple of + (value, gradient). +- For gradient-free methods, the objective function should return only the value. +- The optimization process uses the cudaq-x backend, which must be properly + set up and have the specified optimization method registered. +)#"); +} + +} // namespace cudaq::optim diff --git a/libs/solvers/python/bindings/solvers/py_optim.h b/libs/solvers/python/bindings/solvers/py_optim.h new file mode 100644 index 0000000..fd6cc57 --- /dev/null +++ b/libs/solvers/python/bindings/solvers/py_optim.h @@ -0,0 +1,15 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include + +namespace py = pybind11; + +namespace cudaq::optim { +void bindOptim(py::module &mod); +} // namespace cudaq::optim diff --git a/libs/solvers/python/bindings/solvers/py_solvers.cpp b/libs/solvers/python/bindings/solvers/py_solvers.cpp new file mode 100644 index 0000000..f7d605e --- /dev/null +++ b/libs/solvers/python/bindings/solvers/py_solvers.cpp @@ -0,0 +1,946 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#include +#include +#include +#include +#include + +#include "cudaq/python/PythonCppInterop.h" + +#include "cudaq/solvers/adapt.h" +#include "cudaq/solvers/qaoa.h" +#include "cudaq/solvers/stateprep/uccsd.h" +#include "cudaq/solvers/vqe.h" + +#include "cudaq/solvers/operators/graph/clique.h" +#include "cudaq/solvers/operators/graph/max_cut.h" +#include "cudaq/solvers/operators/molecule.h" +#include "cudaq/solvers/operators/molecule/fermion_compiler.h" +#include "cudaq/solvers/operators/operator_pool.h" + +#include "bindings/utils/kwargs_utils.h" +#include "bindings/utils/type_casters.h" + +namespace py = pybind11; + +using namespace cudaqx; + +namespace cudaq::solvers { + +cudaqx::graph convert_networkx_graph(py::object nx_graph) { + cudaqx::graph g; + + // Get nodes from NetworkX graph + py::list nodes = nx_graph.attr("nodes")(); + + // Add nodes and their weights if present + for (const auto &node : nodes) { + int node_id = py::cast(node); + + // Try to get node weight if it exists + try { + py::dict node_data = nx_graph.attr("nodes")[node].cast(); + if (node_data.contains("weight")) { + double weight = py::cast(node_data["weight"]); + g.add_node(node_id, weight); + } else { + g.add_node(node_id); + } + } catch (const py::error_already_set &) { + // If no node attributes, add with default weight + g.add_node(node_id); + } + } + + // Get edges from NetworkX graph + py::list edges = nx_graph.attr("edges")(); + + // Add edges and their weights if present + for (const auto &edge : edges) { + py::tuple edge_tuple = edge.cast(); + int u = py::cast(edge_tuple[0]); + int v = py::cast(edge_tuple[1]); + + // Try to get edge weight if it exists + try { + py::dict edge_data = nx_graph.attr("edges")[edge].cast(); + if (edge_data.contains("weight")) { + double weight = py::cast(edge_data["weight"]); + g.add_edge(u, v, weight); + } else { + g.add_edge(u, v); + } + } catch (const py::error_already_set &) { + // If no edge attributes, add with default weight + g.add_edge(u, v); + } + } + + return g; +} + +/// @class PythonOptimizer +/// @brief A Pybind wrapper around SciPy's function optimization. +/// +/// This class provides an interface to use SciPy's optimization functions +/// within a C++ environment using Pybind11. It inherits from the +/// `optim::optimizer` class and overrides its methods to utilize SciPy's +/// optimization capabilities. 
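+///
+/// Illustrative usage sketch (the `ansatz` kernel and `hamiltonian` spin operator
+/// are placeholders, not defined in this file): on the Python side, the caller
+/// passes `scipy.optimize.minimize` itself to a solver entry point bound later in
+/// this file, for example
+///
+///   import cudaq_solvers as solvers
+///   from scipy.optimize import minimize
+///   # `ansatz` is a cudaq kernel and `hamiltonian` a cudaq.SpinOperator,
+///   # both assumed to be defined elsewhere.
+///   energy, params, data = solvers.vqe(ansatz, hamiltonian, [0.0],
+///                                      optimizer=minimize, method='L-BFGS-B',
+///                                      jac='3-point')
+///
+/// This wrapper pops the solver-specific keys (`optimizer`, `gradient`,
+/// `verbose`) and forwards the remaining keyword arguments (`method`, `jac`,
+/// ...) to `minimize`.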
+class PythonOptimizer : public optim::optimizer { +private: + py::function minimize; + py::kwargs kwargs; + std::vector initParams; + +public: + using optimizer::optimize; + + /// @brief Constructor for PythonOptimizer + /// @param optCallback The SciPy optimization function (e.g., + /// scipy.optimize.minimize) + /// @param kw Keyword arguments to pass to the optimization function + /// @param init Initial parameters for optimization (optional) + PythonOptimizer(py::function optCallback, py::kwargs kw, + const std::vector init = {}) + : minimize(optCallback), kwargs(kw), initParams(init) {} + + /// @brief always false + bool requiresGradients() const override { return false; } + + /// @brief Performs optimization using the SciPy minimize function + /// @param dim Dimension of the optimization problem + /// @param opt_function The function to be optimized + /// @param options Additional options for the optimizer (not used in this + /// implementation) + /// @return A tuple containing the optimal function value and the optimal + /// parameters + optimization_result optimize(std::size_t dim, + const optim::optimizable_function &opt_function, + const heterogeneous_map &options) override { + if (kwargs.contains("gradient")) + kwargs.attr("pop")("gradient"); + + if (kwargs.contains("optimizer")) + kwargs.attr("pop")("optimizer"); + + if (kwargs.contains("verbose")) + kwargs.attr("pop")("verbose"); + + if (initParams.empty()) + initParams.resize(dim); + + double value = 0.0; + std::vector parameters(dim); + auto result = minimize(py::cpp_function([&](const std::vector &x) { + std::vector dx(x.size()); + value = opt_function(x, dx); + parameters = x; + return value; + }), + initParams, **kwargs); + return std::make_tuple(value, parameters); + } +}; + +void addStatePrepKernels(py::module &mod) { + cudaq::python::addDeviceKernelInterop< + cudaq::qview<>, const std::vector &, std::size_t, std::size_t>( + mod, "stateprep", "uccsd", + "Unitary Coupled Cluster Singles Doubles Ansatz. Takes as input the " + "qubits to apply the ansatz on, the rotational parameters, the number of " + "electrons in the system, and the total spin (the number of unpaired " + "electrons)."); + cudaq::python::addDeviceKernelInterop, double, std::size_t, + std::size_t>( + mod, "stateprep", "single_excitation", + "Perform a single fermionic excitation."); + cudaq::python::addDeviceKernelInterop, double, std::size_t, + std::size_t, std::size_t, std::size_t>( + mod, "stateprep", "double_excitation", + "Perform a double fermionic excitation."); + auto stateprep = mod.attr("stateprep").cast(); + stateprep.def("get_num_uccsd_parameters", + &cudaq::solvers::stateprep::get_num_uccsd_parameters, + py::arg("num_electrons"), py::arg("num_qubits"), + py::arg("spin") = 0, + "Calculate the number of UCCSD parameters\n\n" + "Args:\n" + " num_electrons (int): Number of electrons\n" + " num_qubits (int): Number of qubits\n" + " spin (int): Spin value. 
Optional, defaults to 0.\n\n" + "Returns:\n" + " int: Number of UCCSD parameters"); + stateprep.def("get_uccsd_excitations", + &cudaq::solvers::stateprep::get_uccsd_excitations, ""); +} + +// Helper function to convert tensor to numpy array +template > +py::array_t tensor_to_numpy(const cudaqx::tensor &tensor_data) { + // Get the dimensions of the tensor + const auto &shape = tensor_data.shape(); + + // Create numpy array with appropriate shape + py::array_t numpy_array(shape); + + // Get raw pointer to numpy array data + auto buf = numpy_array.request(); + T *ptr = static_cast(buf.ptr); + + // Copy data from tensor to numpy array + std::copy(tensor_data.data(), tensor_data.data() + tensor_data.size(), ptr); + + return numpy_array; +} + +void bindOperators(py::module &mod) { + + mod.def( + "jordan_wigner", + [](py::buffer hpq, py::buffer hpqrs, double core_energy = 0.0) { + auto hpqInfo = hpq.request(); + auto hpqrsInfo = hpqrs.request(); + auto *hpqData = reinterpret_cast *>(hpqInfo.ptr); + auto *hpqrsData = + reinterpret_cast *>(hpqrsInfo.ptr); + + cudaqx::tensor hpqT, hpqrsT; + hpqT.borrow(hpqData, {hpqInfo.shape.begin(), hpqInfo.shape.end()}); + hpqrsT.borrow(hpqrsData, + {hpqrsInfo.shape.begin(), hpqrsInfo.shape.end()}); + + return fermion_compiler::get("jordan_wigner") + ->generate(core_energy, hpqT, hpqrsT); + }, + py::arg("hpq"), py::arg("hpqrs"), py::arg("core_energy") = 0.0, + R"#( +Perform the Jordan-Wigner transformation on fermionic operators. + +This function applies the Jordan-Wigner transformation to convert fermionic operators +(represented by one- and two-body integrals) into qubit operators. + +Parameters: +----------- +hpq : numpy.ndarray + A 2D complex numpy array representing the one-body integrals. + Shape should be (N, N) where N is the number of spin molecular orbitals. +hpqrs : numpy.ndarray + A 4D complex numpy array representing the two-body integrals. + Shape should be (N, N, N, N) where N is the number of spin molecular orbitals. +core_energy : float, optional + The core energy of the system when using active space Hamiltonian, nuclear energy otherwise. Default is 0.0. + +Returns: +-------- +cudaq.SpinOperator + A qubit operator (spin operator) resulting from the Jordan-Wigner transformation. + +Raises: +------- +ValueError + If the input arrays have incorrect shapes or types. +RuntimeError + If the Jordan-Wigner transformation fails for any reason. + +Examples: +--------- +>>> import numpy as np +>>> h1 = np.array([[0, 1], [1, 0]], dtype=np.complex128) +>>> h2 = np.zeros((2, 2, 2, 2), dtype=np.complex128) +>>> h2[0, 1, 1, 0] = h2[1, 0, 0, 1] = 0.5 +>>> qubit_op = jordan_wigner(h1, h2, core_energy=0.1) + +Notes: +------ +- The input arrays `hpq` and `hpqrs` must be contiguous and in row-major order. +- This function uses the "jordan_wigner" fermion compiler internally to perform + the transformation. +- The resulting qubit operator can be used directly in quantum algorithms or + further manipulated using CUDA Quantum operations. 
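+- As an illustration (see the `MolecularHamiltonian` class bound below): its `hpq`
+  and `hpqrs` properties already carry the expected complex dtype and
+  spin-molecular-orbital shapes, so they can be passed to this function directly,
+  together with a core or nuclear energy taken from its `energies` dictionary.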
+)#"); + + mod.def( + "jordan_wigner", + [](py::buffer buffer, double core_energy = 0.0) { + auto info = buffer.request(); + auto *data = reinterpret_cast *>(info.ptr); + std::size_t size = 1; + for (auto &s : info.shape) + size *= s; + std::vector> vec(data, data + size); + if (info.shape.size() == 2) { + std::size_t dim = info.shape[0]; + cudaqx::tensor hpq, hpqrs({dim, dim, dim, dim}); + hpq.borrow(data, {info.shape.begin(), info.shape.end()}); + return fermion_compiler::get("jordan_wigner") + ->generate(core_energy, hpq, hpqrs); + } + + std::size_t dim = info.shape[0]; + cudaqx::tensor hpq({dim, dim}), hpqrs; + hpqrs.borrow(data, {info.shape.begin(), info.shape.end()}); + return fermion_compiler::get("jordan_wigner") + ->generate(core_energy, hpq, hpqrs); + }, + py::arg("hpq"), py::arg("core_energy") = 0.0, + R"#( +Perform the Jordan-Wigner transformation on fermionic operators. + +This function applies the Jordan-Wigner transformation to convert fermionic operators +(represented by either one-body or two-body integrals) into qubit operators. + +Parameters: +----------- +hpq : numpy.ndarray + A complex numpy array representing either: + - One-body integrals: 2D array with shape (N, N) + - Two-body integrals: 4D array with shape (N, N, N, N) + where N is the number of orbitals. +core_energy : float, optional + The core energy of the system. Default is 0.0. + +Returns: +-------- +cudaq.SpinOperator + A qubit operator (spin operator) resulting from the Jordan-Wigner transformation. + +Raises: +------- +ValueError + If the input array has an incorrect shape or type. +RuntimeError + If the Jordan-Wigner transformation fails for any reason. + +Examples: +--------- +>>> import numpy as np +>>> # One-body integrals +>>> h1 = np.array([[0, 1], [1, 0]], dtype=np.complex128) +>>> qubit_op1 = jordan_wigner(h1, core_energy=0.1) + +>>> # Two-body integrals +>>> h2 = np.zeros((2, 2, 2, 2), dtype=np.complex128) +>>> h2[0, 1, 1, 0] = h2[1, 0, 0, 1] = 0.5 +>>> qubit_op2 = jordan_wigner(h2) + +Notes: +------ +- The input array must be contiguous and in row-major order. +- This function automatically detects whether the input represents one-body or + two-body integrals based on its shape. +- For one-body integrals input, a zero-initialized two-body tensor is used internally. +- For two-body integrals input, a zero-initialized one-body tensor is used internally. +- This function uses the "jordan_wigner" fermion compiler internally to perform + the transformation. +- The resulting qubit operator can be used directly in quantum algorithms or + further manipulated using CUDA Quantum operations. +)#"); + + py::class_(mod, "MolecularHamiltonian") + .def_readonly("energies", &molecular_hamiltonian::energies, + R"#( + Dictionary of energies from classical computation. + )#") + .def_readonly("hamiltonian", &molecular_hamiltonian::hamiltonian, + R"#( + :class:`cudaq.SpinOperator`: The qubit representation of the molecular Hamiltonian. + + This is the full electronic Hamiltonian of the molecule, transformed into + qubit operators using a specific mapping (e.g., Jordan-Wigner). + )#") + .def_readonly("n_electrons", &molecular_hamiltonian::n_electrons, + R"#( + int: The number of electrons in the molecule. + + This represents the total number of electrons in the molecular system, + which is crucial for determining the filling of orbitals and the overall + electronic structure. + )#") + .def_readonly("n_orbitals", &molecular_hamiltonian::n_orbitals, + R"#( + int: The number of molecular orbitals. 
+ + This is the total number of molecular orbitals considered in the + calculation, which determines the size of the Hamiltonian and the + complexity of the quantum simulation. + )#") + .def_property_readonly( + "hpq", + [](const molecular_hamiltonian &self) { + return tensor_to_numpy(self.hpq); + }, + R"#( + numpy.ndarray: One-electron integrals. + + A 2D complex array of shape (n_orbitals, n_orbitals), where n_orbitals is the + number of spin molecular orbitals, representing + the one-electron integrals in the molecular orbital basis. These + include kinetic energy and electron-nuclear attraction terms. + )#") + .def_property_readonly( + "hpqrs", + [](const molecular_hamiltonian &self) { + return tensor_to_numpy(self.hpqrs); + }, + R"#( + numpy.ndarray: Two-electron integrals. + + A 4D complex array of shape (n_orbitals, n_orbitals, n_orbitals, n_orbitals), + where n_orbitals is the number of spin molecular orbitals, + representing the two-electron integrals in the molecular orbital basis. + These describe electron-electron interactions. + )#"); + + auto creator = [](molecular_geometry &molGeom, const std::string basis, + int spin, int charge, py::kwargs options) { + molecule_options inOptions; + inOptions.type = getValueOr(options, "type", "gas_phase"); + std::optional nele_cas = + getValueOr(options, "nele_cas", -1); + inOptions.nele_cas = nele_cas == -1 ? std::nullopt : nele_cas; + std::optional norb_cas = + getValueOr(options, "norb_cas", -1); + inOptions.norb_cas = norb_cas == -1 ? std::nullopt : norb_cas; + inOptions.symmetry = getValueOr(options, "symmetry", false); + inOptions.memory = getValueOr(options, "memory", 4000.); + inOptions.cycles = getValueOr(options, "cycles", 100); + inOptions.initguess = + getValueOr(options, "initguess", "minao"); + inOptions.UR = getValueOr(options, "UR", false); + inOptions.MP2 = getValueOr(options, "MP2", false); + inOptions.natorb = getValueOr(options, "natorb", false); + inOptions.casci = getValueOr(options, "casci", false); + inOptions.ccsd = getValueOr(options, "ccsd", false); + inOptions.casscf = getValueOr(options, "casscf", false); + inOptions.integrals_natorb = + getValueOr(options, "integrals_natorb", false); + inOptions.integrals_casscf = + getValueOr(options, "integrals_casscf", false); + inOptions.verbose = getValueOr(options, "verbose", false); + + if (inOptions.verbose) + inOptions.dump(); + return create_molecule(molGeom, basis, spin, charge, inOptions); + }; + + mod.def( + "create_molecule", + [&](py::list geometry, const std::string basis, int spin, int charge, + py::kwargs options) { + std::vector atoms; + for (auto el : geometry) { + if (!py::isinstance(el)) + throw std::runtime_error( + "geometry must be a list of tuples ('NAME', (X, Y, Z))"); + auto casted = el.cast(); + if (!py::isinstance(casted[1])) + throw std::runtime_error( + "geometry must be a list of tuples ('NAME', (X, Y, Z))"); + + auto name = casted[0].cast(); + auto coords = casted[1].cast(); + atoms.push_back( + atom{name, + {coords[0].cast(), coords[1].cast(), + coords[2].cast()}}); + } + molecular_geometry molGeom(atoms); + + return creator(molGeom, basis, spin, charge, options); + }, + py::arg("geometry"), py::arg("basis"), py::arg("spin"), py::arg("charge"), + R"#(Create a molecular hamiltonian from an XYZ file and additional parameters. + +This function generates a molecular hamiltonian based on the geometry specified in an XYZ file +and additional quantum chemical parameters. 
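+For example, a geometry such as [('H', (0., 0., 0.)), ('H', (0., 0., .7474))] with
+basis 'sto-3g', spin 0, and charge 0 (the hydrogen-molecule case exercised by the
+tests later in this patch) is a typical input for this overload.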
+ +Parameters: +----------- +geometry : list of tuples + List of tuples representing the molecular geometry. Each tuple should be in the format + ('ELEMENT', (X, Y, Z)), where 'ELEMENT' is the element symbol and X, Y, Z are coordinates. +basis : str + The basis set to be used for the molecular calculation (e.g., "sto-3g", "6-31g"). +spin : int + The spin multiplicity of the molecule (2S + 1, where S is the total spin). +charge : int + The total charge of the molecule. +options : dict + Additional keyword arguments for customizing the molecular model creation. + These may include method-specific parameters or computational settings. + +Returns: +-------- +object + A molecular hamiltonian encoding the spin_op, one and two body overlap integrals, and energies relevant for the model. + +Raises: +------- +RuntimeError + If the molecular model creation fails for any other reason. + + +)#"); + + mod.def( + "create_molecule", + [&](const std::string &xyz_file, const std::string basis, int spin, + int charge, py::kwargs options) { + auto geom = molecular_geometry::from_xyz(xyz_file); + return creator(geom, basis, spin, charge, options); + }, + py::arg("xyz_file"), py::arg("basis"), py::arg("spin"), py::arg("charge"), + R"#(Create a molecular hamiltonian from an XYZ file and additional parameters.)#"); + + mod.def( + "get_operator_pool", + [](const std::string &name, py::kwargs config) { + heterogeneous_map asCpp; + for (auto &[k, v] : config) { + std::string asStr = k.cast(); + if (py::isinstance(v)) + asCpp.insert(asStr, v.cast()); + if (py::isinstance(v)) + asCpp.insert(asStr, v.cast>()); + } + return operator_pool::get(name)->generate(asCpp); + }, + R"#(Get and generate an operator pool based on the specified name and configuration. + +This function retrieves an operator pool implementation by name and generates +a set of operators using the provided configuration. + +Parameters: +----------- +name : str + The name of the operator pool implementation to use. +config : dict + Keyword arguments representing the configuration for operator pool generation. + Supported value types: + - int: Converted to std::size_t in C++. + - list: Converted to std::vector in C++. + +Returns: +-------- +list + A list of generated operators (:class:`cudaq.SpinOperator` objects). + +Raises: +------- +RuntimeError + If the specified operator pool implementation is not found. +TypeError + If an unsupported configuration value type is provided. + +Examples: +--------- +>>> ops = get_operator_pool("uccsd", n_qubits=4, n_electrons=2) +>>> ops = get_operator_pool("custom_pool", cutoff=1e-5, parameters=[0.1, 0.2, 0.3]) + +Notes: +------ +The function internally converts Python types to C++ types and uses the +cudaq::operator_pool extension point system to retrieve and generate the +operator pool. Only integer and list configuration values are currently supported. +)#"); +} + +void bindSolvers(py::module &mod) { + + addStatePrepKernels(mod); + + auto solvers = mod; //.def_submodule("solvers"); + bindOperators(solvers); + + py::enum_( + solvers, "ObserveExecutionType", + R"#(An enumeration representing different types of execution in an optimization process. + +This enum defines the various types of operations that can occur during an +optimization iteration, specifically distinguishing between function evaluations +and gradient computations. 
+ +Usage: +------ +This enum is typically used in conjunction with optimization algorithms and +observation mechanisms to indicate the nature of a particular step or evaluation +in the optimization process. + +Examples: +--------- +>>> def callback(iteration): +... if iteration.type == ObserveExecutionType.function: +... print("Function evaluation") +... elif iteration.type == ObserveExecutionType.gradient: +... print("Gradient computation") + +>>> # In an optimization loop +>>> for step in optimization_steps: +... if step.type == ObserveExecutionType.function: +... # Process function evaluation +... elif step.type == ObserveExecutionType.gradient: +... # Process gradient information + +Notes: +------ +- The distinction between function evaluations and gradient computations is + particularly important for gradient-based optimization methods. +- Some optimization algorithms may only use function evaluations (gradient-free methods), + while others rely heavily on gradient information. +- This enum can be used for logging, debugging, or implementing custom behaviors + based on the type of operation being performed during optimization. +)#") + .value( + "function", observe_execution_type::function, + R"#(Represents a standard function evaluation of the objective function. + +This typically involves computing the value of the objective function +at a given point in the parameter space.)#") + .value("gradient", observe_execution_type::gradient, + R"#(Represents a gradient computation. + +This involves calculating the partial derivatives of the objective)#"); + + py::class_( + solvers, "ObserveIteration", + R"#(A class representing a single iteration of an optimization process. + +This class encapsulates the state of an optimization iteration, including +the current parameter values, the result of the objective function evaluation, +and the type of iteration)#") + .def_readonly( + "parameters", &observe_iteration::parameters, + R"#(The current values of the optimization parameters at this iteration. +These represent the point in the parameter space being evaluated.)#") + .def_readonly( + "result", &observe_iteration::result, + R"#(The value of the objective function evaluated at the current parameters. +For minimization problems, lower values indicate better solutions.)#") + .def_readonly( + "type", &observe_iteration::type, + R"#(A string indicating the type or purpose of this iteration. 
Common types might include: +- 'function': A standard function evaluation +- 'gradient': An iteration where gradients were computed +The exact set of possible types may depend on the specific optimization algorithm used.)#"); + + solvers.def( + "vqe", + [](const py::function &kernel, cudaq::spin_op op, + std::vector initial_parameters, py::kwargs options) { + heterogeneous_map optOptions; + optOptions.insert("shots", + cudaqx::getValueOr(options, "shots", -1)); + if (options.contains("max_iterations")) + optOptions.insert( + "max_iterations", + cudaqx::getValueOr(options, "max_iterations", -1)); + + optOptions.insert("verbose", + cudaqx::getValueOr(options, "verbose", false)); + + // Handle the case where the user has provided a SciPy optimizer + if (options.contains("optimizer") && + py::isinstance(options["optimizer"])) { + auto func = options["optimizer"].cast(); + if (func.attr("__name__").cast() != "minimize") + throw std::runtime_error( + "Invalid functional optimizer provided (only " + "scipy.optimize.minimize supported)."); + PythonOptimizer opt(func, options, initial_parameters); + auto result = + cudaq::solvers::vqe([&](std::vector x) { kernel(x); }, op, + opt, initial_parameters, optOptions); + return py::make_tuple(result.energy, result.optimal_parameters, + result.iteration_data); + } + + auto optimizerName = + cudaqx::getValueOr(options, "optimizer", "cobyla"); + auto optimizer = cudaq::optim::optimizer::get(optimizerName); + auto kernelWrapper = [&](std::vector x) { kernel(x); }; + + if (!optimizer->requiresGradients()) { + auto result = cudaq::solvers::vqe(kernelWrapper, op, *optimizer, + initial_parameters, optOptions); + return py::make_tuple(result.energy, result.optimal_parameters, + result.iteration_data); + } + + auto gradientName = cudaqx::getValueOr(options, "gradient", + "parameter_shift"); + auto gradient = + cudaq::observe_gradient::get(gradientName, kernelWrapper, op); + + auto result = cudaq::solvers::vqe(kernelWrapper, op, *optimizer.get(), + *gradient.get(), initial_parameters, + optOptions); + return py::make_tuple(result.energy, result.optimal_parameters, + result.iteration_data); + }, + py::arg("kernel"), py::arg("spin_op"), py::arg("initial_parameters"), R"#( +Execute the Variational Quantum Eigensolver (VQE) algorithm. + +This function implements the VQE algorithm, a hybrid quantum-classical algorithm +used to find the ground state energy of a given Hamiltonian using a parameterized +quantum circuit. + +Parameters: +----------- +kernel : callable + A function representing the parameterized quantum circuit (ansatz). + It should take a list of parameters as input and prepare the quantum state. + +spin_op : cudaq.SpinOperator + The Hamiltonian operator for which to find the ground state energy. + +initial_parameters : List[float] + Initial values for the variational parameters of the quantum circuit. + +options : dict + Additional options for the VQE algorithm. Supported options include: + - shots : int, optional, Number of measurement shots. Default is -1 (use maximum available). + - max_iterations : int, optional Maximum number of optimization iterations. Default is -1 (no limit). + - verbose : bool, optional Whether to print verbose output. Default is False. + - optimizer : str, optional Name of the classical optimizer to use. Default is 'cobyla'. + - gradient : str, optional Method for gradient computation (for gradient-based optimizers). Default is 'parameter_shift'. 
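+    - optimizer may alternatively be the callable `scipy.optimize.minimize` itself;
+      in that case the remaining keyword arguments (e.g. `method`, `jac`) are
+      forwarded to SciPy (see the `PythonOptimizer` wrapper above).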
+ +Returns: +-------- +Tuple[float, List[float], List[ObserveIteration]] + A tuple containing: + 1. The optimized ground state energy. + 2. The optimal variational parameters. + 3. A list of ObserveIteration objects containing data from each iteration. + +Raises: +------- +RuntimeError + If an invalid optimizer or gradient method is specified. + +Examples: +--------- +>>> def ansatz(params): +... # Define your quantum circuit here +... pass +>>> hamiltonian = cudaq.SpinOperator(...) # Define your Hamiltonian +>>> initial_params = [0.1, 0.2, 0.3] +>>> energy, opt_params, iterations = vqe(ansatz, hamiltonian, initial_params, +... optimizer='cobyla', shots=1000) +>>> print(f"Ground state energy: {energy}") +>>> print(f"Optimal parameters: {opt_params}") + +Notes: +------ +- The function automatically selects between gradient-free and gradient-based + optimization based on the chosen optimizer. +- For gradient-based optimization, the 'parameter_shift' method is used by default, + but can be changed using the 'gradient' option. +- The ObserveIteration objects in the returned list contain detailed information + about each optimization step, useful for analysis and visualization. +- The performance of VQE heavily depends on the choice of ansatz, initial parameters, + and optimization method. + +)#"); + + solvers.def( + "adapt_vqe", + [](py::object initialStateKernel, cudaq::spin_op op, + const std::vector &pool, py::kwargs options) { + cudaq::python::CppPyKernelDecorator initialStateKernelWrapper( + initialStateKernel); + initialStateKernelWrapper.compile(); + auto baseName = initialStateKernel.attr("name").cast(); + std::string kernelName = "__nvqpp__mlirgen__" + baseName; + auto fptr = + initialStateKernelWrapper + .extract_c_function_pointer &>(kernelName); + auto *p = reinterpret_cast(fptr); + cudaq::registry::__cudaq_registerLinkableKernel(p, baseName.c_str(), p); + heterogeneous_map optOptions; + optOptions.insert("verbose", + getValueOr(options, "verbose", false)); + + // Handle the case where the user has provided a SciPy optimizer + if (options.contains("optimizer") && + py::isinstance(options["optimizer"])) { + auto func = options["optimizer"].cast(); + if (func.attr("__name__").cast() != "minimize") + throw std::runtime_error( + "Invalid functional optimizer provided (only " + "scipy.optimize.minimize supported)."); + PythonOptimizer opt(func, options); + return cudaq::solvers::adapt_vqe(fptr, op, pool, opt, optOptions); + } + + auto optimizerName = + cudaqx::getValueOr(options, "optimizer", "cobyla"); + auto optimizer = cudaq::optim::optimizer::get(optimizerName); + auto gradName = + cudaqx::getValueOr(options, "gradient", ""); + + // FIXME Convert options from kwargs + return cudaq::solvers::adapt_vqe(fptr, op, pool, *optimizer, gradName, + optOptions); + }, + R"( + Perform ADAPT-VQE (Adaptive Derivative-Assembled Pseudo-Trotter Variational Quantum Eigensolver) optimization. + + Args: + initialStateKernel (object): Python object representing the initial state kernel. + op (cudaq.SpinOperator): The Hamiltonian operator to be optimized. + pool (list of cudaq.SpinOperator): Pool of operators for ADAPT-VQE. + options: Additional options for the optimization process. + + Keyword Args: + optimizer (str): Optional name of the optimizer to use. Defaults to cobyla. + gradient (str): Optional name of the gradient method to use. Defaults to empty. + + Returns: + The result of the ADAPT-VQE optimization. + + Note: + This function wraps the C++ implementation of ADAPT-VQE in CUDA-QX. 
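+        (For a usage sketch, see test_adapt.py later in this patch, where the call is
+        roughly `solvers.adapt_vqe(initState, molecule.hamiltonian, operators)`,
+        optionally with `optimizer` and `gradient` keyword choices.)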
+ It compiles and registers the initial state kernel, sets up the optimizer, + and performs the ADAPT-VQE optimization using the provided parameters. + )"); + + // Bind the qaoa_result struct + py::class_( + solvers, "QAOAResult", + "The QAOAResult encodes the optimal value, optimal parameters, and final " + "sampled state as a cudaq.SampleResult.") + .def(py::init<>()) + .def_readwrite("optimal_value", + &cudaq::solvers::qaoa_result::optimal_value) + .def_readwrite("optimal_parameters", + &cudaq::solvers::qaoa_result::optimal_parameters) + .def_readwrite("optimal_config", + &cudaq::solvers::qaoa_result::optimal_config) + // Add tuple interface + .def("__len__", [](const cudaq::solvers::qaoa_result &) { return 3; }) + .def("__getitem__", + [](const cudaq::solvers::qaoa_result &r, size_t i) { + switch (i) { + case 0: + return py::cast(r.optimal_value); + case 1: + return py::cast(r.optimal_parameters); + case 2: + return py::cast(r.optimal_config); + default: + throw py::index_error(); + } + }) + // Enable iteration protocol + .def("__iter__", [](const cudaq::solvers::qaoa_result &r) -> py::object { + return py::iter(py::make_tuple(r.optimal_value, r.optimal_parameters, + r.optimal_config)); + }); + + // Bind QAOA functions using lambdas + solvers.def( + "qaoa", + [](const cudaq::spin_op &problemHamiltonian, + const cudaq::spin_op &referenceHamiltonian, std::size_t numLayers, + const std::vector &initialParameters, py::kwargs options) { + if (initialParameters.empty()) + throw std::runtime_error("qaoa initial parameters empty."); + // Handle the case where the user has provided a SciPy optimizer + if (options.contains("optimizer") && + py::isinstance(options["optimizer"])) { + auto func = options["optimizer"].cast(); + if (func.attr("__name__").cast() != "minimize") + throw std::runtime_error( + "Invalid functional optimizer provided (only " + "scipy.optimize.minimize supported)."); + PythonOptimizer opt(func, options); + return cudaq::solvers::qaoa(problemHamiltonian, opt, numLayers, + initialParameters, + hetMapFromKwargs(options)); + } + + auto optimizerName = + cudaqx::getValueOr(options, "optimizer", "cobyla"); + auto optimizer = cudaq::optim::optimizer::get(optimizerName); + auto gradName = + cudaqx::getValueOr(options, "gradient", ""); + + return cudaq::solvers::qaoa(problemHamiltonian, referenceHamiltonian, + *optimizer, numLayers, initialParameters, + hetMapFromKwargs(options)); + }, + py::arg("problemHamiltonian"), py::arg("referenceHamiltonian"), + py::arg("numLayers"), py::arg("initialParameters")); + + solvers.def( + "qaoa", + [](const cudaq::spin_op &problemHamiltonian, std::size_t numLayers, + const std::vector &initialParameters, py::kwargs options) { + if (initialParameters.empty()) + throw std::runtime_error("qaoa initial parameters empty."); + // Handle the case where the user has provided a SciPy optimizer + if (options.contains("optimizer") && + py::isinstance(options["optimizer"])) { + auto func = options["optimizer"].cast(); + if (func.attr("__name__").cast() != "minimize") + throw std::runtime_error( + "Invalid functional optimizer provided (only " + "scipy.optimize.minimize supported)."); + PythonOptimizer opt(func, options); + return cudaq::solvers::qaoa(problemHamiltonian, opt, numLayers, + initialParameters, + hetMapFromKwargs(options)); + } + + auto optimizerName = + cudaqx::getValueOr(options, "optimizer", "cobyla"); + auto optimizer = cudaq::optim::optimizer::get(optimizerName); + auto gradName = + cudaqx::getValueOr(options, "gradient", ""); + return 
cudaq::solvers::qaoa(problemHamiltonian, *optimizer, numLayers, + initialParameters, + hetMapFromKwargs(options)); + }, + py::arg("problemHamiltonian"), py::arg("numLayers"), + py::arg("initialParameters")); + + solvers.def( + "get_num_qaoa_parameters", + [](const cudaq::spin_op &problemHamiltonian, std::size_t numLayers, + py::kwargs options) { + return cudaq::solvers::get_num_qaoa_parameters( + problemHamiltonian, numLayers, hetMapFromKwargs(options)); + }, + "Return the number of required QAOA rotation parameters."); + + solvers.def( + "get_maxcut_hamiltonian", + [](py::object nx_graph) { + // Convert NetworkX graph to our internal representation + cudaqx::graph g = convert_networkx_graph(nx_graph); + + // Generate and return the Hamiltonian + return cudaq::solvers::get_maxcut_hamiltonian(g); + }, + "Generate MaxCut Hamiltonian from a NetworkX graph", py::arg("graph")); + + solvers.def( + "get_clique_hamiltonian", + [](py::object nx_graph, double penalty = 4.0) { + // Convert NetworkX graph to our internal representation + cudaqx::graph g = convert_networkx_graph(nx_graph); + + // Generate and return the Hamiltonian + return cudaq::solvers::get_clique_hamiltonian(g, penalty); + }, + "Generate Clique Hamiltonian from a NetworkX graph", py::arg("graph"), + py::arg("penalty") = 4.0); +} + +} // namespace cudaq::solvers diff --git a/libs/solvers/python/bindings/solvers/py_solvers.h b/libs/solvers/python/bindings/solvers/py_solvers.h new file mode 100644 index 0000000..37fb03a --- /dev/null +++ b/libs/solvers/python/bindings/solvers/py_solvers.h @@ -0,0 +1,15 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include + +namespace py = pybind11; + +namespace cudaq::solvers { +void bindSolvers(py::module &mod); +} // namespace cudaq::solvers diff --git a/libs/solvers/python/bindings/utils/kwargs_utils.h b/libs/solvers/python/bindings/utils/kwargs_utils.h new file mode 100644 index 0000000..7e42cbb --- /dev/null +++ b/libs/solvers/python/bindings/utils/kwargs_utils.h @@ -0,0 +1,53 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include "cuda-qx/core/heterogeneous_map.h" +#include "pybind11/pybind11.h" + +namespace py = pybind11; + +namespace cudaqx { + +/// @brief Return the value of given type corresponding to the provided +/// key string from the provided options `kwargs` `dict`. Return the `orVal` +/// if the key is not in the `dict`. 
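+///
+/// Example (illustrative only): with Python kwargs {"shots": 1000},
+/// `getValueOr<int>(options, "shots", -1)` yields 1000, while
+/// `getValueOr<int>(options, "max_iterations", -1)` falls back to -1.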
+template +T getValueOr(py::kwargs &options, const std::string &key, const T &orVal) { + if (options.contains(key)) + for (auto item : options) + if (item.first.cast() == key) + return item.second.cast(); + + return orVal; +} + +inline heterogeneous_map hetMapFromKwargs(const py::kwargs &kwargs) { + cudaqx::heterogeneous_map result; + + for (const auto &item : kwargs) { + std::string key = py::cast(item.first); + auto value = item.second; + + if (py::isinstance(value)) { + result.insert(key, value.cast()); + } else if (py::isinstance(value)) { + result.insert(key, value.cast()); + } else if (py::isinstance(value)) { + result.insert(key, value.cast()); + } else if (py::isinstance(value)) { + result.insert(key, value.cast()); + } else { + throw std::runtime_error( + "Invalid python type for mapping kwargs to a heterogeneous_map."); + } + } + + return result; +} +} // namespace cudaqx diff --git a/libs/solvers/python/bindings/utils/type_casters.h b/libs/solvers/python/bindings/utils/type_casters.h new file mode 100644 index 0000000..e4ebde4 --- /dev/null +++ b/libs/solvers/python/bindings/utils/type_casters.h @@ -0,0 +1,82 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include "common/ObserveResult.h" +#include "pybind11/pybind11.h" +#include "pybind11/pytypes.h" +namespace py = pybind11; + +namespace pybind11 { +namespace detail { +template <> +struct type_caster { + PYBIND11_TYPE_CASTER(cudaq::spin_op, const_name("SpinOperator")); + + bool load(handle src, bool) { + if (!src) + return false; + auto data = src.attr("serialize")().cast>(); + auto numQubits = src.attr("get_qubit_count")().cast(); + value = cudaq::spin_op(data, numQubits); + return true; + } + + static handle cast(cudaq::spin_op v, return_value_policy /*policy*/, + handle /*parent*/) { + py::object tv_py = py::module::import("cudaq").attr("SpinOperator")( + v.getDataRepresentation(), v.num_qubits()); // Construct new python obj + return tv_py.release(); + } +}; + +template <> +struct type_caster { + PYBIND11_TYPE_CASTER(cudaq::sample_result, const_name("SampleResult")); + + bool load(handle src, bool) { + if (!src) + return false; + + auto data = src.attr("serialize")().cast>(); + value = cudaq::sample_result(); + value.deserialize(data); + return true; + } + + static handle cast(cudaq::sample_result v, return_value_policy /*policy*/, + handle /*parent*/) { + py::object tv_py = py::module::import("cudaq").attr("SampleResult")(); + tv_py.attr("deserialize")(v.serialize()); + return tv_py.release(); + } +}; + +template <> +struct type_caster { + PYBIND11_TYPE_CASTER(cudaq::observe_result, const_name("ObserveResult")); + + bool load(handle src, bool) { + if (!src) + return false; + + auto e = src.attr("expectation")().cast(); + value = cudaq::observe_result(e, cudaq::spin_op()); + // etc. 
+ return true; + } + + static handle cast(cudaq::observe_result v, return_value_policy /*policy*/, + handle /*parent*/) { + py::object tv_py = py::module::import("cudaq").attr("ObserveResult")( + v.expectation(), v.get_spin(), v.raw_data()); + return tv_py.release(); + } +}; +} // namespace detail +} // namespace pybind11 diff --git a/libs/solvers/python/cudaq_solvers/__init__.py b/libs/solvers/python/cudaq_solvers/__init__.py new file mode 100644 index 0000000..21ae53c --- /dev/null +++ b/libs/solvers/python/cudaq_solvers/__init__.py @@ -0,0 +1,10 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +from ._pycudaqx_solvers_the_suffix_matters_cudaq_solvers import * + diff --git a/libs/solvers/python/cudaq_solvers/tools/molecule/pyscf/generators/gas_phase_generator.py b/libs/solvers/python/cudaq_solvers/tools/molecule/pyscf/generators/gas_phase_generator.py new file mode 100644 index 0000000..6974097 --- /dev/null +++ b/libs/solvers/python/cudaq_solvers/tools/molecule/pyscf/generators/gas_phase_generator.py @@ -0,0 +1,670 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +from ..hamiltonian_generator import HamiltonianGenerator +import numpy as np +import json +from functools import reduce +try: + from pyscf import gto, scf, cc, ao2mo, mp, mcscf, solvent, fci +except RuntimeError: + print( + 'PySCF should be installed to use cudaq-pyscf tool. 
Use pip install pyscf' + ) + + +class GasPhaseGenerator(HamiltonianGenerator): + + def name(self): + return 'gas_phase' + + def generate_molecular_spin_ham_restricted(self, h1e, h2e, ecore): + + # This function generates the molecular spin Hamiltonian + # H= E_core+sum_{pq} h_{pq} a_p^dagger a_q + + # 0.5 * h_{pqrs} a_p^dagger a_q^dagger a_r a_s + # h1e: one body integrals h_{pq} + # h2e: two body integrals h_{pqrs} + # ecore: constant (nuclear repulsion or core energy in the active space Hamiltonian) + + # Total number of qubits equals the number of spin molecular orbitals + nqubits = 2 * h1e.shape[0] + + # Initialization + one_body_coeff = np.zeros((nqubits, nqubits)) + two_body_coeff = np.zeros((nqubits, nqubits, nqubits, nqubits)) + + for p in range(nqubits // 2): + for q in range(nqubits // 2): + + # p & q have the same spin = =1 + # ==0 (orthogonal) + one_body_coeff[2 * p, 2 * q] = h1e[p, q] + one_body_coeff[2 * p + 1, 2 * q + 1] = h1e[p, q] + + for r in range(nqubits // 2): + for s in range(nqubits // 2): + + # Same spin (aaaa, bbbbb) , + two_body_coeff[2 * p, 2 * q, 2 * r, + 2 * s] = 0.5 * h2e[p, q, r, s] + two_body_coeff[2 * p + 1, 2 * q + 1, 2 * r + 1, + 2 * s + 1] = 0.5 * h2e[p, q, r, s] + + # Mixed spin(abab, baba) , + #= 0 (orthogoanl) + two_body_coeff[2 * p, 2 * q + 1, 2 * r + 1, + 2 * s] = 0.5 * h2e[p, q, r, s] + two_body_coeff[2 * p + 1, 2 * q, 2 * r, + 2 * s + 1] = 0.5 * h2e[p, q, r, s] + + return one_body_coeff, two_body_coeff, ecore + + def generate_molecular_spin_ham_ur(self, h1e, h2e, h2e_prime, ecore): + + # This function generates the molecular spin Hamiltonian + # H= E_core+sum_{pq} h_{pq} a_p^dagger a_q + + # 0.5 * h_{pqrs} a_p^dagger a_q^dagger a_r a_s + # h1e: one body integrals h_{pq} + # h2e: two body integrals h_{pqrs} + # ecore: constant (nuclear repulsion or core energy in the active space Hamiltonian) + + # Total number of qubits equals the number of spin molecular orbitals + nqubits = 2 * h1e[0].shape[0] + + # Initialization + one_body_coeff = np.zeros((nqubits, nqubits)) + two_body_coeff = np.zeros((nqubits, nqubits, nqubits, nqubits)) + + for p in range(nqubits // 2): + for q in range(nqubits // 2): + + # p & q have the same spin , + one_body_coeff[2 * p, 2 * q] = h1e[0, p, q] + one_body_coeff[2 * p + 1, 2 * q + 1] = h1e[1, p, q] + + for r in range(nqubits // 2): + for s in range(nqubits // 2): + + # Same spin (aaaa, bbbbb) , + two_body_coeff[2 * p, 2 * q, 2 * r, + 2 * s] = 0.5 * h2e[0, p, q, r, s] + two_body_coeff[2 * p + 1, 2 * q + 1, 2 * r + 1, + 2 * s + 1] = 0.5 * h2e[2, p, q, r, s] + + # Mixed spin(abba, baab) , + two_body_coeff[2 * p + 1, 2 * q, 2 * r, + 2 * s + 1] = 0.5 * h2e_prime[p, q, r, s] + two_body_coeff[2 * p, 2 * q + 1, 2 * r + 1, + 2 * s] = 0.5 * h2e[1, p, q, r, s] + + return one_body_coeff, two_body_coeff, ecore + + +#################################################################### + +# A- Without solvent +############################# +## Beginning of simulation +############################# + + def get_spin_hamiltonian(self, xyz:str, spin:int, charge: int, basis:str, symmetry:bool=False, memory:float=4000,cycles:int=100, \ + initguess:str='minao', UR:bool=False, nele_cas=None, norb_cas=None, MP2:bool=False, natorb:bool=False,\ + casci:bool=False, ccsd:bool=False, casscf:bool=False,integrals_natorb:bool=False, \ + integrals_casscf:bool=False, verbose:bool=False, cache_data=True, outFileName=None): + ################################ + # Initialize the molecule + ################################ + filename = xyz.split('.')[0] if 
outFileName == None else outFileName + energies = {} + + if (nele_cas is None) and (norb_cas is not None): + raise RuntimeError( + "WARN: nele_cas is None and norb_cas is not None. nele_cas and norb_cas should be either both None\ + or have values") + + if (nele_cas is not None) and (norb_cas is None): + raise RuntimeError( + "WARN: nele_cas is not None and norb_cas is None. nele_cas and norb_cas should be either both None\ + or have values") + + ######################################################################## + # To add (coming soon) + + if UR and nele_cas is None: + raise RuntimeError( + "WARN: Unrestricted spin calculation for the full space is not supported yet on Cudaq.\ + Only active space is currently supported for the unrestricted spin calculations." + ) + + mol = gto.M(atom=xyz, + spin=spin, + charge=charge, + basis=basis, + max_memory=memory, + symmetry=symmetry, + output=filename + '-pyscf.log', + verbose=4) + + ################################## + # Mean field (HF) + ################################## + + if UR: + myhf = scf.UHF(mol) + myhf.max_cycle = cycles + myhf.chkfile = filename + '-pyscf.chk' + myhf.init_guess = initguess + myhf.kernel() + + norb = myhf.mo_coeff[0].shape[1] + if verbose: + print('[pyscf] Total number of alpha molecular orbitals = ', + norb) + norb = myhf.mo_coeff[1].shape[1] + if verbose: + print('[pyscf] Total number of beta molecular orbitals = ', + norb) + + else: + myhf = scf.RHF(mol) + myhf.max_cycle = cycles + myhf.chkfile = filename + '-pyscf.chk' + myhf.init_guess = initguess + myhf.kernel() + + norb = myhf.mo_coeff.shape[1] + if verbose: + print('[pyscf] Total number of orbitals = ', norb) + + nelec = mol.nelectron + energies['hf_energy'] = myhf.e_tot + if verbose: + print('[pyscf] Total number of electrons = ', nelec) + print('[pyscf] HF energy = ', myhf.e_tot) + + ########################## + # MP2 + ########################## + if MP2: + + if UR: + mymp = mp.UMP2(myhf) + mp_ecorr, mp_t2 = mymp.kernel() + if verbose: + print('[pyscf] UR-MP2 energy= ', mymp.e_tot) + + if integrals_natorb or natorb: + # Compute natural orbitals + dma, dmb = mymp.make_rdm1() + noon_a, U_a = np.linalg.eigh(dma) + noon_b, U_b = np.linalg.eigh(dmb) + noon_a = np.flip(noon_a) + noon_b = np.flip(noon_b) + + if verbose: + print( + '[pyscf] Natural orbital (alpha orbitals) occupation number from UR-MP2: ' + ) + print(noon_a) + print( + '[pyscf] Natural orbital (beta orbitals) occupation number from UR-MP2: ' + ) + print(noon_b) + + natorbs = np.zeros(np.shape(myhf.mo_coeff)) + natorbs[0, :, :] = np.dot(myhf.mo_coeff[0], U_a) + natorbs[0, :, :] = np.fliplr(natorbs[0, :, :]) + natorbs[1, :, :] = np.dot(myhf.mo_coeff[1], U_b) + natorbs[1, :, :] = np.fliplr(natorbs[1, :, :]) + + else: + if spin != 0: + raise RuntimeError("WARN: ROMP2 is unvailable in pyscf.") + else: + mymp = mp.MP2(myhf) + mp_ecorr, mp_t2 = mymp.kernel() + if verbose: + print('[pyscf] R-MP2 energy= ', mymp.e_tot) + + if integrals_natorb or natorb: + # Compute natural orbitals + noons, natorbs = mcscf.addons.make_natural_orbitals( + mymp) + if verbose: + print( + '[pyscf] Natural orbital occupation number from R-MP2: ' + ) + print(noons) + + ####################################### + # CASCI if active space is defined + # FCI if the active space is None + ###################################### + if casci: + + if UR: + if natorb: + mycasci = mcscf.UCASCI(myhf, norb_cas, nele_cas) + mycasci.kernel(natorbs) + if verbose: + print( + '[pyscf] UR-CASCI energy using natural orbitals= ', + mycasci.e_tot) + 
energies['UR-CASCI'] = mycasci.e_tot + else: + mycasci_mo = mcscf.UCASCI(myhf, norb_cas, nele_cas) + mycasci_mo.kernel() + if verbose: + print( + '[pyscf] UR-CASCI energy using molecular orbitals= ', + mycasci_mo.e_tot) + energies['UR-CASCI'] = mycasci_mo.e_tot + + else: + if nele_cas is None: + myfci = fci.FCI(myhf) + result = myfci.kernel() + if verbose: + print('[pyscf] FCI energy = ', result[0]) + energies['fci_energy'] = result[0] + + else: + if natorb and (spin == 0): + mycasci = mcscf.CASCI(myhf, norb_cas, nele_cas) + mycasci.kernel(natorbs) + if verbose: + print( + '[pyscf] R-CASCI energy using natural orbitals= ', + mycasci.e_tot) + + energies['R-CASCI'] = mycasci.e_tot + + elif natorb and (spin != 0): + raise RuntimeError( + "WARN: Natural orbitals cannot be computed. ROMP2 is unvailable in pyscf." + ) + + else: + mycasci_mo = mcscf.CASCI(myhf, norb_cas, nele_cas) + mycasci_mo.kernel() + if verbose: + print( + '[pyscf] R-CASCI energy using molecular orbitals= ', + mycasci_mo.e_tot) + + energies['R-CASCI'] = mycasci_mo.e_tot + + + ######################## + # CCSD + ######################## + if ccsd: + + if UR: + + mc = mcscf.UCASCI(myhf, norb_cas, nele_cas) + frozen = [] + frozen = [y for y in range(0, mc.ncore[0])] + frozen += [ + y for y in range(mc.ncore[0] + + mc.ncas, len(myhf.mo_coeff[0])) + ] + + if natorb: + mycc = cc.UCCSD(myhf, frozen=frozen, mo_coeff=natorbs) + mycc.max_cycle = cycles + mycc.kernel() + if verbose: + print( + '[pyscf] UR-CCSD energy of the active space using natural orbitals= ', + mycc.e_tot) + + else: + mycc = cc.UCCSD(myhf, frozen=frozen) + mycc.max_cycle = cycles + mycc.kernel() + if verbose: + print( + '[pyscf] UR-CCSD energy of the active space using molecular orbitals= ', + mycc.e_tot) + + energies['UR-CCSD'] = mycc.e_tot + + + else: + if nele_cas is None: + mycc = cc.CCSD(myhf) + mycc.max_cycle = cycles + mycc.kernel() + if verbose: + print('[pyscf] Total R-CCSD energy = ', mycc.e_tot) + + else: + mc = mcscf.CASCI(myhf, norb_cas, nele_cas) + frozen = [] + frozen += [y for y in range(0, mc.ncore)] + frozen += [ + y + for y in range(mc.ncore + norb_cas, len(myhf.mo_coeff)) + ] + if natorb and (spin == 0): + mycc = cc.CCSD(myhf, frozen=frozen, mo_coeff=natorbs) + mycc.max_cycle = cycles + mycc.kernel() + if verbose: + print( + '[pyscf] R-CCSD energy of the active space using natural orbitals= ', + mycc.e_tot) + + elif natorb and (spin != 0): + raise RuntimeError( + "WARN: Natural orbitals cannot be computed. ROMP2 is unvailable in pyscf." 
+ ) + + else: + mycc = cc.CCSD(myhf, frozen=frozen) + mycc.max_cycle = cycles + mycc.kernel() + if verbose: + print( + '[pyscf] R-CCSD energy of the active space using molecular orbitals= ', + mycc.e_tot) + + energies['R-CCSD'] = mycc.e_tot + + + ######################### + # CASSCF + ######################### + if casscf: + if nele_cas is None: + raise RuntimeError("WARN: You should define the active space.") + + if UR: + if natorb: + mycas = mcscf.UCASSCF(myhf, norb_cas, nele_cas) + mycas.max_cycle_macro = cycles + mycas.kernel(natorbs) + if verbose: + print( + '[pyscf] UR-CASSCF energy using natural orbitals= ', + mycas.e_tot) + else: + mycas = mcscf.UCASSCF(myhf, norb_cas, nele_cas) + mycas.max_cycle_macro = cycles + mycas.kernel() + if verbose: + print( + '[pyscf] UR-CASSCF energy using molecular orbitals= ', + mycas.e_tot) + + energies['UR-CASSCF'] = mycas.e_tot + + else: + + if natorb and (spin == 0): + mycas = mcscf.CASSCF(myhf, norb_cas, nele_cas) + mycas.max_cycle_macro = cycles + mycas.kernel(natorbs) + if verbose: + print( + '[pyscf] R-CASSCF energy using natural orbitals= ', + mycas.e_tot) + + elif natorb and (spin != 0): + raise RuntimeError( + "WARN: Natural orbitals cannot be computed. ROMP2 is unvailable in pyscf." + ) + + else: + mycas = mcscf.CASSCF(myhf, norb_cas, nele_cas) + mycas.max_cycle_macro = cycles + mycas.kernel() + if verbose: + print( + '[pyscf] R-CASSCF energy using molecular orbitals= ', + mycas.e_tot) + + energies['R-CASSCF'] = mycas.e_tot + + + ################################### + # CASCI: FCI of the active space + ################################## + if casci and casscf: + + if UR: + h1e_cas, ecore = mycas.get_h1eff() + h2e_cas = mycas.get_h2eff() + + e_fci, fcivec = fci.direct_uhf.kernel(h1e_cas, + h2e_cas, + norb_cas, + nele_cas, + ecore=ecore) + if verbose: + print( + '[pyscf] UR-CASCI energy using the casscf orbitals= ', + e_fci) + + energies['UR-CASCI'] = e_fci + + else: + if natorb and (spin != 0): + raise RuntimeError( + "WARN: Natural orbitals cannot be computed. ROMP2 is unavailable in pyscf." 
+ ) + + h1e_cas, ecore = mycas.get_h1eff() + h2e_cas = mycas.get_h2eff() + + e_fci, fcivec = fci.direct_spin1.kernel(h1e_cas, + h2e_cas, + norb_cas, + nele_cas, + ecore=ecore) + if verbose: + print( + '[pyscf] R-CASCI energy using the casscf orbitals= ', + e_fci) + + energies['R-CASCI'] = e_fci + + + ################################################################################### + # Computation of one- and two- electron integrals for the active space Hamiltonian + ################################################################################### + + if UR: + if integrals_natorb: + mc = mcscf.UCASCI(myhf, norb_cas, nele_cas) + h1e, ecore = mc.get_h1eff(natorbs) + h1e_cas = np.array(h1e) + h2e = mc.get_h2eff(natorbs) + h2e_cas = np.array(h2e) + h2e_cas[0] = np.asarray(h2e_cas[0].transpose(0, 2, 3, 1), + order='C') + h2e_cas[1] = np.asarray(h2e_cas[1].transpose(0, 2, 3, 1), + order='C') + h2e_cas[2] = np.asarray(h2e_cas[2].transpose(0, 2, 3, 1), + order='C') + h2e_cas_prime = np.asarray(h2e_cas[1].transpose(2, 0, 1, 3), + order='C') + + elif integrals_casscf: + if casscf: + h1e, ecore = mycas.get_h1eff() + h1e_cas = np.array(h1e) + h2e = mycas.get_h2eff() + h2e_cas = np.array(h2e) + h2e_cas[0] = np.asarray(h2e_cas[0].transpose(0, 2, 3, 1), + order='C') + h2e_cas[1] = np.asarray(h2e_cas[1].transpose(0, 2, 3, 1), + order='C') + h2e_cas[2] = np.asarray(h2e_cas[2].transpose(0, 2, 3, 1), + order='C') + h2e_cas_prime = np.asarray(h2e_cas[1].transpose( + 2, 0, 1, 3), + order='C') + else: + raise RuntimeError( + "WARN: You need to run casscf. Use casscf=True.") + + else: + mc = mcscf.UCASCI(myhf, norb_cas, nele_cas) + h1e, ecore = mc.get_h1eff(myhf.mo_coeff) + h1e_cas = np.array(h1e) + h2e = mc.get_h2eff(myhf.mo_coeff) + h2e_cas = np.array(h2e) + h2e_cas[0] = np.asarray(h2e_cas[0].transpose(0, 2, 3, 1), + order='C') + h2e_cas[1] = np.asarray(h2e_cas[1].transpose(0, 2, 3, 1), + order='C') + h2e_cas[2] = np.asarray(h2e_cas[2].transpose(0, 2, 3, 1), + order='C') + h2e_cas_prime = np.asarray(h2e_cas[1].transpose(2, 0, 1, 3), + order='C') + + # Compute the molecular spin electronic Hamiltonian from the + # molecular electron integrals + obi, tbi, core_energy = self.generate_molecular_spin_ham_ur( + h1e_cas, h2e_cas, h2e_cas_prime, ecore) + + else: + + if nele_cas is None: + # Compute the 1e integral in atomic orbital then convert to HF basis + h1e_ao = mol.intor("int1e_kin") + mol.intor("int1e_nuc") + ## Ways to convert from ao to mo + #h1e=np.einsum('pi,pq,qj->ij', myhf.mo_coeff, h1e_ao, myhf.mo_coeff) + h1e = reduce(np.dot, (myhf.mo_coeff.T, h1e_ao, myhf.mo_coeff)) + #h1e=reduce(np.dot, (myhf.mo_coeff.conj().T, h1e_ao, myhf.mo_coeff)) + + # Compute the 2e integrals then convert to HF basis + h2e_ao = mol.intor("int2e_sph", aosym='1') + h2e = ao2mo.incore.full(h2e_ao, myhf.mo_coeff) + + # Reorder the chemist notation (pq|rs) ERI h_prqs to h_pqrs + # a_p^dagger a_r a_q^dagger a_s --> a_p^dagger a_q^dagger a_r a_s + h2e = h2e.transpose(0, 2, 3, 1) + + nuclear_repulsion = myhf.energy_nuc() + + # Compute the molecular spin electronic Hamiltonian from the + # molecular electron integrals + obi, tbi, e_nn = self.generate_molecular_spin_ham_restricted( + h1e, h2e, nuclear_repulsion) + energies['nuclear_energy'] = e_nn + + else: + + if integrals_natorb: + if spin != 0: + raise RuntimeError( + "WARN: ROMP2 is unvailable in pyscf.") + else: + mc = mcscf.CASCI(myhf, norb_cas, nele_cas) + h1e_cas, ecore = mc.get_h1eff(natorbs) + h2e_cas = mc.get_h2eff(natorbs) + h2e_cas = ao2mo.restore('1', h2e_cas, norb_cas) + 
h2e_cas = np.asarray(h2e_cas.transpose(0, 2, 3, 1), + order='C') + + elif integrals_casscf: + if casscf: + h1e_cas, ecore = mycas.get_h1eff() + h2e_cas = mycas.get_h2eff() + h2e_cas = ao2mo.restore('1', h2e_cas, norb_cas) + h2e_cas = np.asarray(h2e_cas.transpose(0, 2, 3, 1), + order='C') + else: + raise RuntimeError( + "WARN: You need to run casscf. Use casscf=True.") + + else: + mc = mcscf.CASCI(myhf, norb_cas, nele_cas) + h1e_cas, ecore = mc.get_h1eff(myhf.mo_coeff) + h2e_cas = mc.get_h2eff(myhf.mo_coeff) + h2e_cas = ao2mo.restore('1', h2e_cas, norb_cas) + h2e_cas = np.asarray(h2e_cas.transpose(0, 2, 3, 1), + order='C') + + # Compute the molecular spin electronic Hamiltonian from the + # molecular electron integrals + obi, tbi, core_energy = self.generate_molecular_spin_ham_restricted( + h1e_cas, h2e_cas, ecore) + energies['core_energy'] = ecore + + ###################################################### + # Dump energies / etc to a metadata file + results = { + 'num_electrons': nelec if nele_cas == None else nele_cas, + 'num_orbitals': norb if nele_cas == None else norb_cas, + 'hf_energy': myhf.e_tot, + 'energies': energies, + 'hpq': {'data':[(x.real,x.imag) for x in obi.astype(complex).flatten().tolist()]}, + 'hpqrs': {'data': [(x.real, x.imag) for x in tbi.astype(complex).flatten().tolist()]}, + 'operators': { + f'{filename}_one_body.dat': 'obi', + f'{filename}_two_body.dat': 'tbi' + } + } + + if cache_data: + with open(f'{filename}_metadata.json', 'w') as f: + json.dump(results, f) + + return results + + def generate(self, xyz, basis, **kwargs): + if xyz == None: + raise RuntimeError("no molecular geometry provided.") + + if basis == None: + raise RuntimeError("no basis provided.") + + requiredOptions = ['spin', 'charge'] + for option in requiredOptions: + if option not in kwargs or kwargs[option] == None: + raise RuntimeError( + f'solvent Hamiltonian generator missing required option - {option}' + ) + + spin = kwargs['spin'] + charge = kwargs['charge'] + symmetry = kwargs['symmetry'] if 'symmetry' in kwargs else False + memory = kwargs['memory'] if 'memory' in kwargs else 4000 + cycles = kwargs['cycles'] if 'cycles' in kwargs else 100 + initguess = kwargs['initguess'] if 'initguess' in kwargs else 'minao' + UR = kwargs['UR'] if 'UR' in kwargs else False + nele_cas = kwargs['nele_cas'] if 'nele_cas' in kwargs else None + norb_cas = kwargs['norb_cas'] if 'norb_cas' in kwargs else None + MP2 = kwargs['MP2'] if 'MP2' in kwargs else False + natorb = kwargs['natorb'] if 'natorb' in kwargs else False + casci = kwargs['casci'] if 'casci' in kwargs else False + ccsd = kwargs['ccsd'] if 'ccsd' in kwargs else False + casscf = kwargs['casscf'] if 'casscf' in kwargs else False + integrals_natorb = kwargs[ + 'integrals_natorb'] if 'integrals_natorb' in kwargs else False + integrals_casscf = kwargs[ + 'integrals_casscf'] if 'integrals_casscf' in kwargs else False + verbose = kwargs['verbose'] if 'verbose' in kwargs else False + cache_data = kwargs['cache'] if 'cache' in kwargs else True + outfilename = kwargs[ + 'out_file_name'] if 'out_file_name' in kwargs else None + return self.get_spin_hamiltonian(xyz, spin, charge, basis, symmetry, + memory, cycles, initguess, UR, + nele_cas, norb_cas, MP2, natorb, + casci, ccsd, casscf, integrals_natorb, + integrals_casscf, verbose, cache_data, + outfilename) + + +def get_hamiltonian_generator(): + return GasPhaseGenerator() \ No newline at end of file diff --git a/libs/solvers/python/cudaq_solvers/tools/molecule/pyscf/hamiltonian_generator.py 
b/libs/solvers/python/cudaq_solvers/tools/molecule/pyscf/hamiltonian_generator.py new file mode 100644 index 0000000..153a880 --- /dev/null +++ b/libs/solvers/python/cudaq_solvers/tools/molecule/pyscf/hamiltonian_generator.py @@ -0,0 +1,18 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # +import abc + +class HamiltonianGenerator(abc.ABC): + + @abc.abstractmethod + def name(self): + pass + + @abc.abstractmethod + def generate(self, xyz, basis, **kwargs): + pass \ No newline at end of file diff --git a/libs/solvers/python/tests/resources/LiH.xyz b/libs/solvers/python/tests/resources/LiH.xyz new file mode 100644 index 0000000..d8773ef --- /dev/null +++ b/libs/solvers/python/tests/resources/LiH.xyz @@ -0,0 +1,4 @@ +2 + +Li 0.0 0.0 0.0 +H 0.0 0.0 1.5 \ No newline at end of file diff --git a/libs/solvers/python/tests/test_adapt.py b/libs/solvers/python/tests/test_adapt.py new file mode 100644 index 0000000..60007f5 --- /dev/null +++ b/libs/solvers/python/tests/test_adapt.py @@ -0,0 +1,79 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +import os + +import pytest +import numpy as np + +import cudaq +import cudaq_solvers as solvers + + +def test_solvers_adapt(): + geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + casci=True) + operators = solvers.get_operator_pool( + "spin_complement_gsd", num_orbitals=molecule.n_orbitals) + + numElectrons = molecule.n_electrons + + @cudaq.kernel + def initState(q: cudaq.qview): + for i in range(numElectrons): + x(q[i]) + + energy, thetas, ops = solvers.adapt_vqe(initState, + molecule.hamiltonian, + operators) + print(energy) + assert np.isclose(energy, -1.137, atol=1e-3) + + energy, thetas, ops = solvers.adapt_vqe( + initState, + molecule.hamiltonian, + operators, + optimizer='lbfgs', + gradient='central_difference') + print(energy) + assert np.isclose(energy, -1.137, atol=1e-3) + + +def test_solvers_scipy_adapt(): + geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + casci=True) + operators = solvers.get_operator_pool( + "spin_complement_gsd", num_orbitals=molecule.n_orbitals) + + numElectrons = molecule.n_electrons + + from scipy.optimize import minimize + + @cudaq.kernel + def initState(q: cudaq.qview): + for i in range(numElectrons): + x(q[i]) + + energy, thetas, ops = solvers.adapt_vqe(initState, + molecule.hamiltonian, + operators, + optimizer=minimize, + method='L-BFGS-B', + jac='3-point', + tol=1e-8, + options={'disp': True}) + print(energy) + assert np.isclose(energy, -1.137, atol=1e-3) diff --git a/libs/solvers/python/tests/test_molecule.py b/libs/solvers/python/tests/test_molecule.py new file mode 100644 index 0000000..e21f127 --- /dev/null +++ b/libs/solvers/python/tests/test_molecule.py 
@@ -0,0 +1,181 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +import os + +import pytest, pathlib +import numpy as np +import cudaq_solvers as solvers + +currentPath = pathlib.Path(__file__).parent.resolve() + + +def test_operators(): + geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + verbose=True, + casci=True) + print(molecule.hamiltonian.to_string()) + print(molecule.energies) + assert np.isclose(-1.11, molecule.energies['hf_energy'], atol=1e-2) + assert np.isclose(-1.13, molecule.energies['fci_energy'], atol=1e-2) + from scipy.linalg import eigh + minE = eigh(molecule.hamiltonian.to_matrix(), eigvals_only=True)[0] + assert np.isclose(-1.13, minE, atol=1e-2) + + +def test_from_xyz_filename(): + molecule = solvers.create_molecule(str(currentPath) + + '/resources/LiH.xyz', + 'sto-3g', + 0, + 0, + verbose=True) + print(molecule.energies) + print(molecule.n_orbitals) + print(molecule.n_electrons) + assert molecule.n_orbitals == 6 + assert molecule.n_electrons == 4 + + +def test_jordan_wigner(): + geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + verbose=True, + casci=True) + op = solvers.jordan_wigner(molecule.hpq, molecule.hpqrs, + molecule.energies['nuclear_energy']) + assert molecule.hamiltonian == op + hpq = np.array(molecule.hpq) + hpqrs = np.array(molecule.hpqrs) + hpqJw = solvers.jordan_wigner(hpq, + molecule.energies['nuclear_energy']) + hpqrsJw = solvers.jordan_wigner(hpqrs) + op2 = hpqJw + hpqrsJw + assert op2 == molecule.hamiltonian + + spin_ham_matrix = molecule.hamiltonian.to_matrix() + e, c = np.linalg.eig(spin_ham_matrix) + assert np.isclose(np.min(e), -1.13717, rtol=1e-4) + + spin_ham_matrix = op2.to_matrix() + e, c = np.linalg.eig(spin_ham_matrix) + assert np.isclose(np.min(e), -1.13717, rtol=1e-4) + +def test_active_space(): + + geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=4, + norb_cas=4, + ccsd=True, + casci=True, + verbose=True) + assert molecule.n_orbitals == 4 + assert molecule.n_electrons == 4 + assert np.isclose(molecule.energies['core_energy'], -102.139973, rtol=1e-4) + assert np.isclose(molecule.energies['R-CCSD'], -107.5421878, rtol=1e-4) + assert np.isclose(molecule.energies['R-CASCI'], -107.5421983, rtol=1e-4) + + print(molecule.energies) + print(molecule.n_orbitals) + print(molecule.n_electrons) + +def test_jordan_wigner_as(): + geometry=[('N', (0.0, 0.0, 0.5600)), ('N', (0.0,0.0, -0.5600))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=4, + norb_cas=4, + ccsd=True, + casci=True, + verbose=True) + + op = solvers.jordan_wigner(molecule.hpq, molecule.hpqrs, + molecule.energies['core_energy']) + + print(op.to_string()) + assert molecule.hamiltonian == op + + hpq = np.array(molecule.hpq) + hpqrs = np.array(molecule.hpqrs) + hpqJw = solvers.jordan_wigner(hpq, + molecule.energies['core_energy']) + hpqrsJw = solvers.jordan_wigner(hpqrs) + op2 = hpqJw + hpqrsJw + + spin_ham_matrix = molecule.hamiltonian.to_matrix() + e, 
c = np.linalg.eig(spin_ham_matrix) + print(np.min(e)) + assert np.isclose(np.min(e), -107.542198, rtol=1e-4) + + spin_ham_matrix = op2.to_matrix() + e, c = np.linalg.eig(spin_ham_matrix) + print(np.min(e)) + assert np.isclose(np.min(e), -107.542198, rtol=1e-4) + +def test_as_with_natorb(): + + geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=4, + norb_cas=4, + MP2=True, + ccsd=True, + casci=True, + natorb=True, + integrals_natorb=True, + verbose=True) + assert molecule.n_orbitals == 4 + assert molecule.n_electrons == 4 + assert np.isclose(molecule.energies['R-CCSD'], -107.6059540, rtol=1e-4) + assert np.isclose(molecule.energies['R-CASCI'], -107.6076127, rtol=1e-4) + + print(molecule.energies) + print(molecule.n_orbitals) + print(molecule.n_electrons) + + +def test_as_with_casscf(): + + geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=4, + norb_cas=4, + MP2=True, + ccsd=True, + casci=True, + casscf=True, + natorb=True, + integrals_casscf=True, + verbose=True) + + assert molecule.n_orbitals == 4 + assert molecule.n_electrons == 4 + assert np.isclose(molecule.energies['R-CASSCF'], -107.607626, rtol=1e-4) + + print(molecule.energies) + print(molecule.n_orbitals) + print(molecule.n_electrons) diff --git a/libs/solvers/python/tests/test_operator_pools.py b/libs/solvers/python/tests/test_operator_pools.py new file mode 100644 index 0000000..00874a1 --- /dev/null +++ b/libs/solvers/python/tests/test_operator_pools.py @@ -0,0 +1,103 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +import pytest +import numpy as np +import cudaq_solvers as solvers + + +def test_generate_with_default_config(): + operators = solvers.get_operator_pool("uccsd", + num_qubits=4, + num_electrons=2) + assert operators + assert len(operators) == 2 * 2 + 1 * 8 + + for op in operators: + assert op.get_qubit_count() == 4 + + +def test_generate_with_custom_coefficients(): + operators = solvers.get_operator_pool("uccsd", + num_qubits=4, + num_electrons=2) + + assert operators + assert len(operators) == (2 * 2 + 1 * 8) + + for i, op in enumerate(operators): + assert op.get_qubit_count() == 4 + expected_coeff = 1.0 + assert np.isclose(op.get_coefficient().real, expected_coeff) + + +def test_generate_with_odd_electrons(): + operators = solvers.get_operator_pool("uccsd", + num_qubits=6, + num_electrons=3, + spin=1) + + assert operators + assert len(operators) == 2 * 4 + 4 * 8 + + for op in operators: + assert op.get_qubit_count() == 6 + + +def test_generate_with_large_system(): + operators = solvers.get_operator_pool("uccsd", + num_qubits=20, + num_electrons=10) + + assert operators + assert len(operators) > 875 + + for op in operators: + assert op.get_qubit_count() == 20 + + +def test_uccsd_operator_pool_correctness(): + # Generate the UCCSD operator pool + pool = solvers.get_operator_pool("uccsd", + num_qubits=4, + num_electrons=2) + + # Convert SpinOperators to strings + pool_strings = [op.to_string(False) for op in pool] + + # Expected result + expected_pool = [ + 'YZXI', 'XZYI', 'IYZX', 'IXZY', 'XXXY', 'XXYX', 'XYYY', 'YXYY', 'XYXX', + 'YXXX', 'YYXY', 'YYYX' + ] + + # Assert that the generated pool matches the expected result + assert pool_strings == expected_pool, f"Expected {expected_pool}, but got {pool_strings}" + + # Additional checks + assert len(pool) == len( + expected_pool + ), f"Expected {len(expected_pool)} operators, but got {len(pool)}" + + # Check that all operators have the correct length (4 qubits) + for op_string in pool_strings: + assert len( + op_string + ) == 4, f"Operator {op_string} does not have the expected length of 4" + + # Check that all operators contain only valid characters (I, X, Y, Z) + valid_chars = set('IXYZ') + for op_string in pool_strings: + assert set(op_string).issubset( + valid_chars), f"Operator {op_string} contains invalid characters" + + +def test_generate_with_invalid_config(): + # Missing required parameters + with pytest.raises(RuntimeError): + pool = solvers.get_operator_pool("uccsd") diff --git a/libs/solvers/python/tests/test_optim.py b/libs/solvers/python/tests/test_optim.py new file mode 100644 index 0000000..9ed4798 --- /dev/null +++ b/libs/solvers/python/tests/test_optim.py @@ -0,0 +1,34 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +import os + +import pytest +import numpy as np + +import cudaq_solvers as solvers + + +def objective(x: list[float]): + return 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2, [ + -2 * (1 - x[0]) + 400 * (x[0]**3 - x[1] * x[0]), 200 * (x[1] - x[0]**2) + ] + + +def test_lbfgs(): + opt, params = solvers.optim.optimize(objective, [0., 0.], method='lbfgs') + assert np.isclose(0.0, opt, atol=1e-6) + assert np.isclose(1.0, params[0], atol=1e-6) + assert np.isclose(1.0, params[1], atol=1e-6) + + +def test_cobyla(): + opt, params = solvers.optim.optimize(objective, [1., 1.], method='cobyla') + assert np.isclose(0.0, opt, atol=1e-6) + assert np.isclose(1.0, params[0], atol=1e-6) + assert np.isclose(1.0, params[1], atol=1e-6) \ No newline at end of file diff --git a/libs/solvers/python/tests/test_qaoa.py b/libs/solvers/python/tests/test_qaoa.py new file mode 100644 index 0000000..0e89542 --- /dev/null +++ b/libs/solvers/python/tests/test_qaoa.py @@ -0,0 +1,379 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +import os + +import pytest +import numpy as np +import networkx as nx + +import cudaq +from cudaq import spin +import cudaq_solvers as solvers + + +def test_simple_qaoa(): + Hp = 0.5 * spin.z(0) * spin.z(1) + 0.5 * spin.z(1) * spin.z(2) + \ + 0.5 * spin.z(0) * spin.z(3) + 0.5 * spin.z(2) * spin.z(3) + Href = spin.x(0) + spin.x(1) + spin.x(2) + spin.x(3) + + n_qubits = Hp.get_qubit_count() + n_layers = 2 + n_params = 2 * n_layers + initial_parameters = np.random.uniform(-np.pi / 2, np.pi / 2, n_params) + + result = solvers.qaoa(Hp, + Href, + n_layers, + initial_parameters, + optimizer='cobyla') + print(result) + # Can unpack like a tuple + optval, optp, config = result + print(optval) + + +def test_custom_mixing_hamiltonian_execution(): + problem_ham = 0.5 * spin.z(0) * spin.z(1) + mixing_ham = spin.x(0) + spin.x(1) + init_params = [0.1, 0.1] + + result = solvers.qaoa(problem_ham, mixing_ham, 1, init_params) + + assert len(result.optimal_parameters) > 0 + assert len(result.optimal_parameters) == 2 + assert -1.0 <= result.optimal_value <= 1.0 + + +def test_default_mixing_hamiltonian_execution(): + problem_ham = spin.z(0) + init_params = [0.1, 0.1] + + result = solvers.qaoa(problem_ham, 1, init_params) + + assert len(result.optimal_parameters) > 0 + assert len(result.optimal_parameters) == 2 + assert -1.0 <= result.optimal_value <= 1.0 + + +def test_parameter_validation(): + problem_ham = spin.z(0) + empty_params = [] + + with pytest.raises(RuntimeError): + solvers.qaoa(problem_ham, 1, empty_params) + + +def test_multi_layer_execution(): + problem_ham = spin.z(0) * spin.z(1) + init_params = [0.1, 0.1, 0.2, 0.2] # 2 layers + + result = solvers.qaoa(problem_ham, 2, init_params) + + assert len(result.optimal_parameters) == 4 + assert -1.0 <= result.optimal_value <= 1.0 + + +def test_overload_consistency(): + problem_ham = spin.z(0) * spin.z(1) + mixing_ham = spin.x(0) + spin.x(1) + init_params = [0.1, 0.1] + + result1 = solvers.qaoa(problem_ham, mixing_ham, 1, init_params) + result2 = solvers.qaoa(problem_ham, 1, init_params) + + # Results should be similar within 
numerical precision + assert abs(result1.optimal_value - result2.optimal_value) < 1e-6 + + +def test_maxcut_single_edge(): + G = nx.Graph() + G.add_edge(0, 1) + + ham = solvers.get_maxcut_hamiltonian(G) + + # Should have two terms: 0.5*Z0Z1 and -0.5*I0I1 + assert ham.get_term_count() == 2 + expected_ham = 0.5 * spin.z(0) * spin.z(1) - 0.5 * spin.i(0) * spin.i(1) + assert ham == expected_ham + + +def test_maxcut_triangle(): + # Create triangle graph + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (0, 2)]) + + ham = solvers.get_maxcut_hamiltonian(G) + print(ham) + + # Should have 4 terms + assert ham.get_term_count() == 4 + + # Create expected Hamiltonian using the exact structure + expected_data = [ + # ZIZ term + 2, + 0, + 2, + 0.5, + 0.0, + # ZZI term + 2, + 2, + 0, + 0.5, + 0.0, + # IZZ term + 0, + 2, + 2, + 0.5, + 0.0, + # III term + 0, + 0, + 0, + -1.5, + 0.0, + 4 + ] + + # Convert to spin operator + expected_ham = cudaq.SpinOperator(expected_data, 3) + + # Compare Hamiltonians + assert ham == expected_ham + + +def test_maxcut_disconnected(): + # Create disconnected graph + G = nx.Graph() + G.add_edges_from([(0, 1), (2, 3)]) + + ham = solvers.get_maxcut_hamiltonian(G) + + # Should have 3 terms + assert ham.get_term_count() == 3 + + # Create expected Hamiltonian using the exact structure + expected_data = [ + # ZZII term + 2, + 2, + 0, + 0, + 0.5, + 0.0, + # IIZZ term + 0, + 0, + 2, + 2, + 0.5, + 0.0, + # IIII term + 0, + 0, + 0, + 0, + -1.0, + 0.0, + 3 + ] + + # Convert to spin operator + expected_ham = cudaq.SpinOperator(expected_data, 4) + + # Compare Hamiltonians + assert ham == expected_ham + + +def test_clique_single_node(): + G = nx.Graph() + G.add_node(0, weight=1.5) + + ham = solvers.get_clique_hamiltonian(G) + + assert ham.get_term_count() == 2 + expected_ham = 0.75 * spin.z(0) - 0.75 * spin.i(0) + assert ham == expected_ham + + +def test_clique_complete_graph(): + G = nx.Graph() + node_w = {0: 2., 1: 1.5, 2: 1.} + for node, weight in node_w.items(): + G.add_node(node, weight=weight) + edges = [(0, 1, 1.0), (1, 2, 1.0), (0, 2, 1.0)] + G.add_weighted_edges_from(edges) + + ham = solvers.get_clique_hamiltonian(G, penalty=4.0) + + assert ham.get_term_count() == 4 + + expected_data = [ + 2, + 0, + 0, + 1.0, + 0.0, # ZII + 0, + 2, + 0, + 0.75, + 0.0, # IZI + 0, + 0, + 2, + 0.5, + 0.0, # IIZ + 0, + 0, + 0, + -2.25, + 0.0, # III + 4 + ] + expected_ham = cudaq.SpinOperator(expected_data, 3) + assert ham == expected_ham + + +def test_clique_disconnected_nodes(): + G = nx.Graph() + G.add_nodes_from([(0, {'weight': 1.0}), (1, {'weight': 1.0})]) + + ham = solvers.get_clique_hamiltonian(G, penalty=2.0) + + assert ham.get_term_count() == 4 + + expected_data = [ + 2, + 2, + 0.5, + 0.0, # ZZ + 2, + 0, + 0.0, + 0.0, # ZI + 0, + 2, + 0.0, + 0.0, # IZ + 0, + 0, + -0.5, + 0.0, # II + 4 + ] + expected_ham = cudaq.SpinOperator(expected_data, 2) + assert ham == expected_ham + + +def test_clique_triangle_with_disconnected(): + G = nx.Graph() + nodes = [(i, {'weight': 1.0}) for i in range(4)] + G.add_nodes_from(nodes) + edges = [(0, 1), (1, 2), (0, 2)] + G.add_edges_from(edges) + + ham = solvers.get_clique_hamiltonian(G, penalty=4.0) + + assert ham.get_term_count() == 8 + + expected_data = [ + 0, + 0, + 2, + 2, + 1.0, + 0.0, # IIZZ + 0, + 2, + 0, + 2, + 1.0, + 0.0, # IZIZ + 2, + 0, + 0, + 2, + 1.0, + 0.0, # ZIIZ + 0, + 0, + 0, + 2, + -2.5, + 0., # IIIZ + 0, + 2, + 0, + 0, + -0.5, + 0.0, # IZII + 0, + 0, + 0, + 0, + 1.0, + 0.0, # IIII + 0, + 0, + 2, + 0, + -0.5, + 0.0, # IIZI + 2, + 0, + 0, + 0, + 
-0.5, + 0.0, # ZIII + 8 + ] + expected_ham = cudaq.SpinOperator(expected_data, 4) + assert ham == expected_ham + + +def test_clique_different_penalties(): + G = nx.Graph() + G.add_nodes_from([(0, {'weight': 1.0}), (1, {'weight': 1.0})]) + + ham1 = solvers.get_clique_hamiltonian(G, penalty=2.0) + ham2 = solvers.get_clique_hamiltonian(G, penalty=4.0) + + assert ham1.get_term_count() == ham2.get_term_count() + assert str(ham1) != str(ham2) + + +def test_clique_weighted_nodes(): + G = nx.Graph() + G.add_nodes_from([(0, {'weight': 2.0}), (1, {'weight': 3.0})]) + G.add_edge(0, 1, weight=1.0) + + ham = solvers.get_clique_hamiltonian(G) + + assert ham.get_term_count() == 3 + + expected_data = [ + 2, + 0, + 1.0, + 0.0, # ZI + 0, + 2, + 1.5, + 0.0, # IZ + 0, + 0, + -2.5, + 0.0, # II + 3 + ] + expected_ham = cudaq.SpinOperator(expected_data, 2) + assert ham == expected_ham diff --git a/libs/solvers/python/tests/test_uccsd.py b/libs/solvers/python/tests/test_uccsd.py new file mode 100644 index 0000000..887f262 --- /dev/null +++ b/libs/solvers/python/tests/test_uccsd.py @@ -0,0 +1,143 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +import os + +import pytest +import numpy as np + +import cudaq, cudaq_solvers as solvers + +from scipy.optimize import minimize + + +def test_solvers_uccsd(): + geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))] + molecule = solvers.create_molecule(geometry, 'sto-3g', 0, 0, casci=True) + + numQubits = molecule.n_orbitals * 2 + numElectrons = molecule.n_electrons + spin = 0 + + @cudaq.kernel + def ansatz(thetas: list[float]): + q = cudaq.qvector(numQubits) + for i in range(numElectrons): + x(q[i]) + solvers.stateprep.uccsd(q, thetas, numElectrons, spin) + + ansatz.compile() + + energy, params, all_data = solvers.vqe(ansatz, + molecule.hamiltonian, + [-.2, -.2, -.2], + optimizer=minimize, + method='L-BFGS-B', + jac='3-point', + tol=1e-4, + options={'disp': True}) + print(energy) + assert np.isclose(energy, -1.13, 1e-2) + + +def test_uccsd_active_space(): + + geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=4, + norb_cas=4, + ccsd=True, + casci=True, + verbose=True) + + numQubits = molecule.n_orbitals * 2 + numElectrons = molecule.n_electrons + spin = 0 + + alphasingle, betasingle, mixeddouble, alphadouble, betadouble = solvers.stateprep.get_uccsd_excitations( + numElectrons, numQubits, spin) + a_single = [[0, 4], [0, 6], [2, 4], [2, 6]] + a_double = [[0, 2, 4, 6]] + assert alphasingle == a_single + assert alphadouble == a_double + + parameter_count = solvers.stateprep.get_num_uccsd_parameters( + numElectrons, numQubits, spin) + + @cudaq.kernel + def ansatz(thetas: list[float]): + q = cudaq.qvector(numQubits) + for i in range(numElectrons): + x(q[i]) + solvers.stateprep.uccsd(q, thetas, numElectrons, spin) + + ansatz.compile() + + np.random.seed(42) + x0 = np.random.normal(-np.pi / 8.0, np.pi / 8.0, parameter_count) + + energy, params, all_data = solvers.vqe(ansatz, + molecule.hamiltonian, + x0, + optimizer=minimize, + method='COBYLA', + tol=1e-5, + options={'disp': True}) + + print(energy) + assert 
np.isclose(energy, -107.542, 1e-2) + + +def test_uccsd_active_space_natorb(): + + geometry = [('N', (0.0, 0.0, 0.5600)), ('N', (0.0, 0.0, -0.5600))] + molecule = solvers.create_molecule(geometry, + 'sto-3g', + 0, + 0, + nele_cas=4, + norb_cas=4, + MP2=True, + ccsd=True, + casci=True, + natorb=True, + integrals_natorb=True, + verbose=True) + + numQubits = molecule.n_orbitals * 2 + numElectrons = molecule.n_electrons + spin = 0 + + parameter_count = solvers.stateprep.get_num_uccsd_parameters( + numElectrons, numQubits, spin) + + @cudaq.kernel + def ansatz(thetas: list[float]): + q = cudaq.qvector(numQubits) + for i in range(numElectrons): + x(q[i]) + solvers.stateprep.uccsd(q, thetas, numElectrons, spin) + + ansatz.compile() + + np.random.seed(42) + x0 = np.random.normal(-np.pi / 8.0, np.pi / 8.0, parameter_count) + + energy, params, all_data = solvers.vqe(ansatz, + molecule.hamiltonian, + x0, + optimizer=minimize, + method='COBYLA', + tol=1e-5, + options={'disp': True}) + + print(energy) + assert np.isclose(energy, -107.6059, 1e-2) diff --git a/libs/solvers/python/tests/test_vqe.py b/libs/solvers/python/tests/test_vqe.py new file mode 100644 index 0000000..41d3649 --- /dev/null +++ b/libs/solvers/python/tests/test_vqe.py @@ -0,0 +1,104 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +import os + +import pytest +import numpy as np + +import cudaq +from cudaq import spin + +import cudaq_solvers as solvers + + +def test_solvers_vqe(): + + @cudaq.kernel + def ansatz(theta: float): + q = cudaq.qvector(2) + x(q[0]) + ry(theta, q[1]) + x.ctrl(q[1], q[0]) + + hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y( + 0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1) + + # Can specify optimizer and gradient + energy, params, all_data = solvers.vqe( + lambda thetas: ansatz(thetas[0]), + hamiltonian, [0.], + optimizer='lbfgs', + gradient='parameter_shift') + assert np.isclose(-1.74, energy, atol=1e-2) + all_data[0].result.dump() + + # For gradient-based optimizer, can pick up default gradient (parameter_shift) + energy, params, all_data = solvers.vqe( + lambda thetas: ansatz(thetas[0]), + hamiltonian, [0.], + optimizer='lbfgs', + verbose=True) + assert np.isclose(-1.74, energy, atol=1e-2) + + # Can pick up default optimizer (cobyla) + energy, params, all_data = solvers.vqe( + lambda thetas: ansatz(thetas[0]), hamiltonian, [0.], verbose=True) + assert np.isclose(-1.74, energy, atol=1e-2) + + cudaq.set_random_seed(22) + + # Can pick up default optimizer (cobyla) + energy, params, all_data = solvers.vqe( + lambda thetas: ansatz(thetas[0]), + hamiltonian, [0.], + verbose=True, + shots=10000, + max_iterations=10) + assert energy > -2 and energy < -1.5 + print(energy) + all_data[0].result.dump() + counts = all_data[0].result.counts() + assert 5 == len(counts.register_names) + assert 4 == len(counts.get_register_counts('XX')) + assert 4 == len(counts.get_register_counts('YY')) + assert 1 == len(counts.get_register_counts('ZI')) + assert 1 == len(counts.get_register_counts('IZ')) + + +def test_scipy_optimizer(): + + @cudaq.kernel + def ansatz(theta: float): + q = cudaq.qvector(2) + x(q[0]) + ry(theta, q[1]) + x.ctrl(q[1], q[0]) + 
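+    # Same two-qubit Hamiltonian as in test_solvers_vqe above; its ground-state energy is approximately -1.74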
+ hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y( + 0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1) + + from scipy.optimize import minimize + + exp_vals = [] + + def callback(xk): + exp_vals.append(cudaq.observe(ansatz, hamiltonian, xk[0]).expectation()) + + # Can specify optimizer and gradient + energy, params, all_data = solvers.vqe( + lambda thetas: ansatz(thetas[0]), + hamiltonian, [0.], + optimizer=minimize, + callback=callback, + method='L-BFGS-B', + jac='3-point', + tol=1e-4, + options={'disp': True}) + assert np.isclose(-1.74, energy, atol=1e-2) + print(exp_vals) diff --git a/libs/solvers/tools/CMakeLists.txt b/libs/solvers/tools/CMakeLists.txt new file mode 100644 index 0000000..7f6f1fb --- /dev/null +++ b/libs/solvers/tools/CMakeLists.txt @@ -0,0 +1,8 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # +add_subdirectory(molecule) \ No newline at end of file diff --git a/libs/solvers/tools/molecule/CMakeLists.txt b/libs/solvers/tools/molecule/CMakeLists.txt new file mode 100644 index 0000000..49ea3ec --- /dev/null +++ b/libs/solvers/tools/molecule/CMakeLists.txt @@ -0,0 +1,27 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +file(COPY cudaq-pyscf.py + DESTINATION ${CMAKE_BINARY_DIR}/bin + FILE_PERMISSIONS + OWNER_READ OWNER_EXECUTE + GROUP_READ GROUP_EXECUTE + WORLD_READ WORLD_EXECUTE +) + +file(RENAME "${CMAKE_BINARY_DIR}/bin/cudaq-pyscf.py" + "${CMAKE_BINARY_DIR}/bin/cudaq-pyscf") + +# Install +# ============================================================================== + +install(PROGRAMS cudaq-pyscf.py + COMPONENT solvers-tools + DESTINATION ${CMAKE_INSTALL_BINDIR} + RENAME cudaq-pyscf +) diff --git a/libs/solvers/tools/molecule/cudaq-pyscf.py b/libs/solvers/tools/molecule/cudaq-pyscf.py new file mode 100644 index 0000000..b899227 --- /dev/null +++ b/libs/solvers/tools/molecule/cudaq-pyscf.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python3 + +import argparse +import importlib, pkgutil +import cudaq_solvers.tools.molecule.pyscf.generators + +from fastapi import FastAPI, Response +from pydantic import BaseModel, PlainValidator, PlainSerializer +import uvicorn, os, signal, importlib, pkgutil +from typing import List, Annotated +import numpy as np + + +def iter_namespace(ns_pkg): + return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".") + + +discovered_plugins = {} +for finder, name, ispkg in iter_namespace( + cudaq_solvers.tools.molecule.pyscf.generators): + try: + discovered_plugins[name] = importlib.import_module(name) + except ModuleNotFoundError: + pass + +hamiltonianGenerators = { + plugin.get_hamiltonian_generator().name(): plugin + for _, plugin in discovered_plugins.items() +} + +############################# +# Argument Parser +############################# + +parser = argparse.ArgumentParser() + +parser.add_argument('--server-mode', action='store_true', default=False) + +# Add arguments +parser.add_argument( + '--type', + type=str, + help='type of simulation (hamiltonian generator) - options include {}'. + format([k for k, v in hamiltonianGenerators.items()]), + default='gas_phase') +parser.add_argument('--xyz', help="xyz file", type=str) +parser.add_argument('--basis', help='', type=str) +parser.add_argument('--charge', help="charge of the system", type=int) +parser.add_argument('--out-file-name', + help='base file name for output data.', + type=str) +parser.add_argument('--spin', + help="no. 
of unpaired electrons (2 *s)", + type=int) +parser.add_argument('--symmetry', help="", action='store_true', default=False) +parser.add_argument('--memory', help="", type=float, default=4000) +parser.add_argument('--cycles', help="", type=int, default=100) +parser.add_argument('--initguess', help="", type=str, default='minao') +parser.add_argument('--UR', help="", action='store_true', default=False) +parser.add_argument('--MP2', help="", action='store_true', default=False) +parser.add_argument('--nele_cas', help="", type=int, default=None) +parser.add_argument('--norb_cas', help="", type=int, default=None) +parser.add_argument('--natorb', help="", action='store_true', default=False) +parser.add_argument('--casci', help="", action='store_true', default=False) +parser.add_argument('--ccsd', help="", action='store_true', default=False) +parser.add_argument('--casscf', help="", action='store_true', default=False) +parser.add_argument('--integrals_natorb', + help="", + action='store_true', + default=False) +parser.add_argument('--integrals_casscf', + help="", + action='store_true', + default=False) +parser.add_argument('--potfile', help="", type=str, default=None) +parser.add_argument('--verbose', + help="Verbose printout", + action='store_true', + default=False) + +# Parse the arguments +args = parser.parse_args() + +if not args.server_mode: + + if args.type not in hamiltonianGenerators: + raise RuntimeError(f'invalid hamiltonian generator type - {args.type}') + hamiltonianGenerator = hamiltonianGenerators[ + args.type].get_hamiltonian_generator() + + filterArgs = ['xyz', 'basis'] + filteredArgs = { + k: v + for (k, v) in vars(args).items() if k not in filterArgs + } + res = hamiltonianGenerator.generate(args.xyz, args.basis, **filteredArgs) + print(res) + + exit(0) + +app = FastAPI() + + +@app.get("/shutdown") +async def shutdown(): + os.kill(os.getpid(), signal.SIGTERM) + return Response(status_code=200, content='Server shutting down...') + + +class IntegralsData(BaseModel): + data: List[List] + + +class MoleculeInput(BaseModel): + basis: str + xyz: str + spin: int + charge: int + type: str = 'gas_phase' + symmetry: bool = False + cycles: int = 100 + memory: float = 4000. 
+ initguess: str = 'minao' + UR: bool = False + MP2: bool = False + natorb: bool = False + casci: bool = False + ccsd: bool = False + casscf: bool = False + integrals_natorb: bool = False + integrals_casscf: bool = False + verbose: bool = False + nele_cas: int = None + norb_cas: int = None + potfile: str = None + + + +class Molecule(BaseModel): + energies: dict + num_orbitals: int + num_electrons: int + hpq: IntegralsData + hpqrs: IntegralsData + + +@app.get("/status") +async def get_status(): + return {"status" : "available"} + +@app.post("/create_molecule") +async def create_molecule(molecule: MoleculeInput): + hamiltonianGenerator = hamiltonianGenerators[ + molecule.type].get_hamiltonian_generator() + + filterArgs = ['xyz', 'basis'] + filteredArgs = { + k: v + for (k, v) in vars(molecule).items() if k not in filterArgs + } + filteredArgs['cache_data'] = False + res = hamiltonianGenerator.generate(molecule.xyz, molecule.basis, + **filteredArgs) + return Molecule(energies=res['energies'], + num_orbitals=res['num_orbitals'], + num_electrons=res['num_electrons'], + hpq=IntegralsData(data=res['hpq']['data']), + hpqrs=IntegralsData(data=res['hpqrs']['data'])) + + +if __name__ == "__main__": + uvicorn.run(app, host="0.0.0.0", port=8000, log_level='critical') \ No newline at end of file diff --git a/libs/solvers/unittests/CMakeLists.txt b/libs/solvers/unittests/CMakeLists.txt new file mode 100644 index 0000000..2982583 --- /dev/null +++ b/libs/solvers/unittests/CMakeLists.txt @@ -0,0 +1,69 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +# External Dependencies +# ============================================================================== + +FetchContent_Declare( + googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG v1.15.2 + EXCLUDE_FROM_ALL +) +FetchContent_MakeAvailable(googletest) + +set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + +# Bug in GCC 12 leads to spurious warnings (-Wrestrict) +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105329 +if (CMAKE_COMPILER_IS_GNUCXX + AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 12.0.0 + AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 13.0.0) + target_compile_options(gtest PUBLIC --param=evrp-mode=legacy) +endif() +include(GoogleTest) + +# ============================================================================== + +add_subdirectory(nvqpp) + +# ============================================================================== + +add_executable(test_adapt test_adapt.cpp) +target_link_libraries(test_adapt PRIVATE GTest::gtest_main cudaq-solvers test-kernels) +add_dependencies(CUDAQXSolversUnitTests test_adapt) +gtest_discover_tests(test_adapt) + +add_executable(test_molecule test_molecule.cpp) +target_link_libraries(test_molecule PRIVATE GTest::gtest_main cudaq-solvers) +add_dependencies(CUDAQXSolversUnitTests test_molecule) +gtest_discover_tests(test_molecule) + +add_executable(test_optimizers test_optimizers.cpp) +target_link_libraries(test_optimizers PRIVATE GTest::gtest_main cudaq-solvers) +add_dependencies(CUDAQXSolversUnitTests test_optimizers) +gtest_discover_tests(test_optimizers) + +add_executable(test_operator_pools test_operator_pools.cpp) +target_link_libraries(test_operator_pools PRIVATE GTest::gtest_main cudaq-solvers) +add_dependencies(CUDAQXSolversUnitTests test_operator_pools) +gtest_discover_tests(test_operator_pools) + +add_executable(test_vqe test_vqe.cpp) +target_link_libraries(test_vqe PRIVATE GTest::gtest_main cudaq-solvers test-kernels) +add_dependencies(CUDAQXSolversUnitTests test_vqe) +gtest_discover_tests(test_vqe) + +add_executable(test_uccsd test_uccsd.cpp) +target_link_libraries(test_uccsd PRIVATE GTest::gtest_main cudaq-solvers test-kernels) +add_dependencies(CUDAQXSolversUnitTests test_uccsd) +gtest_discover_tests(test_uccsd) + +add_executable(test_qaoa test_qaoa.cpp) +target_link_libraries(test_qaoa PRIVATE GTest::gtest_main cudaq-solvers test-kernels) +gtest_discover_tests(test_qaoa) \ No newline at end of file diff --git a/libs/solvers/unittests/nvqpp/CMakeLists.txt b/libs/solvers/unittests/nvqpp/CMakeLists.txt new file mode 100644 index 0000000..cfbe3bb --- /dev/null +++ b/libs/solvers/unittests/nvqpp/CMakeLists.txt @@ -0,0 +1,11 @@ +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # +include_directories(${CUDAQX_SOLVERS_INCLUDE_DIR}) +set(CMAKE_CXX_COMPILER "${CUDAQ_INSTALL_DIR}/bin/nvq++") +set(CMAKE_CXX_COMPILE_OBJECT "<CMAKE_CXX_COMPILER> <DEFINES> <INCLUDES> <FLAGS> --enable-mlir -fPIC -o <OBJECT> -c <SOURCE>") +add_library(test-kernels SHARED test_kernels.cpp) diff --git a/libs/solvers/unittests/nvqpp/test_kernels.cpp b/libs/solvers/unittests/nvqpp/test_kernels.cpp new file mode 100644 index 0000000..255d368 --- /dev/null +++ b/libs/solvers/unittests/nvqpp/test_kernels.cpp @@ -0,0 +1,40 @@ +#include "test_kernels.h" + +#include "cudaq/solvers/stateprep/uccsd.h" + +__qpu__ void hartreeFock2Electrons(cudaq::qvector<> &q) { + for (std::size_t i = 0; i < 2; i++) + x(q[i]); +} + +__qpu__ void ansatz(std::vector<double> theta) { + cudaq::qvector q(2); + x(q[0]); + ry(theta[0], q[1]); + x(q[1], q[0]); +} + +__qpu__ void ansatzNonStdSignature(double theta, int N) { + cudaq::qvector q(N); + x(q[0]); + ry(theta, q[1]); + x(q[1], q[0]); +} + +__qpu__ void callUccsdStatePrep(std::vector<double> params) { + cudaq::qvector q(4); + for (auto i : cudaq::range(2)) + x(q[i]); + + cudaq::solvers::stateprep::uccsd(q, params, 2, 0); +} + +__qpu__ void callUccsdStatePrepWithArgs(std::vector<double> params, + std::size_t numQubits, + std::size_t numElectrons) { + cudaq::qvector q(numQubits); + for (auto i : cudaq::range(numElectrons)) + x(q[i]); + + cudaq::solvers::stateprep::uccsd(q, params, numElectrons); +} diff --git a/libs/solvers/unittests/nvqpp/test_kernels.h b/libs/solvers/unittests/nvqpp/test_kernels.h new file mode 100644 index 0000000..b4ccd5b --- /dev/null +++ b/libs/solvers/unittests/nvqpp/test_kernels.h @@ -0,0 +1,9 @@ +#include "cudaq.h" + +__qpu__ void hartreeFock2Electrons(cudaq::qvector<> &q); +__qpu__ void ansatz(std::vector<double> theta); +__qpu__ void ansatzNonStdSignature(double theta, int N); +__qpu__ void callUccsdStatePrep(std::vector<double> params); +__qpu__ void callUccsdStatePrepWithArgs(std::vector<double> params, + std::size_t numQubits, + std::size_t numElectrons); \ No newline at end of file diff --git a/libs/solvers/unittests/test_adapt.cpp b/libs/solvers/unittests/test_adapt.cpp new file mode 100644 index 0000000..d9d7e05 --- /dev/null +++ b/libs/solvers/unittests/test_adapt.cpp @@ -0,0 +1,48 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include +#include + +#include "cudaq.h" +#include "cudaq/solvers/adapt.h" +#include "nvqpp/test_kernels.h" + +std::vector<double> h2_data{ + 3, 1, 1, 3, 0.0454063, 0, 2, 0, 0, 0, 0.17028, 0, + 0, 0, 2, 0, -0.220041, -0, 1, 3, 3, 1, 0.0454063, 0, + 0, 0, 0, 0, -0.106477, 0, 0, 2, 0, 0, 0.17028, 0, + 0, 0, 0, 2, -0.220041, -0, 3, 3, 1, 1, -0.0454063, -0, + 2, 2, 0, 0, 0.168336, 0, 2, 0, 2, 0, 0.1202, 0, + 0, 2, 0, 2, 0.1202, 0, 2, 0, 0, 2, 0.165607, 0, + 0, 2, 2, 0, 0.165607, 0, 0, 0, 2, 2, 0.174073, 0, + 1, 1, 3, 3, -0.0454063, -0, 15}; + +TEST(SolversTester, checkSimpleAdapt) { + cudaq::spin_op h(h2_data, 4); + auto pool = cudaq::solvers::operator_pool::get("spin_complement_gsd"); + auto poolList = pool->generate({{"num-orbitals", h.num_qubits() / 2}}); + auto [energy, thetas, ops] = cudaq::solvers::adapt_vqe( + hartreeFock2Electrons, h, poolList, + {{"grad_norm_tolerance", 1e-3}, {"verbose", true}}); + EXPECT_NEAR(energy, -1.13, 1e-2); +} + +TEST(SolversTester, checkSimpleAdaptGradient) { + cudaq::spin_op h(h2_data, 4); + auto pool = cudaq::solvers::operator_pool::get("spin_complement_gsd"); + auto poolList = pool->generate({{"num-orbitals", h.num_qubits() / 2}}); + auto opt = cudaq::optim::optimizer::get("lbfgs"); + auto [energy, thetas, ops] = cudaq::solvers::adapt_vqe( + hartreeFock2Electrons, h, poolList, *opt, "central_difference", + {{"grad_norm_tolerance", 1e-3}, {"verbose", true}}); + EXPECT_NEAR(energy, -1.13, 1e-2); + + for (std::size_t i = 0; i < thetas.size(); i++) + printf("%lf -> %s\n", thetas[i], ops[i].to_string().c_str()); +} \ No newline at end of file diff --git a/libs/solvers/unittests/test_molecule.cpp b/libs/solvers/unittests/test_molecule.cpp new file mode 100644 index 0000000..3c4aea8 --- /dev/null +++ b/libs/solvers/unittests/test_molecule.cpp @@ -0,0 +1,138 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ +#include "cudaq/solvers/operators.h" +#include "cudaq/solvers/operators/molecule/molecule_package_driver.h" + +#include +#include +#include + +#include + +TEST(MoleculeTester, checkSimple) { + auto registeredNames = + cudaq::solvers::MoleculePackageDriver::get_registered(); + + EXPECT_EQ(registeredNames.size(), 1); + EXPECT_TRUE(std::find(registeredNames.begin(), registeredNames.end(), + "RESTPySCFDriver") != registeredNames.end()); + { + cudaq::solvers::molecular_geometry geometry{{"H", {0., 0., 0.}}, + {"H", {0., 0., .7474}}}; + auto molecule = cudaq::solvers::create_molecule( + geometry, "sto-3g", 0, 0, {.casci = true, .verbose = true}); + + molecule.hamiltonian.dump(); + + EXPECT_NEAR(molecule.energies["fci_energy"], -1.137, 1e-3); + EXPECT_NEAR(molecule.energies["hf_energy"], -1.1163255644, 1e-3); + EXPECT_EQ(molecule.n_electrons, 2); + EXPECT_EQ(molecule.n_orbitals, 2); + + // EXPECT_NEAR(molecule.fermionOperator.constant, 0.7080240981000804, 1e-3); + EXPECT_EQ(2, molecule.hpq.shape().size()); + EXPECT_EQ(4, molecule.hpq.shape()[0]); + EXPECT_EQ(4, molecule.hpq.shape()[1]); + EXPECT_NEAR(molecule.hpq.at({0, 0}).real(), -1.2488, 1e-3); + EXPECT_NEAR(molecule.hpq.at({1, 1}).real(), -1.2488, 1e-3); + EXPECT_NEAR(molecule.hpq.at({2, 2}).real(), -.47967, 1e-3); + EXPECT_NEAR(molecule.hpq.at({3, 3}).real(), -.47967, 1e-3); + EXPECT_EQ(4, molecule.hpqrs.shape().size()); + for (int i = 0; i < 4; i++) + EXPECT_EQ(4, molecule.hpqrs.shape()[i]); + EXPECT_NEAR(molecule.hpqrs.at({0, 0, 0, 0}).real(), 0.3366719725032414, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({0, 0, 2, 2}).real(), 0.0908126657382825, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({0, 1, 1, 0}).real(), 0.3366719725032414, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({0, 1, 3, 2}).real(), 0.0908126657382825, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({0, 2, 0, 2}).real(), 0.09081266573828267, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({0, 2, 2, 0}).real(), 0.33121364716348484, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({0, 3, 1, 2}).real(), 0.09081266573828267, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({0, 3, 3, 0}).real(), 0.33121364716348484, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({1, 0, 0, 1}).real(), 0.3366719725032414, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({1, 0, 2, 3}).real(), 0.0908126657382825, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({1, 1, 1, 1}).real(), 0.3366719725032414, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({1, 1, 3, 3}).real(), 0.0908126657382825, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({1, 2, 0, 3}).real(), 0.09081266573828267, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({1, 2, 2, 1}).real(), 0.33121364716348484, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({1, 3, 1, 3}).real(), 0.09081266573828267, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({1, 3, 3, 1}).real(), 0.33121364716348484, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({2, 0, 0, 2}).real(), 0.3312136471634851, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({2, 0, 2, 0}).real(), 0.09081266573828246, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({2, 1, 1, 2}).real(), 0.3312136471634851, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({2, 1, 3, 0}).real(), 0.09081266573828246, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({2, 2, 0, 0}).real(), 0.09081266573828264, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({2, 2, 2, 2}).real(), 0.34814578499360427, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({2, 3, 1, 0}).real(), 0.09081266573828264, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({2, 3, 3, 2}).real(), 0.34814578499360427, + 
1e-3); + EXPECT_NEAR(molecule.hpqrs.at({3, 0, 0, 3}).real(), 0.3312136471634851, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({3, 0, 2, 1}).real(), 0.09081266573828246, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({3, 1, 1, 3}).real(), 0.3312136471634851, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({3, 1, 3, 1}).real(), 0.09081266573828246, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({3, 2, 0, 1}).real(), 0.09081266573828264, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({3, 2, 2, 3}).real(), 0.34814578499360427, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({3, 3, 1, 1}).real(), 0.09081266573828264, + 1e-3); + EXPECT_NEAR(molecule.hpqrs.at({3, 3, 3, 3}).real(), 0.34814578499360427, + 1e-3); + } +} + +TEST(OperatorsTester, checkH2OActiveSpace) { + std::string contents = R"#(3 + +O 0.1173 0.0 0.0 +H -0.4691 0.7570 0.0 +H -0.4691 -0.7570 0.0 +)#"; + + { + std::ofstream out(".tmpH2O.xyz"); + out << contents; + } + + auto geometry = cudaq::solvers::molecular_geometry::from_xyz(".tmpH2O.xyz"); + std::remove(".tmpH2O.xyz"); + + auto molecule = cudaq::solvers::create_molecule( + geometry, "631g", 0, 0, + {.nele_cas = 6, .norb_cas = 6, .ccsd = true, .verbose = true}); + + // molecule.hamiltonian.dump(); + EXPECT_EQ(molecule.n_electrons, 6); + EXPECT_EQ(molecule.n_orbitals, 6); +} diff --git a/libs/solvers/unittests/test_operator_pools.cpp b/libs/solvers/unittests/test_operator_pools.cpp new file mode 100644 index 0000000..3961816 --- /dev/null +++ b/libs/solvers/unittests/test_operator_pools.cpp @@ -0,0 +1,188 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "cudaq/solvers/operators/operator_pool.h" +#include + +using namespace cudaqx; + +TEST(UCCSDTest, GenerateWithDefaultConfig) { + auto pool = cudaq::solvers::operator_pool::get("uccsd"); + heterogeneous_map config; + config.insert("num-qubits", 4); + config.insert("num-electrons", 2); + + auto operators = pool->generate(config); + ASSERT_FALSE(operators.empty()); + EXPECT_EQ(operators.size(), 2 * 2 + 1 * 8); + + for (const auto &op : operators) { + EXPECT_EQ(op.num_qubits(), 4); + } +} + +TEST(UCCSDTest, GenerateWithCustomCoefficients) { + auto pool = cudaq::solvers::operator_pool::get("uccsd"); + heterogeneous_map config; + config.insert("num-qubits", 4); + config.insert("num-electrons", 2); + + auto operators = pool->generate(config); + + ASSERT_FALSE(operators.empty()); + EXPECT_EQ(operators.size(), (2 * 2 + 1 * 8)); + + for (size_t i = 0; i < operators.size(); ++i) { + EXPECT_EQ(operators[i].num_qubits(), 4); + EXPECT_DOUBLE_EQ(1.0, operators[i].get_coefficient().real()); + } +} + +TEST(UCCSDTest, GenerateWithOddElectrons) { + auto pool = cudaq::solvers::operator_pool::get("uccsd"); + heterogeneous_map config; + config.insert("num-qubits", 6); + config.insert("num-electrons", 3); + config.insert("spin", 1); + + auto operators = pool->generate(config); + + ASSERT_FALSE(operators.empty()); + EXPECT_EQ(operators.size(), 2 * 4 + 4 * 8); + + for (const auto &op : operators) + EXPECT_EQ(op.num_qubits(), 6); +} + +TEST(UCCSDTest, GenerateWithLargeSystem) { + auto pool = cudaq::solvers::operator_pool::get("uccsd"); + heterogeneous_map config; + config.insert("num-qubits", 20); + config.insert("num-electrons", 10); + + auto operators = pool->generate(config); + + ASSERT_FALSE(operators.empty()); + EXPECT_GT(operators.size(), 875); + + for (const auto &op : operators) { + EXPECT_EQ(op.num_qubits(), 20); + } +} + +TEST(UccsdOperatorPoolTest, GeneratesCorrectOperators) { + // Arrange + auto pool = cudaq::solvers::operator_pool::get("uccsd"); + heterogeneous_map config; + config.insert("num-qubits", 4); + config.insert("num-electrons", 2); + + // Act + auto operators = pool->generate(config); + + // Convert SpinOperators to strings + std::vector<std::string> operator_strings; + for (const auto &op : operators) { + operator_strings.push_back(op.to_string(false)); + } + + // Assert + std::vector<std::string> expected_operators = { + "YZXI", "XZYI", "IYZX", "IXZY", "XXXY", "XXYX", + "XYYY", "YXYY", "XYXX", "YXXX", "YYXY", "YYYX"}; + + ASSERT_EQ(operator_strings.size(), expected_operators.size()) + << "Number of generated operators does not match expected count"; + + for (size_t i = 0; i < expected_operators.size(); ++i) { + EXPECT_EQ(operator_strings[i], expected_operators[i]) + << "Mismatch at index " << i; + } + + // Additional checks + for (const auto &op_string : operator_strings) { + EXPECT_EQ(op_string.length(), 4) + << "Operator " << op_string + << " does not have the expected length of 4"; + + EXPECT_TRUE(op_string.find_first_not_of("IXYZ") == std::string::npos) + << "Operator " << op_string << " contains invalid characters"; + } +} + +TEST(UCCSDTest, GenerateWithInvalidConfig) { + auto pool = cudaq::solvers::operator_pool::get("uccsd"); + heterogeneous_map config; + // Missing required parameters + + EXPECT_THROW(pool->generate(config), std::runtime_error); +} + +// Test for single qubit terms +TEST(MixerPoolTest, SingleQubitTerms) { + auto opPool = cudaq::solvers::operator_pool::get("qaoa"); + std::vector<cudaq::spin_op> ops = opPool->generate({{"n-qubits", 2}}); + + // First 2 operators should be X(0) and X(1) + EXPECT_EQ(ops[0], cudaq::spin::x(0)); + EXPECT_EQ(ops[1], cudaq::spin::x(1)); + + // Next 2 operators should be Y(0) and Y(1) + EXPECT_EQ(ops[2], cudaq::spin::y(0)); + EXPECT_EQ(ops[3], cudaq::spin::y(1)); +} + +// Test for two-qubit XX terms +TEST(MixerPoolTest, TwoQubitXXTerms) { + auto opPool = cudaq::solvers::operator_pool::get("qaoa"); + std::vector<cudaq::spin_op> ops = opPool->generate({{"n-qubits", 3}}); + + // Find XX terms (they start after single qubit terms) + int xx_start_idx = 6; // After 3 X terms and 3 Y terms + EXPECT_EQ(ops[xx_start_idx], cudaq::spin::x(0) * cudaq::spin::x(1)); + EXPECT_EQ(ops[xx_start_idx + 1], cudaq::spin::x(0) * cudaq::spin::x(2)); + EXPECT_EQ(ops[xx_start_idx + 2], cudaq::spin::x(1) * cudaq::spin::x(2)); +} + +// Test vector size for different qubit numbers
TEST(MixerPoolTest, VectorSizes) { + // For n qubits, we expect: + // - n single X terms + // - n single Y terms + // - (n*(n-1))/2 terms for each two-qubit combination (XX, YY, YZ, ZY, XY, YX, + // XZ, ZX) + + // Test for 2 qubits + auto opPool = cudaq::solvers::operator_pool::get("qaoa"); + std::vector<cudaq::spin_op> ops_2q = opPool->generate({{"n-qubits", 2}}); + int expected_size_2q = 4 + 8; // 4 single-qubit + 8 two-qubit terms + EXPECT_EQ(ops_2q.size(), expected_size_2q); + + // Test for 3 qubits + std::vector<cudaq::spin_op> ops_3q = opPool->generate({{"n-qubits", 3}}); + + int expected_size_3q = 6 + 24; // 6 single-qubit + 24 two-qubit terms + EXPECT_EQ(ops_3q.size(), expected_size_3q); +} + +// Test for empty and single qubit cases + TEST(MixerPoolTest, EdgeCases) { + // Test with 0 qubits + auto opPool = cudaq::solvers::operator_pool::get("qaoa"); + std::vector<cudaq::spin_op> ops_0q = opPool->generate({{"n-qubits", 0}}); + + EXPECT_EQ(ops_0q.size(), 0); + + // Test with 1 qubit + // auto ops_1q = mixer_pool(1); + std::vector<cudaq::spin_op> ops_1q = opPool->generate({{"n-qubits", 1}}); + + EXPECT_EQ(ops_1q.size(), 2); // Only X(0) and Y(0) + EXPECT_EQ(ops_1q[0], cudaq::spin::x(0)); + EXPECT_EQ(ops_1q[1], cudaq::spin::y(0)); +} \ No newline at end of file diff --git a/libs/solvers/unittests/test_optimizers.cpp b/libs/solvers/unittests/test_optimizers.cpp new file mode 100644 index 0000000..600fe85 --- /dev/null +++ b/libs/solvers/unittests/test_optimizers.cpp @@ -0,0 +1,86 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ +#include "cudaq/solvers/optimizer.h" + +#include <cmath> +#include <vector> + +#include <gtest/gtest.h> + +TEST(CoreTester, checkCobyla) { + auto registered = cudaq::optim::optimizer::get_registered(); + EXPECT_TRUE(cudaq::optim::optimizer::is_registered("cobyla")); + auto cobyla = cudaq::optim::optimizer::get("cobyla"); + + // optimizer.verbose = true; + auto f = [](const std::vector<double> &x, std::vector<double> &dx) { + return 100 * std::pow(x[1] - std::pow(x[0], 2), 2) + std::pow(1 - x[0], 2); + }; + + auto [opt, params] = cobyla->optimize( + 2, f, {{"initial_parameters", std::vector<double>(2, 1.)}}); + EXPECT_NEAR(0.0, opt, 1e-3); + EXPECT_EQ(2, params.size()); + EXPECT_NEAR(1.0, params[0], 1e-3); + EXPECT_NEAR(1.0, params[1], 1e-3); +} + +TEST(OptimTester, checkLBFGS) { + + EXPECT_TRUE(cudaq::optim::optimizer::is_registered("lbfgs")); + auto optimizer = cudaq::optim::optimizer::get("lbfgs"); + { + auto f = [](const std::vector<double> &x, std::vector<double> &dx) { + dx[0] = -2 * (1 - x[0]) + 400 * (std::pow(x[0], 3) - x[1] * x[0]); + dx[1] = 200 * (x[1] - std::pow(x[0], 2)); + return 100 * std::pow(x[1] - std::pow(x[0], 2), 2) + + std::pow(1 - x[0], 2); + }; + + { + auto [opt, params] = optimizer->optimize(2, f); + EXPECT_NEAR(0.0, opt, 1e-3); + EXPECT_EQ(2, params.size()); + EXPECT_NEAR(1.0, params[0], 1e-3); + EXPECT_NEAR(1.0, params[1], 1e-3); + } + + { + auto nIters = optimizer->history.size(); + // Try to set the function tolerance + auto [opt, params] = + optimizer->optimize(2, f, {{"function_tolerance", 1e-3}}); + + EXPECT_TRUE(optimizer->history.size() < nIters); + } + + { + // Try to set the maximum number of iterations + auto [opt, params] = optimizer->optimize(2, f, {{"max_iterations", 12}}); + EXPECT_TRUE(optimizer->history.size() == 12); + } + } + { + // optimizer.verbose = true; + auto f = [](const std::vector<double> &x, std::vector<double> &dx) { + dx[0] = -2 * (1 - x[0]) + 400 * (std::pow(x[0], 3) - x[1] * x[0]); + dx[1] = 200 * (x[1] - std::pow(x[0], 2)); + return 100 * std::pow(x[1] - std::pow(x[0], 2), 2) + + std::pow(1 - x[0], 2); + }; + + { + auto [opt, params] = optimizer->optimize(2, f); + EXPECT_NEAR(0.0, opt, 1e-3); + EXPECT_EQ(2, params.size()); + EXPECT_NEAR(1.0, params[0], 1e-3); + EXPECT_NEAR(1.0, params[1], 1e-3); + } + } +} diff --git a/libs/solvers/unittests/test_qaoa.cpp b/libs/solvers/unittests/test_qaoa.cpp new file mode 100644 index 0000000..cdef34f --- /dev/null +++ b/libs/solvers/unittests/test_qaoa.cpp @@ -0,0 +1,473 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include <cmath> +#include <gtest/gtest.h> + +#include "cudaq.h" +#include "cudaq/solvers/operators.h" +#include "cudaq/solvers/qaoa.h" +#include "nvqpp/test_kernels.h" + +using namespace cudaq::spin; + +TEST(SolversTester, checkSimpleQAOA) { + + auto Hp = 0.5 * z(0) * z(1) + 0.5 * z(1) * z(2) + 0.5 * z(0) * z(3) + + 0.5 * z(2) * z(3); + auto Href = x(0) + x(1) + x(2) + x(3); + + const int n_qubits = Hp.num_qubits(); + const int n_layers = 2; + const int n_params = 2 * n_layers; // * (n_qubits + Href.num_qubits()); + auto initialParameters = cudaq::random_vector(-M_PI_2, M_PI_2, n_params); + + auto optimizer = cudaq::optim::optimizer::get("cobyla"); + + auto [optval, optparam, config] = + cudaq::solvers::qaoa(Hp, Href, *optimizer, n_layers, initialParameters); +} + +// Test basic QAOA execution with custom mixing Hamiltonian +TEST(QAOATest, CustomMixingHamiltonianExecution) { + // Create a simple 2-qubit problem Hamiltonian + cudaq::spin_op problemHam = 0.5 * cudaq::spin::z(0) * cudaq::spin::z(1); + + // Create mixing Hamiltonian (X terms) + cudaq::spin_op mixingHam = cudaq::spin::x(0) + cudaq::spin::x(1); + + // Create optimizer + auto opt = cudaq::optim::optimizer::get("cobyla"); + + // Initial parameters for 1 layer (gamma, beta) + std::vector<double> initParams = {0.1, 0.1}; + + auto result = + cudaq::solvers::qaoa(problemHam, mixingHam, *opt, 1, initParams); + + EXPECT_FALSE(result.optimal_parameters.empty()); + EXPECT_EQ(result.optimal_parameters.size(), 2); + EXPECT_GE(result.optimal_value, -1.0); + EXPECT_LE(result.optimal_value, 1.0); +} + +// Test QAOA with default mixing Hamiltonian +TEST(QAOATest, DefaultMixingHamiltonianExecution) { + // Single-qubit problem Hamiltonian + cudaq::spin_op problemHam = cudaq::spin::z(0); + + auto opt = cudaq::optim::optimizer::get("cobyla"); + std::vector<double> initParams = {0.1, 0.1}; + + auto result = cudaq::solvers::qaoa(problemHam, *opt, 1, initParams); + + EXPECT_FALSE(result.optimal_parameters.empty()); + EXPECT_EQ(result.optimal_parameters.size(), 2); + EXPECT_GE(result.optimal_value, -1.0); + EXPECT_LE(result.optimal_value, 1.0); +} + +// Test parameter validation +TEST(QAOATest, ParameterValidation) { + cudaq::spin_op problemHam = cudaq::spin::z(0); + std::vector<double> emptyParams; + + EXPECT_THROW(cudaq::solvers::qaoa(problemHam, 1, emptyParams), + std::invalid_argument); +} + +// Test multi-layer QAOA +TEST(QAOATest, MultiLayerExecution) { + cudaq::spin_op problemHam = cudaq::spin::z(0) * cudaq::spin::z(1); + std::vector<double> initParams = {0.1, 0.1, 0.2, 0.2}; // 2 layers + + auto result = cudaq::solvers::qaoa(problemHam, 2, initParams); + + EXPECT_EQ(result.optimal_parameters.size(), 4); + EXPECT_GE(result.optimal_value, -1.0); + EXPECT_LE(result.optimal_value, 1.0); +} + +// // Test QAOA with options +// TEST(QAOATest, OptionsHandling) { +// cudaq::spin_op problemHam = cudaq::spin::z(0)[1]; +// std::vector<double> initParams = {0.1, 0.1}; + +// cudaq::heterogeneous_map options; +// options["shots"] = 1000; +// options["optimizer.maxiter"] = 100; + +// auto result = cudaq::solvers::qaoa(problemHam, 1, initParams, options); + +// EXPECT_FALSE(result.optimalConfig.empty()); +// EXPECT_GE(result.optimalConfig.counts().size(), 1); +// } + +// Test consistency between different QAOA overloads +TEST(QAOATest, OverloadConsistency) { + cudaq::spin_op problemHam = cudaq::spin::z(0) * cudaq::spin::z(1); + cudaq::spin_op mixingHam = cudaq::spin::x(0) + cudaq::spin::x(1); + auto opt = 
cudaq::optim::optimizer::get("cobyla"); + std::vector initParams = {0.1, 0.1}; + + auto result1 = + cudaq::solvers::qaoa(problemHam, mixingHam, *opt, 1, initParams); + auto result2 = cudaq::solvers::qaoa(problemHam, *opt, 1, initParams); + + // Results should be similar within numerical precision + EXPECT_NEAR(result1.optimal_value, result2.optimal_value, 1e-6); +} + +TEST(MaxCutHamiltonianTest, SingleEdge) { + cudaqx::graph g; + g.add_edge(0, 1); + + auto ham = cudaq::solvers::get_maxcut_hamiltonian(g); + ham.dump(); + // Should have two terms: 0.5*Z0Z1 and -0.5*I0I1 + EXPECT_EQ(ham.num_terms(), 2); + + // Verify the coefficients + EXPECT_EQ(0.5 * cudaq::spin_op::from_word("ZZ") - + .5 * cudaq::spin_op::from_word("II"), + ham); +} + +TEST(MaxCutHamiltonianTest, Triangle) { + cudaqx::graph g; + g.add_edge(0, 1); + g.add_edge(1, 2); + g.add_edge(0, 2); + + auto ham = cudaq::solvers::get_maxcut_hamiltonian(g); + ham.dump(); + // Should have 6 terms: 0.5*(Z0Z1 + Z1Z2 + Z0Z2) - 0.5*(I0I1 + I1I2 + I0I2) + EXPECT_EQ(ham.num_terms(), 4); + + std::vector data{// Term 1: ZIZ with coefficient 0.5 + 0j + 2, 0, 2, 0.5, 0.0, + + // Term 2: ZZI with coefficient 0.5 + 0j + 2, 2, 0, 0.5, 0.0, + + // Term 3: IZZ with coefficient 0.5 + 0j + 0, 2, 2, 0.5, 0.0, + + // Term 4: III with coefficient -1.5 + 0j + 0, 0, 0, -1.5, 0.0, + + // Total number of terms + 4}; + EXPECT_EQ(ham, cudaq::spin_op(data, 3)); +} + +TEST(MaxCutHamiltonianTest, DisconnectedGraph) { + cudaqx::graph g; + g.add_edge(0, 1); + g.add_edge(2, 3); // Disconnected edge + + auto ham = cudaq::solvers::get_maxcut_hamiltonian(g); + + // Should have 4 terms: 0.5*(Z0Z1 + Z2Z3) - 0.5*(I0I1 + I2I3) + EXPECT_EQ(ham.num_terms(), 3); + ham.dump(); + + std::vector data{// Term 1: ZZII with coefficient 0.5 + 0j + 2, 2, 0, 0, 0.5, 0.0, + + // Term 2: IIZZ with coefficient 0.5 + 0j + 0, 0, 2, 2, 0.5, 0.0, + + // Term 3: IIII with coefficient -1.0 + 0j + 0, 0, 0, 0, -1.0, 0.0, + + // Total number of terms + 3}; + EXPECT_EQ(ham, cudaq::spin_op(data, 4)); +} + +TEST(CliqueHamiltonianTest, SingleNode) { + cudaqx::graph g; + g.add_node(0, 1.5); // Add node with weight 1.5 + + auto ham = cudaq::solvers::get_clique_hamiltonian(g); + ham.dump(); + EXPECT_EQ(ham.num_terms(), 2); + EXPECT_EQ(ham, 0.75 * cudaq::spin::z(0) - .75 * cudaq::spin::i(0)); +} + +TEST(CliqueHamiltonianTest, CompleteGraph) { + cudaqx::graph g; + + // Create K3 (triangle) with node and edge weights + g.add_node(0, 2.0); + g.add_node(1, 1.5); + g.add_node(2, 1.0); + + g.add_edge(0, 1, 1.0); + g.add_edge(1, 2, 1.0); + g.add_edge(0, 2, 1.0); + + auto ham = cudaq::solvers::get_clique_hamiltonian(g, 4.0); + ham.dump(); + EXPECT_EQ(ham.num_terms(), 4); + + std::vector data{// Term 1: ZII with coefficient 1.0 + 0j + 2, 0, 0, 1.0, 0.0, + + // Term 2: IZI with coefficient 0.75 + 0j + 0, 2, 0, 0.75, 0.0, + + // Term 3: IIZ with coefficient 0.5 + 0j + 0, 0, 2, 0.5, 0.0, + + // Term 4: III with coefficient -2.25 + 0j + 0, 0, 0, -2.25, 0.0, + + // Total number of terms + 4}; + EXPECT_EQ(ham, cudaq::spin_op(data, 3)); +} + +TEST(CliqueHamiltonianTest, DisconnectedNodes) { + cudaqx::graph g; + + // Create two disconnected nodes + g.add_node(0, 1.0); + g.add_node(1, 1.0); + + auto ham = cudaq::solvers::get_clique_hamiltonian(g, 2.0); + ham.dump(); + // Should have 2 vertex terms + 1 penalty term for the non-edge + EXPECT_EQ(ham.num_terms(), 4); + std::vector data{// Term 1: ZZ with coefficient 0.5 + 0j + 2, 2, 0.5, 0.0, + + // Term 2: ZI with coefficient 0.0 + 0j + 2, 0, 0.0, 0.0, + + // Term 3: IZ with 
coefficient 0.0 + 0j + 0, 2, 0.0, 0.0, + + // Term 4: II with coefficient -0.5 + 0j + 0, 0, -0.5, 0.0, + + // Total number of terms + 4}; + EXPECT_EQ(ham, cudaq::spin_op(data, 2)); +} + +TEST(CliqueHamiltonianTest, TriangleWithDisconnectedNode) { + cudaqx::graph g; + + // Create K3 + one disconnected node + g.add_node(0, 1.0); + g.add_node(1, 1.0); + g.add_node(2, 1.0); + g.add_node(3, 1.0); + + g.add_edge(0, 1, 1.0); + g.add_edge(1, 2, 1.0); + g.add_edge(0, 2, 1.0); + + auto none_edges = g.get_disconnected_vertices(); + for (auto &ee : none_edges) + printf("%d %d \n", ee.first, ee.second); + auto ham = cudaq::solvers::get_clique_hamiltonian(g, 4.0); + ham.dump(); + + EXPECT_EQ(ham.num_terms(), 8); + + std::vector data{// Term 1: IIZZ with coefficient 1.0 + 0j + 0, 0, 2, 2, 1.0, 0.0, + + // Term 2: IZIZ with coefficient 1.0 + 0j + 0, 2, 0, 2, 1.0, 0.0, + + // Term 3: ZIIZ with coefficient 1.0 + 0j + 2, 0, 0, 2, 1.0, 0.0, + + // Term 4: IIIZ with coefficient -2.5 + 0j + 0, 0, 0, 2, -2.5, 0.0, + + // Term 5: IZII with coefficient -0.5 + 0j + 0, 2, 0, 0, -0.5, 0.0, + + // Term 6: IIII with coefficient 1.0 + 0j + 0, 0, 0, 0, 1.0, 0.0, + + // Term 7: IIZI with coefficient -0.5 + 0j + 0, 0, 2, 0, -0.5, 0.0, + + // Term 8: ZIII with coefficient -0.5 + 0j + 2, 0, 0, 0, -0.5, 0.0, + + // Total number of terms + 8}; + EXPECT_EQ(ham, cudaq::spin_op(data, 4)); +} + +TEST(CliqueHamiltonianTest, DifferentPenalties) { + cudaqx::graph g; + + // Create two disconnected nodes + g.add_node(0, 1.0); + g.add_node(1, 1.0); + + auto ham1 = cudaq::solvers::get_clique_hamiltonian(g, 2.0); + auto ham2 = cudaq::solvers::get_clique_hamiltonian(g, 4.0); + + // Same number of terms but different coefficients + EXPECT_EQ(ham1.num_terms(), ham2.num_terms()); + EXPECT_NE(ham1.to_string(), ham2.to_string()); +} + +TEST(CliqueHamiltonianTest, WeightedNodes) { + cudaqx::graph g; + + // Create two connected nodes with different weights + g.add_node(0, 2.0); + g.add_node(1, 3.0); + g.add_edge(0, 1, 1.0); + + auto ham = cudaq::solvers::get_clique_hamiltonian(g); + ham.dump(); + // Should have 2 vertex terms with different coefficients + EXPECT_EQ(ham.num_terms(), 3); + std::vector data{// Term 1: ZI with coefficient 1.0 + 0j + 2, 0, 1.0, 0.0, + + // Term 2: IZ with coefficient 1.5 + 0j + 0, 2, 1.5, 0.0, + + // Term 3: II with coefficient -2.5 + 0j + 0, 0, -2.5, 0.0, + + // Total number of terms + 3}; + EXPECT_EQ(ham, cudaq::spin_op(data, 2)); +} + +TEST(QAOAMaxCutTest, SingleEdge) { + // Create simple graph with single edge + cudaqx::graph g; + g.add_edge(0, 1); + + // Get MaxCut Hamiltonian + auto ham = cudaq::solvers::get_maxcut_hamiltonian(g); + + // Set up QAOA parameters + std::size_t num_layers = 1; + std::vector initial_params = {0.5, 0.5}; // gamma, beta + + // Run QAOA + auto result = cudaq::solvers::qaoa(ham, num_layers, initial_params, + {{"verbose", true}}); + + // Verify results + EXPECT_GT(std::abs(result.optimal_value), + 0.5); // Should be better than random guess + EXPECT_EQ(result.optimal_parameters.size(), 2 * num_layers); +} + +TEST(QAOAMaxCutTest, Triangle) { + cudaqx::graph g; + g.add_edge(0, 1); + g.add_edge(1, 2); + g.add_edge(0, 2); + + auto ham = cudaq::solvers::get_maxcut_hamiltonian(g); + ham.dump(); + // Try with 2 QAOA layers + std::size_t num_layers = 2; + std::vector initial_params = {0.5, 0.5, 0.5, + 0.5}; // gamma1, beta1, gamma2, beta2 + + auto result = cudaq::solvers::qaoa(ham, num_layers, initial_params, + {{"verbose", true}}); + + result.optimal_config.dump(); + + // For triangle, max cut 
value should be 2 + EXPECT_NEAR(std::abs(result.optimal_value), 2.0, 0.1); +} + +TEST(QAOAMaxCutTest, WeightedGraph) { + cudaqx::graph g; + g.add_edge(0, 1, 2.0); + g.add_edge(1, 2, 1.0); + g.add_edge(0, 2, 0.5); + + auto ham = cudaq::solvers::get_maxcut_hamiltonian(g); + + std::size_t num_layers = 3; + std::vector initial_params(2 * num_layers, 0.5); + + auto result = cudaq::solvers::qaoa(ham, num_layers, initial_params); + + // Max weighted cut should be at least 2.5 + EXPECT_GT(std::abs(result.optimal_value), 2.4); +} + +TEST(QAOAMaxCutTest, CustomMixer) { + cudaqx::graph g; + g.add_edge(0, 1); + g.add_edge(1, 2); + + auto problem_ham = cudaq::solvers::get_maxcut_hamiltonian(g); + + // Create custom X-mixer Hamiltonian + auto mixer_ham = cudaq::spin::x(0) + cudaq::spin::x(1) + cudaq::spin::x(2); + + std::size_t num_layers = 2; + std::vector initial_params = {0.5, 0.5, 0.5, 0.5}; + + auto result = + cudaq::solvers::qaoa(problem_ham, mixer_ham, num_layers, initial_params); + + EXPECT_GT(std::abs(result.optimal_value), 1.0); +} + +TEST(QAOAMaxCutTest, DisconnectedGraph) { + cudaqx::graph g; + g.add_edge(0, 1); + g.add_edge(2, 3); // Disconnected component + + auto ham = cudaq::solvers::get_maxcut_hamiltonian(g); + + std::size_t num_layers = 2; + std::vector initial_params(2 * num_layers, 0.5); + + auto result = cudaq::solvers::qaoa(ham, num_layers, initial_params); + + // Should find max cut of 2 (one cut per component) + EXPECT_NEAR(std::abs(result.optimal_value), 2.0, 0.1); + + // Check measurement results + // auto counts = result.optimal_config; + // EXPECT_GT(counts.size(), 0); +} + +TEST(QAOAMaxCutTest, ParameterOptimization) { + cudaqx::graph g; + g.add_edge(0, 1); + g.add_edge(1, 2); + + auto ham = cudaq::solvers::get_maxcut_hamiltonian(g); + + // Try different initial parameters + std::vector params1 = {0.1, 0.1}; + std::vector params2 = {1.0, 1.0}; + + auto result1 = cudaq::solvers::qaoa(ham, 1, params1); + auto result2 = cudaq::solvers::qaoa(ham, 1, params2); + + // Both should converge to similar optimal values + EXPECT_NEAR(std::abs(result1.optimal_value), std::abs(result2.optimal_value), + 0.1); +} diff --git a/libs/solvers/unittests/test_uccsd.cpp b/libs/solvers/unittests/test_uccsd.cpp new file mode 100644 index 0000000..b12925d --- /dev/null +++ b/libs/solvers/unittests/test_uccsd.cpp @@ -0,0 +1,67 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include <gtest/gtest.h> +#include <tuple> + +#include "cudaq/solvers/operators.h" +#include "cudaq/solvers/stateprep/uccsd.h" +#include "cudaq/solvers/vqe.h" + +#include "nvqpp/test_kernels.h" + +TEST(SolversUCCSDTester, checkUCCSD) { + + cudaq::solvers::molecular_geometry geometry{{"H", {0., 0., 0.}}, + {"H", {0., 0., .7474}}}; + auto molecule = cudaq::solvers::create_molecule(geometry, "sto-3g", 0, 0, + {.verbose = true}); + + auto numElectrons = molecule.n_electrons; + auto numQubits = molecule.n_orbitals * 2; + + // EXPECT_NEAR(molecule.fci_energy, -1.137, 1e-3); + EXPECT_NEAR(molecule.energies["hf_energy"], -1.1163255644, 1e-3); + EXPECT_EQ(numElectrons, 2); + EXPECT_EQ(numQubits, 4); + + auto [singlesAlpha, singlesBeta, doublesMixed, doublesAlpha, doublesBeta] = + cudaq::solvers::stateprep::get_uccsd_excitations(numElectrons, numQubits); + EXPECT_TRUE(doublesAlpha.empty()); + EXPECT_TRUE(doublesBeta.empty()); + EXPECT_TRUE(singlesAlpha.size() == 1); + EXPECT_EQ(singlesAlpha[0][0], 0); + EXPECT_EQ(singlesAlpha[0][1], 2); + EXPECT_EQ(singlesBeta[0][0], 1); + EXPECT_EQ(singlesBeta[0][1], 3); + EXPECT_EQ(doublesMixed[0][0], 0); + EXPECT_EQ(doublesMixed[0][1], 1); + EXPECT_EQ(doublesMixed[0][2], 3); + EXPECT_EQ(doublesMixed[0][3], 2); + EXPECT_TRUE(singlesBeta.size() == 1); + EXPECT_TRUE(doublesMixed.size() == 1); + + auto numParams = cudaq::solvers::stateprep::get_num_uccsd_parameters( + numElectrons, numQubits); + EXPECT_EQ(numParams, 3); + std::vector<double> init{-2., -2., -2.}; + auto optimizer = cudaq::optim::optimizer::get("cobyla"); + { + auto result = cudaq::solvers::vqe(callUccsdStatePrep, molecule.hamiltonian, + *optimizer, init, {{"verbose", true}}); + EXPECT_NEAR(result.energy, -1.137, 1e-3); + } + + { + auto result = cudaq::solvers::vqe( + callUccsdStatePrepWithArgs, molecule.hamiltonian, *optimizer, init, + [&](std::vector<double> x) { return std::make_tuple(x, 4, 2); }, + {{"verbose", true}}); + EXPECT_NEAR(result.energy, -1.137, 1e-3); + } +} diff --git a/libs/solvers/unittests/test_vqe.cpp b/libs/solvers/unittests/test_vqe.cpp new file mode 100644 index 0000000..bbe7262 --- /dev/null +++ b/libs/solvers/unittests/test_vqe.cpp @@ -0,0 +1,93 @@ +/******************************************************************************* + * Copyright (c) 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include <gtest/gtest.h> +#include <vector> + +#include "nvqpp/test_kernels.h" + +#include "cudaq.h" +#include "cudaq/solvers/vqe.h" + +TEST(SolversVQETester, checkAPI) { + + using namespace cudaq::spin; + + cudaq::spin_op h = 5.907 - 2.1433 * x(0) * x(1) - 2.1433 * y(0) * y(1) + + .21829 * z(0) - 6.125 * z(1); + { + auto cobyla = cudaq::optim::optimizer::get("cobyla"); + auto result = + cudaq::solvers::vqe(ansatz, h, *cobyla, {0.0}, {{"verbose", true}}); + EXPECT_NEAR(result.energy, -1.748, 1e-3); + } + + { + auto lbfgs = cudaq::optim::optimizer::get("lbfgs"); + auto gradient = + cudaq::observe_gradient::get("central_difference", ansatz, h); + auto [energy, params, data] = + cudaq::solvers::vqe(ansatz, h, *lbfgs, *gradient, {0.0}); + EXPECT_NEAR(energy, -1.748, 1e-3); + } + + { + // Test how one can handle non-standard kernel signature + auto optimizer = cudaq::optim::optimizer::get("cobyla"); + constexpr int N = 2; + auto result = cudaq::solvers::vqe( + ansatzNonStdSignature, h, *optimizer, {0.0}, + [&](const std::vector<double> &x) { return std::make_tuple(x[0], N); }); + + EXPECT_NEAR(result.energy, -1.748, 1e-3); + EXPECT_TRUE(result.iteration_data.size() > 1); + } + + { + // Test how one can handle non-standard kernel signature + constexpr int N = 2; + auto translator = [&](const std::vector<double> &x) { + return std::make_tuple(x[0], N); + }; + auto optimizer = cudaq::optim::optimizer::get("lbfgs"); + auto gradient = cudaq::observe_gradient::get( + "central_difference", ansatzNonStdSignature, h, translator); + // Wrap the kernel in another kernel with the standard signature + auto result = cudaq::solvers::vqe(ansatzNonStdSignature, h, *optimizer, + *gradient, {0.0}, translator); + + EXPECT_NEAR(result.energy, -1.748, 1e-3); + EXPECT_TRUE(result.iteration_data.size() > 1); + } + // Handle shots-based simulation + { + cudaq::set_random_seed(13); + auto optimizer = cudaq::optim::optimizer::get("cobyla"); + auto result = cudaq::solvers::vqe( + ansatz, h, *optimizer, std::vector<double>{0.0}, {{"shots", 10000}}); + printf("TEST %lf\n", result.energy); + result.iteration_data[0].result.dump(); + EXPECT_TRUE(result.energy > -2.0 && result.energy < -1.5); + } + + // Handle shots-based simulation with gradient + { + cudaq::set_random_seed(13); + auto optimizer = cudaq::optim::optimizer::get("lbfgs"); + auto gradient = cudaq::observe_gradient::get("parameter_shift", ansatz, h); + auto result = cudaq::solvers::vqe(ansatz, h, *optimizer, *gradient, {0.0}, + {{"shots", 10000}}); + printf("TEST %lf\n", result.energy); + result.iteration_data[0].result.dump(); + for (auto &o : result.iteration_data) { + printf("Type: %s\n", static_cast<int>(o.type) ? "gradient" : "function"); + o.result.dump(); + } + EXPECT_TRUE(result.energy > -2.0 && result.energy < -1.5); + } +} diff --git a/scripts/build_docs.sh b/scripts/build_docs.sh new file mode 100644 index 0000000..6e46b07 --- /dev/null +++ b/scripts/build_docs.sh @@ -0,0 +1,95 @@ +#!/bin/bash + +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +CUDAQX_INSTALL_PREFIX=${CUDAQX_INSTALL_PREFIX:-"$HOME/.cudaqx"} +DOCS_INSTALL_PREFIX=${DOCS_INSTALL_PREFIX:-"$CUDAQX_INSTALL_PREFIX/docs"} +export PYTHONPATH="$CUDAQX_INSTALL_PREFIX:${PYTHONPATH}" +export CUDAQX_DOCS_GEN_IMPORT_CUDAQ=ON + +# Process command line arguments +force_update="" + +__optind__=$OPTIND +OPTIND=1 +while getopts ":u:" opt; do + case $opt in + u) force_update="$OPTARG" + ;; + \?) echo "Invalid command line option -$OPTARG" >&2 + (return 0 2>/dev/null) && return 1 || exit 1 + ;; + esac +done +OPTIND=$__optind__ + +# Need to know the top-level of the repo +working_dir=`pwd` +repo_root=$(git rev-parse --show-toplevel) +docs_exit_code=0 # updated in each step + +# Make sure these are full path so that it doesn't matter where we use them +docs_build_output="$repo_root/build/docs" +sphinx_output_dir="$docs_build_output/sphinx" +doxygen_output_dir="$docs_build_output/doxygen" +dialect_output_dir="$docs_build_output/Dialects" +rm -rf "$docs_build_output" +doxygen_exe=doxygen + +# Generate API documentation using Doxygen +echo "Generating XML documentation using Doxygen..." +mkdir -p "${doxygen_output_dir}" +sed 's@${DOXYGEN_OUTPUT_PREFIX}@'"${doxygen_output_dir}"'@' "$repo_root/docs/Doxyfile.in" | \ +sed 's@${CUDAQX_REPO_ROOT}@'"${repo_root}"'@' > "${doxygen_output_dir}/Doxyfile" +"$doxygen_exe" "${doxygen_output_dir}/Doxyfile" 2> "$logs_dir/doxygen_error.txt" 1> "$logs_dir/doxygen_output.txt" +doxygen_exit_code=$? +if [ ! "$doxygen_exit_code" -eq "0" ]; then + cat "$logs_dir/doxygen_output.txt" "$logs_dir/doxygen_error.txt" + echo "Failed to generate documentation using doxygen." + echo "Doxygen exit code: $doxygen_exit_code" + docs_exit_code=11 +fi + +echo "Building CUDA-QX documentation using Sphinx..." +cd "$repo_root/docs" +# The docs build so far is fast such that we do not care about the cached outputs. +# Revisit this when caching becomes necessary. + +rm -rf sphinx/_doxygen/ +rm -rf sphinx/_mdgen/ +cp -r "$doxygen_output_dir" sphinx/_doxygen/ +# cp -r "$dialect_output_dir" sphinx/_mdgen/ # uncomment once we use the content from those files + +rm -rf "$sphinx_output_dir" +sphinx-build -v -n -W --keep-going -b html sphinx "$sphinx_output_dir" -j auto #2> "$logs_dir/sphinx_error.txt" 1> "$logs_dir/sphinx_output.txt" +sphinx_exit_code=$? +if [ ! "$sphinx_exit_code" -eq "0" ]; then + echo "Failed to generate documentation using sphinx-build." + echo "Sphinx exit code: $sphinx_exit_code" + echo "======== logs ========" + cat "$logs_dir/sphinx_output.txt" "$logs_dir/sphinx_error.txt" + echo "======================" + docs_exit_code=12 +fi + +rm -rf sphinx/_doxygen/ +rm -rf sphinx/_mdgen/ + +mkdir -p "$DOCS_INSTALL_PREFIX" +if [ "$docs_exit_code" -eq "0" ]; then + cp -r "$sphinx_output_dir"/* "$DOCS_INSTALL_PREFIX" + touch "$DOCS_INSTALL_PREFIX/.nojekyll" + echo "Documentation was generated in $DOCS_INSTALL_PREFIX." + echo "To browse it, open this url in a browser: file://$DOCS_INSTALL_PREFIX/index.html" +else + echo "Documentation generation failed with exit code $docs_exit_code." + echo "Check the logs in $logs_dir, and the documentation build output in $docs_build_output." 
+fi + +cd "$working_dir" && (return 0 2>/dev/null) && return $docs_exit_code || exit $docs_exit_code diff --git a/scripts/build_wheels.sh b/scripts/build_wheels.sh new file mode 100755 index 0000000..0db6b0e --- /dev/null +++ b/scripts/build_wheels.sh @@ -0,0 +1,67 @@ +#!/bin/sh + +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +IMAGE_NAME=ghcr.io/nvidia/cuda-quantum-devdeps:manylinux-amd64-cu12.0-gcc11-main + +CONTAINER_NAME=cudaqx_wheel_builder +CONTAINER_STATUS=$(docker container inspect -f '{{.State.Status}}' $CONTAINER_NAME 2>/dev/null) + +# Function to check if image was updated +check_image_update() { + local pull_output + + echo "Checking for updates to $IMAGE_NAME..." + + pull_output=$(docker pull "$IMAGE_NAME" 2>&1) + + if echo "$pull_output" | grep -q "Image is up to date"; then + echo "Image $IMAGE_NAME is already up to date" + return 1 + elif echo "$pull_output" | grep -q "Downloaded newer image"; then + echo "Image $IMAGE_NAME was updated" + return 0 + else + echo "Unable to determine if $IMAGE_NAME was updated" + return 2 + fi +} + +if check_image_update; then + if [ "$CONTAINER_STATUS" = "running" ]; then + docker stop $CONTAINER_NAME + docker rm $CONTAINER_NAME + elif [ "$CONTAINER_STATUS" != "" ]; then + docker rm $CONTAINER_NAME + fi + CONTAINER_STATUS="" +fi + +# Create the container if it doesn't exits. +if [ "$CONTAINER_STATUS" = "" ]; then + docker run -d --name $CONTAINER_NAME $IMAGE_NAME tail -f /dev/null + docker exec -it $CONTAINER_NAME /bin/sh -c "$(cat ./scripts/ci/build_cudaq_wheel.sh)" +fi + +echo "Starting container..." +docker start $CONTAINER_NAME + +echo "Copying CUDA-QX source to the container" +docker exec $CONTAINER_NAME rm -rf /cuda-qx +docker cp $(pwd) $CONTAINER_NAME:/cuda-qx + +echo "Building CUDA-QX wheels in the container..." +docker exec -it $CONTAINER_NAME /bin/sh -c "$(cat ./scripts/ci/build_qec_wheel.sh)" +docker exec -it $CONTAINER_NAME /bin/sh -c "$(cat ./scripts/ci/build_solvers_wheel.sh)" + +echo "Copying wheels from container..." +docker cp $CONTAINER_NAME:/wheels/ . + +echo "Stopping container..." +docker stop $CONTAINER_NAME diff --git a/scripts/ci/build_cudaq_wheel.sh b/scripts/ci/build_cudaq_wheel.sh new file mode 100644 index 0000000..c39a4a7 --- /dev/null +++ b/scripts/ci/build_cudaq_wheel.sh @@ -0,0 +1,157 @@ +#!/bin/sh + +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +# /!\ This script must be run inside an docker container /!\ + +mkdir /wheels + +git clone --filter=tree:0 https://github.com/NVIDIA/cuda-quantum.git /cuda-quantum +cd /cuda-quantum + +export CUDA_VERSION=12.0 + +# We need to use a newer toolchain because CUDA-QX libraries rely on c++20 +source /opt/rh/gcc-toolset-11/enable + +export CC=gcc +export CXX=g++ + +# ============================================================================== +# Installing dependencies +# ============================================================================== + +python_version=3.10 +python=python${python_version} +${python} -m pip install --no-cache-dir numpy auditwheel + +echo "Building MLIR bindings for ${python}" && \ + rm -rf "$LLVM_INSTALL_PREFIX/src" "$LLVM_INSTALL_PREFIX/python_packages" && \ + Python3_EXECUTABLE="$(which ${python})" \ + LLVM_PROJECTS='clang;mlir;python-bindings' \ + LLVM_CMAKE_CACHE=/cmake/caches/LLVM.cmake LLVM_SOURCE=/llvm-project \ + bash /scripts/build_llvm.sh -c Release -v + +# ============================================================================== +# Building CUDA-Q wheel +# ============================================================================== + +echo "Building CUDA-Q wheel for ${python}." +cd /cuda-quantum + +# Patch the pyproject.toml file to change the CUDA version if needed +if [ "${CUDA_VERSION#12.}" != "${CUDA_VERSION}" ]; then \ + sed -i "s/-cu11/-cu12/g" pyproject.toml && \ + sed -i -E "s/(nvidia-cublas-cu[0-9]* ~= )[0-9\.]*/\1${CUDA_VERSION}/g" pyproject.toml; \ + sed -i -E "s/(nvidia-cuda-runtime-cu[0-9]* ~= )[0-9\.]*/\1${CUDA_VERSION}/g" pyproject.toml; \ +fi + +# Build the wheel +echo "Building wheel for python${python_version}." + +# Find any external NVQIR simulator assets to be pulled in during wheel packaging. +export CUDAQ_EXTERNAL_NVQIR_SIMS=$(bash scripts/find_wheel_assets.sh assets) +export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$(pwd)/assets" +export CUQUANTUM_INSTALL_PREFIX=/usr/local/cuquantum +export CUTENSOR_INSTALL_PREFIX=/usr/local/cutensor + +bash scripts/configure_build.sh install-cuquantum +bash scripts/configure_build.sh install-cutensor + +SETUPTOOLS_SCM_PRETEND_VERSION=${CUDA_QUANTUM_VERSION:-0.0.0} \ +CUDACXX="$CUDA_INSTALL_PREFIX/bin/nvcc" CUDAHOSTCXX=$CXX \ +$python -m build --wheel + +cudaq_major=$(echo ${CUDA_VERSION} | cut -d . -f1) + +LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$(pwd)/_skbuild/lib" \ +$python -m auditwheel -v repair dist/cuda_quantum*linux_*.whl \ + --exclude libcustatevec.so.1 \ + --exclude libcutensornet.so.2 \ + --exclude libcublas.so.$cudaq_major \ + --exclude libcublasLt.so.$cudaq_major \ + --exclude libcusolver.so.$cudaq_major \ + --exclude libcutensor.so.2 \ + --exclude libnvToolsExt.so.1 \ + --exclude libcudart.so.$cudaq_major.0 \ + --wheel-dir /wheels + +# ============================================================================== +# Building CUDA-Q +# ============================================================================== + +echo "Building CUDA-Q." +cd /cuda-quantum + +CUDAQ_PATCH='diff --git a/CMakeLists.txt b/CMakeLists.txt +index 3f2c138..ddb15b3 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -540,7 +540,7 @@ add_subdirectory(tools) + add_subdirectory(utils) + + if (CUDAQ_ENABLE_PYTHON) +- find_package(Python 3 COMPONENTS Interpreter Development) ++ find_package(Python 3 COMPONENTS Interpreter Development.Module) + + # Apply specific patch to pybind11 for our documentation. 
+ # Only apply the patch if not already applied. +diff --git a/python/runtime/cudaq/domains/plugins/CMakeLists.txt b/python/runtime/cudaq/domains/plugins/CMakeLists.txt +index 7b7541d..2261334 100644 +--- a/python/runtime/cudaq/domains/plugins/CMakeLists.txt ++++ b/python/runtime/cudaq/domains/plugins/CMakeLists.txt +@@ -17,6 +17,6 @@ if (SKBUILD) + if (NOT Python_FOUND) + message(FATAL_ERROR "find_package(Python) not run?") + endif() +- target_link_libraries(cudaq-pyscf PRIVATE Python::Python pybind11::pybind11 cudaq-chemistry cudaq-spin cudaq cudaq-py-utils) ++ target_link_libraries(cudaq-pyscf PRIVATE Python::Module pybind11::pybind11 cudaq-chemistry cudaq-spin cudaq cudaq-py-utils) + endif() + install(TARGETS cudaq-pyscf DESTINATION lib/plugins)' + +CUDAQ_PATCH2='diff --git a/lib/Frontend/nvqpp/ConvertDecl.cpp b/lib/Frontend/nvqpp/ConvertDecl.cpp +index 149959c8e..ea23990f6 100644 +--- a/lib/Frontend/nvqpp/ConvertDecl.cpp ++++ b/lib/Frontend/nvqpp/ConvertDecl.cpp +@@ -169,8 +169,10 @@ bool QuakeBridgeVisitor::interceptRecordDecl(clang::RecordDecl *x) { + auto fnTy = cast(popType()); + return pushType(cc::IndirectCallableType::get(fnTy)); + } +- auto loc = toLocation(x); +- TODO_loc(loc, "unhandled type, " + name + ", in cudaq namespace"); ++ if (!isInNamespace(x, "solvers") && !isInNamespace(x, "qec")) { ++ auto loc = toLocation(x); ++ TODO_loc(loc, "unhandled type, " + name + ", in cudaq namespace"); ++ } + } + if (isInNamespace(x, "std")) { + if (name.equals("vector")) { +diff --git a/lib/Frontend/nvqpp/ConvertExpr.cpp b/lib/Frontend/nvqpp/ConvertExpr.cpp +index e6350d1c5..28c98c6cb 100644 +--- a/lib/Frontend/nvqpp/ConvertExpr.cpp ++++ b/lib/Frontend/nvqpp/ConvertExpr.cpp +@@ -2050,7 +2050,9 @@ bool QuakeBridgeVisitor::VisitCallExpr(clang::CallExpr *x) { + return pushValue(call.getResult(0)); + } + +- TODO_loc(loc, "unknown function, " + funcName + ", in cudaq namespace"); ++ if (!isInNamespace(func, "solvers") && !isInNamespace(func, "qec")) { ++ TODO_loc(loc, "unknown function, " + funcName + ", in cudaq namespace"); ++ } + } // end in cudaq namespace + + if (isInNamespace(func, "std")) {' + +echo "$CUDAQ_PATCH" | git apply --verbose +echo "$CUDAQ_PATCH2" | git apply --verbose + +$python -m venv --system-site-packages .venv +source .venv/bin/activate +CUDAQ_BUILD_TESTS=FALSE bash scripts/build_cudaq.sh -v + diff --git a/scripts/ci/build_qec_wheel.sh b/scripts/ci/build_qec_wheel.sh new file mode 100644 index 0000000..b61e6b5 --- /dev/null +++ b/scripts/ci/build_qec_wheel.sh @@ -0,0 +1,48 @@ +#!/bin/sh + +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +# /!\ This script must be run inside an docker container /!\ + +python_version=3.10 +python=python${python_version} + +# Delete previous wheels +rm wheels/cudaq_qec*.whl + +# Exit immediately if any command returns a non-zero status +set -e + +git config --global --add safe.directory /cuda-qx + +cd /cuda-qx/libs/qec + +# We need to use a newer toolchain because CUDA-QX libraries rely on c++20 +source /opt/rh/gcc-toolset-11/enable + +export CC=gcc +export CXX=g++ + +SKBUILD_CMAKE_ARGS="-DCUDAQ_DIR=$HOME/.cudaq/lib/cmake/cudaq;-DCMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN=/opt/rh/gcc-toolset-11/root/usr/lib/gcc/x86_64-redhat-linux/11/" \ +$python -m build --wheel + +LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$(pwd)/_skbuild/lib" \ +$python -m auditwheel -v repair dist/*.whl \ + --exclude libcudaq-em-default.so \ + --exclude libcudaq-python-interop.so \ + --exclude libcudaq-ensmallen.so \ + --exclude libcudaq-common.so \ + --exclude libcudaq-platform-default.so \ + --exclude libnvqir-qpp.so \ + --exclude libnvqir.so \ + --exclude libcudaq.so \ + --exclude libcudaq-spin.so \ + --exclude libcudaq-nlopt.so \ + --wheel-dir /wheels + diff --git a/scripts/ci/build_solvers_wheel.sh b/scripts/ci/build_solvers_wheel.sh new file mode 100644 index 0000000..1024fad --- /dev/null +++ b/scripts/ci/build_solvers_wheel.sh @@ -0,0 +1,51 @@ +#!/bin/sh + +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# /!\ This script must be run inside an docker container /!\ + +python_version=3.10 +python=python${python_version} + +# Delete previous wheels +rm wheels/cudaq_solvers*.whl + +# Exit immediately if any command returns a non-zero status +set -e + +git config --global --add safe.directory /cuda-qx + +cd /cuda-qx/libs/solvers + +# We need to use a newer toolchain because CUDA-QX libraries rely on c++20 +source /opt/rh/gcc-toolset-11/enable + +export CC=gcc +export CXX=g++ + +SKBUILD_CMAKE_ARGS="-DCUDAQ_DIR=$HOME/.cudaq/lib/cmake/cudaq;-DCMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN=/opt/rh/gcc-toolset-11/root/usr/lib/gcc/x86_64-redhat-linux/11/" \ +$python -m build --wheel + +LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$(pwd)/_skbuild/lib" \ +$python -m auditwheel -v repair dist/*.whl \ + --exclude libcudaq-em-default.so \ + --exclude libcudaq-python-interop.so \ + --exclude libcudaq-ensmallen.so \ + --exclude libcudaq-common.so \ + --exclude libcudaq-platform-default.so \ + --exclude libnvqir-qpp.so \ + --exclude libnvqir.so \ + --exclude libcudaq.so \ + --exclude libcudaq-spin.so \ + --exclude libcudaq-nlopt.so \ + --exclude libgfortran.so.5 \ + --exclude libquadmath.so.0 \ + --exclude libmvec.so.1 \ + --wheel-dir /wheels + diff --git a/scripts/ci/test_wheels.sh b/scripts/ci/test_wheels.sh new file mode 100644 index 0000000..6983b41 --- /dev/null +++ b/scripts/ci/test_wheels.sh @@ -0,0 +1,38 @@ +#!/bin/sh + +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. 
# +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# Exit immediately if any command returns a non-zero status +set -e + +# Installing dependencies +python_version=3.10 +python=python${python_version} + +apt-get update && apt-get install -y --no-install-recommends \ + libgfortran5 python${python_version} python$(echo ${python_version} | cut -d . -f 1)-pip + +${python} -m pip install --no-cache-dir pytest nvidia-cublas-cu11 + +cd /cuda-qx + +${python} -m pip install wheels/cuda_quantum_cu12-0.0.0-cp310-cp310-manylinux_2_28_x86_64.whl + +# QEC library +# ====================================== + +${python} -m pip install wheels/cudaq_qec-0.0.1-cp310-cp310-*.whl +${python} -m pytest libs/qec/python/tests/ + +# Solvers library +# ====================================== + +${python} -m pip install wheels/cudaq_solvers-0.0.1-cp310-cp310-*.whl +${python} -m pytest libs/solvers/python/tests/ + diff --git a/scripts/run_clang_format.sh b/scripts/run_clang_format.sh new file mode 100644 index 0000000..9ea9c24 --- /dev/null +++ b/scripts/run_clang_format.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# ============================================================================ # +# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# Usage: +# bash scripts/run_clang_format.sh +# -or- +# bash scripts/run_clang_format.sh -p /path/to/clang-format +# +# By default, this script will use the clang-format executable +# in your PATH, but you can modify that with the -p command line option. + +# Process command line arguments +__optind__=$OPTIND +OPTIND=1 +while getopts ":p:" opt; do + case $opt in + p) clang_format_executable="$OPTARG" + ;; + \?) echo "Invalid command line option -$OPTARG" >&2 + exit 1 + ;; + esac +done +OPTIND=$__optind__ +clang_format_executable=${clang_format_executable:-clang-format} + +# Run the script from the top-level of the repo +cd $(git rev-parse --show-toplevel) + +# Run Clang Format +git ls-files -- '*.cpp' '*.h' ':!:tpls/*' ':!:test' ':!:targettests' | xargs $clang_format_executable -i + +# Take us back to where we were +cd - diff --git a/scripts/test_cudaqx_build.sh b/scripts/test_cudaqx_build.sh new file mode 100755 index 0000000..06877a0 --- /dev/null +++ b/scripts/test_cudaqx_build.sh @@ -0,0 +1,99 @@ +#!/bin/bash + +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. 
# +# ============================================================================ # + +# Exit immediately if any command returns a non-zero status +set -e + +# ============================================================================== +# Handling options +# ============================================================================== + +show_help() { + echo "Usage: $0 [options]" + echo "Options:" + echo " -h, --help Show this help message" + echo " -i, --install Install libs" + echo " --cudaq-prefix Path to CUDA-Q's install prefix" + echo " (default: \$HOME/.cudaq)" + echo " --install-prefix Path to install prefix" + echo " (default: cudaq-prefix)" +} + +parse_options() { + while (( $# > 0 )); do + case "$1" in + -h|--help) + show_help + exit 0 + ;; + -i|--install) + install=1 + shift 1 + ;; + --cudaq-prefix) + if [[ -n "$2" && "$2" != -* ]]; then + cudaq_prefix=("$2") + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; + --install-prefix) + if [[ -n "$2" && "$2" != -* ]]; then + install_prefix=("$2") + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; + -*) + echo "Error: Unknown option $1" >&2 + show_help + exit 1 + ;; + *) + echo "Error: Unknown argument $1" >&2 + show_help + exit 1 + ;; + esac + done +} + +# Initialize an empty array to store libs names +libs=() +install=0 +cudaq_prefix=$HOME/.cudaq + +# Parse options +parse_options "$@" + +install_prefix=${install_prefix:-$cudaq_prefix} + + +# ============================================================================== +# Test top-level build +# ============================================================================== + +cmake -S . -B "build" \ + -DCUDAQ_DIR=$cudaq_prefix/lib/cmake/cudaq/ \ + -DCUDAQX_ENABLE_LIBS="all" \ + -DCMAKE_INSTALL_PREFIX=$install_prefix \ + -DCUDAQX_INCLUDE_TESTS=ON \ + -DCUDAQX_BINDINGS_PYTHON=ON +cmake --build "build" -j +cmake --build "build" --target run_tests +cmake --build "build" --target run_python_tests +if [ $install -eq 1 ]; then + cmake --build "build" --target install +fi + diff --git a/scripts/test_libs_builds.sh b/scripts/test_libs_builds.sh new file mode 100755 index 0000000..e077410 --- /dev/null +++ b/scripts/test_libs_builds.sh @@ -0,0 +1,121 @@ +#!/bin/bash + +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# This script test and possibly install standalone builds. 
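+# Example invocations (illustrative only; the flags are parsed below):
+#   bash scripts/test_libs_builds.sh                          # build and test every lib under libs/ except core
+#   bash scripts/test_libs_builds.sh -l libs/solvers --install
+#   bash scripts/test_libs_builds.sh --cudaq-prefix "$HOME/.cudaq" --install-prefix /opt/cudaqx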
+ +# Exit immediately if any command returns a non-zero status +set -e + +# ============================================================================== +# Handling options +# ============================================================================== + +show_help() { + echo "Usage: $0 [options]" + echo "Options:" + echo " -h, --help Show this help message" + echo " -i, --install Install lib(s)" + echo " -l, --lib Path to lib" + echo " --cudaq-prefix Path to to CUDA-Q's install prefix" + echo " (default: \$HOME/.cudaq)" + echo " --install-prefix Path to to CUDA-Q's install prefix" + echo " (default: cudaq-prefix)" +} + +parse_options() { + while (( $# > 0 )); do + case "$1" in + -h|--help) + show_help + exit 0 + ;; + -i|--install) + install=1 + shift 1 + ;; + -l|--lib) + if [[ -n "$2" && "$2" != -* ]]; then + libs+=("$2") + test_all=0 + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; + --cudaq-prefix) + if [[ -n "$2" && "$2" != -* ]]; then + cudaq_prefix=("$2") + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; + --install-prefix) + if [[ -n "$2" && "$2" != -* ]]; then + install_prefix=("$2") + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; + -*) + echo "Error: Unknown option $1" >&2 + show_help + exit 1 + ;; + *) + echo "Error: Unknown argument $1" >&2 + show_help + exit 1 + ;; + esac + done +} + +# Initialize an empty array to store libs names +libs=() +install=0 +cudaq_prefix=$HOME/.cudaq + +# Parse options +parse_options "$@" + +install_prefix=${install_prefix:-$cudaq_prefix} + +if [[ ${#libs[@]} -eq 0 ]]; then + # Use find command to get all `libs/` subdirectories, excluding `core` + while IFS= read -r dir; do + libs+=("$dir") + done < <(find libs/ -mindepth 1 -maxdepth 1 -type d -not -name "core") +fi + +# ============================================================================== +# Test standalone builds +# ============================================================================== + +# Build and test each library standalone in `build_$lib_name` directory. +for lib_path in "${libs[@]}"; do + lib_name=$(basename "$lib_path") + cmake -S $lib_path -B "build_$lib_name" \ + -DCUDAQ_DIR=$cudaq_prefix/lib/cmake/cudaq/ \ + -DCMAKE_INSTALL_PREFIX=$install_prefix \ + -DCUDAQX_INCLUDE_TESTS=ON \ + -DCUDAQX_BINDINGS_PYTHON=ON + cmake --build "build_$lib_name" -j + cmake --build "build_$lib_name" --target run_tests + cmake --build "build_$lib_name" --target run_python_tests + if [ $install -eq 1 ]; then + cmake --build "build_$lib_name" --target install + fi +done + diff --git a/scripts/test_wheels.sh b/scripts/test_wheels.sh new file mode 100755 index 0000000..8eb7fe1 --- /dev/null +++ b/scripts/test_wheels.sh @@ -0,0 +1,53 @@ +#!/bin/sh + +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +IMAGE_NAME=ubuntu:22.04 + +# Helper to stop and remove the container +cleanup() { + echo "Stopping and removing container..." + docker stop $CONTAINER_ID + docker rm $CONTAINER_ID +} + +docker pull $IMAGE_NAME +if [ $? -ne 0 ]; then + echo "Failed to pull image $IMAGE_NAME" + exit 1 +fi + +echo "Creating and starting temporary container..." 
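+# Keep the container alive with a no-op command so that the workspace can be
+# copied in with `docker cp` and the wheel tests run afterwards via `docker exec`.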
+CONTAINER_ID=$(docker run -d $IMAGE_NAME tail -f /dev/null) +if [ $? -ne 0 ]; then + echo "Failed to create and start container" + exit 1 +fi + +echo "Copying workspace into the container..." +docker cp $(pwd) $CONTAINER_ID:/cuda-qx +if [ $? -ne 0 ]; then + echo "Failed to copy source" + cleanup + exit 1 +fi + +echo "Testing wheels in the container..." +docker exec -it $CONTAINER_ID /bin/sh -c "$(cat ./scripts/ci/test_wheels.sh)" + +if [ $? -ne 0 ]; then + echo "Something went wrong." + # If we are in a terminal, then we can start a interactive shell + if [ -t 1 ]; then + echo "Starting interactive shell.." + docker exec -it $CONTAINER_ID /bin/bash + fi +fi + +cleanup diff --git a/scripts/validation/container/validate_container.sh b/scripts/validation/container/validate_container.sh new file mode 100755 index 0000000..219b7d1 --- /dev/null +++ b/scripts/validation/container/validate_container.sh @@ -0,0 +1,129 @@ +#!/bin/bash + +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +set -e + +CURRENT_ARCH=$(uname -m) +PY_TARGETS=("nvidia" "nvidia --option fp64", "qpp-cpu") +CPP_TARGETS=("nvidia" "nvidia --target-option fp64", "qpp-cpu") + +FINAL_IMAGE="ghcr.io/nvidia/cudaqx-private:latest" + +# Function to run Python tests +run_python_tests() { + local container_name=$1 + + echo "Running Python tests..." + + # Install pytest and other test dependencies + docker exec ${container_name} bash -c "\ + python3 -m pip install pytest --user" + + # Clone repository and run tests with specific target + docker exec ${container_name} bash -c "\ + cd /home/cudaq && \ + python3 -m pytest /opt/nvidia/cudaq/cudaqx_pytests -v" + + local test_result=$? + if [ ${test_result} -ne 0 ]; then + echo "Python tests failed for target ${target}" + return 1 + fi + + echo "Python tests completed successfully for target ${target}" + return 0 +} + +# Function to test examples +test_examples() { + local tag=$1 + local container_name="cudaqx-test-$(date +%s)" + + echo "Testing examples in ${tag}..." + # Start container with a command that keeps it running + docker run --net=host -d -it --name ${container_name} --gpus all ${tag} + + # Wait for container to be fully up + sleep 2 + + # Verify container is running + if ! docker ps | grep -q ${container_name}; then + echo "Container failed to start properly" + docker logs ${container_name} + return 1 + fi + + # Run Python tests first + if ! run_python_tests ${container_name} ${target}; then + docker stop ${container_name} + docker rm ${container_name} + return 1 + fi + + # Run tests for each target + for target in "${PY_TARGETS[@]}"; do + echo "Testing with target: ${target}" + + # Test Python examples + for domain in "solvers" "qec"; do + if docker exec ${container_name} bash -c "[ -d /home/cudaq/cudaqx-examples/${domain}/python ] && [ -n \"\$(ls -A /home/cudaq/cudaqx-examples/${domain}/python/*.py 2>/dev/null)\" ]"; then + echo "Testing ${domain} Python examples with target ${target}..." + if ! 
docker exec ${container_name} bash -c "cd /home/cudaq/cudaqx-examples/${domain}/python && \ + for f in *.py; do echo Testing \$f...; python3 \$f --target ${target} || exit 1; done"; then + echo "Python tests failed for ${domain} with target ${target}" + docker stop ${container_name} + docker rm ${container_name} + return 1 + fi + else + echo "Skipping ${domain} Python examples - directory empty or not found" + fi + done + done + + for target in "${CPP_TARGETS[@]}"; do + + # Test C++ examples + for domain in "solvers" "qec"; do + if docker exec ${container_name} bash -c "[ -d /home/cudaq/cudaqx-examples/${domain}/cpp ] && [ -n \"\$(ls -A /home/cudaq/cudaqx-examples/${domain}/cpp/*.cpp 2>/dev/null)\" ]"; then + echo "Testing ${domain} C++ examples with target ${target}..." + if ! docker exec ${container_name} bash -c "cd /home/cudaq/cudaqx-examples/${domain}/cpp && \ + for f in *.cpp; do \ + echo Compiling and running \$f...; \ + nvq++ --enable-mlir -lcudaq-${domain} --target ${target} \$f -o test_prog && \ + ./test_prog || exit 1; \ + rm test_prog; \ + done"; then + echo "C++ tests failed for ${domain} with target ${target}" + docker stop ${container_name} + docker rm ${container_name} + return 1 + fi + else + echo "Skipping ${domain} C++ examples - directory empty or not found" + fi + done + done + + # Cleanup + docker stop ${container_name} + docker rm ${container_name} +} + +# Main execution +echo "Starting CUDA-Q image validation for ${CURRENT_ARCH}..." + +tag="${FINAL_IMAGE}-${CURRENT_ARCH}" +test_examples ${tag} || { + echo "Tests failed for Python on ${CURRENT_ARCH}" + exit 1 +} + +echo "Validation complete successfully for ${CURRENT_ARCH}!" \ No newline at end of file diff --git a/scripts/validation/wheel/install_packages.sh b/scripts/validation/wheel/install_packages.sh new file mode 100644 index 0000000..9281431 --- /dev/null +++ b/scripts/validation/wheel/install_packages.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +PYTHON_WHEEL_VER=$1 +PYTHON_VERSION=$2 + +# Install packages +PYENV_VERSION=${PYTHON_VERSION} pyenv exec pip install --user \ + custatevec_cu12-1.7.0-py3-none-manylinux2014_x86_64.whl \ + cutensornet_cu12-2.6.0-py3-none-manylinux2014_x86_64.whl \ + cudensitymat_cu12-0.0.5-py3-none-manylinux2014_x86_64.whl \ + cuquantum_python_cu12-24.11.0-77-${PYTHON_WHEEL_VER}-${PYTHON_WHEEL_VER}-linux_x86_64.whl + +# Install CUDA-Q packages +PYENV_VERSION=${PYTHON_VERSION} pyenv exec pip install --user matplotlib \ + wheelhouse/cuda_quantum_cu12-0.9.0-${PYTHON_WHEEL_VER}-${PYTHON_WHEEL_VER}-manylinux_2_28_x86_64.whl \ + wheels-py$(echo ${PYTHON_VERSION} | cut -d'.' -f1,2)-$(dpkg --print-architecture)/cudaq_qec-0.1.0-${PYTHON_WHEEL_VER}-${PYTHON_WHEEL_VER}-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl \ + wheels-py$(echo ${PYTHON_VERSION} | cut -d'.' 
-f1,2)-$(dpkg --print-architecture)/cudaq_solvers-0.1.0-${PYTHON_WHEEL_VER}-${PYTHON_WHEEL_VER}-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + +# Install test dependencies +PYENV_VERSION=${PYTHON_VERSION} pyenv exec pip install pytest networkx --user \ No newline at end of file diff --git a/scripts/validation/wheel/validate_wheels.sh b/scripts/validation/wheel/validate_wheels.sh new file mode 100755 index 0000000..5ebc1b8 --- /dev/null +++ b/scripts/validation/wheel/validate_wheels.sh @@ -0,0 +1,112 @@ +#!/bin/bash + +# ============================================================================ # +# Copyright (c) 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +set -e + +CURRENT_ARCH=$(uname -m) +PYTHON_VERSIONS=("3.10.13" "3.11.7" "3.12.1") +TARGETS=("nvidia" "nvidia --option fp64", "qpp-cpu") +FINAL_IMAGE="ghcr.io/nvidia/cudaqx-private-wheels-test:latest" + +# Function to run Python tests +run_python_tests() { + local container_name=$1 + local python_version=$2 + + echo "Running Python tests for Python ${python_version} with target..." + + # Install pytest and other test dependencies + docker exec ${container_name} bash -c "\ + PYENV_VERSION=${python_version} pyenv exec pip install pytest networkx --user" + + # Clone repository and run tests with specific target + docker exec ${container_name} bash -c "\ + PYENV_VERSION=${python_version} pyenv exec python3 -m pytest /workspace/cudaqx-private/libs/ -v" + + local test_result=$? + if [ ${test_result} -ne 0 ]; then + echo "Python tests failed for Python ${python_version}" + return 1 + fi + + echo "Python tests completed successfully for Python ${python_version}" + return 0 +} + +# Function to test examples +test_examples() { + local tag=$1 + local container_name="cudaqx-test-$(date +%s)" + + echo "Testing examples in ${tag}..." + # Start container with a command that keeps it running + docker run --net=host -d -it --name ${container_name} --gpus all ${tag} + + # Wait for container to be fully up + sleep 2 + + # Verify container is running + if ! docker ps | grep -q ${container_name}; then + echo "Container failed to start properly" + docker logs ${container_name} + return 1 + fi + + # Loop through Python versions + for python_version in "${PYTHON_VERSIONS[@]}"; do + echo "Testing with Python version: ${python_version}" + + # Run Python tests first + if ! run_python_tests ${container_name} ${python_version} ${target}; then + docker stop ${container_name} + docker rm ${container_name} + return 1 + fi + + # Loop through targets + for target in "${TARGETS[@]}"; do + echo "Testing with target: ${target}" + + # Test Python examples + for domain in "solvers" "qec"; do + if docker exec ${container_name} bash -c "[ -d /workspace/cudaqx-private/examples/${domain}/python ] && [ -n \"\$(ls -A /workspace/cudaqx-private/examples/${domain}/python/*.py 2>/dev/null)\" ]"; then + echo "Testing ${domain} Python examples with Python ${python_version} and target ${target}..." + if ! 
docker exec ${container_name} bash -c "cd /workspace/cudaqx-private/examples/${domain}/python && \ + for f in *.py; do \ + echo Testing \$f...; \ + PYENV_VERSION=${python_version} pyenv exec python3 \$f --target ${target} || exit 1; \ + done"; then + echo "Python tests failed for ${domain} with Python ${python_version} and target ${target}" + docker stop ${container_name} + docker rm ${container_name} + return 1 + fi + else + echo "Skipping ${domain} Python examples - directory empty or not found" + fi + done + done + done + + # Cleanup + docker stop ${container_name} + docker rm ${container_name} +} + +# Main execution +echo "Starting CUDA-QX wheel validation for ${CURRENT_ARCH}..." + +tag="${FINAL_IMAGE}-${CURRENT_ARCH}" +test_examples ${tag} || { + echo "Tests failed for Python on ${CURRENT_ARCH}" + exit 1 +} + +echo "Validation completed successfully for ${CURRENT_ARCH}!" \ No newline at end of file
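Taken together, the wheel-related scripts in this patch suggest a local workflow along the following lines. This is an illustrative sketch only; it assumes Docker is available (with GPU access for the validation step) and that the images and wheel artifacts referenced in the scripts above already exist:

    # Illustrative end-to-end wheel flow using the scripts added in this patch.
    bash scripts/build_wheels.sh                       # build the wheels inside the manylinux dev container
    bash scripts/test_wheels.sh                        # smoke-test the wheels in a clean ubuntu:22.04 container
    bash scripts/validation/wheel/validate_wheels.sh   # validate examples and pytest suites across Python versions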