diff --git a/test/__init__.py b/test/__init__.py index b2b23fa4..6dadfb34 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -23,4 +23,3 @@ """ from .utils import * - diff --git a/test/algorithm/test_hamiltonian.py b/test/algorithm/test_hamiltonian.py index 8f24c007..e5e8a60f 100644 --- a/test/algorithm/test_hamiltonian.py +++ b/test/algorithm/test_hamiltonian.py @@ -25,74 +25,208 @@ from torchquantum.algorithm import Hamiltonian import numpy as np + def test_hamiltonian(): coeffs = [1.0, 1.0] paulis = ["ZZ", "ZX"] hamil = Hamiltonian(coeffs, paulis) assert np.allclose( - hamil.get_matrix().cpu().detach().numpy(), + hamil.get_matrix().cpu().detach().numpy(), np.array( - [[ 1.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], - [ 1.+0.j, -1.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, -1.+0.j, -1.+0.j], - [ 0.+0.j, 0.+0.j, -1.+0.j, 1.+0.j]])) - + [ + [1.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], + [1.0 + 0.0j, -1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], + [0.0 + 0.0j, 0.0 + 0.0j, -1.0 + 0.0j, -1.0 + 0.0j], + [0.0 + 0.0j, 0.0 + 0.0j, -1.0 + 0.0j, 1.0 + 0.0j], + ] + ), + ) coeffs = [0.6] paulis = ["XXZ"] hamil = Hamiltonian(coeffs, paulis) assert np.allclose( - hamil.get_matrix().cpu().detach().numpy(), + hamil.get_matrix().cpu().detach().numpy(), np.array( - [[ 0.0000+0.j, 0.0000+0.j, 0.0000+0.j, 0.0000+0.j, 0.0000+0.j, 0.0000+0.j, - 0.6000+0.j, 0.0000+0.j], - [ 0.0000+0.j, -0.0000+0.j, 0.0000+0.j, -0.0000+0.j, 0.0000+0.j, -0.0000+0.j, - 0.0000+0.j, -0.6000+0.j], - [ 0.0000+0.j, 0.0000+0.j, 0.0000+0.j, 0.0000+0.j, 0.6000+0.j, 0.0000+0.j, - 0.0000+0.j, 0.0000+0.j], - [ 0.0000+0.j, -0.0000+0.j, 0.0000+0.j, -0.0000+0.j, 0.0000+0.j, -0.6000+0.j, - 0.0000+0.j, -0.0000+0.j], - [ 0.0000+0.j, 0.0000+0.j, 0.6000+0.j, 0.0000+0.j, 0.0000+0.j, 0.0000+0.j, - 0.0000+0.j, 0.0000+0.j], - [ 0.0000+0.j, -0.0000+0.j, 0.0000+0.j, -0.6000+0.j, 0.0000+0.j, -0.0000+0.j, - 0.0000+0.j, -0.0000+0.j], - [ 0.6000+0.j, 0.0000+0.j, 0.0000+0.j, 0.0000+0.j, 0.0000+0.j, 0.0000+0.j, - 0.0000+0.j, 0.0000+0.j], - [ 0.0000+0.j, -0.6000+0.j, 0.0000+0.j, -0.0000+0.j, 0.0000+0.j, -0.0000+0.j, - 0.0000+0.j, -0.0000+0.j]])) - + [ + [ + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.6000 + 0.0j, + 0.0000 + 0.0j, + ], + [ + 0.0000 + 0.0j, + -0.0000 + 0.0j, + 0.0000 + 0.0j, + -0.0000 + 0.0j, + 0.0000 + 0.0j, + -0.0000 + 0.0j, + 0.0000 + 0.0j, + -0.6000 + 0.0j, + ], + [ + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.6000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + ], + [ + 0.0000 + 0.0j, + -0.0000 + 0.0j, + 0.0000 + 0.0j, + -0.0000 + 0.0j, + 0.0000 + 0.0j, + -0.6000 + 0.0j, + 0.0000 + 0.0j, + -0.0000 + 0.0j, + ], + [ + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.6000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + ], + [ + 0.0000 + 0.0j, + -0.0000 + 0.0j, + 0.0000 + 0.0j, + -0.6000 + 0.0j, + 0.0000 + 0.0j, + -0.0000 + 0.0j, + 0.0000 + 0.0j, + -0.0000 + 0.0j, + ], + [ + 0.6000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + 0.0000 + 0.0j, + ], + [ + 0.0000 + 0.0j, + -0.6000 + 0.0j, + 0.0000 + 0.0j, + -0.0000 + 0.0j, + 0.0000 + 0.0j, + -0.0000 + 0.0j, + 0.0000 + 0.0j, + -0.0000 + 0.0j, + ], + ] + ), + ) + hamil = Hamiltonian.from_file("test/algorithm/h2.txt") assert np.allclose( - hamil.matrix.cpu().detach().numpy(), + hamil.matrix.cpu().detach().numpy(), np.array( - [[-1.0636533 +0.j, 0. +0.j, 0. +0.j, - 0. +0.j, 0. +0.j, 0. +0.j, - 0.1809312 +0.j, 0. 
+0.j], - [ 0. +0.j, -1.0636533 +0.j, 0. +0.j, - 0. +0.j, 0. +0.j, 0. +0.j, - 0. +0.j, 0.1809312 +0.j], - [ 0. +0.j, 0. +0.j, -1.8369681 +0.j, - 0. +0.j, 0.1809312 +0.j, 0. +0.j, - 0. +0.j, 0. +0.j], - [ 0. +0.j, 0. +0.j, 0. +0.j, - -1.8369681 +0.j, 0. +0.j, 0.1809312 +0.j, - 0. +0.j, 0. +0.j], - [ 0. +0.j, 0. +0.j, 0.1809312 +0.j, - 0. +0.j, -0.24521835+0.j, 0. +0.j, - 0. +0.j, 0. +0.j], - [ 0. +0.j, 0. +0.j, 0. +0.j, - 0.1809312 +0.j, 0. +0.j, -0.24521835+0.j, - 0. +0.j, 0. +0.j], - [ 0.1809312 +0.j, 0. +0.j, 0. +0.j, - 0. +0.j, 0. +0.j, 0. +0.j, - -1.0636533 +0.j, 0. +0.j], - [ 0. +0.j, 0.1809312 +0.j, 0. +0.j, - 0. +0.j, 0. +0.j, 0. +0.j, - 0. +0.j, -1.0636533 +0.j]])) + [ + [ + -1.0636533 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.1809312 + 0.0j, + 0.0 + 0.0j, + ], + [ + 0.0 + 0.0j, + -1.0636533 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.1809312 + 0.0j, + ], + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + -1.8369681 + 0.0j, + 0.0 + 0.0j, + 0.1809312 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + ], + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + -1.8369681 + 0.0j, + 0.0 + 0.0j, + 0.1809312 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + ], + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.1809312 + 0.0j, + 0.0 + 0.0j, + -0.24521835 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + ], + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.1809312 + 0.0j, + 0.0 + 0.0j, + -0.24521835 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + ], + [ + 0.1809312 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + -1.0636533 + 0.0j, + 0.0 + 0.0j, + ], + [ + 0.0 + 0.0j, + 0.1809312 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + -1.0636533 + 0.0j, + ], + ] + ), + ) print("hamiltonian test passed!") -if __name__ == '__main__': + +if __name__ == "__main__": import pdb + pdb.set_trace() test_hamiltonian() diff --git a/test/functional/test_controlled_unitary.py b/test/functional/test_controlled_unitary.py index 0236d165..652ece59 100644 --- a/test/functional/test_controlled_unitary.py +++ b/test/functional/test_controlled_unitary.py @@ -26,6 +26,7 @@ from test.utils import check_all_close import numpy as np + def test_controlled_unitary(): state = tq.QuantumDevice(n_wires=2) # print(state) @@ -35,10 +36,16 @@ def test_controlled_unitary(): # ) state.paulix(0) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array([[0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j]]), + ) # print(state) state.controlled_unitary(params=tq.PauliX().matrix, c_wires=0, t_wires=1) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array([[0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j]]), + ) # gate(state) # print(state) @@ -57,9 +64,10 @@ def test_controlled_unitary(): state.paulix(0) rx_gate = tq.RX(has_params=True, init_params=0.25) state.controlled_unitary(params=rx_gate.matrix, c_wires=0, t_wires=1) - check_all_close(state.get_states_1d(), np.array([[0. +0.j , 0. +0.j , - 0.9921977+0.j , 0. 
-0.12467473j]])) - + check_all_close( + state.get_states_1d(), + np.array([[0.0 + 0.0j, 0.0 + 0.0j, 0.9921977 + 0.0j, 0.0 - 0.12467473j]]), + ) ############################ gate0 = tq.PauliX(n_wires=1, wires=0) @@ -71,33 +79,125 @@ def test_controlled_unitary(): state.paulix(0) state.paulix(1) state.controlled_unitary(params=tq.CNOT().matrix, c_wires=0, t_wires=[1, 2]) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) ############################ state = tq.QuantumDevice(n_wires=5) state.paulix(0) state.paulix(1) state.paulix(4) - state.controlled_unitary(params=tq.CNOT().matrix, c_wires=1, t_wires=[[0, 2], [4, 3]]) - check_all_close(state.get_states_1d(), - np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) - + state.controlled_unitary( + params=tq.CNOT().matrix, c_wires=1, t_wires=[[0, 2], [4, 3]] + ) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) + ############################ state = tq.QuantumDevice(n_wires=5) state.paulix(0) state.paulix(1) state.paulix(2) state.paulix(3) - state.controlled_unitary(params=tq.Toffoli().matrix, c_wires=[0, 1], t_wires=[2, 3, 4]) - check_all_close(state.get_states_1d(), - np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) - + state.controlled_unitary( + params=tq.Toffoli().matrix, c_wires=[0, 1], t_wires=[2, 3, 4] + ) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) + ############################ state = tq.QuantumDevice(n_wires=9) state.paulix(0) @@ -108,72 +208,530 @@ def test_controlled_unitary(): state.paulix(6) state.paulix(7) - state.controlled_unitary(params=tq.Toffoli().matrix, c_wires=[0, 1, 2], t_wires=[[3, 4, 5], [6, 7, 8]]) - check_all_close(state.get_states_1d(), - np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) + state.controlled_unitary( + params=tq.Toffoli().matrix, c_wires=[0, 1, 2], t_wires=[[3, 4, 5], [6, 7, 8]] + ) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 
0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 
+ 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) ############################### gate_cx = tq.QubitUnitaryFast.from_controlled_operation( @@ -183,7 +741,24 @@ def test_controlled_unitary(): state.paulix(0) state.paulix(1) state.controlled_unitary(params=gate_cx.matrix, c_wires=0, t_wires=[1, 2]) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) + if __name__ == "__main__": test_controlled_unitary() diff --git a/test/functional/test_func_mat_exp.py b/test/functional/test_func_mat_exp.py index 1e0cbdb3..e2a2c293 100644 --- a/test/functional/test_func_mat_exp.py +++ b/test/functional/test_func_mat_exp.py @@ -31,34 +31,74 @@ def test_func_mat_exp(): qdev = tq.QuantumDevice(n_wires=3) qdev.reset_states(bsz=1) - qdev.matrix_exp(wires=[0], params=torch.tensor([[1., 2.], [3., 4.+1.j]])) + qdev.matrix_exp(wires=[0], params=torch.tensor([[1.0, 2.0], [3.0, 4.0 + 1.0j]])) assert np.allclose( - qdev.get_states_1d().cpu().detach().numpy(), + qdev.get_states_1d().cpu().detach().numpy(), np.array( - [[44.2796+23.9129j, 0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j, - 85.5304+68.1896j, 0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j]]) - ) - + [ + [ + 44.2796 + 23.9129j, + 0.0000 + 0.0000j, + 0.0000 + 0.0000j, + 0.0000 + 0.0000j, + 85.5304 + 68.1896j, + 0.0000 + 0.0000j, + 0.0000 + 0.0000j, + 0.0000 + 0.0000j, + ] + ] + ), + ) + qdev = tq.QuantumDevice(n_wires=3) qdev.reset_states(bsz=2) - qdev.matrix_exp(wires=[0, 2], params=torch.tensor([[1., 2., 2, 1], - [3., 4.+1.j, 2, 1], - [1., 2., 2, 1], - [3., 4.+1.j, 2, 1]]) - ) # type: ignore + qdev.matrix_exp( + wires=[0, 2], + params=torch.tensor( + [ + [1.0, 2.0, 2, 1], + [3.0, 4.0 + 1.0j, 2, 1], + [1.0, 2.0, 2, 1], + [3.0, 4.0 + 1.0j, 2, 1], + ] + ), + ) # type: ignore # print(qdev.get_states_1d().cpu().detach().numpy()) assert np.allclose( - qdev.get_states_1d().cpu().detach().numpy(), + qdev.get_states_1d().cpu().detach().numpy(), np.array( - [[483.20386+254.27155j, 747.27014+521.95013j, 0.+0.j, 0.+0.j, 482.2038+254.27151j, 747.27014+521.95013j, 0.+0.j, 0.+0.j], - [483.20386+254.27155j, 747.27014+521.95013j, 0.+0.j, 0.+0.j, 482.2038+254.27151j, 747.27014+521.95013j, 0.+0.j, 0.+0.j]] - )) + [ + [ + 483.20386 + 254.27155j, + 747.27014 + 521.95013j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 482.2038 + 254.27151j, + 747.27014 + 521.95013j, + 0.0 + 0.0j, + 0.0 + 0.0j, + ], + [ + 483.20386 + 254.27155j, + 747.27014 + 521.95013j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 482.2038 + 254.27151j, + 747.27014 + 521.95013j, + 0.0 + 0.0j, + 0.0 + 0.0j, + ], + ] + ), + ) + -if __name__ == '__main__': +if __name__ == "__main__": import pdb + pdb.set_trace() - test_func_mat_exp() \ No 
newline at end of file + test_func_mat_exp() diff --git a/test/hadamard_grad/test_hadamard_grad.py b/test/hadamard_grad/test_hadamard_grad.py index 21b5cc0f..62fdb21e 100644 --- a/test/hadamard_grad/test_hadamard_grad.py +++ b/test/hadamard_grad/test_hadamard_grad.py @@ -5,16 +5,12 @@ @pytest.mark.skip def test_hadamard_grad(): - ''' + """ We assume the circuits have unique and ordered parameters for now. This simplifies the hadamard_grad function so that it only needs to return a list ordered as op_history - ''' + """ - example_circuits = [ - Circ1, - Circ2, - Circ3 - ] + example_circuits = [Circ1, Circ2, Circ3] for Circ in example_circuits: circ = Circ() @@ -23,19 +19,23 @@ def test_hadamard_grad(): # hadamard grad op_history = qdev.op_history n_wires = qdev.n_wires - observable = 'ZZZZ' + observable = "ZZZZ" hadamard_grad_result = hadamard_grad(op_history, n_wires, observable) - hadamard_grad_result = [gradient for gradient in hadamard_grad_result if gradient != None] + hadamard_grad_result = [ + gradient for gradient in hadamard_grad_result if gradient != None + ] # backpropagation expval.backward() # comparison for i, (name, param) in enumerate(circ.named_parameters()): - assert np.isclose(hadamard_grad_result[i], param.grad, atol=0.001), "The gradient for {} is incorrect.".format(name) + assert np.isclose( + hadamard_grad_result[i], param.grad, atol=0.001 + ), "The gradient for {} is incorrect.".format(name) print("tq.hadamard_grad test passed") -if __name__ == "__main__": - test_hadamard_grad() +if __name__ == "__main__": + test_hadamard_grad() \ No newline at end of file diff --git a/test/measurement/test_eval_observable.py b/test/measurement/test_eval_observable.py index 2968fe64..58245ee0 100644 --- a/test/measurement/test_eval_observable.py +++ b/test/measurement/test_eval_observable.py @@ -58,7 +58,9 @@ def test_expval_observable(): random_layer(qdev) expval_tq = expval_joint_analytical(qdev, observable="".join(obs))[0].item() - expval_tq_sampling = expval_joint_sampling(qdev, observable="".join(obs), n_shots=100000)[0].item() + expval_tq_sampling = expval_joint_sampling( + qdev, observable="".join(obs), n_shots=100000 + )[0].item() qiskit_circ = op_history2qiskit(qdev.n_wires, qdev.op_history) operator = pauli_str_op_dict[obs[0]] @@ -75,7 +77,9 @@ def test_expval_observable(): expval_qiskit = (~psi @ operator @ psi).eval().real # print(expval_tq, expval_qiskit) assert np.isclose(expval_tq, expval_qiskit, atol=1e-5) - if n_wires <= 3: # if too many wires, the stochastic method is not accurate due to limited shots + if ( + n_wires <= 3 + ): # if too many wires, the stochastic method is not accurate due to limited shots assert np.isclose(expval_tq_sampling, expval_qiskit, atol=1e-2) print("expval observable test passed") diff --git a/test/measurement/test_expval_joint_sampling_grouping.py b/test/measurement/test_expval_joint_sampling_grouping.py index 3d430c23..09492458 100644 --- a/test/measurement/test_expval_joint_sampling_grouping.py +++ b/test/measurement/test_expval_joint_sampling_grouping.py @@ -23,11 +23,15 @@ """ import torchquantum as tq -from torchquantum.measurement import expval_joint_analytical, expval_joint_sampling_grouping +from torchquantum.measurement import ( + expval_joint_analytical, + expval_joint_sampling_grouping, +) import numpy as np import random + def test_expval_joint_sampling_grouping(): n_obs = 20 n_wires = 4 @@ -44,12 +48,15 @@ def test_expval_joint_sampling_grouping(): expval_ana = {} for obs in obs_all: expval_ana[obs] = 
expval_joint_analytical(qdev, observable=obs)[0].item() - - expval_sam = expval_joint_sampling_grouping(qdev, observables=obs_all, n_shots_per_group=1000000) + + expval_sam = expval_joint_sampling_grouping( + qdev, observables=obs_all, n_shots_per_group=1000000 + ) for obs in obs_all: - # assert + # assert assert np.isclose(expval_ana[obs], expval_sam[obs][0].item(), atol=1e-2) print(obs, expval_ana[obs], expval_sam[obs][0].item()) -if __name__ == '__main__': + +if __name__ == "__main__": test_expval_joint_sampling_grouping() diff --git a/test/measurement/test_find_observable_groups.py b/test/measurement/test_find_observable_groups.py index c3e81348..656ac4a1 100644 --- a/test/measurement/test_find_observable_groups.py +++ b/test/measurement/test_find_observable_groups.py @@ -25,24 +25,53 @@ from torchquantum.measurement import find_observable_groups from random import shuffle + def test_find_observable_groups(): - in1 = ["XXIII", "YZXXX", "YZIXX", "YZIIX", "YZIIY", - "YZIYI", "YZIYZ", "IZIYI", "ZZZZZ", "ZZZZI", - "IZIIX", "XIZZX"] + in1 = [ + "XXIII", + "YZXXX", + "YZIXX", + "YZIIX", + "YZIIY", + "YZIYI", + "YZIYZ", + "IZIYI", + "ZZZZZ", + "ZZZZI", + "IZIIX", + "XIZZX", + ] out1 = find_observable_groups(in1) - assert out1 == {'YZIYZ': ['YZIYZ'], 'YZIYY': ['YZIIY', 'YZIYI', 'IZIYI'], 'ZZZZZ': ['ZZZZZ', 'ZZZZI'], 'YZXXX': ['YZXXX', 'YZIXX', 'YZIIX', 'IZIIX'], 'XXZZX': ['XXIII', 'XIZZX']} + assert out1 == { + "YZIYZ": ["YZIYZ"], + "YZIYY": ["YZIIY", "YZIYI", "IZIYI"], + "ZZZZZ": ["ZZZZZ", "ZZZZI"], + "YZXXX": ["YZXXX", "YZIXX", "YZIIX", "IZIIX"], + "XXZZX": ["XXIII", "XIZZX"], + } # print(out1) + def find_observable_groups_multi(): - in1 = ["XXIII", "YZXXX", "YZIXX", "YZIIX", "YZIIY", - "YZIYI", "YZIYZ", "IZIYI", "ZZZZZ", "ZZZZI", - "IZIIX", "XIZZX"] + in1 = [ + "XXIII", + "YZXXX", + "YZIXX", + "YZIIX", + "YZIIY", + "YZIYI", + "YZIYZ", + "IZIYI", + "ZZZZZ", + "ZZZZI", + "IZIIX", + "XIZZX", + ] for _ in range(100): shuffle(in1) print(find_observable_groups(in1)) -if __name__ == '__main__': +if __name__ == "__main__": test_find_observable_groups() # find_observable_groups_multi() - diff --git a/test/measurement/test_measure.py b/test/measurement/test_measure.py index 5873d3c7..38c45df6 100644 --- a/test/measurement/test_measure.py +++ b/test/measurement/test_measure.py @@ -30,7 +30,6 @@ def test_measure(): - n_shots = 10000 qdev = tq.QuantumDevice(n_wires=3, bsz=1, record_op=True) qdev.x(wires=2) # type: ignore diff --git a/test/operator/test_ControlledU.py b/test/operator/test_ControlledU.py index 1348862d..5bc01096 100644 --- a/test/operator/test_ControlledU.py +++ b/test/operator/test_ControlledU.py @@ -28,6 +28,7 @@ import torchquantum as tq import torchquantum.functional as tqf from test.utils import check_all_close + # import pdb # pdb.set_trace() import numpy as np @@ -35,6 +36,7 @@ flag = 4 + def test_ContorlledU(): state = tq.QuantumDevice(n_wires=3) # print(state) @@ -43,7 +45,23 @@ def test_ContorlledU(): state.qubitunitaryfast(wires=2, params=[[0, 1], [1, 0]]) # print(state) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + ] + ] + ), + ) state.qubitunitaryfast( wires=[0, 2, 1], params=( @@ -61,12 +79,27 @@ def test_ContorlledU(): ) # print(state) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, 0.+0.j, 1.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) state = tq.QuantumDevice(n_wires=3) # print(state) - gate1 = tq.QubitUnitaryFast(init_params=[[0, 1], [1, 0]], n_wires=1, wires=0) gate1(state) @@ -74,7 +107,23 @@ def test_ContorlledU(): gate2(state) # print(state) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + ] + ] + ), + ) gate3 = tq.QubitUnitaryFast( init_params=( @@ -97,8 +146,23 @@ def test_ContorlledU(): gate3(state) # print(state) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) - + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) ############################ state = tq.QuantumDevice(n_wires=2) @@ -110,12 +174,17 @@ def test_ContorlledU(): gate0(state) # print(state) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array([[0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j]]), + ) gate(state) # print(state) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) - + check_all_close( + state.get_states_1d(), + np.array([[0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j]]), + ) ############################ state = tq.QuantumDevice(n_wires=2) @@ -131,11 +200,12 @@ def test_ContorlledU(): gate0(state) gate(state) - check_all_close(state.get_states_1d(), np.array([[0. +0.j , 0. +0.j , - 0.9921977+0.j , 0. 
-0.12467473j]])) + check_all_close( + state.get_states_1d(), + np.array([[0.0 + 0.0j, 0.0 + 0.0j, 0.9921977 + 0.0j, 0.0 - 0.12467473j]]), + ) # print(state) - ############################ state = tq.QuantumDevice(n_wires=3) gate0 = tq.PauliX(n_wires=1, wires=0) @@ -152,8 +222,23 @@ def test_ContorlledU(): gate(state) # print(state) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) - + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) ############################ @@ -171,11 +256,47 @@ def test_ContorlledU(): gate2(state) gate(state) - check_all_close(state.get_states_1d(), - np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) ############################ state = tq.QuantumDevice(n_wires=5) @@ -196,11 +317,47 @@ def test_ContorlledU(): gate3(state) gate(state) - check_all_close(state.get_states_1d(), - np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) ############################ state = tq.QuantumDevice(n_wires=9) @@ -230,71 +387,527 @@ def test_ContorlledU(): gate6(state) gate(state) - check_all_close(state.get_states_1d(), - np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 
0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, - 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 
+ 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 
0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, 
+ 1.0 + 0.0j, + ] + ] + ), + ) ############################## state = tq.QuantumDevice(n_wires=3) @@ -314,8 +927,23 @@ def test_ContorlledU(): gate_ccx(state) - check_all_close(state.get_states_1d(), np.array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])) - + check_all_close( + state.get_states_1d(), + np.array( + [ + [ + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 0.0 + 0.0j, + 1.0 + 0.0j, + ] + ] + ), + ) if __name__ == "__main__": diff --git a/test/operator/test_op.py b/test/operator/test_op.py index 519bb2ef..677cb3b2 100644 --- a/test/operator/test_op.py +++ b/test/operator/test_op.py @@ -69,6 +69,7 @@ {"qiskit": qiskit_gate.CRYGate, "tq": tq.CRY}, {"qiskit": qiskit_gate.CRZGate, "tq": tq.CRZ}, # {'qiskit': qiskit_gate.?, 'tq': tq.CRot}, + {"qiskit": qiskit_gate.UGate, "tq": tq.U}, {"qiskit": qiskit_gate.U1Gate, "tq": tq.U1}, {"qiskit": qiskit_gate.U2Gate, "tq": tq.U2}, {"qiskit": qiskit_gate.U3Gate, "tq": tq.U3}, diff --git a/test/operator/test_op_hamil_exp.py b/test/operator/test_op_hamil_exp.py index de65fa06..082d31b0 100644 --- a/test/operator/test_op_hamil_exp.py +++ b/test/operator/test_op_hamil_exp.py @@ -28,29 +28,56 @@ from test.utils import check_all_close from torchquantum.device import QuantumDevice + def test_op_hamil_exp(): - hamil = Hamiltonian(coeffs=[1.0, 0.5], paulis=['ZZ', 'XX']) - op = OpHamilExp(hamil=hamil, - trainable=True, - theta=0.45) - + hamil = Hamiltonian(coeffs=[1.0, 0.5], paulis=["ZZ", "XX"]) + op = OpHamilExp(hamil=hamil, trainable=True, theta=0.45) + print(op.matrix) print(op.exponent_matrix) check_all_close( op.matrix, - np.array([[ 0.9686-0.2217j, 0.0000+0.0000j, 0.0000+0.0000j, -0.0250-0.1094j], - [ 0.0000+0.0000j, 0.9686+0.2217j, 0.0250-0.1094j, 0.0000+0.0000j], - [ 0.0000+0.0000j, 0.0250-0.1094j, 0.9686+0.2217j, 0.0000+0.0000j], - [-0.0250-0.1094j, 0.0000+0.0000j, 0.0000+0.0000j, 0.9686-0.2217j]]) + np.array( + [ + [ + 0.9686 - 0.2217j, + 0.0000 + 0.0000j, + 0.0000 + 0.0000j, + -0.0250 - 0.1094j, + ], + [ + 0.0000 + 0.0000j, + 0.9686 + 0.2217j, + 0.0250 - 0.1094j, + 0.0000 + 0.0000j, + ], + [ + 0.0000 + 0.0000j, + 0.0250 - 0.1094j, + 0.9686 + 0.2217j, + 0.0000 + 0.0000j, + ], + [ + -0.0250 - 0.1094j, + 0.0000 + 0.0000j, + 0.0000 + 0.0000j, + 0.9686 - 0.2217j, + ], + ] + ), ) check_all_close( op.exponent_matrix, - np.array([[0.-0.2250j, 0.+0.0000j, 0.+0.0000j, 0.-0.1125j], - [0.+0.0000j, 0.+0.2250j, 0.-0.1125j, 0.+0.0000j], - [0.+0.0000j, 0.-0.1125j, 0.+0.2250j, 0.+0.0000j], - [0.-0.1125j, 0.+0.0000j, 0.+0.0000j, 0.-0.2250j]]) + np.array( + [ + [0.0 - 0.2250j, 0.0 + 0.0000j, 0.0 + 0.0000j, 0.0 - 0.1125j], + [0.0 + 0.0000j, 0.0 + 0.2250j, 0.0 - 0.1125j, 0.0 + 0.0000j], + [0.0 + 0.0000j, 0.0 - 0.1125j, 0.0 + 0.2250j, 0.0 + 0.0000j], + [0.0 - 0.1125j, 0.0 + 0.0000j, 0.0 + 0.0000j, 0.0 - 0.2250j], + ] + ), ) qdev = QuantumDevice(n_wires=2) @@ -62,11 +89,26 @@ def test_op_hamil_exp(): check_all_close( qdev.get_states_1d().cpu().detach().numpy(), - np.array([[ 0.9686322 -0.22169423j , 0. +0.j , 0. +0.j, -0.02504631-0.1094314j ], - [ 0.9686322 -0.22169423j , 0. +0.j , 0. 
+0.j, -0.02504631-0.1094314j ]]) + np.array( + [ + [ + 0.9686322 - 0.22169423j, + 0.0 + 0.0j, + 0.0 + 0.0j, + -0.02504631 - 0.1094314j, + ], + [ + 0.9686322 - 0.22169423j, + 0.0 + 0.0j, + 0.0 + 0.0j, + -0.02504631 - 0.1094314j, + ], + ] + ), ) -if __name__ == '__main__': + +if __name__ == "__main__": # import pdb # pdb.set_trace() test_op_hamil_exp() diff --git a/test/plugin/test_qiskit2tq_op_history.py b/test/plugin/test_qiskit2tq_op_history.py index 48f2c82e..67a67e80 100644 --- a/test/plugin/test_qiskit2tq_op_history.py +++ b/test/plugin/test_qiskit2tq_op_history.py @@ -41,8 +41,8 @@ def test_qiskit2tp_op_history(): print(qmodule.Operator_list) - -if __name__ == '__main__': +if __name__ == "__main__": import pdb + pdb.set_trace() test_qiskit2tp_op_history() diff --git a/test/plugin/test_qiskit_plugins.py b/test/plugin/test_qiskit_plugins.py index b8e2cef7..684dbfc6 100644 --- a/test/plugin/test_qiskit_plugins.py +++ b/test/plugin/test_qiskit_plugins.py @@ -59,7 +59,9 @@ def test_expval_observable(): random_layer(qdev) qiskit_circ = op_history2qiskit(qdev.n_wires, qdev.op_history) - expval_qiskit_processor = processor.process_circs_get_joint_expval([qiskit_circ], "".join(obs), parallel=False) + expval_qiskit_processor = processor.process_circs_get_joint_expval( + [qiskit_circ], "".join(obs), parallel=False + ) operator = pauli_str_op_dict[obs[0]] for ob in obs[1:]: @@ -74,13 +76,16 @@ def test_expval_observable(): expval_qiskit = (~psi @ operator @ psi).eval().real # print(expval_qiskit_processor, expval_qiskit) - if n_wires <= 3: # if too many wires, the stochastic method is not accurate due to limited shots + if ( + n_wires <= 3 + ): # if too many wires, the stochastic method is not accurate due to limited shots assert np.isclose(expval_qiskit_processor, expval_qiskit, atol=1e-2) print("expval observable test passed") -if __name__ == '__main__': +if __name__ == "__main__": import pdb + pdb.set_trace() test_expval_observable() diff --git a/test/static_mode_test.py b/test/static_mode_test.py index 6c2a3428..a9629a63 100644 --- a/test/static_mode_test.py +++ b/test/static_mode_test.py @@ -29,7 +29,7 @@ import numpy as np from torchpack.utils.logging import logger -from torchquantum.operator.operators import op_name_dict +from torchquantum.operator import op_name_dict from torchquantum.functional import func_name_dict from torchquantum.macro import F_DTYPE from torchquantum.plugin.qiskit import ( diff --git a/test/utils.py b/test/utils.py index a1af86e5..6d3c1686 100644 --- a/test/utils.py +++ b/test/utils.py @@ -26,6 +26,7 @@ import numpy as np import torch + def check_all_close(a, b, rtol=1e-5, atol=1e-4): """Check that all elements of a and b are close.""" if isinstance(a, torch.Tensor): diff --git a/torchquantum/graph/graphs.py b/torchquantum/graph/graphs.py index 0dd7baa1..45bb13fc 100644 --- a/torchquantum/graph/graphs.py +++ b/torchquantum/graph/graphs.py @@ -238,11 +238,11 @@ def build_static_matrix(self): # for wire_modules in self.wire_module_list: for module in self.flat_module_list: name = module.name - if name in tq.Operator.fixed_ops: + if name in tq.operator.fixed_ops: if name not in self.static_matrix_dict.keys(): # fixed operator, all share one static matrix self.static_matrix_dict[module.name] = module.matrix.to(self.device) - elif name in tq.Operator.parameterized_ops and name not in [ + elif name in tq.operator.parameterized_ops and name not in [ "QubitUnitary", "QubitUnitaryFast", "TrainableUnitary", @@ -281,9 +281,9 @@ def build_static_matrix(self): # for wire_modules 
in self.wire_module_list: for module in self.flat_module_list: name = module.name - if name in tq.Operator.fixed_ops: + if name in tq.operator.fixed_ops: module.static_matrix = self.static_matrix_dict[name] - elif name in tq.Operator.parameterized_ops and name not in [ + elif name in tq.operator.parameterized_ops and name not in [ "QubitUnitary", "QubitUnitaryFast", "TrainableUnitary", diff --git a/torchquantum/layer/layers.py b/torchquantum/layer/layers.py index bd88fb13..9f129acc 100644 --- a/torchquantum/layer/layers.py +++ b/torchquantum/layer/layers.py @@ -391,7 +391,7 @@ def build_random_layer(self): ) else: operation = op(n_wires=n_op_wires, wires=op_wires) - elif op().name in tq.Operator.parameterized_ops: + elif op().name in tq.operator.parameterized_ops: operation = op(has_params=True, trainable=True, wires=op_wires) else: operation = op(wires=op_wires) diff --git a/torchquantum/measurement/measurements.py b/torchquantum/measurement/measurements.py index ec0221a0..2220e563 100644 --- a/torchquantum/measurement/measurements.py +++ b/torchquantum/measurement/measurements.py @@ -10,7 +10,8 @@ from collections import Counter, OrderedDict from torchquantum.functional import mat_dict -from torchquantum.operator import op_name_dict, Observable +# from ..operator import op_name_dict, Observable +import torchquantum.operator as op from copy import deepcopy import matplotlib.pyplot as plt @@ -120,10 +121,10 @@ def expval_joint_sampling_grouping( # rotation to the desired basis n_wires = qdev.n_wires - paulix = op_name_dict["paulix"] - pauliy = op_name_dict["pauliy"] - pauliz = op_name_dict["pauliz"] - iden = op_name_dict["i"] + paulix = op.op_name_dict["paulix"] + pauliy = op.op_name_dict["pauliy"] + pauliz = op.op_name_dict["pauliz"] + iden = op.op_name_dict["i"] pauli_dict = {"X": paulix, "Y": pauliy, "Z": pauliz, "I": iden} expval_all_obs = {} @@ -189,10 +190,10 @@ def expval_joint_sampling( """ # rotation to the desired basis n_wires = qdev.n_wires - paulix = op_name_dict["paulix"] - pauliy = op_name_dict["pauliy"] - pauliz = op_name_dict["pauliz"] - iden = op_name_dict["i"] + paulix = op.op_name_dict["paulix"] + pauliy = op.op_name_dict["pauliy"] + pauliz = op.op_name_dict["pauliz"] + iden = op.op_name_dict["i"] pauli_dict = {"X": paulix, "Y": pauliy, "Z": pauliz, "I": iden} qdev_clone = tq.QuantumDevice(n_wires=qdev.n_wires, bsz=qdev.bsz, device=qdev.device) @@ -277,7 +278,7 @@ def expval_joint_analytical( def expval( qdev: tq.QuantumDevice, wires: Union[int, List[int]], - observables: Union[Observable, List[Observable]], + observables: Union[op.Observable, List[op.Observable]], ): all_dims = np.arange(qdev.states.dim()) diff --git a/torchquantum/operator/__init__.py b/torchquantum/operator/__init__.py index 3d08a33e..d3070218 100644 --- a/torchquantum/operator/__init__.py +++ b/torchquantum/operator/__init__.py @@ -22,5 +22,8 @@ SOFTWARE. 
""" -from .operators import * +from .op_types import * from .op_hamil_exp import * +from .standard_gates import * + +from .standard_gates import op_name_dict diff --git a/torchquantum/operator/op_hamil_exp.py b/torchquantum/operator/op_hamil_exp.py index e9e16809..a8cec8bb 100644 --- a/torchquantum/operator/op_hamil_exp.py +++ b/torchquantum/operator/op_hamil_exp.py @@ -32,9 +32,9 @@ import numpy as np __all__ = [ - 'OpHamilExp', - 'OpPauliExp', - ] + "OpHamilExp", + "OpPauliExp", +] class OpHamilExp(QuantumModule): @@ -42,18 +42,16 @@ class OpHamilExp(QuantumModule): exp(-i * theta * H / 2) the default theta is 0.0 """ - def __init__(self, - hamil: Hamiltonian, - trainable: bool = True, - theta: float = 0.0): + + def __init__(self, hamil: Hamiltonian, trainable: bool = True, theta: float = 0.0): """Initialize the OpHamilExp module. - + Args: hamil: The Hamiltonian. has_params: Whether the module has parameters. trainable: Whether the parameters are trainable. theta: The initial value of theta. - + """ super().__init__() if trainable: @@ -61,11 +59,11 @@ def __init__(self, else: self.theta = torch.tensor(theta) self.hamil = hamil - + def get_exponent_matrix(self): """Get the matrix on exponent.""" return self.hamil.matrix * -1j * self.theta / 2 - + @property def exponent_matrix(self): """Get the matrix on exponent.""" @@ -74,12 +72,12 @@ def exponent_matrix(self): def get_matrix(self): """Get the overall matrix.""" return torch.matrix_exp(self.exponent_matrix) - + @property def matrix(self): """Get the overall matrix.""" return self.get_matrix() - + def forward(self, qdev, wires): """Forward the OpHamilExp module. Args: @@ -96,21 +94,23 @@ def forward(self, qdev, wires): class OpPauliExp(OpHamilExp): - def __init__(self, - coeffs: List[float], - paulis: List[str], - endianness: str = "big", - trainable: bool = True, - theta: float = 0.0): + def __init__( + self, + coeffs: List[float], + paulis: List[str], + endianness: str = "big", + trainable: bool = True, + theta: float = 0.0, + ): """Initialize the OpPauliExp module. - + Args: coeffs: The coefficients of the Hamiltonian. paulis: The operators of the Hamiltonian, described in strings. endianness: The endianness of the operators. Default is big. Qubit 0 is the most significant bit. trainable: Whether the parameters are trainable. theta: The initial value of theta. - + """ self.hamil = Hamiltonian(coeffs, paulis, endianness) super().__init__( @@ -121,7 +121,7 @@ def __init__(self, self.coeffs = coeffs self.paulis = paulis self.trainable = trainable - + def forward(self, qdev, wires): """Forward the OpHamilExp module. 
Args: @@ -132,17 +132,17 @@ def forward(self, qdev, wires): matrix = self.matrix.to(qdev.device) if qdev.record_op: qdev.op_history.append( - { - "name": self.__class__.__name__, # type: ignore - "wires": np.array(wires).squeeze().tolist(), - "coeffs": self.coeffs, - "paulis": self.paulis, - "inverse": False, - "trainable": self.trainable, - "params": self.theta.item(), - } - ) - + { + "name": self.__class__.__name__, # type: ignore + "wires": np.array(wires).squeeze().tolist(), + "coeffs": self.coeffs, + "paulis": self.paulis, + "inverse": False, + "trainable": self.trainable, + "params": self.theta.item(), + } + ) + tqf.qubitunitaryfast( q_device=qdev, wires=wires, diff --git a/torchquantum/operator/op_types.py b/torchquantum/operator/op_types.py new file mode 100644 index 00000000..bdf35337 --- /dev/null +++ b/torchquantum/operator/op_types.py @@ -0,0 +1,443 @@ +import torch +import torch.nn as nn +import torchquantum as tq +import torchquantum.functional.functionals as tqf +import numpy as np +from abc import ABCMeta +from ..macro import C_DTYPE, F_DTYPE +from typing import Iterable, Union, List +from enum import IntEnum + +__all__ = [ + "Operator", + "Operation", + "DiagonalOperation", + "Observable", + "WiresEnum", + "NParamsEnum", + "AnyNParams", + "AllWires", + "AnyWires", +] + + +class WiresEnum(IntEnum): + """Integer enumeration class + to represent the number of wires + an operation acts on.""" + + AnyWires = -1 + AllWires = 0 + + +class NParamsEnum(IntEnum): + """Integer enumeration class + to represent the number of wires + an operation acts on""" + + AnyNParams = -1 + + +AnyNParams = NParamsEnum.AnyNParams + + +AllWires = WiresEnum.AllWires +"""IntEnum: An enumeration which represents all wires in the +subsystem. It is equivalent to an integer with value 0.""" + +AnyWires = WiresEnum.AnyWires +"""IntEnum: An enumeration which represents any wires in the +subsystem. It is equivalent to an integer with value -1.""" + + +class Operator(tq.QuantumModule): + """The class for quantum operators.""" + + @property + def name(self): + """String for the name of the operator.""" + return self._name + + @name.setter + def name(self, value): + """Set the name of the operator. + + Args: + value (str): operator name. + + """ + self._name = value + + def __init__( + self, + has_params: bool = False, + trainable: bool = False, + init_params=None, + n_wires=None, + wires=None, + inverse=False, + ): + """__init__ function for Operator. + + Args: + has_params (bool, optional): Whether the operations has parameters. + Defaults to False. + trainable (bool, optional): Whether the parameters are trainable + (if contains parameters). Defaults to False. + init_params (torch.Tensor, optional): Initial parameters. + Defaults to None. + n_wires (int, optional): Number of qubits. Defaults to None. + wires (Union[int, List[int]], optional): Which qubit the operation + is applied to. Defaults to None. 
+ """ + super().__init__() + self.params = None + # number of wires of the operator + # n_wires is used in gates that can be applied to arbitrary number + # of qubits such as MultiRZ + self.n_wires = n_wires + # wires that the operator applies to + self.wires = wires + self._name = self.__class__.__name__ + # for static mode + self.static_matrix = None + self.inverse = inverse + self.clifford_quantization = False + + try: + assert not (trainable and not has_params) + except AssertionError: + has_params = True + logger.warning( + f"Module must have parameters to be trainable; " + f"Switched 'has_params' to True." + ) + + self.has_params = has_params + self.trainable = trainable + if self.has_params: + self.params = self.build_params(trainable=self.trainable) + self.reset_params(init_params) + + @classmethod + def _matrix(cls, params): + """The unitary matrix of the operator. + + Args: + params (torch.Tensor, optional): The parameters for parameterized + operators. + + Returns: None. + + """ + raise NotImplementedError + + @property + def matrix(self): + """The unitary matrix of the operator.""" + return self._matrix(self.params) + + @classmethod + def _eigvals(cls, params): + """The eigenvalues of the unitary matrix of the operator. + + Args: + params (torch.Tensor, optional): The parameters for parameterized + operators. + + Returns: None. + + """ + # Warning: The eigenvalues of the operator {cls.__name__} are not defined. + return None + + @property + def eigvals(self): + """The eigenvalues of the unitary matrix of the operator. + + Returns: Eigenvalues. + + """ + return self._eigvals(self.params) + + def _get_unitary_matrix(self): + """Obtain the unitary matrix of the operator. + + Returns: Unitary matrix. + + """ + return self.matrix + + def set_wires(self, wires): + """Set which qubits the operator is applied to. + + Args: + wires (Union[int, List[int]]): Qubits the operator is applied to. + + Returns: None. + + """ + self.wires = [wires] if isinstance(wires, int) else wires + + def forward( + self, q_device: tq.QuantumDevice, wires=None, params=None, inverse=None + ): + """Apply the operator to the quantum device states. + + Args: + q_device (torchquantum.QuantumDevice): Quantum Device that the + operator is applied to. + wires (Union[int, List[int]]): Qubits that the operator is + applied to. + params (torch.Tensor): Parameters of the operator + inverse (bool): Whether inverse the unitary matrix of the operator. 
+ + Returns: + + """ + if inverse is not None: + logger.warning("replace the inverse flag with the input") + self.inverse = inverse + # try: + # assert self.name in self.fixed_ops or \ + # self.has_params ^ (params is not None) + # except AssertionError as err: + # logger.exception(f"Parameterized gate either has its " + # f"own parameters or has input as parameters") + # raise err + + # try: + # assert not (self.wires is None and wires is None) + # except AssertionError as err: + # logger.exception(f"Need to specify the wires either when " + # f"initialize or when forward") + # raise err + + if params is not None: + self.params = params + + if self.params is not None: + self.params = ( + self.params.unsqueeze(-1) if self.params.dim() == 1 else self.params + ) + + if wires is not None: + # update the wires + wires = [wires] if isinstance(wires, int) else wires + self.wires = wires + + # self.inverse = inverse + + if self.static_mode: + self.parent_graph.add_op(self) + return + + # non-parameterized gate + if self.params is None: + if self.n_wires is None: + self.func(q_device, self.wires, inverse=self.inverse) # type: ignore + else: + self.func(q_device, self.wires, n_wires=self.n_wires, inverse=self.inverse) # type: ignore + else: + if isinstance(self.noise_model_tq, tq.NoiseModelTQPhase): + params = self.noise_model_tq.add_noise(self.params) + else: + params = self.params + + if self.clifford_quantization: + params = CliffordQuantizer.quantize_sse(params) + if self.n_wires is None: + self.func(q_device, self.wires, params=params, inverse=self.inverse) + else: + self.func( + q_device, + self.wires, + params=params, + n_wires=self.n_wires, + inverse=self.inverse, + ) + + if self.noise_model_tq is not None and self.noise_model_tq.is_add_noise: + noise_ops = self.noise_model_tq.sample_noise_op(self) + if len(noise_ops): + for noise_op in noise_ops: + noise_op(q_device) + + def __repr__(self): + return f" class: {self.name} \n parameters: {self.params} \n wires: {self.wires} \n inverse: {self.inverse}" + + +class Observable(Operator, metaclass=ABCMeta): + """Class for Observables.""" + + def __init__( + self, + has_params: bool = False, + trainable: bool = False, + init_params=None, + n_wires=None, + wires=None, + inverse=False, + ): + """Init function of the Observable class + + Args: + has_params (bool, optional): Whether the operations has parameters. + Defaults to False. + trainable (bool, optional): Whether the parameters are trainable + (if contains parameters). Defaults to False. + init_params (torch.Tensor, optional): Initial parameters. + Defaults to None. + n_wires (int, optional): Number of qubits. Defaults to None. + wires (Union[int, List[int]], optional): Which qubit the operation + is applied to. Defaults to None. + """ + super().__init__( + has_params=has_params, + trainable=trainable, + init_params=init_params, + n_wires=n_wires, + wires=wires, + inverse=inverse, + ) + self.return_type = None + + def diagonalizing_gates(self): + """The diagonalizing gates when perform measurements. + + Returns: None. + + """ + raise NotImplementedError + + +class Operation(Operator, metaclass=ABCMeta): + """_summary_""" + + def __init__( + self, + has_params: bool = False, + trainable: bool = False, + init_params=None, + n_wires=None, + wires=None, + inverse=False, + ): + """_summary_ + + Args: + has_params (bool, optional): Whether the operations has parameters. + Defaults to False. + trainable (bool, optional): Whether the parameters are trainable + (if contains parameters). 
Defaults to False. + init_params (torch.Tensor, optional): Initial parameters. + Defaults to None. + n_wires (int, optional): Number of qubits. Defaults to None. + wires (Union[int, List[int]], optional): Which qubit the operation is applied to. + Defaults to None. + """ + super().__init__( + has_params=has_params, + trainable=trainable, + init_params=init_params, + n_wires=n_wires, + wires=wires, + inverse=inverse, + ) + if type(self.num_wires) == int: + self.n_wires = self.num_wires + + @property + def matrix(self): + """The unitary matrix of the operator.""" + op_matrix = self._matrix(self.params) + + return op_matrix + + @property + def eigvals(self): + """ "The eigenvalues of the unitary matrix of the operator. + + Returns: + torch.Tensor: Eigenvalues. + + """ + op_eigvals = self._eigvals(self.params) + + return op_eigvals + + def init_params(self): + """Initialize the parameters. + + Raises: + NotImplementedError: The init param function is not implemented. + """ + raise NotImplementedError + + def build_params(self, trainable): + """Build parameters. + + Args: + trainable (bool): Whether the parameters are trainable. + + Returns: + torch.Tensor: Built parameters. + """ + parameters = nn.Parameter(torch.empty([1, self.num_params], dtype=F_DTYPE)) + parameters.requires_grad = True if trainable else False + # self.register_parameter(f"{self.name}_params", parameters) + return parameters + + def reset_params(self, init_params=None): + """Reset parameters. + + Args: + init_params (torch.Tensor, optional): Input the initialization + parameters. Defaults to None. + """ + if init_params is not None: + if isinstance(init_params, Iterable): + for k, init_param in enumerate(init_params): + torch.nn.init.constant_(self.params[:, k], init_param) + else: + torch.nn.init.constant_(self.params, init_params) + else: + torch.nn.init.uniform_(self.params, -np.pi, np.pi) + + +class DiagonalOperation(Operation, metaclass=ABCMeta): + """Class for Diagonal Operation.""" + + @classmethod + def _eigvals(cls, params): + """The eigenvalues of the unitary matrix of the operator. + + Args: + params (torch.Tensor, optional): The parameters for parameterized + operators. + + Returns: None. + raise NotImplementedError + """ + + @property + def eigvals(self): + """The eigenvalues of the unitary matrix of the operator. + + Returns: Eigenvalues. + + """ + return super().eigvals + + @classmethod + def _matrix(cls, params): + """The unitary matrix of the operator. + + Args: + params (torch.Tensor, optional): The parameters for parameterized + operators. + + Returns: None. + + """ + return torch.diag(cls._eigvals(params)) diff --git a/torchquantum/operator/operators.py b/torchquantum/operator/operators.py deleted file mode 100644 index 1d1d4b8b..00000000 --- a/torchquantum/operator/operators.py +++ /dev/null @@ -1,1778 +0,0 @@ -""" -MIT License - -Copyright (c) 2020-present TorchQuantum Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -""" - -import torch -import torch.nn as nn -import torchquantum as tq -import torchquantum.functional.functionals as tqf -import numpy as np - -from enum import IntEnum -from torchquantum.functional import mat_dict -from torchquantum.util.quantization.clifford_quantization import CliffordQuantizer -from abc import ABCMeta -from ..macro import C_DTYPE, F_DTYPE -from torchpack.utils.logging import logger -from typing import Iterable, Union, List - -__all__ = [ - "op_name_dict", - "Operator", - "Operation", - "DiagonalOperation", - "Observable", - "Hadamard", - "H", - "SHadamard", - "SH", - "PauliX", - "PauliY", - "PauliZ", - "I", - "S", - "T", - "SX", - "CNOT", - "CZ", - "CY", - "RX", - "RY", - "RZ", - "RXX", - "RYY", - "RZZ", - "RZX", - "SWAP", - "SSWAP", - "CSWAP", - "Toffoli", - "PhaseShift", - "Rot", - "MultiRZ", - "CRX", - "CRY", - "CRZ", - "CRot", - "U1", - "U2", - "U3", - "CU", - "CU1", - "CU2", - "CU3", - "QubitUnitary", - "QubitUnitaryFast", - "TrainableUnitary", - "TrainableUnitaryStrict", - "MultiCNOT", - "MultiXCNOT", - "Reset", - "SingleExcitation", - "EchoedCrossResonance", - "ECR", - "QFT", - "SDG", - "TDG", - "SXDG", - "CH", - "CCZ", - "ISWAP", - "CS", - "CSDG", - "CSX", - "CHadamard", - "CCZ", - "DCX", - "XXMINYY", - "XXPLUSYY", - "C3X", - "R", - "C4X", - "RC3X", - "RCCX", - "GlobalPhase", - "C3SX", -] - - -class WiresEnum(IntEnum): - """Integer enumeration class - to represent the number of wires - an operation acts on.""" - - AnyWires = -1 - AllWires = 0 - - -class NParamsEnum(IntEnum): - """Integer enumeration class - to represent the number of wires - an operation acts on""" - - AnyNParams = -1 - - -AnyNParams = NParamsEnum.AnyNParams - - -AllWires = WiresEnum.AllWires -"""IntEnum: An enumeration which represents all wires in the -subsystem. It is equivalent to an integer with value 0.""" - -AnyWires = WiresEnum.AnyWires -"""IntEnum: An enumeration which represents any wires in the -subsystem. It is equivalent to an integer with value -1.""" - - -class Operator(tq.QuantumModule): - """The class for quantum operators.""" - - fixed_ops = [ - "Hadamard", - "SHadamard", - "PauliX", - "PauliY", - "PauliZ", - "I", - "S", - "T", - "SX", - "CNOT", - "CZ", - "CY", - "SWAP", - "SSWAP", - "CSWAP", - "Toffoli", - "MultiCNOT", - "MultiXCNOT", - "Reset", - "EchoedCrossResonance", - "QFT", - "SDG", - "TDG", - "SXDG", - "CH", - "CCZ", - "ISWAP", - "CS", - "CSDG", - "CSX", - "CHadamard", - "DCX", - "C3X", - "C3SX", - "RCCX", - "RC3X", - "C4X", - ] - - parameterized_ops = [ - "RX", - "RY", - "RZ", - "RXX", - "RYY", - "RZZ", - "RZX", - "PhaseShift", - "Rot", - "MultiRZ", - "CRX", - "CRY", - "CRZ", - "CRot", - "U1", - "U2", - "U3", - "CU", - "CU1", - "CU2", - "CU3", - "QubitUnitary", - "QubitUnitaryFast", - "TrainableUnitary", - "TrainableUnitaryStrict", - "SingleExcitation", - "XXMINYY", - "XXPLUSYY", - "R", - "GlobalPhase", - ] - - @property - def name(self): - """String for the name of the operator.""" - return self._name - - @name.setter - def name(self, value): - """Set the name of the operator. 
- - Args: - value (str): operator name. - - """ - self._name = value - - def __init__( - self, - has_params: bool = False, - trainable: bool = False, - init_params=None, - n_wires=None, - wires=None, - inverse=False, - ): - """__init__ function for Operator. - - Args: - has_params (bool, optional): Whether the operations has parameters. - Defaults to False. - trainable (bool, optional): Whether the parameters are trainable - (if contains parameters). Defaults to False. - init_params (torch.Tensor, optional): Initial parameters. - Defaults to None. - n_wires (int, optional): Number of qubits. Defaults to None. - wires (Union[int, List[int]], optional): Which qubit the operation - is applied to. Defaults to None. - """ - super().__init__() - self.params = None - # number of wires of the operator - # n_wires is used in gates that can be applied to arbitrary number - # of qubits such as MultiRZ - self.n_wires = n_wires - # wires that the operator applies to - self.wires = wires - self._name = self.__class__.__name__ - # for static mode - self.static_matrix = None - self.inverse = inverse - self.clifford_quantization = False - - try: - assert not (trainable and not has_params) - except AssertionError: - has_params = True - logger.warning( - f"Module must have parameters to be trainable; " - f"Switched 'has_params' to True." - ) - - self.has_params = has_params - self.trainable = trainable - if self.has_params: - self.params = self.build_params(trainable=self.trainable) - self.reset_params(init_params) - - @classmethod - def _matrix(cls, params): - """The unitary matrix of the operator. - - Args: - params (torch.Tensor, optional): The parameters for parameterized - operators. - - Returns: None. - - """ - raise NotImplementedError - - @property - def matrix(self): - """The unitary matrix of the operator.""" - return self._matrix(self.params) - - @classmethod - def _eigvals(cls, params): - """The eigenvalues of the unitary matrix of the operator. - - Args: - params (torch.Tensor, optional): The parameters for parameterized - operators. - - Returns: None. - - """ - # Warning: The eigenvalues of the operator {cls.__name__} are not defined. - return None - - @property - def eigvals(self): - """The eigenvalues of the unitary matrix of the operator. - - Returns: Eigenvalues. - - """ - return self._eigvals(self.params) - - def _get_unitary_matrix(self): - """Obtain the unitary matrix of the operator. - - Returns: Unitary matrix. - - """ - return self.matrix - - def set_wires(self, wires): - """Set which qubits the operator is applied to. - - Args: - wires (Union[int, List[int]]): Qubits the operator is applied to. - - Returns: None. - - """ - self.wires = [wires] if isinstance(wires, int) else wires - - def forward( - self, q_device: tq.QuantumDevice, wires=None, params=None, inverse=None - ): - """Apply the operator to the quantum device states. - - Args: - q_device (torchquantum.QuantumDevice): Quantum Device that the - operator is applied to. - wires (Union[int, List[int]]): Qubits that the operator is - applied to. - params (torch.Tensor): Parameters of the operator - inverse (bool): Whether inverse the unitary matrix of the operator. 
- - Returns: - - """ - if inverse is not None: - logger.warning("replace the inverse flag with the input") - self.inverse = inverse - # try: - # assert self.name in self.fixed_ops or \ - # self.has_params ^ (params is not None) - # except AssertionError as err: - # logger.exception(f"Parameterized gate either has its " - # f"own parameters or has input as parameters") - # raise err - - # try: - # assert not (self.wires is None and wires is None) - # except AssertionError as err: - # logger.exception(f"Need to specify the wires either when " - # f"initialize or when forward") - # raise err - - if params is not None: - self.params = params - - if self.params is not None: - self.params = ( - self.params.unsqueeze(-1) if self.params.dim() == 1 else self.params - ) - - if wires is not None: - # update the wires - wires = [wires] if isinstance(wires, int) else wires - self.wires = wires - - # self.inverse = inverse - - if self.static_mode: - self.parent_graph.add_op(self) - return - - # non-parameterized gate - if self.params is None: - if self.n_wires is None: - self.func(q_device, self.wires, inverse=self.inverse) # type: ignore - else: - self.func(q_device, self.wires, n_wires=self.n_wires, inverse=self.inverse) # type: ignore - else: - if isinstance(self.noise_model_tq, tq.NoiseModelTQPhase): - params = self.noise_model_tq.add_noise(self.params) - else: - params = self.params - - if self.clifford_quantization: - params = CliffordQuantizer.quantize_sse(params) - if self.n_wires is None: - self.func(q_device, self.wires, params=params, inverse=self.inverse) - else: - self.func( - q_device, - self.wires, - params=params, - n_wires=self.n_wires, - inverse=self.inverse, - ) - - if self.noise_model_tq is not None and self.noise_model_tq.is_add_noise: - noise_ops = self.noise_model_tq.sample_noise_op(self) - if len(noise_ops): - for noise_op in noise_ops: - noise_op(q_device) - - def __repr__(self): - return f" class: {self.name} \n parameters: {self.params} \n wires: {self.wires} \n inverse: {self.inverse}" - - -class Observable(Operator, metaclass=ABCMeta): - """Class for Observables.""" - - def __init__( - self, - has_params: bool = False, - trainable: bool = False, - init_params=None, - n_wires=None, - wires=None, - inverse=False, - ): - """Init function of the Observable class - - Args: - has_params (bool, optional): Whether the operations has parameters. - Defaults to False. - trainable (bool, optional): Whether the parameters are trainable - (if contains parameters). Defaults to False. - init_params (torch.Tensor, optional): Initial parameters. - Defaults to None. - n_wires (int, optional): Number of qubits. Defaults to None. - wires (Union[int, List[int]], optional): Which qubit the operation - is applied to. Defaults to None. - """ - super().__init__( - has_params=has_params, - trainable=trainable, - init_params=init_params, - n_wires=n_wires, - wires=wires, - inverse=inverse, - ) - self.return_type = None - - def diagonalizing_gates(self): - """The diagonalizing gates when perform measurements. - - Returns: None. - - """ - raise NotImplementedError - - -class Operation(Operator, metaclass=ABCMeta): - """_summary_""" - - def __init__( - self, - has_params: bool = False, - trainable: bool = False, - init_params=None, - n_wires=None, - wires=None, - inverse=False, - ): - """_summary_ - - Args: - has_params (bool, optional): Whether the operations has parameters. - Defaults to False. - trainable (bool, optional): Whether the parameters are trainable - (if contains parameters). 
Defaults to False. - init_params (torch.Tensor, optional): Initial parameters. - Defaults to None. - n_wires (int, optional): Number of qubits. Defaults to None. - wires (Union[int, List[int]], optional): Which qubit the operation is applied to. - Defaults to None. - """ - super().__init__( - has_params=has_params, - trainable=trainable, - init_params=init_params, - n_wires=n_wires, - wires=wires, - inverse=inverse, - ) - if type(self.num_wires) == int: - self.n_wires = self.num_wires - - @property - def matrix(self): - """The unitary matrix of the operator.""" - op_matrix = self._matrix(self.params) - - return op_matrix - - @property - def eigvals(self): - """ "The eigenvalues of the unitary matrix of the operator. - - Returns: - torch.Tensor: Eigenvalues. - - """ - op_eigvals = self._eigvals(self.params) - - return op_eigvals - - def init_params(self): - """Initialize the parameters. - - Raises: - NotImplementedError: The init param function is not implemented. - """ - raise NotImplementedError - - def build_params(self, trainable): - """Build parameters. - - Args: - trainable (bool): Whether the parameters are trainable. - - Returns: - torch.Tensor: Built parameters. - """ - parameters = nn.Parameter(torch.empty([1, self.num_params], dtype=F_DTYPE)) - parameters.requires_grad = True if trainable else False - # self.register_parameter(f"{self.name}_params", parameters) - return parameters - - def reset_params(self, init_params=None): - """Reset parameters. - - Args: - init_params (torch.Tensor, optional): Input the initialization - parameters. Defaults to None. - """ - if init_params is not None: - if isinstance(init_params, Iterable): - for k, init_param in enumerate(init_params): - torch.nn.init.constant_(self.params[:, k], init_param) - else: - torch.nn.init.constant_(self.params, init_params) - else: - torch.nn.init.uniform_(self.params, -np.pi, np.pi) - - -class DiagonalOperation(Operation, metaclass=ABCMeta): - """Class for Diagonal Operation.""" - - @classmethod - def _eigvals(cls, params): - """The eigenvalues of the unitary matrix of the operator. - - Args: - params (torch.Tensor, optional): The parameters for parameterized - operators. - - Returns: None. - raise NotImplementedError - """ - - @property - def eigvals(self): - """The eigenvalues of the unitary matrix of the operator. - - Returns: Eigenvalues. - - """ - return super().eigvals - - @classmethod - def _matrix(cls, params): - """The unitary matrix of the operator. - - Args: - params (torch.Tensor, optional): The parameters for parameterized - operators. - - Returns: None. 
- - """ - return torch.diag(cls._eigvals(params)) - - -class Hadamard(Observable, metaclass=ABCMeta): - """Class for Hadamard Gate.""" - - num_params = 0 - num_wires = 1 - eigvals = torch.tensor([1, -1], dtype=C_DTYPE) - matrix = mat_dict["hadamard"] - func = staticmethod(tqf.hadamard) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - def diagonalizing_gates(self): - return [tq.RY(has_params=True, trainable=False, init_params=-np.pi / 4)] - - -class SHadamard(Operation, metaclass=ABCMeta): - """Class for SHadamard Gate.""" - - num_params = 0 - num_wires = 1 - matrix = mat_dict["shadamard"] - func = staticmethod(tqf.shadamard) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class PauliX(Observable, metaclass=ABCMeta): - """Class for Pauli X Gate.""" - - num_params = 0 - num_wires = 1 - eigvals = torch.tensor([1, -1], dtype=C_DTYPE) - matrix = mat_dict["paulix"] - func = staticmethod(tqf.paulix) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - def diagonalizing_gates(self): - return [tq.Hadamard()] - - -class PauliY(Observable, metaclass=ABCMeta): - """Class for Pauli Y Gate.""" - - num_params = 0 - num_wires = 1 - eigvals = torch.tensor([1, -1], dtype=C_DTYPE) - matrix = mat_dict["pauliy"] - func = staticmethod(tqf.pauliy) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - def diagonalizing_gates(self): - return [tq.PauliZ(), tq.S(), tq.Hadamard()] - - -class PauliZ(Observable, metaclass=ABCMeta): - """Class for Pauli Z Gate.""" - - num_params = 0 - num_wires = 1 - eigvals = torch.tensor([1, -1], dtype=C_DTYPE) - matrix = mat_dict["pauliz"] - func = staticmethod(tqf.pauliz) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - def diagonalizing_gates(self): - return [] - - -class I(Observable, metaclass=ABCMeta): - """Class for Identity Gate.""" - - num_params = 0 - num_wires = 1 - eigvals = torch.tensor([1, 1], dtype=C_DTYPE) - matrix = mat_dict["i"] - func = staticmethod(tqf.i) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - def diagonalizing_gates(self): - return [] - - -class S(DiagonalOperation, metaclass=ABCMeta): - """Class for S Gate.""" - - num_params = 0 - num_wires = 1 - eigvals = torch.tensor([1, 1j], dtype=C_DTYPE) - matrix = mat_dict["s"] - func = staticmethod(tqf.s) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - -class T(DiagonalOperation, metaclass=ABCMeta): - """Class for T Gate.""" - - num_params = 0 - num_wires = 1 - eigvals = torch.tensor([1, 1j], dtype=C_DTYPE) - matrix = mat_dict["t"] - func = staticmethod(tqf.t) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - -class SX(Operation, metaclass=ABCMeta): - """Class for SX Gate.""" - - num_params = 0 - num_wires = 1 - eigvals = torch.tensor([1, 1j], dtype=C_DTYPE) - matrix = mat_dict["sx"] - func = staticmethod(tqf.sx) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - -class CNOT(Operation, metaclass=ABCMeta): - """Class for 
CNOT Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["cnot"] - func = staticmethod(tqf.cnot) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class CZ(DiagonalOperation, metaclass=ABCMeta): - """Class for CZ Gate.""" - - num_params = 0 - num_wires = 2 - eigvals = np.array([1, 1, 1, -1]) - matrix = mat_dict["cz"] - func = staticmethod(tqf.cz) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - -class CY(Operation, metaclass=ABCMeta): - """Class for CY Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["cy"] - func = staticmethod(tqf.cy) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class SWAP(Operation, metaclass=ABCMeta): - """Class for SWAP Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["swap"] - func = staticmethod(tqf.swap) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class SSWAP(Operation, metaclass=ABCMeta): - """Class for SSWAP Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["sswap"] - func = staticmethod(tqf.sswap) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class CSWAP(Operation, metaclass=ABCMeta): - """Class for CSWAP Gate.""" - - num_params = 0 - num_wires = 3 - matrix = mat_dict["cswap"] - func = staticmethod(tqf.cswap) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class Toffoli(Operation, metaclass=ABCMeta): - """Class for Toffoli Gate.""" - - num_params = 0 - num_wires = 3 - matrix = mat_dict["toffoli"] - func = staticmethod(tqf.toffoli) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class RX(Operation, metaclass=ABCMeta): - """Class for RX Gate.""" - - num_params = 1 - num_wires = 1 - func = staticmethod(tqf.rx) - - @classmethod - def _matrix(cls, params): - return tqf.rx_matrix(params) - - -class RY(Operation, metaclass=ABCMeta): - """Class for RY Gate.""" - - num_params = 1 - num_wires = 1 - func = staticmethod(tqf.ry) - - @classmethod - def _matrix(cls, params): - return tqf.ry_matrix(params) - - -class RZ(DiagonalOperation, metaclass=ABCMeta): - """Class for RZ Gate.""" - - num_params = 1 - num_wires = 1 - func = staticmethod(tqf.rz) - - @classmethod - def _matrix(cls, params): - return tqf.rz_matrix(params) - - -class PhaseShift(DiagonalOperation, metaclass=ABCMeta): - """Class for PhaseShift Gate.""" - - num_params = 1 - num_wires = 1 - func = staticmethod(tqf.phaseshift) - - @classmethod - def _matrix(cls, params): - return tqf.phaseshift_matrix(params) - - -class Rot(Operation, metaclass=ABCMeta): - """Class for Rotation Gate.""" - - num_params = 3 - num_wires = 1 - func = staticmethod(tqf.rot) - - @classmethod - def _matrix(cls, params): - return tqf.rot_matrix(params) - - -class MultiRZ(DiagonalOperation, metaclass=ABCMeta): - """Class for Multi-qubit RZ Gate.""" - - num_params = 1 - num_wires = AnyWires - func = staticmethod(tqf.multirz) - - @classmethod - def _matrix(cls, params, n_wires): - return tqf.multirz_matrix(params, n_wires) - - -class RXX(Operation, metaclass=ABCMeta): - """Class for RXX Gate.""" - - num_params = 1 - num_wires = 2 - func = staticmethod(tqf.rxx) - - @classmethod - def _matrix(cls, params): - return tqf.rxx_matrix(params) - - -class RYY(Operation, metaclass=ABCMeta): - """Class for RYY Gate.""" - - num_params = 1 - num_wires = 2 - func = staticmethod(tqf.ryy) - - @classmethod - def _matrix(cls, params): - return tqf.ryy_matrix(params) - - -class 
RZZ(DiagonalOperation, metaclass=ABCMeta): - """Class for RZZ Gate.""" - - num_params = 1 - num_wires = 2 - func = staticmethod(tqf.rzz) - - @classmethod - def _matrix(cls, params): - return tqf.rzz_matrix(params) - - -class RZX(Operation, metaclass=ABCMeta): - """Class for RZX Gate.""" - - num_params = 1 - num_wires = 2 - func = staticmethod(tqf.rzx) - - @classmethod - def _matrix(cls, params): - return tqf.rzx_matrix(params) - - -class TrainableUnitary(Operation, metaclass=ABCMeta): - """Class for TrainableUnitary Gate.""" - - num_params = AnyNParams - num_wires = AnyWires - func = staticmethod(tqf.qubitunitaryfast) - - def build_params(self, trainable): - """Build the parameters for the gate. - - Args: - trainable (bool): Whether the parameters are trainble. - - Returns: - torch.Tensor: Parameters. - - """ - parameters = nn.Parameter( - torch.empty(1, 2**self.n_wires, 2**self.n_wires, dtype=C_DTYPE) - ) - parameters.requires_grad = True if trainable else False - # self.register_parameter(f"{self.name}_params", parameters) - return parameters - - def reset_params(self, init_params=None): - """Reset the parameters. - - Args: - init_params (torch.Tensor, optional): Initial parameters. - - Returns: - None. - - """ - mat = torch.randn((1, 2**self.n_wires, 2**self.n_wires), dtype=C_DTYPE) - U, Sigma, V = torch.svd(mat) - self.params.data.copy_(U.matmul(V.permute(0, 2, 1))) - - @staticmethod - def _matrix(self, params): - return tqf.qubitunitaryfast(params) - - -class TrainableUnitaryStrict(TrainableUnitary, metaclass=ABCMeta): - """Class for Strict Unitary matrix gate.""" - - num_params = AnyNParams - num_wires = AnyWires - func = staticmethod(tqf.qubitunitarystrict) - - -class CRX(Operation, metaclass=ABCMeta): - """Class for Controlled Rotation X gate.""" - - num_params = 1 - num_wires = 2 - func = staticmethod(tqf.crx) - - @classmethod - def _matrix(cls, params): - return tqf.crx_matrix(params) - - -class CRY(Operation, metaclass=ABCMeta): - """Class for Controlled Rotation Y gate.""" - - num_params = 1 - num_wires = 2 - func = staticmethod(tqf.cry) - - @classmethod - def _matrix(cls, params): - return tqf.cry_matrix(params) - - -class CRZ(Operation, metaclass=ABCMeta): - """Class for Controlled Rotation Z gate.""" - - num_params = 1 - num_wires = 2 - func = staticmethod(tqf.crz) - - @classmethod - def _matrix(cls, params): - return tqf.crz_matrix(params) - - -class CRot(Operation, metaclass=ABCMeta): - """Class for Controlled Rotation gate.""" - - num_params = 3 - num_wires = 2 - func = staticmethod(tqf.crot) - - @classmethod - def _matrix(cls, params): - return tqf.crot_matrix(params) - - -class U1(DiagonalOperation, metaclass=ABCMeta): - """Class for Controlled Rotation Y gate. U1 is the same - as phaseshift. 
- """ - - num_params = 1 - num_wires = 1 - func = staticmethod(tqf.u1) - - @classmethod - def _matrix(cls, params): - return tqf.u1_matrix(params) - - -class CU(Operation, metaclass=ABCMeta): - """Class for Controlled U gate (4-parameter two-qubit gate).""" - - num_params = 4 - num_wires = 2 - func = staticmethod(tqf.cu) - - @classmethod - def _matrix(cls, params): - return tqf.cu_matrix(params) - - -class CU1(DiagonalOperation, metaclass=ABCMeta): - """Class for controlled U1 gate.""" - - num_params = 1 - num_wires = 2 - func = staticmethod(tqf.cu1) - - @classmethod - def _matrix(cls, params): - return tqf.cu1_matrix(params) - - -class U2(Operation, metaclass=ABCMeta): - """Class for U2 gate.""" - - num_params = 2 - num_wires = 1 - func = staticmethod(tqf.u2) - - @classmethod - def _matrix(cls, params): - return tqf.u2_matrix(params) - - -class CU2(Operation, metaclass=ABCMeta): - """Class for controlled U2 gate.""" - - num_params = 2 - num_wires = 2 - func = staticmethod(tqf.cu2) - - @classmethod - def _matrix(cls, params): - return tqf.cu2_matrix(params) - - -class U3(Operation, metaclass=ABCMeta): - """Class for U3 gate.""" - - num_params = 3 - num_wires = 1 - func = staticmethod(tqf.u3) - - @classmethod - def _matrix(cls, params): - return tqf.u3_matrix(params) - - -class CU3(Operation, metaclass=ABCMeta): - """Class for Controlled U3 gate.""" - - num_params = 3 - num_wires = 2 - func = staticmethod(tqf.cu3) - - @classmethod - def _matrix(cls, params): - return tqf.cu3_matrix(params) - - -class QubitUnitary(Operation, metaclass=ABCMeta): - """Class for controlled Qubit Unitary gate.""" - - num_params = AnyNParams - num_wires = AnyWires - func = staticmethod(tqf.qubitunitary) - - @classmethod - def _matrix(cls, params): - return tqf.qubitunitary_matrix(params) - - def build_params(self, trainable): - return None - - def reset_params(self, init_params=None): - self.params = torch.tensor(init_params, dtype=C_DTYPE) - self.register_buffer(f"{self.name}_unitary", self.params) - - -class QubitUnitaryFast(Operation, metaclass=ABCMeta): - """Class for fast implementation of - controlled Qubit Unitary gate.""" - - num_params = AnyNParams - num_wires = AnyWires - func = staticmethod(tqf.qubitunitaryfast) - - def __init__( - self, - has_params: bool = False, - trainable: bool = False, - init_params=None, - n_wires=None, - wires=None, - ): - super().__init__( - has_params=True, - trainable=trainable, - init_params=init_params, - n_wires=n_wires, - wires=wires, - ) - - @classmethod - def from_controlled_operation( - cls, - op, - c_wires, - t_wires, - trainable, - ): - """ - - Args: - op: the operation - c_wires: controlled wires, will only be a set such as 1, [2,3] - t_wires: can be a list of list of wires, multiple sets - [[1,2], [3,4]] - trainable: - """ - op = op - c_wires = np.array(c_wires) - t_wires = np.array(t_wires) - trainable = trainable - # self.n_t_wires = op.n_wires - # assert len(t_wires) == op.n_wires - - orig_u = op.matrix - orig_u_n_wires = op.n_wires - - wires = [] - - if c_wires.ndim == 0: - # only one control qubit - # 1 - n_c_wires = 1 - wires.append(c_wires.item()) - elif c_wires.ndim == 1: - # multiple control qubits - # [1, 2] - n_c_wires = c_wires.shape[0] - wires.extend(list(c_wires)) - - if t_wires.ndim == 0: - # single qubit U on one set - # 2 - n_t_wires = 1 - n_set_t_wires = 1 - wires.append(t_wires.item()) - elif t_wires.ndim == 1: - # single qubit U on multiple sets - # [1, 2, 3] - # or multi qubit U on one set - # [2, 3] - n_t_wires = t_wires.shape[0] - 
n_set_t_wires = n_t_wires // orig_u_n_wires - wires.extend(list(t_wires.flatten())) - - elif t_wires.ndim == 2: - # multi qubit unitary on multiple sets - # [[2, 3], [4, 5]] - n_t_wires = t_wires.flatten().shape[0] - n_set_t_wires = n_t_wires // orig_u_n_wires - wires.extend(list(t_wires.flatten())) - - n_wires = n_c_wires + n_t_wires - - # compute the new unitary, then permute - unitary = torch.tensor(torch.zeros(2**n_wires, 2**n_wires, dtype=C_DTYPE)) - for k in range(2**n_wires - 2**n_t_wires): - unitary[k, k] = 1.0 + 0.0j - - # compute kronecker product of all the controlled target - - controlled_u = None - for k in range(n_set_t_wires): - if controlled_u is None: - controlled_u = orig_u - else: - controlled_u = torch.kron(controlled_u, orig_u) - - d_controlled_u = controlled_u.shape[-1] - unitary[-d_controlled_u:, -d_controlled_u:] = controlled_u - - return cls( - has_params=True, - trainable=trainable, - init_params=unitary, - n_wires=n_wires, - wires=wires, - ) - - @classmethod - def _matrix(cls, params): - return tqf.qubitunitaryfast_matrix(params) - - def build_params(self, trainable): - return None - - def reset_params(self, init_params=None): - self.params = torch.tensor(init_params, dtype=C_DTYPE) - self.register_buffer(f"{self.name}_unitary", self.params) - - -class MultiCNOT(Operation, metaclass=ABCMeta): - """Class for Multi qubit CNOT gate.""" - - num_params = 0 - num_wires = AnyWires - func = staticmethod(tqf.multicnot) - - @classmethod - def _matrix(cls, params, n_wires): - return tqf.multicnot_matrix(n_wires) - - @property - def matrix(self): - op_matrix = self._matrix(self.params, self.n_wires) - return op_matrix - - -class MultiXCNOT(Operation, metaclass=ABCMeta): - """Class for Multi qubit XCNOT gate.""" - - num_params = 0 - num_wires = AnyWires - func = staticmethod(tqf.multixcnot) - - @classmethod - def _matrix(cls, params, n_wires): - return tqf.multixcnot_matrix(n_wires) - - @property - def matrix(self): - op_matrix = self._matrix(self.params, self.n_wires) - return op_matrix - - -class Reset(Operator, metaclass=ABCMeta): - """Class for Reset gate.""" - - num_params = 0 - num_wires = AnyWires - func = staticmethod(tqf.reset) - - @classmethod - def _matrix(cls, params): - return None - - -class SingleExcitation(Operator, metaclass=ABCMeta): - """Class for SingleExcitation gate.""" - - num_params = 1 - num_wires = 2 - func = staticmethod(tqf.singleexcitation) - - @classmethod - def _matrix(cls, params): - return tqf.singleexcitation_matrix(params) - - -class ECR(Operation, metaclass=ABCMeta): - """Class for Echoed Cross Resonance Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["ecr"] - func = staticmethod(tqf.ecr) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class QFT(Observable, metaclass=ABCMeta): - """Class for Quantum Fourier Transform.""" - - num_params = 0 - num_wires = AnyWires - func = staticmethod(tqf.qft) - - @classmethod - def _matrix(cls, params, n_wires): - return tqf.qft_matrix(n_wires) - - -class SDG(Operation, metaclass=ABCMeta): - """Class for SDG Gate.""" - - num_params = 0 - num_wires = 1 - - matrix = mat_dict["sdg"] - func = staticmethod(tqf.sdg) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class TDG(Operation, metaclass=ABCMeta): - """Class for TDG Gate.""" - - num_params = 0 - num_wires = 1 - matrix = mat_dict["tdg"] - func = staticmethod(tqf.tdg) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class SXDG(Operation, metaclass=ABCMeta): - """Class for SXDG 
Gate.""" - - num_params = 0 - num_wires = 1 - matrix = mat_dict["sxdg"] - func = staticmethod(tqf.sxdg) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class CCZ(Operation, metaclass=ABCMeta): - """Class for CCZ Gate.""" - - num_params = 0 - num_wires = 3 - matrix = mat_dict["ccz"] - func = staticmethod(tqf.ccz) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class ISWAP(Operation, metaclass=ABCMeta): - """Class for ISWAP Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["iswap"] - func = staticmethod(tqf.iswap) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class CS(Operation, metaclass=ABCMeta): - """Class for CS Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["cs"] - eigvals = np.array([1, 1, 1, 1j]) - func = staticmethod(tqf.cs) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - -class CSDG(DiagonalOperation, metaclass=ABCMeta): - """Class for CS Dagger Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["csdg"] - eigvals = np.array([1, 1, 1, -1j]) - func = staticmethod(tqf.csdg) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - -class CSX(Operation, metaclass=ABCMeta): - """Class for CSX Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["csx"] - func = staticmethod(tqf.csx) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class CHadamard(Operation, metaclass=ABCMeta): - """Class for CHadamard Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["chadamard"] - func = staticmethod(tqf.chadamard) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class CCZ(DiagonalOperation, metaclass=ABCMeta): - """Class for CCZ Gate.""" - - num_params = 0 - num_wires = 3 - matrix = mat_dict["ccz"] - eigvals = np.array([1, 1, 1, 1, 1, 1, 1, -1]) - func = staticmethod(tqf.ccz) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - @classmethod - def _eigvals(cls, params): - return cls.eigvals - - -class DCX(Operation, metaclass=ABCMeta): - """Class for DCX Gate.""" - - num_params = 0 - num_wires = 2 - matrix = mat_dict["dcx"] - func = staticmethod(tqf.dcx) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class XXMINYY(Operation, metaclass=ABCMeta): - """Class for XXMinusYY gate.""" - - num_params = 2 - num_wires = 2 - func = staticmethod(tqf.xxminyy_matrix) - - @classmethod - def _matrix(cls, params): - return tqf.xxminyy_matrix(params) - - -class XXPLUSYY(Operation, metaclass=ABCMeta): - """Class for XXPlusYY gate.""" - - num_params = 2 - num_wires = 2 - func = staticmethod(tqf.xxplusyy_matrix) - - @classmethod - def _matrix(cls, params): - return tqf.xxplusyy_matrix(params) - - -class C3X(Operation, metaclass=ABCMeta): - """Class for C3X gate.""" - - num_params = 0 - num_wires = 4 - matrix = mat_dict["c3x"] - func = staticmethod(tqf.c3x) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class R(DiagonalOperation, metaclass=ABCMeta): - """Class for R Gate.""" - - num_params = 2 - num_wires = 1 - func = staticmethod(tqf.r) - - @classmethod - def _matrix(cls, params): - return tqf.r_matrix(params) - - -class C4X(Operation, metaclass=ABCMeta): - """Class for C4X Gate.""" - - num_params = 0 - num_wires = 5 - matrix = mat_dict["c4x"] - func = staticmethod(tqf.c4x) - - @classmethod - def _matrix(cls, params): - 
return cls.matrix - - -class RC3X(Operation, metaclass=ABCMeta): - """Class for RC3X Gate.""" - - num_params = 0 - num_wires = 4 - matrix = mat_dict["rc3x"] - func = staticmethod(tqf.rc3x) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class RCCX(Operation, metaclass=ABCMeta): - """Class for RCCX Gate.""" - - num_params = 0 - num_wires = 3 - matrix = mat_dict["rccx"] - func = staticmethod(tqf.rccx) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -class GlobalPhase(Operation, metaclass=ABCMeta): - """Class for Global Phase gate.""" - - num_params = 1 - num_wires = 0 - func = staticmethod(tqf.globalphase) - - @classmethod - def _matrix(cls, params): - return tqf.globalphase_matrix(params) - - -class C3SX(Operation, metaclass=ABCMeta): - """Class for C3SX Gate.""" - - num_params = 0 - num_wires = 4 - matrix = mat_dict["c3sx"] - func = staticmethod(tqf.c3sx) - - @classmethod - def _matrix(cls, params): - return cls.matrix - - -H = Hadamard -SH = SHadamard -EchoedCrossResonance = ECR -CH = CHadamard - -op_name_dict = { - "hadamard": Hadamard, - "h": Hadamard, - "shadamard": SHadamard, - "sh": SHadamard, - "paulix": PauliX, - "x": PauliX, - "pauliy": PauliY, - "y": PauliY, - "pauliz": PauliZ, - "z": PauliZ, - "i": I, - "s": S, - "t": T, - "sx": SX, - "cx": CNOT, - "cnot": CNOT, - "cz": CZ, - "cy": CY, - "rx": RX, - "ry": RY, - "rz": RZ, - "rxx": RXX, - "xx": RXX, - "ryy": RYY, - "yy": RYY, - "rzz": RZZ, - "zz": RZZ, - "rzx": RZX, - "zx": RZX, - "swap": SWAP, - "sswap": SSWAP, - "cswap": CSWAP, - "toffoli": Toffoli, - "ccx": Toffoli, - "phaseshift": PhaseShift, - "rot": Rot, - "multirz": MultiRZ, - "crx": CRX, - "cry": CRY, - "crz": CRZ, - "crot": CRot, - "u1": U1, - "p": U1, - "u2": U2, - "u3": U3, - "u": U3, - "cu1": CU1, - "cp": CU1, - "cr": CU1, - "cphase": CU1, - "cu2": CU2, - "cu3": CU3, - "cu": CU, - "qubitunitary": QubitUnitary, - "qubitunitarystrict": QubitUnitaryFast, - "qubitunitaryfast": QubitUnitaryFast, - "trainableunitary": TrainableUnitary, - "trainableunitarystrict": TrainableUnitaryStrict, - "multicnot": MultiCNOT, - "multixcnot": MultiXCNOT, - "reset": Reset, - "singleexcitation": SingleExcitation, - "ecr": ECR, - "echoedcrossresonance": ECR, - "QFT": QFT, - "sdg": SDG, - "cs": CS, - "chadamard": CHadamard, - "ch": CH, - "dcx": DCX, - "xxminyy": XXMINYY, - "xxplusyy": XXPLUSYY, - "c3x": C3X, - "tdg": TDG, - "sxdg": SXDG, - "ch": CH, - "ccz": CCZ, - "iswap": ISWAP, - "csdg": CSDG, - "csx": CSX, - "r": R, - "c3sx": C3SX, - "globalphase": GlobalPhase, - "rccx": RCCX, - "rc3x": RC3X, - "c4x": C4X, -} diff --git a/torchquantum/operator/standard_gates/__init__.py b/torchquantum/operator/standard_gates/__init__.py new file mode 100644 index 00000000..98f55997 --- /dev/null +++ b/torchquantum/operator/standard_gates/__init__.py @@ -0,0 +1,165 @@ +""" +MIT License + +Copyright (c) 2020-present TorchQuantum Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" + +from .ecr import EchoedCrossResonance, ECR +from .global_phase import GlobalPhase +from .i import I +from .iswap import ISWAP + +# TODO: Make paulix/y/z alias as X/Y/Z +from .paulix import PauliX, CNOT, C4X, C3X, DCX, MultiCNOT, MultiXCNOT +from .pauliy import PauliY, CY +from .pauliz import PauliZ, CZ, CCZ +from .hadamard import Hadamard, SHadamard, CHadamard, H, SH, CH +from .phase_shift import PhaseShift +from .qft import QFT +from .r import R +from .reset import Reset +from .rot import Rot, CRot +from .rx import RX, RXX, CRX +from .ry import RY, RYY, CRY +from .rz import RZ, MultiRZ, RZZ, RZX, CRZ +from .toffoli import Toffoli, CCX, RC3X, RCCX +from .qubit_unitary import QubitUnitary, QubitUnitaryFast +from .trainable_unitary import TrainableUnitary, TrainableUnitaryStrict +from .s import S, SDG, CS, CSDG +from .single_excitation import SingleExcitation +from .swap import SWAP, SSWAP, CSWAP +from .sx import SX, CSX, C3SX, SXDG +from .t import T, TDG +from .u1 import U1, CU1 +from .u2 import U2, CU2 +from .u3 import U3, CU3, CU, U +from .xx_min_yy import XXMINYY +from .xx_plus_yy import XXPLUSYY + +all_variables = [ + EchoedCrossResonance, + ECR, + GlobalPhase, + I, + ISWAP, + PauliX, + CNOT, + C4X, + C3X, + DCX, + MultiCNOT, + MultiXCNOT, + PauliY, + CY, + PauliZ, + CZ, + CCZ, + Hadamard, + SHadamard, + CHadamard, + H, + SH, + CH, + PhaseShift, + QFT, + R, + Reset, + Rot, + CRot, + RX, + RXX, + CRX, + RY, + RYY, + CRY, + RZ, + MultiRZ, + RZZ, + RZX, + CRZ, + Toffoli, + CCX, + RC3X, + RCCX, + S, + SDG, + CS, + CSDG, + SingleExcitation, + SWAP, + SSWAP, + CSWAP, + SX, + CSX, + C3SX, + SXDG, + T, + TDG, + TrainableUnitary, + TrainableUnitaryStrict, + U1, + CU1, + U2, + CU2, + U3, + CU3, + CU, + U, + XXMINYY, + XXPLUSYY, +] + +__all__ = [a().__class__.__name__ for a in all_variables] + +# add the aliased and incomptaible classes +__all__.extend(["U", "CH", "QubitUnitary", "QubitUnitaryFast"]) + +# add the dictionary +__all__.extend(["op_name_dict", "fixed_ops", "parameterized_ops"]) + +# create the operations dictionary +op_name_dict = {x.op_name: x for x in all_variables} + +# add aliases as well +op_name_dict.update( + { + "h": H, + "sh": SH, + "u": U, + "qubitunitary": QubitUnitary, + "qubitunitaryfast": QubitUnitaryFast, + "x": PauliX, + "y": PauliY, + "z": PauliZ, + "cx": CNOT, + "xx": RXX, + "yy": RYY, + "zz": RZZ, + "zx": RZX, + "ccx": Toffoli, + "p": U1, + "cp": CU1, + "cr": CU1, + } +) + +fixed_ops = [a().__class__.__name__ for a in all_variables if a.num_params == 0] +parameterized_ops = [a().__class__.__name__ for a in all_variables if a.num_params > 0] diff --git a/torchquantum/operator/standard_gates/ecr.py b/torchquantum/operator/standard_gates/ecr.py new file mode 100644 index 00000000..32202a34 --- /dev/null +++ b/torchquantum/operator/standard_gates/ecr.py @@ -0,0 +1,25 @@ +from ..op_types import Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as 
tqf + + +class ECR(Operation, metaclass=ABCMeta): + """Class for Echoed Cross Resonance Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "ecr" + matrix = mat_dict["ecr"] + func = staticmethod(tqf.ecr) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +EchoedCrossResonance = ECR +EchoedCrossResonance.name = "echoedcrossresonance" diff --git a/torchquantum/operator/standard_gates/global_phase.py b/torchquantum/operator/standard_gates/global_phase.py new file mode 100644 index 00000000..6dad8825 --- /dev/null +++ b/torchquantum/operator/standard_gates/global_phase.py @@ -0,0 +1,20 @@ +from ..op_types import Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class GlobalPhase(Operation, metaclass=ABCMeta): + """Class for Global Phase gate.""" + + num_params = 1 + num_wires = 0 + op_name = "globalphase" + func = staticmethod(tqf.globalphase) + + @classmethod + def _matrix(cls, params): + return tqf.globalphase_matrix(params) diff --git a/torchquantum/operator/standard_gates/hadamard.py b/torchquantum/operator/standard_gates/hadamard.py new file mode 100644 index 00000000..d2a62657 --- /dev/null +++ b/torchquantum/operator/standard_gates/hadamard.py @@ -0,0 +1,64 @@ +from ..op_types import * +import torch +import torch.nn as nn +import torchquantum as tq +import torchquantum.functional.functionals as tqf +import numpy as np +from abc import ABCMeta +from torchquantum.macro import C_DTYPE, F_DTYPE +from torchquantum.functional import mat_dict + + +class Hadamard(Observable, metaclass=ABCMeta): + """Class for Hadamard Gate.""" + + num_params = 0 + num_wires = 1 + op_name = "hadamard" + eigvals = torch.tensor([1, -1], dtype=C_DTYPE) + matrix = mat_dict["hadamard"] + func = staticmethod(tqf.hadamard) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals + + def diagonalizing_gates(self): + return [tq.RY(has_params=True, trainable=False, init_params=-np.pi / 4)] + + +class SHadamard(Operation, metaclass=ABCMeta): + """Class for SHadamard Gate.""" + + num_params = 0 + num_wires = 1 + op_name = "shadamard" + matrix = mat_dict["shadamard"] + func = staticmethod(tqf.shadamard) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class CHadamard(Operation, metaclass=ABCMeta): + """Class for CHadamard Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "chadamard" + matrix = mat_dict["chadamard"] + func = staticmethod(tqf.chadamard) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +H = Hadamard +SH = SHadamard +CH = CHadamard diff --git a/torchquantum/operator/standard_gates/i.py b/torchquantum/operator/standard_gates/i.py new file mode 100644 index 00000000..36a18c15 --- /dev/null +++ b/torchquantum/operator/standard_gates/i.py @@ -0,0 +1,29 @@ +from ..op_types import Observable +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class I(Observable, metaclass=ABCMeta): + """Class for Identity Gate.""" + + num_params = 0 + num_wires = 1 + op_name = "i" + eigvals = torch.tensor([1, 1], dtype=C_DTYPE) + matrix = mat_dict["i"] + func = staticmethod(tqf.i) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def 
_eigvals(cls, params): + return cls.eigvals + + def diagonalizing_gates(self): + return [] diff --git a/torchquantum/operator/standard_gates/iswap.py b/torchquantum/operator/standard_gates/iswap.py new file mode 100644 index 00000000..51bbc04c --- /dev/null +++ b/torchquantum/operator/standard_gates/iswap.py @@ -0,0 +1,21 @@ +from ..op_types import Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class ISWAP(Operation, metaclass=ABCMeta): + """Class for ISWAP Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "iswap" + matrix = mat_dict["iswap"] + func = staticmethod(tqf.iswap) + + @classmethod + def _matrix(cls, params): + return cls.matrix diff --git a/torchquantum/operator/standard_gates/paulix.py b/torchquantum/operator/standard_gates/paulix.py new file mode 100644 index 00000000..ced51e33 --- /dev/null +++ b/torchquantum/operator/standard_gates/paulix.py @@ -0,0 +1,121 @@ +from ..op_types import * +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class PauliX(Observable, metaclass=ABCMeta): + """Class for Pauli X Gate.""" + + num_params = 0 + num_wires = 1 + eigvals = torch.tensor([1, -1], dtype=C_DTYPE) + op_name = "paulix" + matrix = mat_dict["paulix"] + func = staticmethod(tqf.paulix) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals + + def diagonalizing_gates(self): + return [tq.Hadamard()] + + +class CNOT(Operation, metaclass=ABCMeta): + """Class for CNOT Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "cnot" + matrix = mat_dict["cnot"] + func = staticmethod(tqf.cnot) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class C4X(Operation, metaclass=ABCMeta): + """Class for C4X Gate.""" + + num_params = 0 + num_wires = 5 + op_name = "c4x" + matrix = mat_dict["c4x"] + func = staticmethod(tqf.c4x) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class C3X(Operation, metaclass=ABCMeta): + """Class for C3X gate.""" + + num_params = 0 + num_wires = 4 + op_name = "c3x" + matrix = mat_dict["c3x"] + func = staticmethod(tqf.c3x) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class DCX(Operation, metaclass=ABCMeta): + """Class for DCX Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "dcx" + matrix = mat_dict["dcx"] + func = staticmethod(tqf.dcx) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class MultiCNOT(Operation, metaclass=ABCMeta): + """Class for Multi qubit CNOT gate.""" + + num_params = 0 + num_wires = AnyWires + op_name = "multicnot" + func = staticmethod(tqf.multicnot) + + @classmethod + def _matrix(cls, params, n_wires): + return tqf.multicnot_matrix(n_wires) + + @property + def matrix(self): + op_matrix = self._matrix(self.params, self.n_wires) + return op_matrix + + +class MultiXCNOT(Operation, metaclass=ABCMeta): + """Class for Multi qubit XCNOT gate.""" + + num_params = 0 + num_wires = AnyWires + op_name = "multixcnot" + func = staticmethod(tqf.multixcnot) + + @classmethod + def _matrix(cls, params, n_wires): + return tqf.multixcnot_matrix(n_wires) + + @property + def matrix(self): + op_matrix = self._matrix(self.params, self.n_wires) + return op_matrix diff --git 
a/torchquantum/operator/standard_gates/pauliy.py b/torchquantum/operator/standard_gates/pauliy.py new file mode 100644 index 00000000..ed3f0556 --- /dev/null +++ b/torchquantum/operator/standard_gates/pauliy.py @@ -0,0 +1,43 @@ +from ..op_types import Observable, Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class PauliY(Observable, metaclass=ABCMeta): + """Class for Pauli Y Gate.""" + + num_params = 0 + num_wires = 1 + eigvals = torch.tensor([1, -1], dtype=C_DTYPE) + op_name = "pauliy" + matrix = mat_dict["pauliy"] + func = staticmethod(tqf.pauliy) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals + + def diagonalizing_gates(self): + return [tq.PauliZ(), tq.S(), tq.Hadamard()] + + +class CY(Operation, metaclass=ABCMeta): + """Class for CY Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "cy" + matrix = mat_dict["cy"] + func = staticmethod(tqf.cy) + + @classmethod + def _matrix(cls, params): + return cls.matrix diff --git a/torchquantum/operator/standard_gates/pauliz.py b/torchquantum/operator/standard_gates/pauliz.py new file mode 100644 index 00000000..a4903147 --- /dev/null +++ b/torchquantum/operator/standard_gates/pauliz.py @@ -0,0 +1,67 @@ +from ..op_types import Observable, DiagonalOperation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class PauliZ(Observable, metaclass=ABCMeta): + """Class for Pauli Z Gate.""" + + num_params = 0 + num_wires = 1 + eigvals = torch.tensor([1, -1], dtype=C_DTYPE) + op_name = "pauliz" + matrix = mat_dict["pauliz"] + func = staticmethod(tqf.pauliz) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals + + def diagonalizing_gates(self): + return [] + + +class CZ(DiagonalOperation, metaclass=ABCMeta): + """Class for CZ Gate.""" + + num_params = 0 + num_wires = 2 + eigvals = torch.tensor([1, 1, 1, -1], dtype=C_DTYPE) + op_name = "cz" + matrix = mat_dict["cz"] + func = staticmethod(tqf.cz) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals + + +class CCZ(DiagonalOperation, metaclass=ABCMeta): + """Class for CCZ Gate.""" + + num_params = 0 + num_wires = 3 + op_name = "ccz" + matrix = mat_dict["ccz"] + eigvals = torch.tensor([1, 1, 1, 1, 1, 1, 1, -1], dtype=C_DTYPE) + func = staticmethod(tqf.ccz) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals diff --git a/torchquantum/operator/standard_gates/phase_shift.py b/torchquantum/operator/standard_gates/phase_shift.py new file mode 100644 index 00000000..d60efd5f --- /dev/null +++ b/torchquantum/operator/standard_gates/phase_shift.py @@ -0,0 +1,20 @@ +from ..op_types import DiagonalOperation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class PhaseShift(DiagonalOperation, metaclass=ABCMeta): + """Class for PhaseShift Gate.""" + + num_params = 1 + num_wires = 1 + op_name = "phaseshift" + func = 
staticmethod(tqf.phaseshift) + + @classmethod + def _matrix(cls, params): + return tqf.phaseshift_matrix(params) diff --git a/torchquantum/operator/standard_gates/qft.py b/torchquantum/operator/standard_gates/qft.py new file mode 100644 index 00000000..66ac6022 --- /dev/null +++ b/torchquantum/operator/standard_gates/qft.py @@ -0,0 +1,20 @@ +from ..op_types import Observable, AnyWires +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class QFT(Observable, metaclass=ABCMeta): + """Class for Quantum Fourier Transform.""" + + num_params = 0 + num_wires = AnyWires + op_name = "qft" + func = staticmethod(tqf.qft) + + @classmethod + def _matrix(cls, params, n_wires): + return tqf.qft_matrix(n_wires) diff --git a/torchquantum/operator/standard_gates/qubit_unitary.py b/torchquantum/operator/standard_gates/qubit_unitary.py new file mode 100644 index 00000000..f6413ed5 --- /dev/null +++ b/torchquantum/operator/standard_gates/qubit_unitary.py @@ -0,0 +1,154 @@ +from ..op_types import * +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf +import numpy as np + + +class QubitUnitary(Operation, metaclass=ABCMeta): + """Class for controlled Qubit Unitary gate.""" + + num_params = AnyNParams + num_wires = AnyWires + op_name = "qubitunitary" + func = staticmethod(tqf.qubitunitary) + + @classmethod + def _matrix(cls, params): + return tqf.qubitunitary_matrix(params) + + def build_params(self, trainable): + return None + + def reset_params(self, init_params=None): + self.params = torch.tensor(init_params, dtype=C_DTYPE) + self.register_buffer(f"{self.name}_unitary", self.params) + + +class QubitUnitaryFast(Operation, metaclass=ABCMeta): + """Class for fast implementation of + controlled Qubit Unitary gate.""" + + num_params = AnyNParams + num_wires = AnyWires + op_name = "qubitunitaryfast" + func = staticmethod(tqf.qubitunitaryfast) + + def __init__( + self, + has_params: bool = False, + trainable: bool = False, + init_params=None, + n_wires=None, + wires=None, + ): + super().__init__( + has_params=True, + trainable=trainable, + init_params=init_params, + n_wires=n_wires, + wires=wires, + ) + + @classmethod + def from_controlled_operation( + cls, + op, + c_wires, + t_wires, + trainable, + ): + """ + + Args: + op: the operation + c_wires: controlled wires, will only be a set such as 1, [2,3] + t_wires: can be a list of list of wires, multiple sets + [[1,2], [3,4]] + trainable: + """ + op = op + c_wires = np.array(c_wires) + t_wires = np.array(t_wires) + trainable = trainable + # self.n_t_wires = op.n_wires + # assert len(t_wires) == op.n_wires + + orig_u = op.matrix + orig_u_n_wires = op.n_wires + + wires = [] + + if c_wires.ndim == 0: + # only one control qubit + # 1 + n_c_wires = 1 + wires.append(c_wires.item()) + elif c_wires.ndim == 1: + # multiple control qubits + # [1, 2] + n_c_wires = c_wires.shape[0] + wires.extend(list(c_wires)) + + if t_wires.ndim == 0: + # single qubit U on one set + # 2 + n_t_wires = 1 + n_set_t_wires = 1 + wires.append(t_wires.item()) + elif t_wires.ndim == 1: + # single qubit U on multiple sets + # [1, 2, 3] + # or multi qubit U on one set + # [2, 3] + n_t_wires = t_wires.shape[0] + n_set_t_wires = n_t_wires // orig_u_n_wires + wires.extend(list(t_wires.flatten())) + + elif 
t_wires.ndim == 2: + # multi qubit unitary on multiple sets + # [[2, 3], [4, 5]] + n_t_wires = t_wires.flatten().shape[0] + n_set_t_wires = n_t_wires // orig_u_n_wires + wires.extend(list(t_wires.flatten())) + + n_wires = n_c_wires + n_t_wires + + # compute the new unitary, then permute + unitary = torch.tensor(torch.zeros(2**n_wires, 2**n_wires, dtype=C_DTYPE)) + for k in range(2**n_wires - 2**n_t_wires): + unitary[k, k] = 1.0 + 0.0j + + # compute kronecker product of all the controlled target + + controlled_u = None + for k in range(n_set_t_wires): + if controlled_u is None: + controlled_u = orig_u + else: + controlled_u = torch.kron(controlled_u, orig_u) + + d_controlled_u = controlled_u.shape[-1] + unitary[-d_controlled_u:, -d_controlled_u:] = controlled_u + + return cls( + has_params=True, + trainable=trainable, + init_params=unitary, + n_wires=n_wires, + wires=wires, + ) + + @classmethod + def _matrix(cls, params): + return tqf.qubitunitaryfast_matrix(params) + + def build_params(self, trainable): + return None + + def reset_params(self, init_params=None): + self.params = torch.tensor(init_params, dtype=C_DTYPE) + self.register_buffer(f"{self.name}_unitary", self.params) diff --git a/torchquantum/operator/standard_gates/r.py b/torchquantum/operator/standard_gates/r.py new file mode 100644 index 00000000..34cf50e4 --- /dev/null +++ b/torchquantum/operator/standard_gates/r.py @@ -0,0 +1,20 @@ +from ..op_types import DiagonalOperation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class R(DiagonalOperation, metaclass=ABCMeta): + """Class for R Gate.""" + + num_params = 2 + num_wires = 1 + op_name = "r" + func = staticmethod(tqf.r) + + @classmethod + def _matrix(cls, params): + return tqf.r_matrix(params) diff --git a/torchquantum/operator/standard_gates/reset.py b/torchquantum/operator/standard_gates/reset.py new file mode 100644 index 00000000..d97c4de7 --- /dev/null +++ b/torchquantum/operator/standard_gates/reset.py @@ -0,0 +1,20 @@ +from ..op_types import Operator, AnyWires +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class Reset(Operator, metaclass=ABCMeta): + """Class for Reset gate.""" + + num_params = 0 + num_wires = AnyWires + op_name = "reset" + func = staticmethod(tqf.reset) + + @classmethod + def _matrix(cls, params): + return None diff --git a/torchquantum/operator/standard_gates/rot.py b/torchquantum/operator/standard_gates/rot.py new file mode 100644 index 00000000..ba9fd8bb --- /dev/null +++ b/torchquantum/operator/standard_gates/rot.py @@ -0,0 +1,33 @@ +from ..op_types import Observable, Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class Rot(Operation, metaclass=ABCMeta): + """Class for Rotation Gate.""" + + num_params = 3 + num_wires = 1 + op_name = "rot" + func = staticmethod(tqf.rot) + + @classmethod + def _matrix(cls, params): + return tqf.rot_matrix(params) + + +class CRot(Operation, metaclass=ABCMeta): + """Class for Controlled Rotation gate.""" + + num_params = 3 + num_wires = 2 + op_name = "crot" + func = staticmethod(tqf.crot) + + @classmethod + def _matrix(cls, params): + 
return tqf.crot_matrix(params) diff --git a/torchquantum/operator/standard_gates/rx.py b/torchquantum/operator/standard_gates/rx.py new file mode 100644 index 00000000..fa805e26 --- /dev/null +++ b/torchquantum/operator/standard_gates/rx.py @@ -0,0 +1,46 @@ +from ..op_types import Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class RX(Operation, metaclass=ABCMeta): + """Class for RX Gate.""" + + num_params = 1 + num_wires = 1 + op_name = "rx" + func = staticmethod(tqf.rx) + + @classmethod + def _matrix(cls, params): + return tqf.rx_matrix(params) + + +class RXX(Operation, metaclass=ABCMeta): + """Class for RXX Gate.""" + + num_params = 1 + num_wires = 2 + op_name = "rxx" + func = staticmethod(tqf.rxx) + + @classmethod + def _matrix(cls, params): + return tqf.rxx_matrix(params) + + +class CRX(Operation, metaclass=ABCMeta): + """Class for Controlled Rotation X gate.""" + + num_params = 1 + num_wires = 2 + op_name = "crx" + func = staticmethod(tqf.crx) + + @classmethod + def _matrix(cls, params): + return tqf.crx_matrix(params) diff --git a/torchquantum/operator/standard_gates/ry.py b/torchquantum/operator/standard_gates/ry.py new file mode 100644 index 00000000..f4c7a7a5 --- /dev/null +++ b/torchquantum/operator/standard_gates/ry.py @@ -0,0 +1,46 @@ +from ..op_types import Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class RY(Operation, metaclass=ABCMeta): + """Class for RY Gate.""" + + num_params = 1 + num_wires = 1 + op_name = "ry" + func = staticmethod(tqf.ry) + + @classmethod + def _matrix(cls, params): + return tqf.ry_matrix(params) + + +class RYY(Operation, metaclass=ABCMeta): + """Class for RYY Gate.""" + + num_params = 1 + num_wires = 2 + op_name = "ryy" + func = staticmethod(tqf.ryy) + + @classmethod + def _matrix(cls, params): + return tqf.ryy_matrix(params) + + +class CRY(Operation, metaclass=ABCMeta): + """Class for Controlled Rotation Y gate.""" + + num_params = 1 + num_wires = 2 + op_name = "cry" + func = staticmethod(tqf.cry) + + @classmethod + def _matrix(cls, params): + return tqf.cry_matrix(params) diff --git a/torchquantum/operator/standard_gates/rz.py b/torchquantum/operator/standard_gates/rz.py new file mode 100644 index 00000000..91dee8db --- /dev/null +++ b/torchquantum/operator/standard_gates/rz.py @@ -0,0 +1,72 @@ +from ..op_types import * +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class RZ(DiagonalOperation, metaclass=ABCMeta): + """Class for RZ Gate.""" + + num_params = 1 + num_wires = 1 + op_name = "rz" + func = staticmethod(tqf.rz) + + @classmethod + def _matrix(cls, params): + return tqf.rz_matrix(params) + + +class MultiRZ(DiagonalOperation, metaclass=ABCMeta): + """Class for Multi-qubit RZ Gate.""" + + num_params = 1 + num_wires = AnyWires + op_name = "multirz" + func = staticmethod(tqf.multirz) + + @classmethod + def _matrix(cls, params, n_wires): + return tqf.multirz_matrix(params, n_wires) + + +class RZZ(DiagonalOperation, metaclass=ABCMeta): + """Class for RZZ Gate.""" + + num_params = 1 + num_wires = 2 + op_name = "rzz" + func = staticmethod(tqf.rzz) + + 
@classmethod + def _matrix(cls, params): + return tqf.rzz_matrix(params) + + +class RZX(Operation, metaclass=ABCMeta): + """Class for RZX Gate.""" + + num_params = 1 + num_wires = 2 + op_name = "rzx" + func = staticmethod(tqf.rzx) + + @classmethod + def _matrix(cls, params): + return tqf.rzx_matrix(params) + + +class CRZ(Operation, metaclass=ABCMeta): + """Class for Controlled Rotation Z gate.""" + + num_params = 1 + num_wires = 2 + op_name = "crz" + func = staticmethod(tqf.crz) + + @classmethod + def _matrix(cls, params): + return tqf.crz_matrix(params) diff --git a/torchquantum/operator/standard_gates/s.py b/torchquantum/operator/standard_gates/s.py new file mode 100644 index 00000000..b91aa0c3 --- /dev/null +++ b/torchquantum/operator/standard_gates/s.py @@ -0,0 +1,79 @@ +from ..op_types import Operation, DiagonalOperation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class S(DiagonalOperation, metaclass=ABCMeta): + """Class for S Gate.""" + + num_params = 0 + num_wires = 1 + eigvals = torch.tensor([1, 1j], dtype=C_DTYPE) + op_name = "s" + matrix = mat_dict["s"] + func = staticmethod(tqf.s) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals + + +class SDG(Operation, metaclass=ABCMeta): + """Class for SDG Gate.""" + + num_params = 0 + num_wires = 1 + + op_name = "sdg" + matrix = mat_dict["sdg"] + func = staticmethod(tqf.sdg) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class CS(Operation, metaclass=ABCMeta): + """Class for CS Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "cs" + matrix = mat_dict["cs"] + eigvals = torch.tensor([1, 1, 1, 1j], dtype=C_DTYPE) + func = staticmethod(tqf.cs) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals + + +class CSDG(DiagonalOperation, metaclass=ABCMeta): + """Class for CS Dagger Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "csdg" + matrix = mat_dict["csdg"] + eigvals = torch.tensor([1, 1, 1, -1j], dtype=C_DTYPE) + func = staticmethod(tqf.csdg) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals diff --git a/torchquantum/operator/standard_gates/single_excitation.py b/torchquantum/operator/standard_gates/single_excitation.py new file mode 100644 index 00000000..2011aca1 --- /dev/null +++ b/torchquantum/operator/standard_gates/single_excitation.py @@ -0,0 +1,20 @@ +from ..op_types import Operator +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class SingleExcitation(Operator, metaclass=ABCMeta): + """Class for SingleExcitation gate.""" + + num_params = 1 + num_wires = 2 + op_name = "singleexcitation" + func = staticmethod(tqf.singleexcitation) + + @classmethod + def _matrix(cls, params): + return tqf.singleexcitation_matrix(params) diff --git a/torchquantum/operator/standard_gates/swap.py b/torchquantum/operator/standard_gates/swap.py new file mode 100644 index 00000000..1214a4c5 --- /dev/null +++ b/torchquantum/operator/standard_gates/swap.py @@ -0,0 +1,49 @@ +from ..op_types import Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE 
+import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class SWAP(Operation, metaclass=ABCMeta): + """Class for SWAP Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "swap" + matrix = mat_dict["swap"] + func = staticmethod(tqf.swap) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class SSWAP(Operation, metaclass=ABCMeta): + """Class for SSWAP Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "sswap" + matrix = mat_dict["sswap"] + func = staticmethod(tqf.sswap) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class CSWAP(Operation, metaclass=ABCMeta): + """Class for CSWAP Gate.""" + + num_params = 0 + num_wires = 3 + op_name = "cswap" + matrix = mat_dict["cswap"] + func = staticmethod(tqf.cswap) + + @classmethod + def _matrix(cls, params): + return cls.matrix diff --git a/torchquantum/operator/standard_gates/sx.py b/torchquantum/operator/standard_gates/sx.py new file mode 100644 index 00000000..5728839a --- /dev/null +++ b/torchquantum/operator/standard_gates/sx.py @@ -0,0 +1,68 @@ +from ..op_types import Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class SX(Operation, metaclass=ABCMeta): + """Class for SX Gate.""" + + num_params = 0 + num_wires = 1 + eigvals = torch.tensor([1, 1j], dtype=C_DTYPE) + op_name = "sx" + matrix = mat_dict["sx"] + func = staticmethod(tqf.sx) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals + + +class CSX(Operation, metaclass=ABCMeta): + """Class for CSX Gate.""" + + num_params = 0 + num_wires = 2 + op_name = "csx" + matrix = mat_dict["csx"] + func = staticmethod(tqf.csx) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class C3SX(Operation, metaclass=ABCMeta): + """Class for C3SX Gate.""" + + num_params = 0 + num_wires = 4 + op_name = "c3sx" + matrix = mat_dict["c3sx"] + func = staticmethod(tqf.c3sx) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class SXDG(Operation, metaclass=ABCMeta): + """Class for SXDG Gate.""" + + num_params = 0 + num_wires = 1 + op_name = "sxdg" + matrix = mat_dict["sxdg"] + func = staticmethod(tqf.sxdg) + + @classmethod + def _matrix(cls, params): + return cls.matrix diff --git a/torchquantum/operator/standard_gates/t.py b/torchquantum/operator/standard_gates/t.py new file mode 100644 index 00000000..282fc68b --- /dev/null +++ b/torchquantum/operator/standard_gates/t.py @@ -0,0 +1,40 @@ +from ..op_types import DiagonalOperation, Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class T(DiagonalOperation, metaclass=ABCMeta): + """Class for T Gate.""" + + num_params = 0 + num_wires = 1 + op_name = "t" + eigvals = torch.tensor([1, 1j], dtype=C_DTYPE) + matrix = mat_dict["t"] + func = staticmethod(tqf.t) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + @classmethod + def _eigvals(cls, params): + return cls.eigvals + + +class TDG(Operation, metaclass=ABCMeta): + """Class for TDG Gate.""" + + num_params = 0 + num_wires = 1 + op_name = "tdg" + matrix = mat_dict["tdg"] + func = staticmethod(tqf.tdg) + + @classmethod + def 
_matrix(cls, params): + return cls.matrix diff --git a/torchquantum/operator/standard_gates/toffoli.py b/torchquantum/operator/standard_gates/toffoli.py new file mode 100644 index 00000000..d70178b0 --- /dev/null +++ b/torchquantum/operator/standard_gates/toffoli.py @@ -0,0 +1,52 @@ +from ..op_types import Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class Toffoli(Operation, metaclass=ABCMeta): + """Class for Toffoli Gate.""" + + num_params = 0 + num_wires = 3 + op_name = "toffoli" + matrix = mat_dict["toffoli"] + func = staticmethod(tqf.toffoli) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +CCX = Toffoli + + +class RC3X(Operation, metaclass=ABCMeta): + """Class for RC3X Gate.""" + + num_params = 0 + num_wires = 4 + op_name = "rc3x" + matrix = mat_dict["rc3x"] + func = staticmethod(tqf.rc3x) + + @classmethod + def _matrix(cls, params): + return cls.matrix + + +class RCCX(Operation, metaclass=ABCMeta): + """Class for RCCX Gate.""" + + num_params = 0 + num_wires = 3 + op_name = "rccx" + matrix = mat_dict["rccx"] + func = staticmethod(tqf.rccx) + + @classmethod + def _matrix(cls, params): + return cls.matrix diff --git a/torchquantum/operator/standard_gates/trainable_unitary.py b/torchquantum/operator/standard_gates/trainable_unitary.py new file mode 100644 index 00000000..c6ea0451 --- /dev/null +++ b/torchquantum/operator/standard_gates/trainable_unitary.py @@ -0,0 +1,60 @@ +from ..op_types import * +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class TrainableUnitary(Operation, metaclass=ABCMeta): + """Class for TrainableUnitary Gate.""" + + num_params = AnyNParams + num_wires = AnyWires + op_name = "trainableunitary" + func = staticmethod(tqf.qubitunitaryfast) + + def build_params(self, trainable): + """Build the parameters for the gate. + + Args: + trainable (bool): Whether the parameters are trainble. + + Returns: + torch.Tensor: Parameters. + + """ + parameters = nn.Parameter( + torch.empty(1, 2**self.n_wires, 2**self.n_wires, dtype=C_DTYPE) + ) + parameters.requires_grad = True if trainable else False + # self.register_parameter(f"{self.name}_params", parameters) + return parameters + + def reset_params(self, init_params=None): + """Reset the parameters. + + Args: + init_params (torch.Tensor, optional): Initial parameters. + + Returns: + None. 
+ + """ + mat = torch.randn((1, 2**self.n_wires, 2**self.n_wires), dtype=C_DTYPE) + U, Sigma, V = torch.svd(mat) + self.params.data.copy_(U.matmul(V.permute(0, 2, 1))) + + @staticmethod + def _matrix(self, params): + return tqf.qubitunitaryfast(params) + + +class TrainableUnitaryStrict(TrainableUnitary, metaclass=ABCMeta): + """Class for Strict Unitary matrix gate.""" + + num_params = AnyNParams + num_wires = AnyWires + op_name = "trainableunitarystrict" + func = staticmethod(tqf.qubitunitarystrict) diff --git a/torchquantum/operator/standard_gates/u1.py b/torchquantum/operator/standard_gates/u1.py new file mode 100644 index 00000000..e29728f1 --- /dev/null +++ b/torchquantum/operator/standard_gates/u1.py @@ -0,0 +1,35 @@ +from ..op_types import Observable, DiagonalOperation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class U1(DiagonalOperation, metaclass=ABCMeta): + """Class for Controlled Rotation Y gate. U1 is the same + as phaseshift. + """ + + num_params = 1 + num_wires = 1 + op_name = "u1" + func = staticmethod(tqf.u1) + + @classmethod + def _matrix(cls, params): + return tqf.u1_matrix(params) + + +class CU1(DiagonalOperation, metaclass=ABCMeta): + """Class for controlled U1 gate.""" + + num_params = 1 + num_wires = 2 + op_name = "cu1" + func = staticmethod(tqf.cu1) + + @classmethod + def _matrix(cls, params): + return tqf.cu1_matrix(params) diff --git a/torchquantum/operator/standard_gates/u2.py b/torchquantum/operator/standard_gates/u2.py new file mode 100644 index 00000000..bd22c777 --- /dev/null +++ b/torchquantum/operator/standard_gates/u2.py @@ -0,0 +1,33 @@ +from ..op_types import Observable, Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class U2(Operation, metaclass=ABCMeta): + """Class for U2 gate.""" + + num_params = 2 + num_wires = 1 + op_name = "u2" + func = staticmethod(tqf.u2) + + @classmethod + def _matrix(cls, params): + return tqf.u2_matrix(params) + + +class CU2(Operation, metaclass=ABCMeta): + """Class for controlled U2 gate.""" + + num_params = 2 + num_wires = 2 + op_name = "cu2" + func = staticmethod(tqf.cu2) + + @classmethod + def _matrix(cls, params): + return tqf.cu2_matrix(params) diff --git a/torchquantum/operator/standard_gates/u3.py b/torchquantum/operator/standard_gates/u3.py new file mode 100644 index 00000000..62279194 --- /dev/null +++ b/torchquantum/operator/standard_gates/u3.py @@ -0,0 +1,49 @@ +from ..op_types import Observable, Operation +from abc import ABCMeta +from torchquantum.macro import C_DTYPE +import torchquantum as tq +import torch +from torchquantum.functional import mat_dict +import torchquantum.functional.functionals as tqf + + +class U3(Operation, metaclass=ABCMeta): + """Class for U3 gate.""" + + num_params = 3 + num_wires = 1 + op_name = "u3" + func = staticmethod(tqf.u3) + + @classmethod + def _matrix(cls, params): + return tqf.u3_matrix(params) + + +U = U3 + + +class CU3(Operation, metaclass=ABCMeta): + """Class for Controlled U3 gate.""" + + num_params = 3 + num_wires = 2 + op_name = "cu3" + func = staticmethod(tqf.cu3) + + @classmethod + def _matrix(cls, params): + return tqf.cu3_matrix(params) + + +class CU(Operation, metaclass=ABCMeta): + """Class for Controlled U gate (4-parameter two-qubit 
gate)."""
+
+    num_params = 4
+    num_wires = 2
+    op_name = "cu"
+    func = staticmethod(tqf.cu)
+
+    @classmethod
+    def _matrix(cls, params):
+        return tqf.cu_matrix(params)
diff --git a/torchquantum/operator/standard_gates/xx_min_yy.py b/torchquantum/operator/standard_gates/xx_min_yy.py
new file mode 100644
index 00000000..5a5e4b00
--- /dev/null
+++ b/torchquantum/operator/standard_gates/xx_min_yy.py
@@ -0,0 +1,20 @@
+from ..op_types import Observable, Operation
+from abc import ABCMeta
+from torchquantum.macro import C_DTYPE
+import torchquantum as tq
+import torch
+from torchquantum.functional import mat_dict
+import torchquantum.functional.functionals as tqf
+
+
+class XXMINYY(Operation, metaclass=ABCMeta):
+    """Class for XXMinusYY gate."""
+
+    num_params = 2
+    num_wires = 2
+    op_name = "xxminyy"
+    func = staticmethod(tqf.xxminyy)
+
+    @classmethod
+    def _matrix(cls, params):
+        return tqf.xxminyy_matrix(params)
diff --git a/torchquantum/operator/standard_gates/xx_plus_yy.py b/torchquantum/operator/standard_gates/xx_plus_yy.py
new file mode 100644
index 00000000..68de1c7b
--- /dev/null
+++ b/torchquantum/operator/standard_gates/xx_plus_yy.py
@@ -0,0 +1,20 @@
+from ..op_types import Observable, Operation
+from abc import ABCMeta
+from torchquantum.macro import C_DTYPE
+import torchquantum as tq
+import torch
+from torchquantum.functional import mat_dict
+import torchquantum.functional.functionals as tqf
+
+
+class XXPLUSYY(Operation, metaclass=ABCMeta):
+    """Class for XXPlusYY gate."""
+
+    num_params = 2
+    num_wires = 2
+    op_name = "xxplusyy"
+    func = staticmethod(tqf.xxplusyy)
+
+    @classmethod
+    def _matrix(cls, params):
+        return tqf.xxplusyy_matrix(params)
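
Usage sketch for the new gate registry introduced in the standard_gates __init__.py above. This is not part of the patch; it assumes op_name_dict is importable from torchquantum.operator.standard_gates as defined there, and that operator instances follow the usual torchquantum calling convention op(qdev, wires=...).

import torchquantum as tq
from torchquantum.operator.standard_gates import op_name_dict

# Gate classes are looked up by their lower-case op_name; aliases such as
# "x", "cx", and "p" resolve to PauliX, CNOT, and U1 respectively.
XGate = op_name_dict["x"]
CRXGate = op_name_dict["crx"]

qdev = tq.QuantumDevice(n_wires=2)

# Fixed gates take no parameters and can be applied directly.
XGate()(qdev, wires=0)

# Parameterized gates carry their angle when built with has_params=True.
crx = CRXGate(has_params=True, init_params=0.25)
crx(qdev, wires=[0, 1])

print(qdev.get_states_1d())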
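
QubitUnitaryFast.from_controlled_operation builds one controlled unitary from an existing operation, a set of control wires, and one or more sets of target wires. A minimal sketch of how it might be called, assuming the classmethod behaves as written in the diff (control wires given as a scalar or 1-D list, target wires as a scalar, 1-D, or 2-D list) and that an operator constructed with fixed wires can be applied by passing only the device.

import torchquantum as tq
from torchquantum.operator.standard_gates import QubitUnitaryFast

# Start from an ordinary single-qubit rotation.
rx = tq.RX(has_params=True, init_params=0.25)

# Build a doubly-controlled version: controls on wires 0 and 1, target on wire 2.
ccrx = QubitUnitaryFast.from_controlled_operation(
    op=rx,
    c_wires=[0, 1],
    t_wires=2,
    trainable=False,
)

qdev = tq.QuantumDevice(n_wires=3)
ccrx(qdev)  # wires were fixed at construction time
print(qdev.get_states_1d())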
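
The __init__.py also exports fixed_ops and parameterized_ops, which partition the registered class names by num_params, and every class carries num_params / num_wires metadata plus a matrix (as a class attribute for fixed gates, or computed from the stored parameters for parameterized ones). A small introspection sketch under the same import assumption:

import torchquantum as tq
from torchquantum.operator.standard_gates import (
    op_name_dict,
    fixed_ops,
    parameterized_ops,
)

# Class names are partitioned by whether the gate takes parameters.
assert "Hadamard" in fixed_ops
assert "RX" in parameterized_ops

# Each registered class advertises its arity; fixed gates expose their matrix directly.
for name in ["hadamard", "ecr", "iswap"]:
    cls = op_name_dict[name]
    print(name, cls.num_params, cls.num_wires, cls.matrix.shape)

# Parameterized gates build their matrix from the stored parameters instead.
rx = tq.RX(has_params=True, init_params=0.25)
print(rx.matrix)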
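
TrainableUnitary allocates a (1, 2**n_wires, 2**n_wires) complex parameter tensor and initializes it to a random unitary via the SVD projection in reset_params. A sketch of using it as a generic trainable n-qubit layer, assuming TrainableUnitary is re-exported at the top-level torchquantum namespace like the other gates (otherwise import it from torchquantum.operator.standard_gates):

import torchquantum as tq

# A 2-qubit trainable unitary: its parameter tensor has shape (1, 4, 4) and is
# reset to a random unitary by projecting a random matrix via SVD.
u = tq.TrainableUnitary(has_params=True, trainable=True, n_wires=2)
print(u.params.shape, u.params.requires_grad)

qdev = tq.QuantumDevice(n_wires=2)
u(qdev, wires=[0, 1])

# The unitary participates in autograd like any other nn.Parameter.
loss = qdev.get_states_1d().abs().pow(2)[0, 0]
loss.backward()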