fix module import problem
Jue-Xu committed Jul 28, 2024
1 parent 2ba0530 commit c062f05
Showing 5 changed files with 186 additions and 168 deletions.
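The change itself is small: bare in-package imports are replaced with package-qualified ones, so the modules resolve when quantum_simulation_recipe is installed and imported from outside its own source directory. A minimal sketch of the pattern applied in this commit (the import lines are taken from the diff below; the before/after comments are illustrative):

# quantum_simulation_recipe/bounds.py
# before (resolves only when the working directory is the package folder):
#   from measure import commutator, norm
# after (resolves wherever the installed package is importable):
from quantum_simulation_recipe.measure import commutator, norm
from quantum_simulation_recipe.spin import *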
4 changes: 2 additions & 2 deletions quantum_simulation_recipe/bounds.py
@@ -5,7 +5,7 @@
import scipy.sparse.linalg as ssla

import numpy as np
from measure import commutator, norm
from quantum_simulation_recipe.measure import commutator, norm

# def commutator(A, B):
# return A @ B - B @ A
@@ -187,7 +187,7 @@ def analytic_loose_commutator_bound(n, J, h, dt, pbc=False, verbose=False):
analytic_error_bound = c1 * dt**3 / 12 + c2 * dt**3 / 24
return analytic_error_bound

from spin import *
from quantum_simulation_recipe.spin import *

# def relaxed_commutator_bound(n, J, h, dt, verbose=False):
# hnn = Nearest_Neighbour_1d(n, Jx=J, Jy=J, Jz=J, hx=h, hy=0, hz=0, pbc=False, verbose=False)
222 changes: 111 additions & 111 deletions quantum_simulation_recipe/measure.py
@@ -1,11 +1,11 @@
import numpy as np
from jax.scipy.linalg import expm
import jax.numpy as jnp
from scipy.sparse import csr_matrix, csc_matrix

from bounds import *
from trotter import pf

pf_r = pf
# from quantum_simulation_recipe.bounds import *
# from quantum_simulation_recipe.trotter import pf
# pf_r = pf

def commutator(A, B):
return A @ B - B @ A
@@ -25,114 +25,114 @@ def norm(A, ord='spectral'):
# raise ValueError('norm is not defined')
return np.linalg.norm(A, ord=ord)

def measure_error(r, h_list, t, exact_U, type, rand_states=[], ob=None, pf_ord=2, coeffs=[], use_jax=False, verbose=False, return_error_list=False):
# print(type)
if type == 'worst_empirical':
return 2 * np.linalg.norm(exact_U - pf_r(h_list, t, r, order=pf_ord, use_jax=use_jax), ord=2)
elif type == 'worst_bound':
if coeffs != []:
return 2 * tight_bound(h_list, 2, t, r) * coeffs[0]
else:
return 2 * tight_bound(h_list, 2, t, r)
elif type == 'worst_ob_empirical':
appro_U = pf_r(h_list, t, r, order=pf_ord, use_jax=use_jax)
# appro_U = pf_r(h_list, t, r, order=pf_ord)
exact_ob = exact_U.conj().T @ ob @ exact_U
appro_ob = appro_U.conj().T @ ob @ appro_U
# ob_error = np.linalg.norm(exact_ob - appro_ob, ord=2)
ob_error = np.sort(abs(np.linalg.eigvalsh(exact_ob - appro_ob)))[-1]
print('ob error (operator norm, largest eigen): ', ob_error, '; r:', r, '; t:', t)
return ob_error
elif type == 'worst_loose_bound':
return relaxed_st_bound(r, coeffs[1], coeffs[2], t, ob_type=coeffs[0])
elif type == 'lightcone_bound':
return lc_tail_bound(r, coeffs[1], coeffs[2], t, ob_type=coeffs[0], verbose=False)
# return relaxed_lc_bound(r, coeffs[1], coeffs[2], t, ob_type=coeffs[0], verbose=False)
elif type == 'average_bound':
# return tight_bound(h_list, 2, t, r, type='4')
if coeffs != []:
return 2 * tight_bound(h_list, 2, t, r, type='fro') * coeffs[0]
else:
return 2 * tight_bound(h_list, 2, t, r, type='fro')
elif type == 'average_empirical':
appro_U = pf_r(h_list, t, r, order=pf_ord, use_jax=use_jax)
err_list = [np.linalg.norm(np.outer(exact_U @ state.data.conj().T , (exact_U @ state.data.conj().T).conj().T) - np.outer(appro_U @ state.data.conj().T, (appro_U @ state.data.conj().T).conj().T), ord='nuc') for state in rand_states]
# err_list = [np.linalg.norm((exact_U - pf_r(h_list, t, r, order=pf_ord, use_jax=use_jax)) @ state.data) for state in rand_states]
if type(ob) == list:
ob_norm = sum([np.linalg.norm(o, ord=2) for o in ob])
else:
ob_norm = np.linalg.norm(ob, ord=2)
if return_error_list:
return np.array(err_list) * ob_norm
# return np.array(err_list) * np.linalg.norm(ob, ord=2)
else:
# return np.mean(err_list) * np.linalg.norm(ob, ord=2)
return np.mean(err_list) * ob_norm
elif type == 'average_ob_bound_legacy':
# elif type == 'average_ob_bound':
if isinstance(h_list[0], csr_matrix):
onestep_exactU = scipy.linalg.expm(-1j * t/r * sum([herm.toarray() for herm in h_list]))
d = len(h_list[0].toarray())
elif isinstance(h_list[0], np.ndarray):
onestep_exactU = scipy.linalg.expm(-1j * t/r * sum([herm for herm in h_list]))
d = len(h_list[0])
E_op = onestep_exactU - pf_r(h_list, t/r, 1, order=pf_ord, use_jax=use_jax)
# print((np.trace(E_op @ E_op.conj().T @ E_op @ E_op.conj().T)/d)**(1/4))
bound = 2 * r * (np.trace(E_op @ E_op.conj().T @ E_op @ E_op.conj().T)/d)**(1/4) * (np.trace(ob @ ob @ ob @ ob)/d)**(1/4)
# print(f'bound_e={bound_e}, bound={bound}')
return bound
elif type == 'average_ob_bound':
# elif type == 'average_ob_bound_nc':
if isinstance(h_list[0], csr_matrix):
d = len(h_list[0].toarray())
elif isinstance(h_list[0], np.ndarray):
d = len(h_list[0])
# if coeffs == []:
# bound = 2 * tight_bound(h_list, 2, t, r, type='4') * (np.trace(ob @ ob @ ob @ ob)/d)**(1/4)
# else:
# bound = np.sqrt(2) * tight_bound(h_list, 2, t, r, type='fro') * (sum([np.linalg.norm(ob, ord='fro') for ob in coeffs[0]])/(d+1)**(1/2))
if type(ob) == list:
bound = np.sqrt(2) * tight_bound(h_list, 2, t, r, type='fro') * sum([np.linalg.norm(o, ord='fro') for o in ob])/(d+1)**(1/2)
else:
bound = np.sqrt(2) * tight_bound(h_list, 2, t, r, type='fro') * np.linalg.norm(ob, ord='fro')/(d+1)**(1/2)
return bound
# elif type == 'observable_empirical':
elif type == 'average_ob_empirical':
approx_U = pf_r(h_list, t, r, order=pf_ord, use_jax=use_jax)
exact_final_states = [exact_U @ state.data.T for state in rand_states]
appro_final_states = [approx_U @ state.data.T for state in rand_states]
if type(ob) == list:
err_list = [sum([abs(appro_final_states[i].conj().T @ o @ appro_final_states[i] - exact_final_states[i].conj().T @ o @ exact_final_states[i]) for o in ob]) for i in range(len(rand_states))]
else:
err_list = [abs(appro_final_states[i].conj().T @ ob @ appro_final_states[i] - exact_final_states[i].conj().T @ ob @ exact_final_states[i]) for i in range(len(rand_states))]
if return_error_list:
return np.array(err_list)
else:
return np.mean(err_list)
# elif type == 'observable_bound':
# return None
else:
raise ValueError(f'type={type} is not defined!')
# def measure_error(r, h_list, t, exact_U, type, rand_states=[], ob=None, pf_ord=2, coeffs=[], use_jax=False, verbose=False, return_error_list=False):
# # print(type)
# if type == 'worst_empirical':
# return 2 * np.linalg.norm(exact_U - pf_r(h_list, t, r, order=pf_ord, use_jax=use_jax), ord=2)
# elif type == 'worst_bound':
# if coeffs != []:
# return 2 * tight_bound(h_list, 2, t, r) * coeffs[0]
# else:
# return 2 * tight_bound(h_list, 2, t, r)
# elif type == 'worst_ob_empirical':
# appro_U = pf_r(h_list, t, r, order=pf_ord, use_jax=use_jax)
# # appro_U = pf_r(h_list, t, r, order=pf_ord)
# exact_ob = exact_U.conj().T @ ob @ exact_U
# appro_ob = appro_U.conj().T @ ob @ appro_U
# # ob_error = np.linalg.norm(exact_ob - appro_ob, ord=2)
# ob_error = np.sort(abs(np.linalg.eigvalsh(exact_ob - appro_ob)))[-1]
# print('ob error (operator norm, largest eigen): ', ob_error, '; r:', r, '; t:', t)
# return ob_error
# elif type == 'worst_loose_bound':
# return relaxed_st_bound(r, coeffs[1], coeffs[2], t, ob_type=coeffs[0])
# elif type == 'lightcone_bound':
# return lc_tail_bound(r, coeffs[1], coeffs[2], t, ob_type=coeffs[0], verbose=False)
# # return relaxed_lc_bound(r, coeffs[1], coeffs[2], t, ob_type=coeffs[0], verbose=False)
# elif type == 'average_bound':
# # return tight_bound(h_list, 2, t, r, type='4')
# if coeffs != []:
# return 2 * tight_bound(h_list, 2, t, r, type='fro') * coeffs[0]
# else:
# return 2 * tight_bound(h_list, 2, t, r, type='fro')
# elif type == 'average_empirical':
# appro_U = pf_r(h_list, t, r, order=pf_ord, use_jax=use_jax)
# err_list = [np.linalg.norm(np.outer(exact_U @ state.data.conj().T , (exact_U @ state.data.conj().T).conj().T) - np.outer(appro_U @ state.data.conj().T, (appro_U @ state.data.conj().T).conj().T), ord='nuc') for state in rand_states]
# # err_list = [np.linalg.norm((exact_U - pf_r(h_list, t, r, order=pf_ord, use_jax=use_jax)) @ state.data) for state in rand_states]
# if type(ob) == list:
# ob_norm = sum([np.linalg.norm(o, ord=2) for o in ob])
# else:
# ob_norm = np.linalg.norm(ob, ord=2)
# if return_error_list:
# return np.array(err_list) * ob_norm
# # return np.array(err_list) * np.linalg.norm(ob, ord=2)
# else:
# # return np.mean(err_list) * np.linalg.norm(ob, ord=2)
# return np.mean(err_list) * ob_norm
# elif type == 'average_ob_bound_legacy':
# # elif type == 'average_ob_bound':
# if isinstance(h_list[0], csr_matrix):
# onestep_exactU = scipy.linalg.expm(-1j * t/r * sum([herm.toarray() for herm in h_list]))
# d = len(h_list[0].toarray())
# elif isinstance(h_list[0], np.ndarray):
# onestep_exactU = scipy.linalg.expm(-1j * t/r * sum([herm for herm in h_list]))
# d = len(h_list[0])
# E_op = onestep_exactU - pf_r(h_list, t/r, 1, order=pf_ord, use_jax=use_jax)
# # print((np.trace(E_op @ E_op.conj().T @ E_op @ E_op.conj().T)/d)**(1/4))
# bound = 2 * r * (np.trace(E_op @ E_op.conj().T @ E_op @ E_op.conj().T)/d)**(1/4) * (np.trace(ob @ ob @ ob @ ob)/d)**(1/4)
# # print(f'bound_e={bound_e}, bound={bound}')
# return bound
# elif type == 'average_ob_bound':
# # elif type == 'average_ob_bound_nc':
# if isinstance(h_list[0], csr_matrix):
# d = len(h_list[0].toarray())
# elif isinstance(h_list[0], np.ndarray):
# d = len(h_list[0])
# # if coeffs == []:
# # bound = 2 * tight_bound(h_list, 2, t, r, type='4') * (np.trace(ob @ ob @ ob @ ob)/d)**(1/4)
# # else:
# # bound = np.sqrt(2) * tight_bound(h_list, 2, t, r, type='fro') * (sum([np.linalg.norm(ob, ord='fro') for ob in coeffs[0]])/(d+1)**(1/2))
# if type(ob) == list:
# bound = np.sqrt(2) * tight_bound(h_list, 2, t, r, type='fro') * sum([np.linalg.norm(o, ord='fro') for o in ob])/(d+1)**(1/2)
# else:
# bound = np.sqrt(2) * tight_bound(h_list, 2, t, r, type='fro') * np.linalg.norm(ob, ord='fro')/(d+1)**(1/2)
# return bound
# # elif type == 'observable_empirical':
# elif type == 'average_ob_empirical':
# approx_U = pf_r(h_list, t, r, order=pf_ord, use_jax=use_jax)
# exact_final_states = [exact_U @ state.data.T for state in rand_states]
# appro_final_states = [approx_U @ state.data.T for state in rand_states]
# if type(ob) == list:
# err_list = [sum([abs(appro_final_states[i].conj().T @ o @ appro_final_states[i] - exact_final_states[i].conj().T @ o @ exact_final_states[i]) for o in ob]) for i in range(len(rand_states))]
# else:
# err_list = [abs(appro_final_states[i].conj().T @ ob @ appro_final_states[i] - exact_final_states[i].conj().T @ ob @ exact_final_states[i]) for i in range(len(rand_states))]
# if return_error_list:
# return np.array(err_list)
# else:
# return np.mean(err_list)
# # elif type == 'observable_bound':
# # return None
# else:
# raise ValueError(f'type={type} is not defined!')

def op_error(exact, approx, norm='spectral'):
'''
Norm of the difference between the exact and approximated operators (Frobenius or spectral, selected by the norm argument)
input:
exact: exact operator
approx: approximated operator
return: error of the operator
'''
if norm == 'fro':
return jnp.linalg.norm(exact - approx)
elif norm == 'spectral':
# if the input is in sparse csc_matrix format
if isinstance(exact, csc_matrix) and isinstance(approx, csc_matrix):
return jnp.linalg.norm(jnp.array(exact.toarray() - approx.toarray()), ord=2)
else:
return jnp.linalg.norm(exact - approx, ord=2)
else:
raise ValueError('norm is not defined')
# return np.linalg.norm(exact - approx)/len(exact)
# def op_error(exact, approx, norm='spectral'):
# '''
# Frobenius norm of the difference between the exact and approximated operator
# input:
# exact: exact operator
# approx: approximated operator
# return: error of the operator
# '''
# if norm == 'fro':
# return jnp.linalg.norm(exact - approx)
# elif norm == 'spectral':
# # if the input is in csr_matrix format
# if isinstance(exact, csc_matrix) and isinstance(approx, csc_matrix):
# return jnp.linalg.norm(jnp.array(exact.toarray() - approx.toarray()), ord=2)
# else:
# return jnp.linalg.norm(exact - approx, ord=2)
# else:
# raise ValueError('norm is not defined')
# # return np.linalg.norm(exact - approx)/len(exact)


# # evaluate trotter error for different number of trotter steps
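The large commented-out blocks above appear to break an import cycle: bounds.py now imports commutator and norm from quantum_simulation_recipe.measure, while measure.py previously pulled in bounds and trotter, so keeping those imports live would make the two modules import each other at load time. A minimal sketch of the cycle being avoided (hypothetical minimal file contents, for illustration only):

# quantum_simulation_recipe/bounds.py
from quantum_simulation_recipe.measure import commutator, norm   # bounds -> measure

# quantum_simulation_recipe/measure.py
# from quantum_simulation_recipe.bounds import *   # measure -> bounds would close the cycle,
#                                                  # so this commit keeps it commented out

A side note on the retired measure_error: its string argument shadows the builtin type, so the type(ob) == list branches would raise a TypeError if reached; isinstance(ob, list) would avoid that if the function is revived.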
84 changes: 51 additions & 33 deletions quantum_simulation_recipe/test.ipynb
@@ -119,39 +119,6 @@
"plh.ham_xyz"
]
},
{
"cell_type": "markdown",
"id": "e1692c9e",
"metadata": {},
"source": [
"## Trotter (Product formula)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bcf5fb1c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<16x16 sparse matrix of type '<class 'numpy.complex128'>'\n",
"\twith 70 stored elements in Compressed Sparse Row format>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from trotter import pf_r\n",
"\n",
"pf_r(nnh.ham_par, 2, 100, order=2)\n",
"pf_r(nnh.ham_par, 2, 100, order=2, use_jax=True)\n",
"pf_r([term.to_matrix(True) for term in nnh.ham_par], 2, 100, order=2)"
]
},
{
"cell_type": "markdown",
"id": "d7fe9c22",
@@ -329,6 +296,57 @@
"h2.jw"
]
},
{
"cell_type": "markdown",
"id": "c7f7990d",
"metadata": {},
"source": [
"## Trotter (Product formula)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "85dc8331",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<16x16 sparse matrix of type '<class 'numpy.complex128'>'\n",
"\twith 70 stored elements in Compressed Sparse Row format>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from trotter import pf_r\n",
"\n",
"pf_r(nnh.ham_par, 2, 100, order=2)\n",
"pf_r(nnh.ham_par, 2, 100, order=2, use_jax=True)\n",
"pf_r([term.to_matrix(True) for term in nnh.ham_par], 2, 100, order=2)"
]
},
{
"cell_type": "markdown",
"id": "55a3dfba",
"metadata": {},
"source": [
"### bounds"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "63ed8f8e",
"metadata": {},
"outputs": [],
"source": [
"from bounds import *"
]
},
{
"cell_type": "markdown",
"id": "f981124d",
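The notebook cells above still use the bare module names (from trotter import pf_r, from bounds import *), which resolve because test.ipynb sits inside the package directory. A hedged sketch of the package-qualified equivalents, consistent with the rest of this commit (assuming trotter exposes pf_r, as the notebook's import suggests):

from quantum_simulation_recipe.trotter import pf_r
from quantum_simulation_recipe.bounds import *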
