Added x normalization #143

Merged 2 commits on Apr 14, 2025.
6 changes: 3 additions & 3 deletions src/diffpy/snmf/main.py
@@ -27,6 +27,6 @@
  # print(f"My final guess for Y: {my_model.Y}")
  # print(f"Compare to true X: {X_norm}")
  # print(f"Compare to true Y: {Y_norm}")
- np.savetxt("my_new_X.txt", my_model.X, fmt="%.6g", delimiter=" ")
- np.savetxt("my_new_Y.txt", my_model.Y, fmt="%.6g", delimiter=" ")
- np.savetxt("my_new_A.txt", my_model.A, fmt="%.6g", delimiter=" ")
+ np.savetxt("my_norm_X.txt", my_model.X, fmt="%.6g", delimiter=" ")
+ np.savetxt("my_norm_Y.txt", my_model.Y, fmt="%.6g", delimiter=" ")
+ np.savetxt("my_norm_A.txt", my_model.A, fmt="%.6g", delimiter=" ")
33 changes: 30 additions & 3 deletions src/diffpy/snmf/snmf_class.py
@@ -80,17 +80,44 @@ def __init__(self, MM, Y0=None, X0=None, A=None, rho=1e12, eta=610, maxiter=300,
             )

             # Convergence check: Stop if diffun is small and at least 20 iterations have passed
-            print(self.objective_difference, " < ", self.objective_function * 1e-6)
-            if self.objective_difference < self.objective_function * 1e-6 and outiter >= 20:
+            # MATLAB uses 1e-6 but also gets faster convergence, so this makes up that difference
+            print(self.objective_difference, " < ", self.objective_function * 5e-7)
+            if self.objective_difference < self.objective_function * 5e-7 and outiter >= 20:
                 break

         # Normalize our results
         # TODO make this much cleaner
         Y_row_max = np.max(self.Y, axis=1, keepdims=True)
         self.Y = self.Y / Y_row_max
         A_row_max = np.max(self.A, axis=1, keepdims=True)
         self.A = self.A / A_row_max
-        # TODO loop to normalize X (currently not normalized)
+        # Loop to normalize X: effectively re-run the fit with the normalized Y/A as inputs,
+        # keeping them fixed and starting from the non-normalized X, then update only X.
+        # Reset the difference trackers and initialize.
+        self.preX = self.X.copy()  # previously stored X (plays the role of X0 here)
+        self.GraX = np.zeros_like(self.X)  # gradient of X (zeros for now)
+        self.preGraX = np.zeros_like(self.X)  # previous gradient of X (zeros for now)
+        self.R = self.get_residual_matrix()
+        self.objective_function = self.get_objective_function()
+        self.objective_difference = None
+        self.objective_history = [self.objective_function]
+        self.outiter = 0
+        self.iter = 0
+        for outiter in range(100):
+            if outiter == 1:
+                self.iter = 1  # so the step size can adapt without an inner loop
+            self.updateX()
+            self.R = self.get_residual_matrix()
+            self.objective_function = self.get_objective_function()
+            print(f"Objective function after normX: {self.objective_function:.5e}")
+            self.objective_history.append(self.objective_function)
+            self.objective_difference = self.objective_history[-2] - self.objective_history[-1]
+            if self.objective_difference < self.objective_function * 5e-7 and outiter >= 20:
+                break
+        # End of normalization (and of the program). Note that the objective function does not
+        # fully recover after normalization: it stays above its pre-normalization value,
+        # but that is expected and matches the MATLAB behaviour.
print("Finished optimization.")

def outer_loop(self):
# This inner loop runs up to four times per outer loop, making updates to X, Y
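
A few notes on the changes above (reviewer-style sketches, not part of the diff).

Both the tightened check in the main loop and the new X-only loop stop once the drop in the objective between iterations falls below 5e-7 of its current value, and never before 20 iterations have run. A minimal, standalone sketch of that relative-change rule (the helper name and the callbacks are hypothetical; only the 5e-7 factor, the 20-iteration floor, and the 300-iteration default come from the code above):

```python
def run_until_converged(step, objective, max_iter=300, rel_tol=5e-7, min_iter=20):
    """Iterate `step()` until the objective's per-iteration drop is a tiny
    fraction of its current value; never stop before `min_iter` passes.
    Hypothetical helper mirroring the stopping rule in the diff above."""
    history = [objective()]
    for it in range(max_iter):
        step()
        history.append(objective())
        improvement = history[-2] - history[-1]  # how much this pass helped
        if improvement < history[-1] * rel_tol and it >= min_iter:
            break
    return history
```

Using a relative rather than an absolute tolerance keeps the check meaningful whatever the overall scale of the objective.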
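The normalization itself is a per-row rescaling: every row of Y and of A is divided by that row's maximum, so each row peaks at exactly 1. A self-contained NumPy sketch of just that step, with small positive random matrices standing in for my_model.Y and my_model.A:

```python
import numpy as np

rng = np.random.default_rng(0)
Y = rng.random((3, 40)) + 0.1  # stand-in for my_model.Y (kept strictly positive)
A = rng.random((3, 40)) + 0.1  # stand-in for my_model.A

Y_row_max = np.max(Y, axis=1, keepdims=True)  # one maximum per row, shape (3, 1)
A_row_max = np.max(A, axis=1, keepdims=True)
Y = Y / Y_row_max  # broadcasting divides each row by its own maximum
A = A / A_row_max

assert np.allclose(Y.max(axis=1), 1.0)
assert np.allclose(A.max(axis=1), 1.0)
```

keepdims=True keeps the maxima as column vectors, so the division broadcasts row-wise without an explicit loop.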
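Rescaling Y and A on their own would leave the fit inconsistent, which is why the last block in the diff re-optimizes X against the newly normalized Y and A: it resets the gradient and history trackers and repeats only the X update until the same relative-change criterion is met. A compressed sketch of that flow, assuming an object that exposes the updateX, get_residual_matrix, and get_objective_function methods shown in the diff (the function name and its defaults are illustrative):

```python
import numpy as np

def refit_x_after_normalization(model, max_iter=100, rel_tol=5e-7, min_iter=20):
    """Sketch of the post-normalization pass added in this PR: keep the freshly
    normalized Y and A fixed and re-run only the X update until the objective
    settles. `model` is assumed to expose the attributes and methods above."""
    # Reset the trackers that the X update relies on.
    model.preX = model.X.copy()
    model.GraX = np.zeros_like(model.X)
    model.preGraX = np.zeros_like(model.X)
    model.R = model.get_residual_matrix()
    model.objective_function = model.get_objective_function()
    history = [model.objective_function]
    for outiter in range(max_iter):
        if outiter == 1:
            model.iter = 1  # lets the step size adapt without an inner loop
        model.updateX()
        model.R = model.get_residual_matrix()
        model.objective_function = model.get_objective_function()
        history.append(model.objective_function)
        if history[-2] - history[-1] < model.objective_function * rel_tol and outiter >= min_iter:
            break
    return history
```

As the comments in the diff note, the objective does not fully return to its pre-normalization value afterwards; that is expected and matches the MATLAB reference.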