From aab82768060cff112447d46cce11baa29adc11b9 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Fri, 26 Jan 2024 08:40:01 -0500 Subject: [PATCH 01/22] refactor: optimal vehicle speed -> vehicle speed scheduling --- ...Speed.lean => VehicleSpeedScheduling.lean} | 20 +++++++++---------- ...e_speed.py => vehicle_speed_scheduling.py} | 0 2 files changed, 10 insertions(+), 10 deletions(-) rename CvxLean/Examples/{OptimalVehicleSpeed.lean => VehicleSpeedScheduling.lean} (92%) rename CvxLean/Examples/{optimal_vehicle_speed.py => vehicle_speed_scheduling.py} (100%) diff --git a/CvxLean/Examples/OptimalVehicleSpeed.lean b/CvxLean/Examples/VehicleSpeedScheduling.lean similarity index 92% rename from CvxLean/Examples/OptimalVehicleSpeed.lean rename to CvxLean/Examples/VehicleSpeedScheduling.lean index 9063f8a7..f842721d 100644 --- a/CvxLean/Examples/OptimalVehicleSpeed.lean +++ b/CvxLean/Examples/VehicleSpeedScheduling.lean @@ -2,7 +2,7 @@ import CvxLean noncomputable section -namespace OptimalVehicleSpeed +namespace VehicleSpeedSched open CvxLean Minimization Real BigOperators Matrix @@ -23,7 +23,7 @@ variable (F : ℝ → ℝ) open FinsetInterval -def optimalVehicleSpeed [Fact (0 < n)] := +def vehSpeedSched [Fact (0 < n)] := optimization (s : Fin n → ℝ) minimize ∑ i, (d i / s i) * F (s i) subject to @@ -46,9 +46,9 @@ private lemma fold_partial_sum [hn : Fact (0 < n)] (t : Fin n → ℝ) (i : Fin . rfl . linarith [hn.out] -equivalence' eqv₁/optimalVehicleSpeedConvex (n : ℕ) (d : Fin n → ℝ) +equivalence' eqv₁/vehSpeedSchedConvex (n : ℕ) (d : Fin n → ℝ) (τmin τmax smin smax : Fin n → ℝ) (F : ℝ → ℝ) (h_n_pos : 0 < n) (h_d_pos : StrongLT 0 d) - (h_smin_pos : StrongLT 0 smin) : @optimalVehicleSpeed n d τmin τmax smin smax F ⟨h_n_pos⟩ := by + (h_smin_pos : StrongLT 0 smin) : @vehSpeedSched n d τmin τmax smin smax F ⟨h_n_pos⟩ := by replace h_d_pos : ∀ i, 0 < d i := fun i => h_d_pos i replace h_smin_pos : ∀ i, 0 < smin i := fun i => h_smin_pos i haveI : Fact (0 < n) := ⟨h_n_pos⟩ @@ -81,7 +81,7 @@ equivalence' eqv₁/optimalVehicleSpeedConvex (n : ℕ) (d : Fin n → ℝ) rename_vars [t] rename_constrs [c_smin, c_smax, c_τmin, c_τmax] -#print optimalVehicleSpeedConvex +#print vehSpeedSchedConvex #check eqv₁.backward_map @@ -92,10 +92,10 @@ equivalence' eqv₁/optimalVehicleSpeedConvex (n : ℕ) (d : Fin n → ℝ) -- We fix `F` and declare an atom for this particular application of the perspective function. -- Let `F(s) = a * s^2 + b * s + c` with `0 ≤ a`. -equivalence' eqv₂/optimalVehicleSpeedQuadratic (n : ℕ) (d : Fin n → ℝ) +equivalence' eqv₂/vehSpeedSchedQuadratic (n : ℕ) (d : Fin n → ℝ) (τmin τmax smin smax : Fin n → ℝ) (a b c : ℝ) (h_n_pos : 0 < n) (h_d_pos : StrongLT 0 d) (h_smin_pos : StrongLT 0 smin) : - optimalVehicleSpeedConvex n d τmin τmax smin smax (fun s => a • s ^ (2 : ℝ) + b • s + c) + vehSpeedSchedConvex n d τmin τmax smin smax (fun s => a • s ^ (2 : ℝ) + b • s + c) h_n_pos h_d_pos h_smin_pos := by have t_pos_of_c_smin : ∀ t, smin ≤ d / t → StrongLT 0 t := fun t h i => by have h_di_div_ti_pos := lt_of_lt_of_le (h_smin_pos i) (h i) @@ -129,7 +129,7 @@ equivalence' eqv₂/optimalVehicleSpeedQuadratic (n : ℕ) (d : Fin n → ℝ) rename_constrs [c_t, c_smin, c_smax, c_τmin, c_τmax] -- Finally, we can apply `dcp`! (or we can call `solve`, as we do below). 
-#print optimalVehicleSpeedQuadratic +#print vehSpeedSchedQuadratic #check eqv₂.backward_map @@ -195,7 +195,7 @@ def bₚ : ℝ := 6 @[optimization_param] def cₚ : ℝ := 10 -def p := optimalVehicleSpeedQuadratic nₚ dₚ τminₚ τmaxₚ sminₚ smaxₚ aₚ bₚ cₚ nₚ_pos dₚ_pos sminₚ_pos +def p := vehSpeedSchedQuadratic nₚ dₚ τminₚ τmaxₚ sminₚ smaxₚ aₚ bₚ cₚ nₚ_pos dₚ_pos sminₚ_pos set_option trace.Meta.debug true @@ -221,6 +221,6 @@ def sol := eqv₁.backward_mapₚ (eqv₂.backward_mapₚ p.solution) -- ![0.955578, 0.955548, 0.955565, 0.955532, 0.955564, 0.955560, 0.912362, 0.960401, 0.912365, -- 0.912375] -end OptimalVehicleSpeed +end VehicleSpeedSched end diff --git a/CvxLean/Examples/optimal_vehicle_speed.py b/CvxLean/Examples/vehicle_speed_scheduling.py similarity index 100% rename from CvxLean/Examples/optimal_vehicle_speed.py rename to CvxLean/Examples/vehicle_speed_scheduling.py From 636f447e95aa42b0f68ebe052e47378e5f6b13b7 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Fri, 26 Jan 2024 09:05:17 -0500 Subject: [PATCH 02/22] feat: Python versions of current case studies --- CvxLean/Examples/covariance_estimation.py | 32 ++++++++++++++++++++ CvxLean/Examples/vehicle_speed_scheduling.py | 1 - 2 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 CvxLean/Examples/covariance_estimation.py diff --git a/CvxLean/Examples/covariance_estimation.py b/CvxLean/Examples/covariance_estimation.py new file mode 100644 index 00000000..7d2be7b6 --- /dev/null +++ b/CvxLean/Examples/covariance_estimation.py @@ -0,0 +1,32 @@ +import numpy as np +import cvxpy as cp + +n = 2 + +N = 4 + +alpha = 1 + +Y = np.array([[0, 2], [2, 0], [-2, 0], [0, -2]]) + +R = cp.Variable(shape=(n, n), PSD=True) + +def covMat(X): + # TODO: Different from np.cov(X, rowvar=False) ? + (N, n) = X.shape + res = np.zeros((n, n)) + for i in range(n): + for j in range(n): + res[i, j] = np.sum([X[k, i] * X[k, j] for k in range(N)]) / N + return res + +p = cp.Problem( + cp.Minimize( + -(-(N * np.log(np.sqrt((2 * np.pi) ** n)) + (N * (-cp.log_det(R) / 2))) + + -(N * cp.trace(covMat(Y) * cp.transpose(R)) / 2)) + ), [ + cp.sum(cp.abs(R)) <= alpha + ]) +p.solve(solver=cp.MOSEK, verbose=True) + +print("R* = ", R.value) diff --git a/CvxLean/Examples/vehicle_speed_scheduling.py b/CvxLean/Examples/vehicle_speed_scheduling.py index d126fb5a..77ae6212 100644 --- a/CvxLean/Examples/vehicle_speed_scheduling.py +++ b/CvxLean/Examples/vehicle_speed_scheduling.py @@ -1,6 +1,5 @@ import numpy as np import cvxpy as cp -import matplotlib.pyplot as plt n = 10 From ae1cec46ef9d980802819af7abd390ea7c3f9fe1 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Fri, 26 Jan 2024 09:13:36 -0500 Subject: [PATCH 03/22] feat: optional `subject to` --- CvxLean/Examples/FittingSphere.lean | 4 ++-- CvxLean/Syntax/Minimization.lean | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index a4e2b8ea..cd98b365 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -18,8 +18,8 @@ variable (x : Fin m → Fin n → ℝ) def fittingSphere := optimization (c : Fin n → ℝ) (r : ℝ) minimize (∑ i, (‖(x i) - c‖ ^ 2 - r ^ 2) ^ 2 : ℝ) - subject to - _ : True + +#print fittingSphere end FittingSphere diff --git a/CvxLean/Syntax/Minimization.lean b/CvxLean/Syntax/Minimization.lean index e4bee31e..e10c62f1 100644 --- a/CvxLean/Syntax/Minimization.lean +++ b/CvxLean/Syntax/Minimization.lean @@ -30,12 +30,16 @@ partial def elabVars (idents : Array Syntax) : 
TermElabM (Array (Lean.Name × Ex | _ => throwError "Expected identifier: {id}" return idents +macro_rules +| `(optimization $idents* $minOrMax:minOrMax $obj) => + `(optimization $idents* $minOrMax:minOrMax $obj subject to _ : True) + -- TODO: allow dependently typed variables? /-- Elaborate "optimization" problem syntax. -/ @[term_elab «optimization»] def elabOptmiziation : Term.TermElab := fun stx expectedType? => do match stx with - | `(optimization $idents* $minOrMax:minOrMax $obj subject to $constraints ) => + | `(optimization $idents* $minOrMax:minOrMax $obj subject to $constraints) => -- Determine names and types of the variables. let vars ← elabVars <| idents.map (·.raw) -- Construct domain type. From ed2fd765678f692282b4f97e050e83ebec0482b2 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Fri, 26 Jan 2024 09:29:24 -0500 Subject: [PATCH 04/22] fix: example imports --- CvxLean/Examples/All.lean | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CvxLean/Examples/All.lean b/CvxLean/Examples/All.lean index ea00d3ce..eed9a70d 100644 --- a/CvxLean/Examples/All.lean +++ b/CvxLean/Examples/All.lean @@ -1,5 +1,5 @@ import CvxLean.Examples.CovarianceEstimation import CvxLean.Examples.FittingSphere import CvxLean.Examples.HypersonicShapeDesign -import CvxLean.Examples.OptimalVehicleSpeed import CvxLean.Examples.TrussDesign +import CvxLean.Examples.VehicleSpeedScheduling From 751194c816935c81079f525d85487aa5d0200f78 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Fri, 26 Jan 2024 09:29:46 -0500 Subject: [PATCH 05/22] feat: nicely `delab` problems with no constraints --- CvxLean/Examples/FittingSphere.lean | 4 ++++ CvxLean/Syntax/Minimization.lean | 35 +++++++++++++++-------------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index cd98b365..5b589118 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -21,6 +21,10 @@ def fittingSphere := #print fittingSphere +equivalence eqv/fittingSphereConvex (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere n m x := by + equivalence_rfl + + end FittingSphere end diff --git a/CvxLean/Syntax/Minimization.lean b/CvxLean/Syntax/Minimization.lean index e10c62f1..b2c89616 100644 --- a/CvxLean/Syntax/Minimization.lean +++ b/CvxLean/Syntax/Minimization.lean @@ -129,32 +129,33 @@ def withDomainBinding [Inhabited α] (domain : Expr) (x : DelabM α) : DelabM α partial def delabMinimization : Delab := do if not (pp.optMinimization.get (← getOptions)) then Alternative.failure match ← getExpr with - | Expr.app - (Expr.app - (Expr.app - (Expr.app (Expr.const `Minimization.mk _) domain) - codomain) - objFun) constraints => - let constraints : Array Syntax := #[] - let idents ← withType $ withNaryArg 0 do + | .app (.app (.app (.app (.const `Minimization.mk _) domain) codomain) objFun) constraints => + let idents ← withType <| withNaryArg 0 do let tys ← delabDomain - let tys ← tys.mapM fun (name, stx) => do - `(Parser.minimizationVar|($(mkIdent name) : $stx)) + let tys ← tys.mapM fun (name, stx) => do `(Parser.minimizationVar| ($(mkIdent name) : $stx)) return tys.toArray let (objFun, isMax) ← withNaryArg 2 do withDomainBinding domain do match ← getExpr with - | Expr.app (Expr.app (Expr.app (Expr.const ``maximizeNeg _) _) _) e => - withExpr e do - return (← delab, true) + | .app (.app (.app (.const ``maximizeNeg _) _) _) e => + withExpr e do + return (← delab, true) | _ => - return (← delab, false) + return (← 
delab, false) + let noConstrs ← withLambdaBody constraints fun _ constrsBody => do + isDefEq constrsBody (mkConst ``True) let constraints := ← withNaryArg 3 do let cs ← withDomainBinding domain delabConstraints return mkNode ``Parser.constraints #[mkNullNode <| cs.toArray.map (·.raw)] - if isMax then - `(optimization $idents* maximize $objFun subject to $constraints) + if noConstrs then + if isMax then + `(optimization $idents* maximize $objFun) + else + `(optimization $idents* minimize $objFun) else - `(optimization $idents* minimize $objFun subject to $constraints) + if isMax then + `(optimization $idents* maximize $objFun subject to $constraints) + else + `(optimization $idents* minimize $objFun subject to $constraints) | _ => Alternative.failure end Delab From e855140e3c1415503b517188b5c6d9c1cf01d6ee Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Fri, 26 Jan 2024 15:44:31 -0500 Subject: [PATCH 06/22] feat: do not use custom norm --- CvxLean/Lib/Math/Data/Vec.lean | 4 ++-- CvxLean/Tactic/DCP/AtomLibrary/Fns/Norm.lean | 8 +++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/CvxLean/Lib/Math/Data/Vec.lean b/CvxLean/Lib/Math/Data/Vec.lean index a7e39c5a..cf6d0425 100644 --- a/CvxLean/Lib/Math/Data/Vec.lean +++ b/CvxLean/Lib/Math/Data/Vec.lean @@ -1,3 +1,4 @@ +import Mathlib.Analysis.NormedSpace.PiLp import CvxLean.Lib.Math.Data.Real import CvxLean.Lib.Math.Data.Fin @@ -55,8 +56,7 @@ Named functions on real vectors, including those defined in open Real BigOperators /-- See `CvxLean.Tactic.DCP.AtomLibrary.Fns.Norm2`. -/ -instance : Norm (m → ℝ) where - norm x := sqrt (∑ i, (x i) ^ 2) +instance : Norm (m → ℝ) := PiLp.hasNorm 2 _ variable (x y : m → ℝ) diff --git a/CvxLean/Tactic/DCP/AtomLibrary/Fns/Norm.lean b/CvxLean/Tactic/DCP/AtomLibrary/Fns/Norm.lean index 94ee4cb3..43692178 100644 --- a/CvxLean/Tactic/DCP/AtomLibrary/Fns/Norm.lean +++ b/CvxLean/Tactic/DCP/AtomLibrary/Fns/Norm.lean @@ -7,6 +7,8 @@ namespace CvxLean open Real +#check sqrt_eq_rpow + declare_atom norm2 [convex] (n : Nat)& (x : Fin n → ℝ)? 
: ‖x‖ := vconditions implementationVars (t : ℝ) @@ -18,10 +20,10 @@ solutionEqualsAtom by rfl feasibility (c : by unfold soCone - simp [Norm.norm]) + simp [Norm.norm, sqrt_eq_rpow]) optimality by unfold soCone at c - simp [Norm.norm] at c ⊢ + simp [Norm.norm, sqrt_eq_rpow] at c ⊢ exact c vconditionElimination @@ -41,6 +43,6 @@ optimality by vconditionElimination lemma norm2₂_eq_norm2 {x y : ℝ} : ‖![x, y]‖ = sqrt (x ^ 2 + y ^ 2) := - by simp [Norm.norm] + by simp [Norm.norm, sqrt_eq_rpow] end CvxLean From cd3861dce5380fe98ccf5fbd293fa6ef20a91407 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Fri, 26 Jan 2024 18:52:42 -0500 Subject: [PATCH 07/22] wip: towards fitting sphere --- CvxLean/Examples/FittingSphere.lean | 145 +++++++++++++++++++++++++++- 1 file changed, 142 insertions(+), 3 deletions(-) diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index 5b589118..812e1191 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -18,12 +18,151 @@ variable (x : Fin m → Fin n → ℝ) def fittingSphere := optimization (c : Fin n → ℝ) (r : ℝ) minimize (∑ i, (‖(x i) - c‖ ^ 2 - r ^ 2) ^ 2 : ℝ) + subject to + h₁ : 1 / 10000 ≤ r -#print fittingSphere +instance : ChangeOfVariables fun (ct : (Fin n → ℝ) × ℝ) => (ct.1, sqrt (ct.2 + ‖ct.1‖ ^ 2)) := + { inv := fun (c, r) => (c, r ^ 2 - ‖c‖ ^ 2), + condition := fun (_, t) => 0 ≤ t, + property := fun ⟨c, t⟩ h => by simp [sqrt_sq h] } -equivalence eqv/fittingSphereConvex (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere n m x := by - equivalence_rfl +def Vec.norm {m n} (x : Fin m → Fin n → ℝ) : Fin m → ℝ := fun i => ‖x i‖ +equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere n m x := by + -- Change of variables. + equivalence_step => + apply ChangeOfVariables.toEquivalence + (fun (ct : (Fin n → ℝ) × ℝ) => (ct.1, sqrt (ct.2 + ‖ct.1‖ ^ 2))) + . rintro _ h; exact le_trans (by norm_num) h + rename_vars [c, t] + -- Clean up. + conv_constr h₁ => dsimp + -- conv_constr h₂ => dsimp + conv_obj => dsimp + -- Rewrite objective. + rw_obj => + have h' : 0 < t + ‖c‖ ^ 2 := sqrt_pos.mp <| lt_of_lt_of_le (by norm_num) h₁; + conv => + left; congr; congr; ext i; congr; simp; + rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] + rw [sq_sqrt (rpow_two _ ▸ le_of_lt h')] + ring_nf; simp + equivalence_step => + apply Equivalence.rewrite_objFun + (g := fun (ct : (Fin n → ℝ) × ℝ) => + Vec.sum (((Vec.norm x) ^ 2 - 2 * (Matrix.mulVec x ct.1) - Vec.const m ct.2) ^ 2)) + . rintro ⟨c, t⟩ h + dsimp at h ⊢; simp [Vec.sum, Vec.norm, Vec.const] + congr; funext i; congr 1; rw [add_sub, ← sub_eq_add_neg] + congr 2; simp [mulVec, inner, dotProduct, Finset.sum_mul, Finset.mul_sum] + congr; funext j; ring + rename_vars [c, t] + -- equivalence_step => + -- apply Equivalence.rewrite_constraints + -- (cs' := fun (ct : (Fin n → ℝ) × ℝ) => 0 ≤ ct.2 ∧ ‖ct.1‖ ^ 2 ≤ 50) + -- . rintro ⟨c, t⟩; dsimp; constructor + -- . rintro ⟨h₁, h₂⟩ + -- refine ⟨?_, h₂⟩ + -- rw [← neg_le_neg_iff] at h₂ + -- apply le_trans h₂ + -- rw [neg_le_iff_add_nonneg] + -- apply le_of_lt + -- rw [← sqrt_pos] + -- exact lt_of_lt_of_le (by norm_num) h₁ + -- . 
rintro ⟨h₁, h₂⟩ + -- refine ⟨?_, h₂⟩ + -- have h_num : 1 / 10000 = sqrt ((1 / 10000) ^ 2) := by rw [rpow_two, sqrt_sq (by norm_num)] + -- rw [h_num] + -- apply sqrt_le_sqrt + -- sorry + +private lemma reduced_constraint (c : Fin n → ℝ) (t : ℝ) (h : 1 / 10000 ≤ t) : + 1 / 100 ≤ sqrt (t + ‖c‖ ^ 2) := by + simp; rw [le_sqrt (by norm_num), ←add_zero (_ ^ 2)] + . apply add_le_add _ (sq_nonneg _) + exact le_trans (by norm_num) h + . exact add_nonneg (le_trans (by norm_num) h) (sq_nonneg _) + +#check le_sqrt_of_sq_le +#check le_sqrt' + +def rrr : Reduction + (optimization (c : Fin n → ℝ) (t : ℝ) + minimize (Vec.sum ((Vec.norm x ^ 2 - 2 * mulVec x c - Vec.const m t) ^ 2) : ℝ) + subject to + h₁ : 1 / 100 ≤ sqrt (t + ‖c‖ ^ 2) + h₂ : ‖c‖ ^ 2 ≤ 50 + ) + (optimization (c : Fin n → ℝ) (t : ℝ) + minimize (Vec.sum ((Vec.norm x ^ 2 - 2 * mulVec x c - Vec.const m t) ^ 2) : ℝ) + subject to + h₁ : 1 / 10000 ≤ t + h₂ : ‖c‖ ^ 2 ≤ 50 + ) := + { psi := id, + psi_feasibility := fun ⟨c, t⟩ ⟨h₁, h₂⟩ => ⟨reduced_constraint n c t h₁, h₂⟩, + psi_optimality := fun ⟨c, t⟩ ⟨⟨h₁, h₂⟩, h_opt⟩ => + ⟨⟨reduced_constraint n c t h₁, h₂⟩, by + rintro ⟨c', t'⟩ ⟨h₁', h₂'⟩ + simp [fittingSphere₁, feasible] at h₁ h₂ h₁' h₂' h_opt ⊢ + have ht' : 1 / 10000 ≤ t' := by + simp + rw [le_sqrt' (by norm_num)] at h₁' + -- have : 0 < t' + ‖c'‖ ^ 2 := by + -- by_contra hc + -- simp [not_lt] at hc + -- simp [sqrt_eq_zero_of_nonpos hc] at h₁' + -- linarith [h₁'] + + sorry + simp at ht' + exact h_opt c' t' ht' h₂'⟩ } + +-- def fittingSphere₁InitialRed : +-- fittingSphere₁ n m x ≼ +-- optimization (c : Fin n → ℝ) (t : ℝ) +-- minimize (∑ i, (‖(x i) - c‖ ^ 2 - sqrt (t + ‖c‖ ^ 2) ^ 2) ^ 2 : ℝ) +-- subject to +-- h : 1 / 10000 ≤ t := +-- { psi := id, +-- psi_feasibility := fun ⟨c, t⟩ h => reduced_constraint n c t h +-- psi_optimality := fun ⟨c, t⟩ ⟨h_feas, h_opt⟩ => +-- ⟨reduced_constraint n c t h_feas, by +-- rintro ⟨c', t'⟩ h_feas' +-- have h₁ := reduced_constraint n c t h_feas +-- simp [fittingSphere₁, feasible] at h₁ h_feas h_feas' h_opt ⊢ +-- have ht' : 1 / 10000 ≤ t' := by +-- simp +-- apply le_trans h_feas' +-- sorry +-- simp at ht' +-- exact h_opt c' t' ht' +-- ⟩ } + +-- reduction red/fittingSphere₂ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere₁ n m x := by +-- reduction_step => +-- sorry +-- -- apply Reduction.rewrite_objFun +-- -- (g := fun (ct : (Fin n → ℝ) × ℝ) => +-- -- Vec.sum (((Vec.norm x) ^ 2 - 2 * (Matrix.mulVec x ct.1) - Vec.const m ct.2) ^ 2)) +-- -- rintro ⟨c, t⟩ h +-- -- simp at h +-- -- simp [Vec.sum] +-- -- congr <;> ext i <;> congr 1 +-- -- rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] +-- -- simp [Vec.norm, Vec.const] + +-- -- by_cases (0 < t + ‖c‖ ^ 2) + +-- -- rw_obj => +-- -- have h' : 0 < t + ‖c‖ ^ 2 := sqrt_pos.mp <| h; +-- -- conv => +-- -- left; congr; congr; ext i; congr; simp; +-- -- rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] +-- -- rw [sq_sqrt (rpow_two _ ▸ le_of_lt h')] +-- -- ring_nf; simp + +#print eqv end FittingSphere From baaf2da6492e4acae0fdbb3df3656bc97c7604ad Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sat, 27 Jan 2024 19:59:14 -0500 Subject: [PATCH 08/22] wip: more attempts on least squares --- CvxLean/Examples/FittingSphere.lean | 108 ++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index 812e1191..1f138ec1 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ 
-6,6 +6,114 @@ namespace FittingSphere open CvxLean Minimization Real BigOperators Matrix +def leastSquares (n : ℕ) (a : Fin n → ℝ) := + optimization (x : ℝ) + minimize (∑ i, ((x - (a i)) ^ 2) : ℝ) + +lemma leastSquares_optimal (n : ℕ) (a : Fin n → ℝ) : + (leastSquares n a).optimal ((1 / n) * ∑ i, a i) := by + refine ⟨trivial, ?_⟩ + intros y _ + simp [leastSquares] + sorry + + -- I was using norms here... + -- induction n with + -- | zero => + -- refine ⟨trivial, ?_⟩ + -- intro y _ + -- simp [leastSquares] + -- | succ m ih => + -- refine ⟨trivial, ?_⟩ + -- intro y _ + -- simp only [leastSquares] + -- rw [Fin.sum_univ_succ] + -- conv => right; rw [Fin.sum_univ_succ] + + -- apply add_le_add + -- . sorry + -- . simp [leastSquares, optimal, feasible] at ih + -- have ih_applied := ih (fun i => a i.succ) y + -- simp at ih_applied + -- calc + -- -- step 1 + -- ∑ i : Fin m, ‖(1 / ↑(m.succ)) * ∑ j : Fin m.succ, a j - a i.succ‖ ^ 2 = + -- ∑ i : Fin m, ‖(1 / (m.succ : ℝ)) • ∑ j : Fin m.succ, a j - a i.succ‖ ^ 2 := by + -- congr + -- -- step 2 + -- _ = + -- ∑ i : Fin m, ‖(1 / (m.succ : ℝ)) • ∑ j : Fin m.succ, a j - (1 / (m.succ : ℝ)) • ∑ j : Fin m.succ, a i.succ‖ ^ 2 := by + -- congr; funext i; congr + -- funext j; simp [Finset.sum_const]; field_simp; ring + -- -- step 3 + -- _ = + -- ∑ i : Fin m, ‖(1 / (m.succ : ℝ)) • (∑ j : Fin m.succ, a j - (m.succ : ℝ) • a i.succ)‖ ^ 2 := by + -- congr; funext i; congr + -- rw [← smul_sub]; congr; funext j; simp [Finset.sum_const] + -- -- step 4 + -- _ = + -- ∑ i : Fin m, ((1 / (m.succ : ℝ)) * ‖(∑ j : Fin m.succ, a j - (m.succ : ℝ) • a i.succ)‖) ^ 2 := by + -- congr; funext i; congr + -- rw [@norm_smul_of_nonneg (Fin n → ℝ) (PiLp.seminormedAddCommGroup _ _) (PiLp.normedSpace 2 ℝ _)] + -- positivity + -- -- step 5 (key) + -- _ ≤ + -- ∑ i : Fin m, ((1 / (m : ℝ)) * ‖(∑ j : Fin m, a j.succ - (m : ℝ) • a i.succ)‖) ^ 2 := by + -- apply Finset.sum_le_sum; intros i _ + -- rw [rpow_two, rpow_two, sq_le_sq, abs_mul, abs_mul] + -- rw [@abs_norm (Fin n → ℝ) (PiLp.seminormedAddCommGroup _ _)] + -- rw [@abs_norm (Fin n → ℝ) (PiLp.seminormedAddCommGroup _ _)] + -- have : m ≠ 0 := fun h => by rw [h] at i; exact Nat.not_lt_zero _ i.2 + -- have : m > 0 := Nat.pos_of_ne_zero this + -- have : (m : ℝ) > 0 := by norm_num [this] + -- have h2 : (m : ℝ) + 1 > 0 := add_pos (this) (by norm_num) + -- apply mul_le_mul + -- . apply abs_le_abs + -- . apply div_le_div + -- . norm_num + -- . norm_num + -- . exact this + -- . norm_num + -- . apply le_trans (b := 0) + -- . simp; exact (le_of_lt h2) + -- . simp + -- . rw [Fin.sum_univ_succ] + -- sorry + -- . 
exact @norm_nonneg _ (PiLp.seminormedAddCommGroup _ _).toSeminormedAddGroup _ + -- -- step 6 + -- _ = + -- ∑ i : Fin m, ‖((1 / (m : ℝ)) • (∑ j : Fin m, a j.succ - (m : ℝ) • a i.succ))‖ ^ 2 := by + -- congr; funext i; congr + -- rw [@norm_smul_of_nonneg (Fin n → ℝ) (PiLp.seminormedAddCommGroup _ _) (PiLp.normedSpace 2 ℝ _)] + -- positivity + -- -- step 7 + -- _ = + -- ∑ i : Fin m, ‖((1 / (m : ℝ)) • ∑ j : Fin m, a j.succ) - ((1 / (m : ℝ)) • ∑ j : Fin m, a i.succ)‖ ^ 2 := by + -- congr; funext i; congr + -- rw [← smul_sub]; congr; funext j; simp [Finset.sum_const] + -- -- step 8 + -- _ = + -- ∑ i : Fin m, ‖((1 / (m : ℝ)) • ∑ j : Fin m, a j.succ) - a i.succ‖ ^ 2 := by + -- congr; funext i; congr + -- funext j; simp [Finset.sum_const] + -- have : m ≠ 0 := fun h => by rw [h] at i; exact Nat.not_lt_zero _ i.2 + -- field_simp; ring + -- -- final step (by IH) + -- _ ≤ + -- ∑ i : Fin m, ‖y - a i.succ‖ ^ 2 := by + -- simp; convert ih_applied + -- apply Finset.sum_le_sum + -- intros i _ + -- rw [rpow_two, rpow_two, sq_le_sq] + -- iterate 2 rw [@abs_norm (Fin n → ℝ) (PiLp.seminormedAddCommGroup _ _)] + -- have hai : a i = (1 / m) * ∑ j : Fin m, a i := by + -- funext j; simp [Finset.sum_const]; field_simp; ring + -- nth_rewrite 1 [hai] + -- rw [← mul_sub, Finset.sum_sub_distrib] + -- rw [norm_mul] + -- sorry + + -- Dimension. variable (n : ℕ) From c17ceee53d9dff24acc06e4695c891874cde8384 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sat, 27 Jan 2024 20:16:58 -0500 Subject: [PATCH 09/22] wip: correct formulation of least squares --- CvxLean/Examples/FittingSphere.lean | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index 1f138ec1..858f91bc 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -6,16 +6,20 @@ namespace FittingSphere open CvxLean Minimization Real BigOperators Matrix -def leastSquares (n : ℕ) (a : Fin n → ℝ) := - optimization (x : ℝ) - minimize (∑ i, ((x - (a i)) ^ 2) : ℝ) +def leastSquares (n : ℕ) (a : ℝ) := + optimization (x : Fin n → ℝ) + minimize (∑ i, ((x i - a) ^ 2) : ℝ) -lemma leastSquares_optimal (n : ℕ) (a : Fin n → ℝ) : - (leastSquares n a).optimal ((1 / n) * ∑ i, a i) := by - refine ⟨trivial, ?_⟩ - intros y _ +lemma leastSquares_optimal (n : ℕ) (a : ℝ) (x : Fin n → ℝ) : + (leastSquares n a).optimal x → (1 / n) * ∑ i, (x i) = a := by + intros h simp [leastSquares] sorry + -- if x is optimal then avg(x) = 1 + -- https://math.stackexchange.com/questions/2554243/understanding-the-mean-minimizes-the-mean-squared-error + + + -- sorry -- I was using norms here... 
-- induction n with From 3303ca4f1e9fe93fe04e253f3392ec3fe58888a6 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 14:14:09 -0500 Subject: [PATCH 10/22] wip: relaxation setup --- CvxLean/Examples/FittingSphere.lean | 285 +++++++--------------------- CvxLean/Lib/Relaxation.lean | 5 + 2 files changed, 78 insertions(+), 212 deletions(-) diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index 858f91bc..2ee8fd29 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -2,121 +2,43 @@ import CvxLean noncomputable section -namespace FittingSphere - open CvxLean Minimization Real BigOperators Matrix -def leastSquares (n : ℕ) (a : ℝ) := - optimization (x : Fin n → ℝ) - minimize (∑ i, ((x i - a) ^ 2) : ℝ) +section LeastSquares -lemma leastSquares_optimal (n : ℕ) (a : ℝ) (x : Fin n → ℝ) : - (leastSquares n a).optimal x → (1 / n) * ∑ i, (x i) = a := by - intros h +def leastSquares {n : ℕ} (a : Fin n → ℝ) (lb : ℝ) := + optimization (x : ℝ) + minimize (∑ i, ((a i - x) ^ 2) : ℝ) + subject to + h₁ : lb ≤ x + +lemma leastSquares_optimal_eq_mean {n : ℕ} (a : Fin n → ℝ) (lb : ℝ) (x : ℝ) + (h : (leastSquares a lb).optimal x) : x = (1 / n) * ∑ i, (a i) := by simp [leastSquares] sorry - -- if x is optimal then avg(x) = 1 - -- https://math.stackexchange.com/questions/2554243/understanding-the-mean-minimizes-the-mean-squared-error - - -- sorry +def Vec.leastSquares {n : ℕ} (a : Fin n → ℝ) (lb : ℝ) := + optimization (x : ℝ) + minimize (Vec.sum ((a - Vec.const n x) ^ 2) : ℝ) + subject to + h₁ : lb ≤ x + +lemma vec_leastSquares_optimal_eq_mean {n : ℕ} (a : Fin n → ℝ) (lb : ℝ) (x : ℝ) + (h : (Vec.leastSquares a lb).optimal x) : x = (1 / n) * ∑ i, (a i) := by + apply leastSquares_optimal_eq_mean a lb + simp [Vec.leastSquares, leastSquares, optimal, feasible] at h ⊢ + have ⟨h, h_opt⟩ := h + refine ⟨h, ?_⟩ + intros y + simp only [Vec.sum, Pi.pow_apply, Pi.sub_apply, Vec.const] at h_opt + exact h_opt y - -- I was using norms here... - -- induction n with - -- | zero => - -- refine ⟨trivial, ?_⟩ - -- intro y _ - -- simp [leastSquares] - -- | succ m ih => - -- refine ⟨trivial, ?_⟩ - -- intro y _ - -- simp only [leastSquares] - -- rw [Fin.sum_univ_succ] - -- conv => right; rw [Fin.sum_univ_succ] + -- if x is optimal then avg(x) = 1 + -- https://math.stackexchange.com/questions/2554243/understanding-the-mean-minimizes-the-mean-squared-error - -- apply add_le_add - -- . sorry - -- . 
simp [leastSquares, optimal, feasible] at ih - -- have ih_applied := ih (fun i => a i.succ) y - -- simp at ih_applied - -- calc - -- -- step 1 - -- ∑ i : Fin m, ‖(1 / ↑(m.succ)) * ∑ j : Fin m.succ, a j - a i.succ‖ ^ 2 = - -- ∑ i : Fin m, ‖(1 / (m.succ : ℝ)) • ∑ j : Fin m.succ, a j - a i.succ‖ ^ 2 := by - -- congr - -- -- step 2 - -- _ = - -- ∑ i : Fin m, ‖(1 / (m.succ : ℝ)) • ∑ j : Fin m.succ, a j - (1 / (m.succ : ℝ)) • ∑ j : Fin m.succ, a i.succ‖ ^ 2 := by - -- congr; funext i; congr - -- funext j; simp [Finset.sum_const]; field_simp; ring - -- -- step 3 - -- _ = - -- ∑ i : Fin m, ‖(1 / (m.succ : ℝ)) • (∑ j : Fin m.succ, a j - (m.succ : ℝ) • a i.succ)‖ ^ 2 := by - -- congr; funext i; congr - -- rw [← smul_sub]; congr; funext j; simp [Finset.sum_const] - -- -- step 4 - -- _ = - -- ∑ i : Fin m, ((1 / (m.succ : ℝ)) * ‖(∑ j : Fin m.succ, a j - (m.succ : ℝ) • a i.succ)‖) ^ 2 := by - -- congr; funext i; congr - -- rw [@norm_smul_of_nonneg (Fin n → ℝ) (PiLp.seminormedAddCommGroup _ _) (PiLp.normedSpace 2 ℝ _)] - -- positivity - -- -- step 5 (key) - -- _ ≤ - -- ∑ i : Fin m, ((1 / (m : ℝ)) * ‖(∑ j : Fin m, a j.succ - (m : ℝ) • a i.succ)‖) ^ 2 := by - -- apply Finset.sum_le_sum; intros i _ - -- rw [rpow_two, rpow_two, sq_le_sq, abs_mul, abs_mul] - -- rw [@abs_norm (Fin n → ℝ) (PiLp.seminormedAddCommGroup _ _)] - -- rw [@abs_norm (Fin n → ℝ) (PiLp.seminormedAddCommGroup _ _)] - -- have : m ≠ 0 := fun h => by rw [h] at i; exact Nat.not_lt_zero _ i.2 - -- have : m > 0 := Nat.pos_of_ne_zero this - -- have : (m : ℝ) > 0 := by norm_num [this] - -- have h2 : (m : ℝ) + 1 > 0 := add_pos (this) (by norm_num) - -- apply mul_le_mul - -- . apply abs_le_abs - -- . apply div_le_div - -- . norm_num - -- . norm_num - -- . exact this - -- . norm_num - -- . apply le_trans (b := 0) - -- . simp; exact (le_of_lt h2) - -- . simp - -- . rw [Fin.sum_univ_succ] - -- sorry - -- . exact @norm_nonneg _ (PiLp.seminormedAddCommGroup _ _).toSeminormedAddGroup _ - -- -- step 6 - -- _ = - -- ∑ i : Fin m, ‖((1 / (m : ℝ)) • (∑ j : Fin m, a j.succ - (m : ℝ) • a i.succ))‖ ^ 2 := by - -- congr; funext i; congr - -- rw [@norm_smul_of_nonneg (Fin n → ℝ) (PiLp.seminormedAddCommGroup _ _) (PiLp.normedSpace 2 ℝ _)] - -- positivity - -- -- step 7 - -- _ = - -- ∑ i : Fin m, ‖((1 / (m : ℝ)) • ∑ j : Fin m, a j.succ) - ((1 / (m : ℝ)) • ∑ j : Fin m, a i.succ)‖ ^ 2 := by - -- congr; funext i; congr - -- rw [← smul_sub]; congr; funext j; simp [Finset.sum_const] - -- -- step 8 - -- _ = - -- ∑ i : Fin m, ‖((1 / (m : ℝ)) • ∑ j : Fin m, a j.succ) - a i.succ‖ ^ 2 := by - -- congr; funext i; congr - -- funext j; simp [Finset.sum_const] - -- have : m ≠ 0 := fun h => by rw [h] at i; exact Nat.not_lt_zero _ i.2 - -- field_simp; ring - -- -- final step (by IH) - -- _ ≤ - -- ∑ i : Fin m, ‖y - a i.succ‖ ^ 2 := by - -- simp; convert ih_applied - -- apply Finset.sum_le_sum - -- intros i _ - -- rw [rpow_two, rpow_two, sq_le_sq] - -- iterate 2 rw [@abs_norm (Fin n → ℝ) (PiLp.seminormedAddCommGroup _ _)] - -- have hai : a i = (1 / m) * ∑ j : Fin m, a i := by - -- funext j; simp [Finset.sum_const]; field_simp; ring - -- nth_rewrite 1 [hai] - -- rw [← mul_sub, Finset.sum_sub_distrib] - -- rw [norm_mul] - -- sorry +end LeastSquares +namespace FittingSphere -- Dimension. 
variable (n : ℕ) @@ -132,6 +54,7 @@ def fittingSphere := minimize (∑ i, (‖(x i) - c‖ ^ 2 - r ^ 2) ^ 2 : ℝ) subject to h₁ : 1 / 10000 ≤ r + h₂ : ‖c‖ ^ 2 ≤ 50 instance : ChangeOfVariables fun (ct : (Fin n → ℝ) × ℝ) => (ct.1, sqrt (ct.2 + ‖ct.1‖ ^ 2)) := { inv := fun (c, r) => (c, r ^ 2 - ‖c‖ ^ 2), @@ -145,7 +68,7 @@ equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fit equivalence_step => apply ChangeOfVariables.toEquivalence (fun (ct : (Fin n → ℝ) × ℝ) => (ct.1, sqrt (ct.2 + ‖ct.1‖ ^ 2))) - . rintro _ h; exact le_trans (by norm_num) h + . rintro _ ⟨h, _⟩; exact le_trans (by norm_num) h rename_vars [c, t] -- Clean up. conv_constr h₁ => dsimp @@ -153,6 +76,7 @@ equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fit conv_obj => dsimp -- Rewrite objective. rw_obj => + -- NOTE: this is why we need strict postivity of `r`, to be able to apply `sq_sqrt`. have h' : 0 < t + ‖c‖ ^ 2 := sqrt_pos.mp <| lt_of_lt_of_le (by norm_num) h₁; conv => left; congr; congr; ext i; congr; simp; @@ -169,112 +93,49 @@ equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fit congr 2; simp [mulVec, inner, dotProduct, Finset.sum_mul, Finset.mul_sum] congr; funext j; ring rename_vars [c, t] - -- equivalence_step => - -- apply Equivalence.rewrite_constraints - -- (cs' := fun (ct : (Fin n → ℝ) × ℝ) => 0 ≤ ct.2 ∧ ‖ct.1‖ ^ 2 ≤ 50) - -- . rintro ⟨c, t⟩; dsimp; constructor - -- . rintro ⟨h₁, h₂⟩ - -- refine ⟨?_, h₂⟩ - -- rw [← neg_le_neg_iff] at h₂ - -- apply le_trans h₂ - -- rw [neg_le_iff_add_nonneg] - -- apply le_of_lt - -- rw [← sqrt_pos] - -- exact lt_of_lt_of_le (by norm_num) h₁ - -- . rintro ⟨h₁, h₂⟩ - -- refine ⟨?_, h₂⟩ - -- have h_num : 1 / 10000 = sqrt ((1 / 10000) ^ 2) := by rw [rpow_two, sqrt_sq (by norm_num)] - -- rw [h_num] - -- apply sqrt_le_sqrt - -- sorry - -private lemma reduced_constraint (c : Fin n → ℝ) (t : ℝ) (h : 1 / 10000 ≤ t) : - 1 / 100 ≤ sqrt (t + ‖c‖ ^ 2) := by - simp; rw [le_sqrt (by norm_num), ←add_zero (_ ^ 2)] - . apply add_le_add _ (sq_nonneg _) - exact le_trans (by norm_num) h - . 
exact add_nonneg (le_trans (by norm_num) h) (sq_nonneg _) - -#check le_sqrt_of_sq_le -#check le_sqrt' - -def rrr : Reduction - (optimization (c : Fin n → ℝ) (t : ℝ) - minimize (Vec.sum ((Vec.norm x ^ 2 - 2 * mulVec x c - Vec.const m t) ^ 2) : ℝ) - subject to - h₁ : 1 / 100 ≤ sqrt (t + ‖c‖ ^ 2) - h₂ : ‖c‖ ^ 2 ≤ 50 - ) - (optimization (c : Fin n → ℝ) (t : ℝ) - minimize (Vec.sum ((Vec.norm x ^ 2 - 2 * mulVec x c - Vec.const m t) ^ 2) : ℝ) - subject to - h₁ : 1 / 10000 ≤ t - h₂ : ‖c‖ ^ 2 ≤ 50 - ) := - { psi := id, - psi_feasibility := fun ⟨c, t⟩ ⟨h₁, h₂⟩ => ⟨reduced_constraint n c t h₁, h₂⟩, - psi_optimality := fun ⟨c, t⟩ ⟨⟨h₁, h₂⟩, h_opt⟩ => - ⟨⟨reduced_constraint n c t h₁, h₂⟩, by - rintro ⟨c', t'⟩ ⟨h₁', h₂'⟩ - simp [fittingSphere₁, feasible] at h₁ h₂ h₁' h₂' h_opt ⊢ - have ht' : 1 / 10000 ≤ t' := by - simp - rw [le_sqrt' (by norm_num)] at h₁' - -- have : 0 < t' + ‖c'‖ ^ 2 := by - -- by_contra hc - -- simp [not_lt] at hc - -- simp [sqrt_eq_zero_of_nonpos hc] at h₁' - -- linarith [h₁'] - - sorry - simp at ht' - exact h_opt c' t' ht' h₂'⟩ } - --- def fittingSphere₁InitialRed : --- fittingSphere₁ n m x ≼ --- optimization (c : Fin n → ℝ) (t : ℝ) --- minimize (∑ i, (‖(x i) - c‖ ^ 2 - sqrt (t + ‖c‖ ^ 2) ^ 2) ^ 2 : ℝ) --- subject to --- h : 1 / 10000 ≤ t := --- { psi := id, --- psi_feasibility := fun ⟨c, t⟩ h => reduced_constraint n c t h --- psi_optimality := fun ⟨c, t⟩ ⟨h_feas, h_opt⟩ => --- ⟨reduced_constraint n c t h_feas, by --- rintro ⟨c', t'⟩ h_feas' --- have h₁ := reduced_constraint n c t h_feas --- simp [fittingSphere₁, feasible] at h₁ h_feas h_feas' h_opt ⊢ --- have ht' : 1 / 10000 ≤ t' := by --- simp --- apply le_trans h_feas' --- sorry --- simp at ht' --- exact h_opt c' t' ht' --- ⟩ } - --- reduction red/fittingSphere₂ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere₁ n m x := by --- reduction_step => --- sorry --- -- apply Reduction.rewrite_objFun --- -- (g := fun (ct : (Fin n → ℝ) × ℝ) => --- -- Vec.sum (((Vec.norm x) ^ 2 - 2 * (Matrix.mulVec x ct.1) - Vec.const m ct.2) ^ 2)) --- -- rintro ⟨c, t⟩ h --- -- simp at h --- -- simp [Vec.sum] --- -- congr <;> ext i <;> congr 1 --- -- rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] --- -- simp [Vec.norm, Vec.const] - --- -- by_cases (0 < t + ‖c‖ ^ 2) - --- -- rw_obj => --- -- have h' : 0 < t + ‖c‖ ^ 2 := sqrt_pos.mp <| h; --- -- conv => --- -- left; congr; congr; ext i; congr; simp; --- -- rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] --- -- rw [sq_sqrt (rpow_two _ ▸ le_of_lt h')] --- -- ring_nf; simp -#print eqv +#print fittingSphere₁ + +private lemma nonconvex__implies_relaxed_constraint (c : Fin n → ℝ) (t : ℝ) + (h₁ : 1 / 10000 ≤ sqrt (t + ‖c‖ ^ 2)) (h₂ : ‖c‖ ^ 2 ≤ 50) : -50 ≤ t := by + rw [le_sqrt' (by norm_num)] at h₁ + linarith + +relaxation red/fittingSphere₂ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere₁ n m x := by + relaxation_step => + apply Relaxation.weaken_constraint (cs' := fun ct => -50 ≤ ct.2 ∧ ‖ct.1‖ ^ 2 ≤ 50) + . rintro ⟨c, t⟩ ⟨h₁, h₂⟩ + exact ⟨nonconvex__implies_relaxed_constraint n c t h₁ h₂, h₂⟩ + +lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) + (h : (fittingSphere₂ n m x).optimal (c, t)) : (fittingSphere₁ n m x).optimal (c, t) := by + simp [fittingSphere₁, fittingSphere₂, optimal, feasible] at h ⊢ + have ⟨⟨h₁, h₂⟩, h_opt⟩ := h + constructor + . constructor + . 
let a := Vec.norm x ^ 2 - 2 * mulVec x c + have h_ls : optimal (Vec.leastSquares a (-50)) t := by + refine ⟨h₁, ?_⟩ + intros y hy + simp [objFun, Vec.leastSquares] + exact h_opt c y hy h₂ + have ht_eq := vec_leastSquares_optimal_eq_mean a (-50) t h_ls + have hc2_eq : ‖c‖ ^ 2 = (1 / m) * ∑ i : Fin m, ‖c‖ ^ 2 := by + simp [Finset.sum_const] + field_simp; ring + have ht : t + ‖c‖ ^ 2 = (1 / m) * ∑ i, ‖(x i) - c‖ ^ 2 := by + rw [ht_eq]; dsimp + rw [hc2_eq, Finset.mul_sum, Finset.mul_sum, Finset.mul_sum, ← Finset.sum_add_distrib] + congr; funext i; + rw [← mul_add] + congr; simp [Vec.norm] + rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] + congr + + sorry + . exact h₂ + . intros c' x' h₁' h₂' + sorry end FittingSphere diff --git a/CvxLean/Lib/Relaxation.lean b/CvxLean/Lib/Relaxation.lean index ab3ec4ea..a43d3988 100644 --- a/CvxLean/Lib/Relaxation.lean +++ b/CvxLean/Lib/Relaxation.lean @@ -106,6 +106,11 @@ def remove_constraint {c cs' : D → Prop} (hcs : ∀ x, cs x ↔ c x ∧ cs' x) phi_feasibility := fun x h_feas_x => ((hcs x).mp h_feas_x).2, phi_optimality := fun _ _ => le_refl _ } +def weaken_constraint (cs' : D → Prop) (hcs : ∀ x, cs x → cs' x) : ⟨f, cs⟩ ≽' ⟨f, cs'⟩ := + { phi := id, + phi_feasibility := fun x h_feas_x => hcs x h_feas_x, + phi_optimality := fun _ _ => le_refl _ } + end Relaxation end Minimization From c43998631894d69a52f16b5f1ea2690df3bd69ae Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 14:48:47 -0500 Subject: [PATCH 11/22] wip: only missing least squares optimality --- CvxLean/Examples/FittingSphere.lean | 148 ++++++++++++++++++---------- 1 file changed, 95 insertions(+), 53 deletions(-) diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index 2ee8fd29..01bd8535 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -6,35 +6,62 @@ open CvxLean Minimization Real BigOperators Matrix section LeastSquares -def leastSquares {n : ℕ} (a : Fin n → ℝ) (lb : ℝ) := +def leastSquares {n : ℕ} (a : Fin n → ℝ) := optimization (x : ℝ) minimize (∑ i, ((a i - x) ^ 2) : ℝ) - subject to - h₁ : lb ≤ x -lemma leastSquares_optimal_eq_mean {n : ℕ} (a : Fin n → ℝ) (lb : ℝ) (x : ℝ) - (h : (leastSquares a lb).optimal x) : x = (1 / n) * ∑ i, (a i) := by +lemma leastSquares_optimal_eq_mean {n : ℕ} (a : Fin n → ℝ) (x : ℝ) + (h : (leastSquares a).optimal x) : x = (1 / n) * ∑ i, (a i) := by simp [leastSquares] sorry + -- if x is optimal then avg(x) = 1 + -- https://math.stackexchange.com/questions/2554243/understanding-the-mean-minimizes-the-mean-squared-error -def Vec.leastSquares {n : ℕ} (a : Fin n → ℝ) (lb : ℝ) := +def Vec.leastSquares {n : ℕ} (a : Fin n → ℝ) := optimization (x : ℝ) minimize (Vec.sum ((a - Vec.const n x) ^ 2) : ℝ) - subject to - h₁ : lb ≤ x -lemma vec_leastSquares_optimal_eq_mean {n : ℕ} (a : Fin n → ℝ) (lb : ℝ) (x : ℝ) - (h : (Vec.leastSquares a lb).optimal x) : x = (1 / n) * ∑ i, (a i) := by - apply leastSquares_optimal_eq_mean a lb +lemma vec_leastSquares_optimal_eq_mean {n : ℕ} (a : Fin n → ℝ) (x : ℝ) + (h : (Vec.leastSquares a).optimal x) : x = (1 / n) * ∑ i, (a i) := by + apply leastSquares_optimal_eq_mean a simp [Vec.leastSquares, leastSquares, optimal, feasible] at h ⊢ - have ⟨h, h_opt⟩ := h - refine ⟨h, ?_⟩ intros y - simp only [Vec.sum, Pi.pow_apply, Pi.sub_apply, Vec.const] at h_opt - exact h_opt y + simp only [Vec.sum, Pi.pow_apply, Pi.sub_apply, Vec.const] at h + exact h y - -- if x is optimal then avg(x) = 1 - -- 
https://math.stackexchange.com/questions/2554243/understanding-the-mean-minimizes-the-mean-squared-error +lemma squared_error_eq_zero_iff {n : ℕ} (a : Fin n → ℝ) (x : ℝ) : + ∑ i, (a i - x) ^ 2 = 0 ↔ ∀ i, a i = x := by + simp [rpow_two] + constructor + . intros h i + rw [Finset.sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] at h + have hi := h i (by simp) + rw [sq_eq_zero_iff, sub_eq_zero] at hi + exact hi + . intros h + rw [Finset.sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] + intros i _ + simp [sq_eq_zero_iff, sub_eq_zero] + exact h i + +lemma vec_squared_norm_error_eq_zero_iff {n m : ℕ} (a : Fin m → Fin n → ℝ) (x : Fin n → ℝ) : + ∑ i, ‖a i - x‖ ^ 2 = 0 ↔ ∀ i, a i = x := by + simp [rpow_two] + constructor + . intros h i + rw [Finset.sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] at h + have hi := h i (by simp) + rw [sq_eq_zero_iff] at hi + rw [@norm_eq_zero _ (PiLp.normedAddCommGroup _ _).toNormedAddGroup] at hi + rw [sub_eq_zero] at hi + exact hi + . intros h + rw [Finset.sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] + intros i _ + rw [sq_eq_zero_iff] + rw [@norm_eq_zero _ (PiLp.normedAddCommGroup _ _).toNormedAddGroup] + rw [sub_eq_zero] + exact h i end LeastSquares @@ -53,8 +80,7 @@ def fittingSphere := optimization (c : Fin n → ℝ) (r : ℝ) minimize (∑ i, (‖(x i) - c‖ ^ 2 - r ^ 2) ^ 2 : ℝ) subject to - h₁ : 1 / 10000 ≤ r - h₂ : ‖c‖ ^ 2 ≤ 50 + h₁ : 0 < r instance : ChangeOfVariables fun (ct : (Fin n → ℝ) × ℝ) => (ct.1, sqrt (ct.2 + ‖ct.1‖ ^ 2)) := { inv := fun (c, r) => (c, r ^ 2 - ‖c‖ ^ 2), @@ -68,7 +94,7 @@ equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fit equivalence_step => apply ChangeOfVariables.toEquivalence (fun (ct : (Fin n → ℝ) × ℝ) => (ct.1, sqrt (ct.2 + ‖ct.1‖ ^ 2))) - . rintro _ ⟨h, _⟩; exact le_trans (by norm_num) h + . rintro _ h; exact le_of_lt h rename_vars [c, t] -- Clean up. conv_constr h₁ => dsimp @@ -77,7 +103,7 @@ equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fit -- Rewrite objective. rw_obj => -- NOTE: this is why we need strict postivity of `r`, to be able to apply `sq_sqrt`. - have h' : 0 < t + ‖c‖ ^ 2 := sqrt_pos.mp <| lt_of_lt_of_le (by norm_num) h₁; + have h' : 0 < t + ‖c‖ ^ 2 := sqrt_pos.mp <| h₁; conv => left; congr; congr; ext i; congr; simp; rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] @@ -96,46 +122,62 @@ equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fit #print fittingSphere₁ -private lemma nonconvex__implies_relaxed_constraint (c : Fin n → ℝ) (t : ℝ) - (h₁ : 1 / 10000 ≤ sqrt (t + ‖c‖ ^ 2)) (h₂ : ‖c‖ ^ 2 ≤ 50) : -50 ≤ t := by - rw [le_sqrt' (by norm_num)] at h₁ - linarith +-- private lemma nonconvex__implies_relaxed_constraint (c : Fin n → ℝ) (t : ℝ) +-- (h₁ : 1 / 10000 ≤ sqrt (t + ‖c‖ ^ 2)) (h₂ : ‖c‖ ^ 2 ≤ 50) : -50 ≤ t := by +-- rw [le_sqrt' (by norm_num)] at h₁ +-- linarith relaxation red/fittingSphere₂ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere₁ n m x := by relaxation_step => - apply Relaxation.weaken_constraint (cs' := fun ct => -50 ≤ ct.2 ∧ ‖ct.1‖ ^ 2 ≤ 50) - . rintro ⟨c, t⟩ ⟨h₁, h₂⟩ - exact ⟨nonconvex__implies_relaxed_constraint n c t h₁ h₂, h₂⟩ + apply Relaxation.weaken_constraint (cs' := fun _ => True) + . 
rintro ⟨c, t⟩ _; trivial lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) + (h_nontrivial : x ≠ Vec.const m c) (h : (fittingSphere₂ n m x).optimal (c, t)) : (fittingSphere₁ n m x).optimal (c, t) := by simp [fittingSphere₁, fittingSphere₂, optimal, feasible] at h ⊢ - have ⟨⟨h₁, h₂⟩, h_opt⟩ := h constructor - . constructor - . let a := Vec.norm x ^ 2 - 2 * mulVec x c - have h_ls : optimal (Vec.leastSquares a (-50)) t := by - refine ⟨h₁, ?_⟩ - intros y hy - simp [objFun, Vec.leastSquares] - exact h_opt c y hy h₂ - have ht_eq := vec_leastSquares_optimal_eq_mean a (-50) t h_ls - have hc2_eq : ‖c‖ ^ 2 = (1 / m) * ∑ i : Fin m, ‖c‖ ^ 2 := by - simp [Finset.sum_const] - field_simp; ring - have ht : t + ‖c‖ ^ 2 = (1 / m) * ∑ i, ‖(x i) - c‖ ^ 2 := by - rw [ht_eq]; dsimp - rw [hc2_eq, Finset.mul_sum, Finset.mul_sum, Finset.mul_sum, ← Finset.sum_add_distrib] - congr; funext i; - rw [← mul_add] - congr; simp [Vec.norm] - rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] - congr - - sorry - . exact h₂ - . intros c' x' h₁' h₂' - sorry + . let a := Vec.norm x ^ 2 - 2 * mulVec x c + have h_ls : optimal (Vec.leastSquares a) t := by + refine ⟨trivial, ?_⟩ + intros y _ + simp [objFun, Vec.leastSquares] + exact h c y + have ht_eq := vec_leastSquares_optimal_eq_mean a t h_ls + have hc2_eq : ‖c‖ ^ 2 = (1 / m) * ∑ i : Fin m, ‖c‖ ^ 2 := by + simp [Finset.sum_const] + field_simp; ring + have ht : t + ‖c‖ ^ 2 = (1 / m) * ∑ i, ‖(x i) - c‖ ^ 2 := by + rw [ht_eq]; dsimp + rw [hc2_eq, Finset.mul_sum, Finset.mul_sum, Finset.mul_sum, ← Finset.sum_add_distrib] + congr; funext i; + rw [← mul_add] + congr; simp [Vec.norm] + rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] + congr + have h_tc2_nonneg : 0 ≤ t + ‖c‖ ^ 2 := by + rw [ht] + apply mul_nonneg (by norm_num) + apply Finset.sum_nonneg + intros i _ + rw [rpow_two] + exact sq_nonneg _ + cases (lt_or_eq_of_le h_tc2_nonneg) with + | inl h_tc2_lt_zero => + convert h_tc2_lt_zero; simp + | inr h_tc2_eq_zero => + exfalso + rw [ht, zero_eq_mul] at h_tc2_eq_zero + rcases h_tc2_eq_zero with (hc | h_sum_eq_zero) + . simp at hc; linarith + rw [vec_squared_norm_error_eq_zero_iff] at h_sum_eq_zero + apply h_nontrivial + funext i + exact h_sum_eq_zero i + . 
intros c' x' _ + exact h c' x' + +#print fittingSphere₂ end FittingSphere From 7691205577e2a51cfa689a67388d1b822974b03c Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 15:26:37 -0500 Subject: [PATCH 12/22] feat: full proof for fitting sphere --- CvxLean/Examples/FittingSphere.lean | 61 +++++++++++++++++++++-------- 1 file changed, 45 insertions(+), 16 deletions(-) diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index 01bd8535..b987cfee 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -10,20 +10,52 @@ def leastSquares {n : ℕ} (a : Fin n → ℝ) := optimization (x : ℝ) minimize (∑ i, ((a i - x) ^ 2) : ℝ) -lemma leastSquares_optimal_eq_mean {n : ℕ} (a : Fin n → ℝ) (x : ℝ) - (h : (leastSquares a).optimal x) : x = (1 / n) * ∑ i, (a i) := by - simp [leastSquares] - sorry - -- if x is optimal then avg(x) = 1 - -- https://math.stackexchange.com/questions/2554243/understanding-the-mean-minimizes-the-mean-squared-error +@[reducible] +def mean {n : ℕ} (a : Fin n → ℝ) : ℝ := (1 / n) * ∑ i, (a i) + +lemma leastSquares_alt_objFun {n : ℕ} (hn : 0 < n) (a : Fin n → ℝ) (x : ℝ) : + (∑ i, ((a i - x) ^ 2)) = n * ((x - mean a) ^ 2 + mean (a ^ 2) - (mean a) ^ 2) := by + calc + _ = ∑ i, ((a i) ^ 2 - 2 * (a i) * x + (x ^ 2)) := by congr; funext i; simp; ring + _ = ∑ i, ((a i) ^ 2) - 2 * x * ∑ i, (a i) + n * (x ^ 2) := by + rw [Finset.sum_add_distrib, Finset.sum_sub_distrib, ← Finset.sum_mul, ← Finset.mul_sum] + simp [Finset.sum_const]; ring + _ = n * mean (a ^ 2) - 2 * x * n * mean a + n * (x ^ 2) := by + simp [mean]; field_simp; ring + _ = n * ((x - mean a) ^ 2 + mean (a ^ 2) - (mean a) ^ 2) := by + simp [mean]; field_simp; ring + +lemma leastSquares_optimal_eq_mean {n : ℕ} (hn : 0 < n) (a : Fin n → ℝ) (x : ℝ) + (h : (leastSquares a).optimal x) : x = mean a := by + simp [optimal, feasible, leastSquares] at h + replace h : ∀ y, + (x - mean a) ^ 2 + mean (a ^ 2) - (mean a) ^ 2 ≤ + (y - mean a) ^ 2 + mean (a ^ 2) - (mean a) ^ 2 := by + intros y + have hy := h y + have h_rw_x := leastSquares_alt_objFun hn a x + have h_rw_y := leastSquares_alt_objFun hn a y + simp only [rpow_two] at h_rw_x h_rw_y ⊢ + rw [h_rw_x, h_rw_y] at hy + rw [mul_le_mul_left (by positivity)] at hy + exact hy + replace h : ∀ y, (x - mean a) ^ 2 ≤ (y - mean a) ^ 2 := by + intros y + have hy := h y + rw [← add_sub, ← add_sub, add_le_add_iff_right] at hy + exact hy + have hmean := h (mean a) + simp at hmean + have hz := le_antisymm hmean (sq_nonneg _) + rwa [sq_eq_zero_iff, sub_eq_zero] at hz def Vec.leastSquares {n : ℕ} (a : Fin n → ℝ) := optimization (x : ℝ) minimize (Vec.sum ((a - Vec.const n x) ^ 2) : ℝ) -lemma vec_leastSquares_optimal_eq_mean {n : ℕ} (a : Fin n → ℝ) (x : ℝ) - (h : (Vec.leastSquares a).optimal x) : x = (1 / n) * ∑ i, (a i) := by - apply leastSquares_optimal_eq_mean a +lemma vec_leastSquares_optimal_eq_mean {n : ℕ} (hn : 0 < n) (a : Fin n → ℝ) (x : ℝ) + (h : (Vec.leastSquares a).optimal x) : x = mean a := by + apply leastSquares_optimal_eq_mean hn a simp [Vec.leastSquares, leastSquares, optimal, feasible] at h ⊢ intros y simp only [Vec.sum, Pi.pow_apply, Pi.sub_apply, Vec.const] at h @@ -122,16 +154,13 @@ equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fit #print fittingSphere₁ --- private lemma nonconvex__implies_relaxed_constraint (c : Fin n → ℝ) (t : ℝ) --- (h₁ : 1 / 10000 ≤ sqrt (t + ‖c‖ ^ 2)) (h₂ : ‖c‖ ^ 2 ≤ 50) : -50 ≤ t := by --- rw [le_sqrt' (by norm_num)] at h₁ --- linarith - relaxation 
red/fittingSphere₂ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere₁ n m x := by relaxation_step => apply Relaxation.weaken_constraint (cs' := fun _ => True) . rintro ⟨c, t⟩ _; trivial +-- This tells us that solving the relaxed problem is sufficient for optimal points if the solution +-- is non-trivial. lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) (h_nontrivial : x ≠ Vec.const m c) (h : (fittingSphere₂ n m x).optimal (c, t)) : (fittingSphere₁ n m x).optimal (c, t) := by @@ -143,12 +172,12 @@ lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) intros y _ simp [objFun, Vec.leastSquares] exact h c y - have ht_eq := vec_leastSquares_optimal_eq_mean a t h_ls + have ht_eq := vec_leastSquares_optimal_eq_mean hm a t h_ls have hc2_eq : ‖c‖ ^ 2 = (1 / m) * ∑ i : Fin m, ‖c‖ ^ 2 := by simp [Finset.sum_const] field_simp; ring have ht : t + ‖c‖ ^ 2 = (1 / m) * ∑ i, ‖(x i) - c‖ ^ 2 := by - rw [ht_eq]; dsimp + rw [ht_eq]; dsimp [mean] rw [hc2_eq, Finset.mul_sum, Finset.mul_sum, Finset.mul_sum, ← Finset.sum_add_distrib] congr; funext i; rw [← mul_add] From c0ba5a152d3b2feb9db48e06035af63fc2fe3a8f Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 15:46:35 -0500 Subject: [PATCH 13/22] doc: comments for fitting sphere --- CvxLean/Examples/FittingSphere.lean | 105 +++++++++++----------------- 1 file changed, 41 insertions(+), 64 deletions(-) diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index b987cfee..f46791a7 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -13,36 +13,37 @@ def leastSquares {n : ℕ} (a : Fin n → ℝ) := @[reducible] def mean {n : ℕ} (a : Fin n → ℝ) : ℝ := (1 / n) * ∑ i, (a i) +/-- It is useful to rewrite the sum of squares in the following way to prove +`leastSquares_optimal_eq_mean`, following Marty Cohen's answer in +https://math.stackexchange.com/questions/2554243. -/ lemma leastSquares_alt_objFun {n : ℕ} (hn : 0 < n) (a : Fin n → ℝ) (x : ℝ) : - (∑ i, ((a i - x) ^ 2)) = n * ((x - mean a) ^ 2 + mean (a ^ 2) - (mean a) ^ 2) := by + (∑ i, ((a i - x) ^ 2)) = n * ((x - mean a) ^ 2 + (mean (a ^ 2) - (mean a) ^ 2)) := by calc - _ = ∑ i, ((a i) ^ 2 - 2 * (a i) * x + (x ^ 2)) := by congr; funext i; simp; ring + -- 1) Σ (aᵢ - x)² = Σ (aᵢ² - 2aᵢx + x²) + _ = ∑ i, ((a i) ^ 2 - 2 * (a i) * x + (x ^ 2)) := by + congr; funext i; simp; ring + -- 2) Σ (aᵢ² - 2aᵢx + x²) = Σ aᵢ² - 2xΣ aᵢ + nx² _ = ∑ i, ((a i) ^ 2) - 2 * x * ∑ i, (a i) + n * (x ^ 2) := by rw [Finset.sum_add_distrib, Finset.sum_sub_distrib, ← Finset.sum_mul, ← Finset.mul_sum] simp [Finset.sum_const]; ring + -- 3) Σ aᵢ² - 2xΣ aᵢ + nx² = n{a²} - 2xn{a} + nx² _ = n * mean (a ^ 2) - 2 * x * n * mean a + n * (x ^ 2) := by simp [mean]; field_simp; ring - _ = n * ((x - mean a) ^ 2 + mean (a ^ 2) - (mean a) ^ 2) := by + -- 4) n{a²} - 2xn{a} + nx² = n((x - {a})² + ({a²} - {a}²)) + _ = n * ((x - mean a) ^ 2 + (mean (a ^ 2) - (mean a) ^ 2)) := by simp [mean]; field_simp; ring +/-- Key result about least squares: `x* = mean a`. 
-/ lemma leastSquares_optimal_eq_mean {n : ℕ} (hn : 0 < n) (a : Fin n → ℝ) (x : ℝ) (h : (leastSquares a).optimal x) : x = mean a := by simp [optimal, feasible, leastSquares] at h - replace h : ∀ y, - (x - mean a) ^ 2 + mean (a ^ 2) - (mean a) ^ 2 ≤ - (y - mean a) ^ 2 + mean (a ^ 2) - (mean a) ^ 2 := by + replace h : ∀ y, (x - mean a) ^ 2 ≤ (y - mean a) ^ 2 := by intros y have hy := h y have h_rw_x := leastSquares_alt_objFun hn a x have h_rw_y := leastSquares_alt_objFun hn a y simp only [rpow_two] at h_rw_x h_rw_y ⊢ - rw [h_rw_x, h_rw_y] at hy - rw [mul_le_mul_left (by positivity)] at hy - exact hy - replace h : ∀ y, (x - mean a) ^ 2 ≤ (y - mean a) ^ 2 := by - intros y - have hy := h y - rw [← add_sub, ← add_sub, add_le_add_iff_right] at hy + rw [h_rw_x, h_rw_y, mul_le_mul_left (by positivity), add_le_add_iff_right] at hy exact hy have hmean := h (mean a) simp at hmean @@ -53,6 +54,7 @@ def Vec.leastSquares {n : ℕ} (a : Fin n → ℝ) := optimization (x : ℝ) minimize (Vec.sum ((a - Vec.const n x) ^ 2) : ℝ) +/-- Same as `leastSquares_optimal_eq_mean` in vector notation. -/ lemma vec_leastSquares_optimal_eq_mean {n : ℕ} (hn : 0 < n) (a : Fin n → ℝ) (x : ℝ) (h : (Vec.leastSquares a).optimal x) : x = mean a := by apply leastSquares_optimal_eq_mean hn a @@ -61,40 +63,6 @@ lemma vec_leastSquares_optimal_eq_mean {n : ℕ} (hn : 0 < n) (a : Fin n → ℝ simp only [Vec.sum, Pi.pow_apply, Pi.sub_apply, Vec.const] at h exact h y -lemma squared_error_eq_zero_iff {n : ℕ} (a : Fin n → ℝ) (x : ℝ) : - ∑ i, (a i - x) ^ 2 = 0 ↔ ∀ i, a i = x := by - simp [rpow_two] - constructor - . intros h i - rw [Finset.sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] at h - have hi := h i (by simp) - rw [sq_eq_zero_iff, sub_eq_zero] at hi - exact hi - . intros h - rw [Finset.sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] - intros i _ - simp [sq_eq_zero_iff, sub_eq_zero] - exact h i - -lemma vec_squared_norm_error_eq_zero_iff {n m : ℕ} (a : Fin m → Fin n → ℝ) (x : Fin n → ℝ) : - ∑ i, ‖a i - x‖ ^ 2 = 0 ↔ ∀ i, a i = x := by - simp [rpow_two] - constructor - . intros h i - rw [Finset.sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] at h - have hi := h i (by simp) - rw [sq_eq_zero_iff] at hi - rw [@norm_eq_zero _ (PiLp.normedAddCommGroup _ _).toNormedAddGroup] at hi - rw [sub_eq_zero] at hi - exact hi - . intros h - rw [Finset.sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] - intros i _ - rw [sq_eq_zero_iff] - rw [@norm_eq_zero _ (PiLp.normedAddCommGroup _ _).toNormedAddGroup] - rw [sub_eq_zero] - exact h i - end LeastSquares namespace FittingSphere @@ -130,37 +98,43 @@ equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fit rename_vars [c, t] -- Clean up. conv_constr h₁ => dsimp - -- conv_constr h₂ => dsimp conv_obj => dsimp -- Rewrite objective. - rw_obj => - -- NOTE: this is why we need strict postivity of `r`, to be able to apply `sq_sqrt`. - have h' : 0 < t + ‖c‖ ^ 2 := sqrt_pos.mp <| h₁; - conv => - left; congr; congr; ext i; congr; simp; - rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] - rw [sq_sqrt (rpow_two _ ▸ le_of_lt h')] - ring_nf; simp equivalence_step => apply Equivalence.rewrite_objFun (g := fun (ct : (Fin n → ℝ) × ℝ) => Vec.sum (((Vec.norm x) ^ 2 - 2 * (Matrix.mulVec x ct.1) - Vec.const m ct.2) ^ 2)) . 
rintro ⟨c, t⟩ h dsimp at h ⊢; simp [Vec.sum, Vec.norm, Vec.const] - congr; funext i; congr 1; rw [add_sub, ← sub_eq_add_neg] - congr 2; simp [mulVec, inner, dotProduct, Finset.sum_mul, Finset.mul_sum] - congr; funext j; ring + congr; funext i; congr 1; + rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] + rw [sq_sqrt (rpow_two _ ▸ le_of_lt (sqrt_pos.mp <| h))] + simp [mulVec, inner, dotProduct] rename_vars [c, t] #print fittingSphere₁ -relaxation red/fittingSphere₂ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere₁ n m x := by +relaxation rel/fittingSphere₂ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere₁ n m x := by relaxation_step => apply Relaxation.weaken_constraint (cs' := fun _ => True) . rintro ⟨c, t⟩ _; trivial --- This tells us that solving the relaxed problem is sufficient for optimal points if the solution --- is non-trivial. +/-- If the squared error is zero, then `aᵢ = x`. -/ +lemma vec_squared_norm_error_eq_zero_iff {n m : ℕ} (a : Fin m → Fin n → ℝ) (x : Fin n → ℝ) : + ∑ i, ‖a i - x‖ ^ 2 = 0 ↔ ∀ i, a i = x := by + simp [rpow_two] + rw [Finset.sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] + constructor + . intros h i + have hi := h i (by simp) + rw [sq_eq_zero_iff, @norm_eq_zero _ (PiLp.normedAddCommGroup _ _).toNormedAddGroup] at hi + rwa [sub_eq_zero] at hi + . intros h i _ + rw [sq_eq_zero_iff, @norm_eq_zero _ (PiLp.normedAddCommGroup _ _).toNormedAddGroup, sub_eq_zero] + exact h i + +/-- This tells us that solving the relaxed problem is sufficient for optimal points if the solution +is non-trivial. -/ lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) (h_nontrivial : x ≠ Vec.const m c) (h : (fittingSphere₂ n m x).optimal (c, t)) : (fittingSphere₁ n m x).optimal (c, t) := by @@ -172,6 +146,7 @@ lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) intros y _ simp [objFun, Vec.leastSquares] exact h c y + -- Apply key result about least squares. have ht_eq := vec_leastSquares_optimal_eq_mean hm a t h_ls have hc2_eq : ‖c‖ ^ 2 = (1 / m) * ∑ i : Fin m, ‖c‖ ^ 2 := by simp [Finset.sum_const] @@ -179,11 +154,11 @@ lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) have ht : t + ‖c‖ ^ 2 = (1 / m) * ∑ i, ‖(x i) - c‖ ^ 2 := by rw [ht_eq]; dsimp [mean] rw [hc2_eq, Finset.mul_sum, Finset.mul_sum, Finset.mul_sum, ← Finset.sum_add_distrib] - congr; funext i; - rw [← mul_add] + congr; funext i; rw [← mul_add] congr; simp [Vec.norm] rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] congr + -- We use the result to establish that `t + ‖c‖ ^ 2` is non-negative. have h_tc2_nonneg : 0 ≤ t + ‖c‖ ^ 2 := by rw [ht] apply mul_nonneg (by norm_num) @@ -193,8 +168,10 @@ lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) exact sq_nonneg _ cases (lt_or_eq_of_le h_tc2_nonneg) with | inl h_tc2_lt_zero => + -- If it is positive, we are done. convert h_tc2_lt_zero; simp | inr h_tc2_eq_zero => + -- Otherwise, it contradicts the non-triviality assumption. 
exfalso rw [ht, zero_eq_mul] at h_tc2_eq_zero rcases h_tc2_eq_zero with (hc | h_sum_eq_zero) From 7cb6c4e07802d6f95736b66613e6ccb33f6516cb Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 17:58:16 -0500 Subject: [PATCH 14/22] feat: `Vec.norm` --- CvxLean/Command/Solve/Float/RealToFloat.lean | 11 ++++++++++- CvxLean/Lib/Math/Data/Vec.lean | 9 ++++++++- CvxLean/Tactic/DCP/AtomLibrary/Fns/Norm.lean | 18 ++++++++++++++++++ 3 files changed, 36 insertions(+), 2 deletions(-) diff --git a/CvxLean/Command/Solve/Float/RealToFloat.lean b/CvxLean/Command/Solve/Float/RealToFloat.lean index 2ccfe240..5399a168 100644 --- a/CvxLean/Command/Solve/Float/RealToFloat.lean +++ b/CvxLean/Command/Solve/Float/RealToFloat.lean @@ -210,6 +210,12 @@ addRealToFloat : @Real.sqrt := addRealToFloat : @Real.log := Float.log +def Float.norm {n : ℕ} (x : Fin n → Float) : Float := + Float.sqrt (Vec.Computable.sum (Vec.map (Float.pow · 2) x)) + +addRealToFloat (n) (i) : @Norm.norm.{0} (Fin n → ℝ) i := + @Float.norm n + addRealToFloat (i) : @OfScientific.ofScientific Real i := Float.ofScientific @@ -260,9 +266,12 @@ addRealToFloat (n) (i) (hn) : @Vec.sum.{0} ℝ i (Fin n) hn := addRealToFloat (n) (i) (hn) : @Matrix.sum (Fin n) Real hn i := @Matrix.Computable.sum n -addRealToFloat (n) : @Vec.cumsum n := +addRealToFloat (n) (i) : @Vec.cumsum.{0} ℝ i n := @Vec.Computable.cumsum n +addRealToFloat : @Vec.norm := + @Vec.Computable.norm + addRealToFloat (n) (i1) (i2) (i3) : @Matrix.dotProduct (Fin n) ℝ i1 i2 i3 := @Matrix.Computable.dotProduct n diff --git a/CvxLean/Lib/Math/Data/Vec.lean b/CvxLean/Lib/Math/Data/Vec.lean index cf6d0425..aff173f4 100644 --- a/CvxLean/Lib/Math/Data/Vec.lean +++ b/CvxLean/Lib/Math/Data/Vec.lean @@ -40,7 +40,7 @@ def sum {m : Type} [Fintype m] (x : m → α) : α := open FinsetInterval /-- See `CvxLean.Tactic.DCP.AtomLibrary.Fns.CumSum`. -/ -def cumsum (t : Fin n → ℝ) : Fin n → ℝ := +def cumsum (t : Fin n → α) : Fin n → α := fun i => if h : 0 < n then ∑ j in [[⟨0, h⟩, i]], t j else 0 end AddCommMonoid @@ -75,6 +75,10 @@ def huber : m → ℝ := fun i => Real.huber (x i) /-- See `CvxLean.Tactic.DCP.AtomLibrary.Fns.KLDiv`. -/ def klDiv : m → ℝ := fun i => Real.klDiv (x i) (y i) +/-- See `CvxLean.Tactic.DCP.AtomLibrary.Fns.Norm2`. -/ +def norm {n m : ℕ} (x : Fin n → Fin m → ℝ) : Fin n → ℝ := + fun i => ‖x i‖ + end Real @@ -116,6 +120,9 @@ def sum (x : Fin n → Float) : Float := def cumsum (x : Fin n → Float) : Fin n → Float := fun i => (((toArray x).toList.take (i.val + 1)).foldl Float.add 0) +def norm {n m : ℕ} (x : Fin n → Fin m → Float) : Fin n → Float := + fun i => Float.sqrt (sum (Vec.map (Float.pow · 2) (x i))) + end Computable end Vec diff --git a/CvxLean/Tactic/DCP/AtomLibrary/Fns/Norm.lean b/CvxLean/Tactic/DCP/AtomLibrary/Fns/Norm.lean index 43692178..18c06dce 100644 --- a/CvxLean/Tactic/DCP/AtomLibrary/Fns/Norm.lean +++ b/CvxLean/Tactic/DCP/AtomLibrary/Fns/Norm.lean @@ -45,4 +45,22 @@ vconditionElimination lemma norm2₂_eq_norm2 {x y : ℝ} : ‖![x, y]‖ = sqrt (x ^ 2 + y ^ 2) := by simp [Norm.norm, sqrt_eq_rpow] +declare_atom Vec.norm [convex] (n : Nat)& (m : Nat)& (x : Fin n → Fin m → ℝ)? 
: Vec.norm x := +vconditions +implementationVars (t : Fin n → ℝ) +implementationObjective (t) +implementationConstraints + (c : Vec.soCone t x) +solution (t := Vec.norm x) +solutionEqualsAtom by rfl +feasibility + (c : by + unfold Vec.soCone soCone; dsimp; + intros i; simp [Vec.norm, Norm.norm, sqrt_eq_rpow]) +optimality by + unfold Vec.soCone soCone at c + intros i; simp [Vec.norm, Norm.norm, sqrt_eq_rpow] at c ⊢ + exact c i +vconditionElimination + end CvxLean From 9d83ad18e825b1cf7fbb57456a2c6ad71b5b24f9 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 18:04:42 -0500 Subject: [PATCH 15/22] feat: real power default for vectors too --- CvxLean/Examples/FittingSphere.lean | 91 +++++++++++++++++++++-------- CvxLean/Lib/Math/Data/Real.lean | 3 + 2 files changed, 69 insertions(+), 25 deletions(-) diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index f46791a7..2dbc224f 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -2,7 +2,7 @@ import CvxLean noncomputable section -open CvxLean Minimization Real BigOperators Matrix +open CvxLean Minimization Real BigOperators Matrix Finset section LeastSquares @@ -22,14 +22,13 @@ lemma leastSquares_alt_objFun {n : ℕ} (hn : 0 < n) (a : Fin n → ℝ) (x : -- 1) Σ (aᵢ - x)² = Σ (aᵢ² - 2aᵢx + x²) _ = ∑ i, ((a i) ^ 2 - 2 * (a i) * x + (x ^ 2)) := by congr; funext i; simp; ring - -- 2) Σ (aᵢ² - 2aᵢx + x²) = Σ aᵢ² - 2xΣ aᵢ + nx² + -- 2) ... = Σ aᵢ² - 2xΣ aᵢ + nx² _ = ∑ i, ((a i) ^ 2) - 2 * x * ∑ i, (a i) + n * (x ^ 2) := by - rw [Finset.sum_add_distrib, Finset.sum_sub_distrib, ← Finset.sum_mul, ← Finset.mul_sum] - simp [Finset.sum_const]; ring - -- 3) Σ aᵢ² - 2xΣ aᵢ + nx² = n{a²} - 2xn{a} + nx² + rw [sum_add_distrib, sum_sub_distrib, ← sum_mul, ← mul_sum]; simp [sum_const]; ring + -- 3) ... = n{a²} - 2xn{a} + nx² _ = n * mean (a ^ 2) - 2 * x * n * mean a + n * (x ^ 2) := by simp [mean]; field_simp; ring - -- 4) n{a²} - 2xn{a} + nx² = n((x - {a})² + ({a²} - {a}²)) + -- 4) ... = n((x - {a})² + ({a²} - {a}²)) _ = n * ((x - mean a) ^ 2 + (mean (a ^ 2) - (mean a) ^ 2)) := by simp [mean]; field_simp; ring @@ -43,8 +42,7 @@ lemma leastSquares_optimal_eq_mean {n : ℕ} (hn : 0 < n) (a : Fin n → ℝ) (x have h_rw_x := leastSquares_alt_objFun hn a x have h_rw_y := leastSquares_alt_objFun hn a y simp only [rpow_two] at h_rw_x h_rw_y ⊢ - rw [h_rw_x, h_rw_y, mul_le_mul_left (by positivity), add_le_add_iff_right] at hy - exact hy + rwa [h_rw_x, h_rw_y, mul_le_mul_left (by positivity), add_le_add_iff_right] at hy have hmean := h (mean a) simp at hmean have hz := le_antisymm hmean (sq_nonneg _) @@ -60,7 +58,7 @@ lemma vec_leastSquares_optimal_eq_mean {n : ℕ} (hn : 0 < n) (a : Fin n → ℝ apply leastSquares_optimal_eq_mean hn a simp [Vec.leastSquares, leastSquares, optimal, feasible] at h ⊢ intros y - simp only [Vec.sum, Pi.pow_apply, Pi.sub_apply, Vec.const] at h + simp only [Vec.sum, Pi.pow_apply, Pi.sub_apply, Vec.const, rpow_two] at h exact h y end LeastSquares @@ -87,9 +85,7 @@ instance : ChangeOfVariables fun (ct : (Fin n → ℝ) × ℝ) => (ct.1, sqrt (c condition := fun (_, t) => 0 ≤ t, property := fun ⟨c, t⟩ h => by simp [sqrt_sq h] } -def Vec.norm {m n} (x : Fin m → Fin n → ℝ) : Fin m → ℝ := fun i => ‖x i‖ - -equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere n m x := by +equivalence eqv/fittingSphereT (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere n m x := by -- Change of variables. 
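  -- The new variable is `t = r ^ 2 - ‖c‖ ^ 2` (so `r = sqrt (t + ‖c‖ ^ 2)`, as in the
  -- `ChangeOfVariables` instance above); with it, each residual `‖x i - c‖ ^ 2 - r ^ 2`
  -- unfolds to the affine expression `‖x i‖ ^ 2 - 2 * (mulVec x c) i - t` used below.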
equivalence_step => apply ChangeOfVariables.toEquivalence @@ -112,9 +108,9 @@ equivalence eqv/fittingSphere₁ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fit simp [mulVec, inner, dotProduct] rename_vars [c, t] -#print fittingSphere₁ +#print fittingSphereT -relaxation rel/fittingSphere₂ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere₁ n m x := by +relaxation rel/fittingSphereConvex (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphereT n m x := by relaxation_step => apply Relaxation.weaken_constraint (cs' := fun _ => True) . rintro ⟨c, t⟩ _; trivial @@ -123,7 +119,7 @@ relaxation rel/fittingSphere₂ (n m : ℕ) (x : Fin m → Fin n → ℝ) : fitt lemma vec_squared_norm_error_eq_zero_iff {n m : ℕ} (a : Fin m → Fin n → ℝ) (x : Fin n → ℝ) : ∑ i, ‖a i - x‖ ^ 2 = 0 ↔ ∀ i, a i = x := by simp [rpow_two] - rw [Finset.sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] + rw [sum_eq_zero_iff_of_nonneg (fun _ _ => sq_nonneg _)] constructor . intros h i have hi := h i (by simp) @@ -137,23 +133,23 @@ lemma vec_squared_norm_error_eq_zero_iff {n m : ℕ} (a : Fin m → Fin n → is non-trivial. -/ lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) (h_nontrivial : x ≠ Vec.const m c) - (h : (fittingSphere₂ n m x).optimal (c, t)) : (fittingSphere₁ n m x).optimal (c, t) := by - simp [fittingSphere₁, fittingSphere₂, optimal, feasible] at h ⊢ + (h_opt : (fittingSphereConvex n m x).optimal (c, t)) : (fittingSphereT n m x).optimal (c, t) := by + simp [fittingSphereT, fittingSphereConvex, optimal, feasible] at h_opt ⊢ constructor . let a := Vec.norm x ^ 2 - 2 * mulVec x c have h_ls : optimal (Vec.leastSquares a) t := by refine ⟨trivial, ?_⟩ intros y _ simp [objFun, Vec.leastSquares] - exact h c y - -- Apply key result about least squares. + exact h_opt c y + -- Apply key result about least squares to `a` and `t`. have ht_eq := vec_leastSquares_optimal_eq_mean hm a t h_ls have hc2_eq : ‖c‖ ^ 2 = (1 / m) * ∑ i : Fin m, ‖c‖ ^ 2 := by - simp [Finset.sum_const] + simp [sum_const] field_simp; ring have ht : t + ‖c‖ ^ 2 = (1 / m) * ∑ i, ‖(x i) - c‖ ^ 2 := by rw [ht_eq]; dsimp [mean] - rw [hc2_eq, Finset.mul_sum, Finset.mul_sum, Finset.mul_sum, ← Finset.sum_add_distrib] + rw [hc2_eq, mul_sum, mul_sum, mul_sum, ← sum_add_distrib] congr; funext i; rw [← mul_add] congr; simp [Vec.norm] rw [@norm_sub_sq ℝ (Fin n → ℝ) _ (PiLp.normedAddCommGroup _ _) (PiLp.innerProductSpace _)] @@ -162,7 +158,7 @@ lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) have h_tc2_nonneg : 0 ≤ t + ‖c‖ ^ 2 := by rw [ht] apply mul_nonneg (by norm_num) - apply Finset.sum_nonneg + apply sum_nonneg intros i _ rw [rpow_two] exact sq_nonneg _ @@ -181,9 +177,54 @@ lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) funext i exact h_sum_eq_zero i . intros c' x' _ - exact h c' x' - -#print fittingSphere₂ + exact h_opt c' x' + +#print fittingSphereConvex + +-- We proceed to solve the problem on a concrete example. 
+-- https://github.com/cvxgrp/cvxbook_additional_exercises/blob/main/python/sphere_fit_data.py + +def nₚ := 2 + +def mₚ := 50 + +def xₚ : Fin mₚ → Fin nₚ → ℝ := Matrix.transpose <| ![ + ![1.824183228637652032e+00, 1.349093690455489103e+00, 6.966316403935147727e-01, + 7.599387854623529392e-01, 2.388321695850912363e+00, 8.651370608981923116e-01, + 1.863922545015865406e+00, 7.099743941474848663e-01, 6.005484882320809570e-01, + 4.561429569892232472e-01, 5.328296545713475663e-01, 2.138547819234526415e+00, + 1.906676474276197464e+00, 1.015547309536922516e+00, 8.765948388006337133e-01, + 1.648147347399247842e+00, 1.027902202451572045e+00, 2.145586297520478691e+00, + 1.793440421753045744e+00, 1.020535583041398908e+00, 8.977911075271942654e-01, + 1.530480229262339398e+00, 2.478088034137528872e-01, 2.617415807793897820e+00, + 2.081978553098443374e+00, 1.891226687205936452e+00, 8.222497927065576251e-01, + 5.803514604868882376e-01, 1.158670193449639063e+00, 6.016685032455900695e-01, + 5.605410828151705660e-01, 2.508815467550573164e+00, 2.230201413385580977e+00, + 1.170848897912992514e+00, 2.256355929901105561e+00, 6.686991510936428629e-01, + 2.040269595792217672e+00, 3.634166812924328749e-01, 5.418647611079159265e-01, + 6.631470058399455692e-01, 4.286142597532469622e-01, 2.155925078996823618e+00, + 2.379380016960549682e+00, 6.343212414048013947e-01, 1.469076407947448981e+00, + 1.225322035289937439e+00, 1.467602887401966871e+00, 9.345319187253748883e-01, + 1.985592768641736505e+00, 2.106896115090134636e+00], + ![-9.644136284187876385e-01, 1.069547315003422927e+00, 6.733229334437943470e-01, + 7.788072961810316164e-01, -9.467465278344706636e-01, -8.591303443863639311e-01, + 1.279527420871080956e+00, 5.314829019311283487e-01, 6.975676079749143499e-02, + -4.641873429414754559e-01, -2.094571396598311763e-01, -8.003479827938377866e-01, + 6.135280782546607137e-01, -9.961307468791747999e-01, -8.765215480412106297e-01, + 9.655406812422813179e-01, 1.011230180540185541e+00, 6.105416770440197372e-01, + 9.486552370654932620e-01, -9.863592657836954825e-01, 7.695327845100754516e-01, + -1.060072365810699413e+00, -4.041043465424410952e-01, -2.352952920283236105e-01, + 7.560391050507236921e-01, -9.454246095204003053e-01, -5.303145312191936966e-01, + 5.979590038743245461e-01, -1.154309511133019717e+00, -6.123184171955468047e-01, + -1.464683782538583889e-01, -1.839128688968104386e-01, 4.250070477845909744e-01, + 8.861864983476224200e-01, 3.927648421593328276e-01, -6.726102374256350824e-01, + -1.047252884197514833e+00, 1.825096825995130845e-01, -4.482373962742914886e-01, + 5.115625649313135792e-01, 7.846201103116770548e-02, 6.006325432819290544e-01, + -5.710733714464664157e-01, 4.725559971890586075e-01, -8.440290321502940118e-01, + -1.003920890712479475e+00, -1.067089412136528637e+00, 7.909281966910661765e-01, + -1.059509163675931065e+00, -7.136351632325785843e-01]] + +solve fittingSphereConvex nₚ mₚ xₚ end FittingSphere diff --git a/CvxLean/Lib/Math/Data/Real.lean b/CvxLean/Lib/Math/Data/Real.lean index ea0dd3f7..a333d225 100644 --- a/CvxLean/Lib/Math/Data/Real.lean +++ b/CvxLean/Lib/Math/Data/Real.lean @@ -11,6 +11,9 @@ some components of our automated procedures such as pattern-matching in `dcp` or @[default_instance] noncomputable instance (priority := high) : HPow ℝ ℝ ℝ := by infer_instance +@[default_instance] +noncomputable instance (priority := high) : HPow (Fin n → ℝ) ℝ (Fin n → ℝ) := by infer_instance + section Functions /-! 
From e15b467e93161ecb8d695441a327cfbe32577cbc Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 18:13:31 -0500 Subject: [PATCH 16/22] feat: trivial cone --- CvxLean/Command/Solve/Float/Coeffs.lean | 2 ++ CvxLean/Tactic/DCP/AtomLibrary/Sets/Cones.lean | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/CvxLean/Command/Solve/Float/Coeffs.lean b/CvxLean/Command/Solve/Float/Coeffs.lean index c8ba9f35..1826989e 100644 --- a/CvxLean/Command/Solve/Float/Coeffs.lean +++ b/CvxLean/Command/Solve/Float/Coeffs.lean @@ -255,6 +255,8 @@ unsafe def determineCoeffsFromExpr (minExpr : Meta.MinimizationExpr) : for c in cs do trace[Meta.debug] "Coeffs going through constraint {c}." match Expr.consumeMData c with + | .const ``True _ => do + idx := idx + 1 | .app (.const ``Real.zeroCone _) e => do let e ← realToFloat e let res ← determineScalarCoeffsAux e p floatDomain diff --git a/CvxLean/Tactic/DCP/AtomLibrary/Sets/Cones.lean b/CvxLean/Tactic/DCP/AtomLibrary/Sets/Cones.lean index 9441c646..a7e7d569 100644 --- a/CvxLean/Tactic/DCP/AtomLibrary/Sets/Cones.lean +++ b/CvxLean/Tactic/DCP/AtomLibrary/Sets/Cones.lean @@ -77,4 +77,12 @@ optimality le_refl _ end PSDCone +/- Trivial cone. -/ +section Trivial + +declare_atom trivial [cone] : True := +optimality le_refl _ + +end Trivial + end CvxLean From 1240359b7b3dcf1ff90f42c353635350e70ae5c5 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 18:18:40 -0500 Subject: [PATCH 17/22] feat: `coeffs` for vector second-order cone --- CvxLean/Command/Solve/Float/Coeffs.lean | 26 ++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/CvxLean/Command/Solve/Float/Coeffs.lean b/CvxLean/Command/Solve/Float/Coeffs.lean index 1826989e..4b0017ae 100644 --- a/CvxLean/Command/Solve/Float/Coeffs.lean +++ b/CvxLean/Command/Solve/Float/Coeffs.lean @@ -3,6 +3,19 @@ import CvxLean.Lib.Cones.All import CvxLean.Command.Solve.Float.ProblemData import CvxLean.Command.Solve.Float.RealToFloat + +/-! +# Extract coefficients from problem to generate problem data + +TODO + +## TODO + +* This is probably a big source of inefficency for the `solve` command. We should come up with + a better way to extract the numerical values from the Lean expressions. +* A first step is to not `unrollVectors` and turn thos expressions into floats directly. +-/ + namespace CvxLean open Lean Meta Elab Tactic @@ -173,13 +186,24 @@ unsafe def unrollVectors (constraints : Expr) : MetaM (Array Expr) := do res := res.push (← mkAppM ``Real.expCone #[ai, bi, ci]) -- Vector second-order cone. | .app (.app (.app (.app (.app (.const ``Real.Vec.soCone _) - exprN@(.app (.const ``Fin _) n)) (.app (.const ``Fin _) m)) finTypeN) t) X => + exprN@(.app (.const ``Fin _) _n)) (.app (.const ``Fin _) m)) finTypeN) t) X => let m : Nat ← evalExpr Nat (mkConst ``Nat) m for i in [:m] do let idxExpr ← mkFinIdxExpr i m let ti := mkApp t idxExpr let Xi := mkApp X idxExpr res := res.push (mkAppN (mkConst ``Real.soCone) #[exprN, finTypeN, ti, Xi]) + -- Vector rotated second-order cone. + -- Vector second-order cone. 
+ | .app (.app (.app (.app (.app (.app (.const ``Real.Vec.rotatedSoCone _) + exprN@(.app (.const ``Fin _) _n)) (.app (.const ``Fin _) m)) finTypeN) v) w) X => + let m : Nat ← evalExpr Nat (mkConst ``Nat) m + for i in [:m] do + let idxExpr ← mkFinIdxExpr i m + let vi := mkApp v idxExpr + let wi := mkApp w idxExpr + let Xi := mkApp X idxExpr + res := res.push (mkAppN (mkConst ``Real.rotatedSoCone) #[exprN, finTypeN, vi, wi, Xi]) | _ => res := res.push c From 58843d4314755bcc84e80ae07b3e8d5fcd923c82 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 18:42:47 -0500 Subject: [PATCH 18/22] fix: do not touch index for trivial cones --- CvxLean/Command/Solve/Float/Coeffs.lean | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CvxLean/Command/Solve/Float/Coeffs.lean b/CvxLean/Command/Solve/Float/Coeffs.lean index 4b0017ae..a89e5d54 100644 --- a/CvxLean/Command/Solve/Float/Coeffs.lean +++ b/CvxLean/Command/Solve/Float/Coeffs.lean @@ -280,7 +280,7 @@ unsafe def determineCoeffsFromExpr (minExpr : Meta.MinimizationExpr) : trace[Meta.debug] "Coeffs going through constraint {c}." match Expr.consumeMData c with | .const ``True _ => do - idx := idx + 1 + pure () | .app (.const ``Real.zeroCone _) e => do let e ← realToFloat e let res ← determineScalarCoeffsAux e p floatDomain From c27ccbe14d388c0c1564e07bcbcc7719d154f013 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 18:42:59 -0500 Subject: [PATCH 19/22] feat: vector powers in real-to-float --- CvxLean/Command/Solve/Float/RealToFloat.lean | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/CvxLean/Command/Solve/Float/RealToFloat.lean b/CvxLean/Command/Solve/Float/RealToFloat.lean index 5399a168..6ad07d79 100644 --- a/CvxLean/Command/Solve/Float/RealToFloat.lean +++ b/CvxLean/Command/Solve/Float/RealToFloat.lean @@ -186,9 +186,15 @@ addRealToFloat (i) : @HPow.hPow Real Nat Real i := addRealToFloat (i) : @HPow.hPow Real Real Real i := fun f n => Float.pow f n -addRealToFloat (i) : @instHPow Real i := +addRealToFloat (β) (i) : @instHPow Real β i := @HPow.mk Float Float Float Float.pow +addRealToFloat (n) (i) : @HPow.hPow (Fin n → Real) Real (Fin n → Real) i := + fun (x : Fin n → Float) (p : Float) (i : Fin n) => Float.pow (x i) p + +addRealToFloat (n) (β) (i) : @instHPow (Fin n → Real) β i := + @HPow.mk (Fin n → Float) Float (Fin n → Float) (fun x p i => Float.pow (x i) p) + addRealToFloat (i) : @LE.le Real i := Float.le From ba30d9e92f4856a083621ed2672b3de578069311 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 18:47:27 -0500 Subject: [PATCH 20/22] feat: solving fitting sphere to data --- CvxLean/Command/Solve/Float/Coeffs.lean | 6 ++- CvxLean/Examples/FittingSphere.lean | 49 ++++++++++--------------- 2 files changed, 23 insertions(+), 32 deletions(-) diff --git a/CvxLean/Command/Solve/Float/Coeffs.lean b/CvxLean/Command/Solve/Float/Coeffs.lean index a89e5d54..a7f26e2d 100644 --- a/CvxLean/Command/Solve/Float/Coeffs.lean +++ b/CvxLean/Command/Solve/Float/Coeffs.lean @@ -278,9 +278,10 @@ unsafe def determineCoeffsFromExpr (minExpr : Meta.MinimizationExpr) : let mut idx := 0 for c in cs do trace[Meta.debug] "Coeffs going through constraint {c}." 
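+    -- A constraint that has been reduced to `True` contributes no cone data, so we track it
+    -- here and avoid pushing a new group index for it after the match below.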
+ let mut isTrivial := false match Expr.consumeMData c with | .const ``True _ => do - pure () + isTrivial := true | .app (.const ``Real.zeroCone _) e => do let e ← realToFloat e let res ← determineScalarCoeffsAux e p floatDomain @@ -357,7 +358,8 @@ unsafe def determineCoeffsFromExpr (minExpr : Meta.MinimizationExpr) : idx := idx + 1 | _ => throwError "No match: {c}." -- New group, add idx. - sections := sections.push idx + if !isTrivial then + sections := sections.push idx return (data, sections) let (objectiveDataA, objectiveDataB) := objectiveData diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index 2dbc224f..1ecac59e 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -85,7 +85,9 @@ instance : ChangeOfVariables fun (ct : (Fin n → ℝ) × ℝ) => (ct.1, sqrt (c condition := fun (_, t) => 0 ≤ t, property := fun ⟨c, t⟩ h => by simp [sqrt_sq h] } -equivalence eqv/fittingSphereT (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere n m x := by +set_option trace.Meta.debug true + +equivalence' eqv/fittingSphereT (n m : ℕ) (x : Fin m → Fin n → ℝ) : fittingSphere n m x := by -- Change of variables. equivalence_step => apply ChangeOfVariables.toEquivalence @@ -184,48 +186,35 @@ lemma optimal_relaxed_implies_optimal (hm : 0 < m) (c : Fin n → ℝ) (t : ℝ) -- We proceed to solve the problem on a concrete example. -- https://github.com/cvxgrp/cvxbook_additional_exercises/blob/main/python/sphere_fit_data.py +@[optimization_param] def nₚ := 2 -def mₚ := 50 +@[optimization_param] +def mₚ := 10 +@[optimization_param] def xₚ : Fin mₚ → Fin nₚ → ℝ := Matrix.transpose <| ![ ![1.824183228637652032e+00, 1.349093690455489103e+00, 6.966316403935147727e-01, 7.599387854623529392e-01, 2.388321695850912363e+00, 8.651370608981923116e-01, 1.863922545015865406e+00, 7.099743941474848663e-01, 6.005484882320809570e-01, - 4.561429569892232472e-01, 5.328296545713475663e-01, 2.138547819234526415e+00, - 1.906676474276197464e+00, 1.015547309536922516e+00, 8.765948388006337133e-01, - 1.648147347399247842e+00, 1.027902202451572045e+00, 2.145586297520478691e+00, - 1.793440421753045744e+00, 1.020535583041398908e+00, 8.977911075271942654e-01, - 1.530480229262339398e+00, 2.478088034137528872e-01, 2.617415807793897820e+00, - 2.081978553098443374e+00, 1.891226687205936452e+00, 8.222497927065576251e-01, - 5.803514604868882376e-01, 1.158670193449639063e+00, 6.016685032455900695e-01, - 5.605410828151705660e-01, 2.508815467550573164e+00, 2.230201413385580977e+00, - 1.170848897912992514e+00, 2.256355929901105561e+00, 6.686991510936428629e-01, - 2.040269595792217672e+00, 3.634166812924328749e-01, 5.418647611079159265e-01, - 6.631470058399455692e-01, 4.286142597532469622e-01, 2.155925078996823618e+00, - 2.379380016960549682e+00, 6.343212414048013947e-01, 1.469076407947448981e+00, - 1.225322035289937439e+00, 1.467602887401966871e+00, 9.345319187253748883e-01, - 1.985592768641736505e+00, 2.106896115090134636e+00], + 4.561429569892232472e-01], ![-9.644136284187876385e-01, 1.069547315003422927e+00, 6.733229334437943470e-01, 7.788072961810316164e-01, -9.467465278344706636e-01, -8.591303443863639311e-01, 1.279527420871080956e+00, 5.314829019311283487e-01, 6.975676079749143499e-02, - -4.641873429414754559e-01, -2.094571396598311763e-01, -8.003479827938377866e-01, - 6.135280782546607137e-01, -9.961307468791747999e-01, -8.765215480412106297e-01, - 9.655406812422813179e-01, 1.011230180540185541e+00, 6.105416770440197372e-01, - 9.486552370654932620e-01, -9.863592657836954825e-01, 
7.695327845100754516e-01, - -1.060072365810699413e+00, -4.041043465424410952e-01, -2.352952920283236105e-01, - 7.560391050507236921e-01, -9.454246095204003053e-01, -5.303145312191936966e-01, - 5.979590038743245461e-01, -1.154309511133019717e+00, -6.123184171955468047e-01, - -1.464683782538583889e-01, -1.839128688968104386e-01, 4.250070477845909744e-01, - 8.861864983476224200e-01, 3.927648421593328276e-01, -6.726102374256350824e-01, - -1.047252884197514833e+00, 1.825096825995130845e-01, -4.482373962742914886e-01, - 5.115625649313135792e-01, 7.846201103116770548e-02, 6.006325432819290544e-01, - -5.710733714464664157e-01, 4.725559971890586075e-01, -8.440290321502940118e-01, - -1.003920890712479475e+00, -1.067089412136528637e+00, 7.909281966910661765e-01, - -1.059509163675931065e+00, -7.136351632325785843e-01]] + -4.641873429414754559e-01]] + +-- We use the `solve` command on the data above. + +set_option maxHeartbeats 1000000 solve fittingSphereConvex nₚ mₚ xₚ +-- Finally, we recover the solution to the original problem. + +#eval fittingSphereConvex.solution + +def sol := eqv.backward_map + end FittingSphere end From 61b0cc93fed807be3a489760db7f36bbfa5ab9e7 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 19:30:30 -0500 Subject: [PATCH 21/22] fix: handling metavariables in real-to-float --- CvxLean/Command/Solve/Float/RealToFloat.lean | 23 +++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/CvxLean/Command/Solve/Float/RealToFloat.lean b/CvxLean/Command/Solve/Float/RealToFloat.lean index 6ad07d79..34630623 100644 --- a/CvxLean/Command/Solve/Float/RealToFloat.lean +++ b/CvxLean/Command/Solve/Float/RealToFloat.lean @@ -23,16 +23,29 @@ partial def realToFloat (e : Expr) : MetaM Expr := do let translations ← discrTree.getMatch e for translation in translations do let (mvars, _, pattern) ← lambdaMetaTelescope translation.real + trace[Meta.debug] (s!"`real-to-float` trying:\n{pattern}\n{e}") + trace[Meta.debug] (s!"`real-to-float` mvars:\n{mvars}") if ← isDefEq pattern e then - -- TODO: Search for conditions. + -- TODO: Search for conditions. 
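      -- At this point `isDefEq` has assigned the pattern's metavariables by unifying with `e`,
      -- so we instantiate them and feed the results to the float-side translation.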
let args ← mvars.mapM instantiateMVars + trace[Meta.debug] (s!"`real-to-float` matched:\n{pattern}\n{e}") return mkAppNBeta translation.float args else trace[Meta.debug] "`real-to-float` error: no match for \n{pattern} \n{e}" match e with | Expr.app a b => return mkApp (← realToFloat a) (← realToFloat b) - | Expr.lam n ty b d => return mkLambda n d (← realToFloat ty) (← realToFloat b) - | Expr.forallE n ty b d => return mkForall n d (← realToFloat ty) (← realToFloat b) + | Expr.lam n ty b d => do + withLocalDecl n d (← realToFloat ty) fun fvar => do + let b := b.instantiate1 fvar + let bF ← realToFloat b + mkLambdaFVars #[fvar] bF + -- return mkLambda n d (← realToFloat ty) (← realToFloat b) + | Expr.forallE n ty b d => do + withLocalDecl n d (← realToFloat ty) fun fvar => do + let b := b.instantiate1 fvar + let bF ← realToFloat b + mkForallFVars #[fvar] bF + -- return mkForall n d (← realToFloat ty) (← realToFloat b) | Expr.mdata m e => return mkMData m (← realToFloat e) | Expr.letE n ty t b _ => return mkLet n (← realToFloat ty) (← realToFloat t) (← realToFloat b) | Expr.proj typeName idx struct => return mkProj typeName idx (← realToFloat struct) @@ -183,7 +196,7 @@ addRealToFloat (i) : @instHDiv Real i := addRealToFloat (i) : @HPow.hPow Real Nat Real i := fun f n => Float.pow f (Float.ofNat n) -addRealToFloat (i) : @HPow.hPow Real Real Real i := +addRealToFloat (i) : @HPow.hPow.{0, 0, 0} Real Real Real i := fun f n => Float.pow f n addRealToFloat (β) (i) : @instHPow Real β i := @@ -217,7 +230,7 @@ addRealToFloat : @Real.log := Float.log def Float.norm {n : ℕ} (x : Fin n → Float) : Float := - Float.sqrt (Vec.Computable.sum (Vec.map (Float.pow · 2) x)) + Float.sqrt (Vec.Computable.sum (fun i => (Float.pow (x i) 2))) addRealToFloat (n) (i) : @Norm.norm.{0} (Fin n → ℝ) i := @Float.norm n From 668649e03e602c09fe8019cb9aad1d773f05f1d1 Mon Sep 17 00:00:00 2001 From: Ramon Fernandez Mir Date: Sun, 28 Jan 2024 19:47:24 -0500 Subject: [PATCH 22/22] feat: full solution to fitting sphere to data --- CvxLean/Command/Solve/Float/RealToFloat.lean | 3 -- CvxLean/Examples/FittingSphere.lean | 6 ++-- CvxLean/Examples/fitting_sphere.py | 32 ++++++++++++++++++++ 3 files changed, 36 insertions(+), 5 deletions(-) create mode 100644 CvxLean/Examples/fitting_sphere.py diff --git a/CvxLean/Command/Solve/Float/RealToFloat.lean b/CvxLean/Command/Solve/Float/RealToFloat.lean index 34630623..c8981bdd 100644 --- a/CvxLean/Command/Solve/Float/RealToFloat.lean +++ b/CvxLean/Command/Solve/Float/RealToFloat.lean @@ -23,12 +23,9 @@ partial def realToFloat (e : Expr) : MetaM Expr := do let translations ← discrTree.getMatch e for translation in translations do let (mvars, _, pattern) ← lambdaMetaTelescope translation.real - trace[Meta.debug] (s!"`real-to-float` trying:\n{pattern}\n{e}") - trace[Meta.debug] (s!"`real-to-float` mvars:\n{mvars}") if ← isDefEq pattern e then -- TODO: Search for conditions. let args ← mvars.mapM instantiateMVars - trace[Meta.debug] (s!"`real-to-float` matched:\n{pattern}\n{e}") return mkAppNBeta translation.float args else trace[Meta.debug] "`real-to-float` error: no match for \n{pattern} \n{e}" diff --git a/CvxLean/Examples/FittingSphere.lean b/CvxLean/Examples/FittingSphere.lean index 1ecac59e..9f750a8e 100644 --- a/CvxLean/Examples/FittingSphere.lean +++ b/CvxLean/Examples/FittingSphere.lean @@ -211,9 +211,11 @@ solve fittingSphereConvex nₚ mₚ xₚ -- Finally, we recover the solution to the original problem. 
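-- `eqv.backward_map` undoes the change of variables, sending a point `(c, t)` of the convex
-- problem back to `(c, sqrt (t + ‖c‖ ^ 2))`; combined with `optimal_relaxed_implies_optimal`,
-- this justifies reading off a solution of the original problem, provided the data points do
-- not all coincide with the center `c`.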
-#eval fittingSphereConvex.solution +def sol := eqv.backward_map nₚ mₚ xₚ.float fittingSphereConvex.solution -def sol := eqv.backward_map +#print eqv.backward_map + +#eval sol -- (![1.664863, 0.031932], 1.159033) end FittingSphere diff --git a/CvxLean/Examples/fitting_sphere.py b/CvxLean/Examples/fitting_sphere.py new file mode 100644 index 00000000..025fb7fc --- /dev/null +++ b/CvxLean/Examples/fitting_sphere.py @@ -0,0 +1,32 @@ +import numpy as np +import cvxpy as cp + +n = 2 + +m = 10 + +x = np.array([[ + 1.824183228637652032e+00, 1.349093690455489103e+00, 6.966316403935147727e-01, + 7.599387854623529392e-01, 2.388321695850912363e+00, 8.651370608981923116e-01, + 1.863922545015865406e+00, 7.099743941474848663e-01, 6.005484882320809570e-01, + 4.561429569892232472e-01], [ + -9.644136284187876385e-01, 1.069547315003422927e+00, 6.733229334437943470e-01, + 7.788072961810316164e-01, -9.467465278344706636e-01, -8.591303443863639311e-01, + 1.279527420871080956e+00, 5.314829019311283487e-01, 6.975676079749143499e-02, + -4.641873429414754559e-01]]).T + +c = cp.Variable((n)) +t = cp.Variable(1) + +p = cp.Problem( + cp.Minimize( + cp.sum([((cp.norm(x[i]) ** 2) - 2 * (x[i] @ c) - t) ** 2 for i in range(m)]) + ), []) +p.solve(solver=cp.MOSEK, verbose=True) + +# Backward map from change of variables. +r = np.sqrt(t.value + (np.linalg.norm(c.value) ** 2)) + +print("t* = ", t.value) +print("c* = ", c.value) +print("r* = ", r)