Merge pull request #12 from psrenergy/rs/format
Add format
raphasampaio authored Nov 25, 2024
2 parents 7056885 + c6f25ba commit 855512e
Showing 26 changed files with 170 additions and 114 deletions.
24 changes: 24 additions & 0 deletions .JuliaFormatter.toml
@@ -0,0 +1,24 @@
# Configuration file for JuliaFormatter.jl
# For more information, see: https://domluna.github.io/JuliaFormatter.jl/stable/config/

indent = 4
margin = 146
always_for_in = true
whitespace_typedefs = true
whitespace_ops_in_indices = false
remove_extra_newlines = true
import_to_using = false
pipe_to_function_call = false
short_to_long_function_def = false
long_to_short_function_def = false
whitespace_in_kwargs = true
annotate_untyped_fields_with_any = true
format_docstrings = true
conditional_to_if = false
normalize_line_endings = "auto"
trailing_comma = true
join_lines_based_on_source = true
indent_submodule = false
separate_kwargs_with_semicolon = false
surround_whereop_typeparameters = true
always_use_return = true
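For context only (this example is not part of the commit), a minimal sketch of how two of these settings reshape code, matching the kind of changes visible in the src/cut_strategies diffs below; the function clamp_all is made up for illustration:

# Before formatting: bare `where` clause, no trailing comma in the wrapped call.
function clamp_all(x::Vector{T}, lo::T, hi::T) where T <: Real
    return map(
        v -> clamp(v, lo, hi),
        x
    )
end

# After JuliaFormatter with surround_whereop_typeparameters = true and trailing_comma = true
# (join_lines_based_on_source = true keeps the call split as it was written):
function clamp_all(x::Vector{T}, lo::T, hi::T) where {T <: Real}
    return map(
        v -> clamp(v, lo, hi),
        x,
    )
end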
5 changes: 5 additions & 0 deletions format/Project.toml
@@ -0,0 +1,5 @@
[deps]
JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899"

[compat]
JuliaFormatter = "1"
5 changes: 5 additions & 0 deletions format/format.bat
@@ -0,0 +1,5 @@
@echo off

SET BASEPATH=%~dp0

CALL "%JULIA_194%" --project=%BASEPATH% %BASEPATH%\format.jl
6 changes: 6 additions & 0 deletions format/format.jl
@@ -0,0 +1,6 @@
import Pkg
Pkg.instantiate()

using JuliaFormatter

format(dirname(@__DIR__))
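A usage sketch (not part of the commit): the same entry point can be adapted into a check that fails when files are not formatted. The overwrite and verbose keywords are assumed from JuliaFormatter 1.x and should be verified against its documentation:

# Hypothetical format/check.jl - a sketch, not a file in this repository.
import Pkg
Pkg.instantiate()

using JuliaFormatter

# With overwrite = false, format(...) reports whether files are already formatted
# without modifying them; it returns true when nothing would change.
repo_root = dirname(@__DIR__)
already_formatted = format(repo_root; overwrite = false, verbose = true)

if !already_formatted
    error("Repository is not formatted; run format/format.jl (or format/format.bat on Windows).")
end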
2 changes: 1 addition & 1 deletion revise.bat
@@ -3,4 +3,4 @@
SET BASEPATH=%~dp0
SET REVISE_PATH="%BASEPATH%\revise"

%JULIA_194% --project=%REVISE_PATH% --load=%REVISE_PATH%\revise_load_script.jl
CALL %JULIA_194% --project=%REVISE_PATH% --load=%REVISE_PATH%\revise_load_script.jl
6 changes: 3 additions & 3 deletions src/cut_strategies/cuts_base.jl
@@ -15,7 +15,6 @@ Abstract type to hold various implementations of cut pools. A cut pool is a data
"""
abstract type AbstractCutPool end


"""
initialize_cut_pool(options)
@@ -29,6 +28,7 @@ function initialize_cut_pool(options)
return [LightBenders.CutPoolMultiCut() for _ in 1:num_stages]
end
error("Not implemented.")
return nothing
end

"""
@@ -64,9 +64,9 @@ end
Add a cut to a Model and return the constraint reference.
"""
function add_cut(model::JuMP.Model, epigraph_variable::JuMP.VariableRef, coefs::Vector{T}, rhs::T) where T <: Real
function add_cut(model::JuMP.Model, epigraph_variable::JuMP.VariableRef, coefs::Vector{T}, rhs::T) where {T <: Real}
alpha = epigraph_variable
cache = model.ext[:state]::StateCache
cref = @constraint(model, alpha >= rhs + dot(coefs, cache.variables))
return cref
end
end
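In symbols (a restatement of the @constraint above, with $x$ standing for cache.variables and $\alpha$ for the epigraph variable), each stored pair (coefs, rhs) adds the Benders optimality cut

$$\alpha \;\ge\; \mathrm{rhs} + \langle \mathrm{coefs},\, x \rangle,$$

so $\alpha$ lower-bounds the approximated future cost at the current state.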
12 changes: 6 additions & 6 deletions src/cut_strategies/local_cuts.jl
@@ -16,15 +16,15 @@ function number_of_cuts(pool::LocalCutPool)
end

function store_cut!(
pool::LocalCutPool,
coefs::Vector{Float64},
state::Vector{Float64},
rhs::Float64,
obj::Float64
pool::LocalCutPool,
coefs::Vector{Float64},
state::Vector{Float64},
rhs::Float64,
obj::Float64,
)
push!(pool.coefs, coefs)
push!(pool.state, state)
push!(pool.rhs, rhs)
push!(pool.obj, obj)
return nothing
end
end
50 changes: 26 additions & 24 deletions src/cut_strategies/multi_cut.jl
@@ -14,21 +14,22 @@ function number_of_cuts(pool::CutPoolMultiCut)
end

function store_cut!(
pool_multicut::CutPoolMultiCut,
local_pool::LocalCutPool
pool_multicut::CutPoolMultiCut,
local_pool::LocalCutPool,
)
push!(pool_multicut.cuts, local_pool)
return nothing
end

function store_cut!(
pool::Vector{CutPoolMultiCut},
pool::Vector{CutPoolMultiCut},
local_cuts::LocalCutPool,
state::Vector{Float64},
options,
t::Integer
t::Integer,
)
store_cut!(pool[t-1], local_cuts)
return nothing
end

function create_epigraph_multi_cut_variables!(model::JuMP.Model, policy_training_options)
@@ -42,16 +43,16 @@ function create_epigraph_multi_cut_variables!(model::JuMP.Model, policy_training
end

function add_multi_cut_risk_neutral_cuts!(
model::JuMP.Model,
model::JuMP.Model,
alphas::Vector{JuMP.VariableRef},
pool::CutPoolMultiCut,
policy_training_options
pool::CutPoolMultiCut,
policy_training_options,
)
for scen in 1:policy_training_options.num_scenarios
JuMP.set_objective_coefficient(
model,
alphas[scen],
(1.0 - policy_training_options.discount_rate) / policy_training_options.num_scenarios
model,
alphas[scen],
(1.0 - policy_training_options.discount_rate) / policy_training_options.num_scenarios,
)
for i in 1:length(pool.cuts)
add_cut(model, alphas[scen], pool.cuts[i].coefs[scen], pool.cuts[i].rhs[scen])
@@ -61,33 +62,34 @@ function add_multi_cut_risk_neutral_cuts!(
end

function add_multi_cut_cvar_cuts!(
model::JuMP.Model,
model::JuMP.Model,
alphas::Vector{JuMP.VariableRef},
pool::CutPoolMultiCut,
policy_training_options
pool::CutPoolMultiCut,
policy_training_options,
)
discount_rate_multiplier = (1.0 - policy_training_options.discount_rate)
JuMP.@variable(model, z_explicit_cvar)
# λ * z
JuMP.set_objective_coefficient(
model,
z_explicit_cvar,
discount_rate_multiplier * (policy_training_options.risk_measure.lambda)
model,
z_explicit_cvar,
discount_rate_multiplier * (policy_training_options.risk_measure.lambda),
)
JuMP.@variable(model, delta_explicit_cvar[scen = 1:policy_training_options.num_scenarios] >= 0)
for scen in 1:policy_training_options.num_scenarios
# (1 - λ)/L * sum(alphas)
JuMP.set_objective_coefficient(
model,
alphas[scen],
discount_rate_multiplier * (1 - policy_training_options.risk_measure.lambda) / policy_training_options.num_scenarios
model,
alphas[scen],
discount_rate_multiplier * (1 - policy_training_options.risk_measure.lambda) / policy_training_options.num_scenarios,
)
# λ / ((1 - CVaR_\alpha) * L) * sum(deltas)
JuMP.set_objective_coefficient(
model,
delta_explicit_cvar[scen],
discount_rate_multiplier *
(policy_training_options.risk_measure.lambda) / ((1 - policy_training_options.risk_measure.alpha) * policy_training_options.num_scenarios)
model,
delta_explicit_cvar[scen],
discount_rate_multiplier *
(policy_training_options.risk_measure.lambda) /
((1 - policy_training_options.risk_measure.alpha) * policy_training_options.num_scenarios),
)
# Add delta constraint
JuMP.@constraint(model, delta_explicit_cvar[scen] >= alphas[scen] - z_explicit_cvar)
@@ -119,4 +121,4 @@ function get_multi_cut_future_cost(model::JuMP.Model)::Float64
end
alphas = model[:epi_multi_cut]::Vector{JuMP.VariableRef}
return mean(JuMP.value.(alphas))
end
end
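Reading the objective coefficients set in add_multi_cut_cvar_cuts! together (a reconstruction from the code and comments above, with $L$ = num_scenarios, $\lambda$ = risk_measure.lambda, $\alpha_{\mathrm{CVaR}}$ = risk_measure.alpha, and $d$ = discount_rate), the terms contributed to the objective are

$$(1 - d)\left[\lambda\, z + \frac{1-\lambda}{L}\sum_{s=1}^{L}\alpha_s + \frac{\lambda}{(1-\alpha_{\mathrm{CVaR}})\,L}\sum_{s=1}^{L}\delta_s\right],$$

subject to $\delta_s \ge \alpha_s - z$ and $\delta_s \ge 0$ for every scenario $s$, with each epigraph variable $\alpha_s$ bounded below by its scenario's cuts through add_cut.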
26 changes: 13 additions & 13 deletions src/cut_strategies/single_cut.jl
@@ -19,11 +19,11 @@ function number_of_cuts(pool::CutPoolSingleCut)
end

function store_cut!(
pool::CutPoolSingleCut,
coefs::Vector{Float64},
state::Vector{Float64},
rhs::Float64,
obj::Float64
pool::CutPoolSingleCut,
coefs::Vector{Float64},
state::Vector{Float64},
rhs::Float64,
obj::Float64,
)
push!(pool.coefs, coefs)
push!(pool.state, state)
@@ -33,11 +33,11 @@ function store_cut!(
end

function store_cut!(
pool::Vector{CutPoolSingleCut},
pool::Vector{CutPoolSingleCut},
local_cuts::LocalCutPool,
state::Vector{Float64},
options,
t::Integer
t::Integer,
)
if isa(options.risk_measure, RiskNeutral)
return risk_neutral_single_cut!(pool, local_cuts, state, options, t)
@@ -49,11 +49,11 @@ function risk_neutral_single_cut!(
end

function risk_neutral_single_cut!(
pool::Vector{CutPoolSingleCut},
local_cuts::LocalCutPool,
pool::Vector{CutPoolSingleCut},
local_cuts::LocalCutPool,
state::Vector{Float64},
options,
t::Integer
t::Integer,
)
num_local_cuts = length(local_cuts.obj)
obj = mean(local_cuts.obj)
@@ -67,11 +67,11 @@ function cvar_single_cut!(
end

function cvar_single_cut!(
pool::Vector{CutPoolSingleCut},
local_cuts::LocalCutPool,
pool::Vector{CutPoolSingleCut},
local_cuts::LocalCutPool,
state::Vector{Float64},
options,
t::Int
t::Int,
)
weights = build_cvar_weights(local_cuts.obj, options.risk_measure.alpha, options.risk_measure.lambda)
obj = dot(weights, local_cuts.obj)
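The tail of cvar_single_cut! (and of risk_neutral_single_cut!) is collapsed in this view, so the following is only a sketch of the aggregation implied by the visible lines: build_cvar_weights returns scenario weights $w_s$ (uniform $1/L$ in the risk-neutral case), the aggregated objective is obj = dot(weights, local_cuts.obj), and, assuming the coefficients and right-hand sides are combined with the same weights, the resulting single cut has the form

$$\alpha \;\ge\; \sum_{s} w_s \left(\mathrm{rhs}_s + \langle \mathrm{coefs}_s,\, x\rangle\right).$$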
2 changes: 1 addition & 1 deletion src/debugging_options.jl
@@ -1,4 +1,4 @@
Base.@kwdef mutable struct DebuggingOptions
logs_dir::String = ""
write_lp::Bool = false
end
end
8 changes: 4 additions & 4 deletions src/deterministic_equivalent/deterministic_equivalent.jl
@@ -8,7 +8,7 @@ function deterministic_equivalent(;
first_stage_builder::Function,
second_stage_builder::Function,
second_stage_modifier::Function,
inputs=nothing,
inputs = nothing,
options::DeterministicEquivalentOptions,
)
num_scenarios = options.num_scenarios
@@ -61,7 +61,7 @@ function copy_and_replace_variables(
)
return JuMP.GenericAffExpr(
src.constant,
Pair{VariableRef,Float64}[
Pair{VariableRef, Float64}[
src_to_dest_variable[key] => val for (key, val) in src.terms
],
)
@@ -73,7 +73,7 @@
)
return JuMP.GenericQuadExpr(
copy_and_replace_variables(src.aff, src_to_dest_variable),
Pair{UnorderedPair{VariableRef},Float64}[
Pair{UnorderedPair{VariableRef}, Float64}[
UnorderedPair{VariableRef}(
src_to_dest_variable[pair.a],
src_to_dest_variable[pair.b],
@@ -104,7 +104,7 @@ function push_model!(
model::JuMP.Model,
subproblem::JuMP.Model,
scenario::Int,
num_scenarios::Int
num_scenarios::Int,
)
# push variables
src_variables = all_variables_but_state(subproblem)
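For orientation (a sketch inferred from the visible lines, not from the collapsed body): copy_and_replace_variables rebuilds a subproblem expression with the destination model's variables via src_to_dest_variable, so when push_model! replicates the second stage once per scenario, an affine expression

$$c + \sum_i a_i x_i \quad\text{becomes}\quad c + \sum_i a_i x_i^{(s)},$$

where $x_i^{(s)}$ is the copy of $x_i$ created for scenario $s$ in the deterministic equivalent.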
2 changes: 1 addition & 1 deletion src/optimize_helpers.jl
@@ -14,7 +14,7 @@ function print_conflict_to_file(model::JuMP.Model, filename::String = "infeasibl
write(io, "IIS found\n")
for cref in all_constraints(model, include_variable_in_set_constraints = true)
if MOI.get(model, MOI.ConstraintConflictStatus(), cref) == MOI.IN_CONFLICT
println(io, cref)
println(io, cref)
end
end
end
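A usage sketch (assumptions: the solver supports conflict computation, JuMP's compute_conflict! must run before MOI.ConstraintConflictStatus can be queried, and print_conflict_to_file is reached through the LightBenders module; the collapsed lines above may already handle part of this):

# Sketch only; Gurobi is one solver that supports conflict (IIS) computation.
# MOI is re-exported by JuMP.
using JuMP, Gurobi

model = Model(Gurobi.Optimizer)
@variable(model, x >= 2)
@constraint(model, x <= 1)   # infeasible together with the lower bound on x
optimize!(model)

if termination_status(model) == MOI.INFEASIBLE
    compute_conflict!(model)  # populates MOI.ConstraintConflictStatus for each constraint
    LightBenders.print_conflict_to_file(model, "conflict_report.txt")  # filename chosen arbitrarily
end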
2 changes: 1 addition & 1 deletion src/progress_logs/abstractions.jl
@@ -3,4 +3,4 @@
Abstract type to hold progress logs for differnt algorithms.
"""
abstract type AbstractProgressLog end
abstract type AbstractProgressLog end
9 changes: 6 additions & 3 deletions src/progress_logs/benders_training_iterations.jl
@@ -55,20 +55,23 @@ function start_iteration!(progress::BendersTrainingIterationsLog)
push!(progress.LB, 0.0)
push!(progress.UB, 0.0)
progress.current_iteration += 1
return nothing
end

function report_current_bounds(progress::BendersTrainingIterationsLog)
next(progress.progress_table,
next(progress.progress_table,
[
progress.current_iteration,
current_lower_bound(progress),
current_upper_bound(progress),
current_gap(progress),
time() - progress.start_time,
]
],
)
return nothing
end

function finish_training!(progress::BendersTrainingIterationsLog)
finalize(progress.progress_table)
end
return nothing
end
1 change: 1 addition & 0 deletions src/progress_logs/deterministic_equivalent.jl
@@ -4,4 +4,5 @@ function DeterministicEquivalentLog(num_scenarios::Int)
println(" ")
println("Number of stages: 2")
println("Number of scenarios: ", num_scenarios)
return nothing
end