diff --git a/Project.toml b/Project.toml
index 66606dd..85fc7f8 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "LightBenders"
 uuid = "2e9fe063-9687-4d41-bf41-4f062739391f"
 authors = ["guilhermebodin "]
-version = "0.1.0"
+version = "0.1.1"
 
 [deps]
 EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56"
diff --git a/src/LightBenders.jl b/src/LightBenders.jl
index 0ad4fe2..7e59da3 100644
--- a/src/LightBenders.jl
+++ b/src/LightBenders.jl
@@ -1,16 +1,12 @@
 module LightBenders
 
-# Standard library dependencies
-using JobQueueMPI
-using LinearAlgebra
-using Statistics
-
-# Third party dependencies
 using EnumX
+using JobQueueMPI
 using JuMP
+using LinearAlgebra
 using ProgressTables
+using Statistics
 
-# module constants
 const JQM = JobQueueMPI
 
 # Keys aspects of the algorithm
diff --git a/src/optimize_helpers.jl b/src/optimize_helpers.jl
index b86b167..348b4a2 100644
--- a/src/optimize_helpers.jl
+++ b/src/optimize_helpers.jl
@@ -43,9 +43,16 @@ function treat_termination_status(model::JuMP.Model, options::PolicyTrainingOpti
     return nothing
 end
 
+function treat_termination_status(model::JuMP.Model, options::SimulationOptions)
+    file_name = "model_first_stage"
+    info_msg = "Simulation model (first stage) finished with termination status: $(termination_status(model))"
+    treat_termination_status(model, info_msg, file_name, options.debugging_options)
+    return nothing
+end
+
 function treat_termination_status(model::JuMP.Model, options::SimulationOptions, t::Int, s::Int)
     file_name = "model_stage_$(t)_scenario_$(s)"
-    info_msg = "Simulation model of stage $t, scenario $s finished with termination status: $(termination_status(model))"
+    info_msg = "Simulation model (stage $t, scenario $s) finished with termination status: $(termination_status(model))"
     treat_termination_status(model, info_msg, file_name, options.debugging_options)
     return nothing
 end
diff --git a/src/simulation_strategies/benders_serial.jl b/src/simulation_strategies/benders_serial.jl
index 7c30053..6fde523 100644
--- a/src/simulation_strategies/benders_serial.jl
+++ b/src/simulation_strategies/benders_serial.jl
@@ -7,40 +7,55 @@ function serial_benders_simulate(;
     policy::Policy,
     simulation_options::SimulationOptions,
 )
-    stages = 2
     scenarios = simulation_options.num_scenarios
     simulation_total_cost = 0.0
-    state = Float64[]
-
     results = Dict{Tuple{String, Int}, Any}() # (variable_name, scenario) => value
 
-    for t in 1:stages
-        if t == 1 # first stage
-            state_variables_model = state_variables_builder(inputs)
-            model = first_stage_builder(state_variables_model, inputs)
-            add_all_cuts!(model, policy.pool[t], policy.policy_training_options)
-        elseif t == 2 # second stage
-            state_variables_model = state_variables_builder(inputs)
-            model = second_stage_builder(state_variables_model, inputs)
-            set_state(model, state)
-        end
-        for s in 1:scenarios
-            if t == 2
-                second_stage_modifier(model, inputs, s)
-            end
-            store_retry_data(model, simulation_options)
-            optimize_with_retry(model)
-            treat_termination_status(model, simulation_options, t, s)
-            future_cost = get_future_cost(model, policy.policy_training_options)
-            simulation_total_cost += (JuMP.objective_value(model) - future_cost) / scenarios
-            save_benders_results!(results, model, t, s, scenarios)
-            if simulation_options.state_handling == SimulationStateHandling.StatesRecalculatedInSimulation
-                state = get_state(model)
-            elseif simulation_options.state_handling == SimulationStateHandling.StatesFixedInPolicyResult
-                state = policy.states
-            else
-                error("State handling not implemented.")
-            end
-        end
+    # first stage
+    println("Simulating first stage...")
+
+    state_variables_model = state_variables_builder(inputs)
+    model = first_stage_builder(state_variables_model, inputs)
+    add_all_cuts!(model, policy.pool[1], policy.policy_training_options)
+
+    store_retry_data(model, simulation_options)
+    optimize_with_retry(model)
+    treat_termination_status(model, simulation_options)
+
+    for s in 1:scenarios
+        future_cost = get_future_cost(model, policy.policy_training_options)
+        simulation_total_cost += (JuMP.objective_value(model) - future_cost) / scenarios
+        save_benders_results!(results, model, 1, s, scenarios)
     end
+
+    # second stage
+    println("Simulating second stage...")
+
+    state = if simulation_options.state_handling == SimulationStateHandling.StatesRecalculatedInSimulation
+        get_state(model)
+    elseif simulation_options.state_handling == SimulationStateHandling.StatesFixedInPolicyResult
+        policy.states
+    else
+        error("State handling not implemented.")
+    end
+
+    state_variables_model = state_variables_builder(inputs)
+    model = second_stage_builder(state_variables_model, inputs)
+    set_state(model, state)
+
+    for s in 1:scenarios
+        second_stage_modifier(model, inputs, s)
+
+        store_retry_data(model, simulation_options)
+        optimize_with_retry(model)
+        treat_termination_status(model, simulation_options, 2, s)
+
+        future_cost = get_future_cost(model, policy.policy_training_options)
+        simulation_total_cost += (JuMP.objective_value(model) - future_cost) / scenarios
+
+        save_benders_results!(results, model, 2, s, scenarios)
+    end
+
+    results["objective", 0] = simulation_total_cost
+    return results
 end
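Note on the `src/optimize_helpers.jl` hunk: after the restructure, the first stage is solved once outside any scenario loop, so there is no `(t, s)` pair to put in the log message; the new two-argument `treat_termination_status` method covers that call site. Both methods delegate to a four-argument helper whose body is not part of this diff. The sketch below is only a guess at its general shape; the `write_lp` field on `debugging_options` is an invented placeholder, not the package's actual API.

```julia
import JuMP

# Hedged sketch only: the real four-argument treat_termination_status lives in
# src/optimize_helpers.jl and is not shown in this diff; `write_lp` stands in
# for whatever flag debugging_options actually carries.
function treat_termination_status_sketch(
    model::JuMP.Model,
    info_msg::String,
    file_name::String,
    debugging_options,
)
    @info info_msg
    if JuMP.termination_status(model) != JuMP.MOI.OPTIMAL && debugging_options.write_lp
        # Dump the failing model so it can be inspected offline.
        JuMP.write_to_file(model, file_name * ".lp")
    end
    return nothing
end
```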
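End to end, the reworked `serial_benders_simulate` is driven as sketched below. Only the keyword names visible in the hunk (`policy`, `simulation_options`) and the callback signatures implied by its body (`state_variables_builder(inputs)`, `first_stage_builder(model, inputs)`, `second_stage_builder(model, inputs)`, `second_stage_modifier(model, inputs, s)`) come from this diff; the toy data, the builder bodies, and the construction of `policy` and `simulation_options` are invented for illustration.

```julia
using JuMP
import HiGHS
import LightBenders

# Invented toy data: invest in capacity x, then sell y against a per-scenario demand.
inputs = (capacity = 10.0, invest_cost = 2.0, price = 5.0, demand = [4.0, 9.0])

# Declares the state variables; the diff calls this once per stage before
# handing the model to the corresponding stage builder.
function state_variables_builder(inputs)
    model = JuMP.Model(HiGHS.Optimizer)
    JuMP.set_silent(model)
    JuMP.@variable(model, 0 <= x <= inputs.capacity)
    return model
end

# First stage: pay the investment cost; Benders cuts are appended on top of
# this model by add_all_cuts! inside serial_benders_simulate.
function first_stage_builder(model, inputs)
    JuMP.@objective(model, Min, inputs.invest_cost * model[:x])
    return model
end

# Second stage: sell y, bounded by the invested capacity and by a demand
# constraint whose right-hand side is rebound per scenario below.
function second_stage_builder(model, inputs)
    JuMP.@variable(model, y >= 0)
    JuMP.@constraint(model, capacity_limit, y <= model[:x])
    JuMP.@constraint(model, demand_limit, y <= 0.0)
    JuMP.@objective(model, Min, -inputs.price * y)
    return model
end

# The new loop builds the second-stage model once and calls this for each
# scenario s, instead of rebuilding the model on every stage pass.
function second_stage_modifier(model, inputs, s)
    JuMP.set_normalized_rhs(model[:demand_limit], inputs.demand[s])
    return model
end

# `policy` comes from a prior training run and `simulation_options` from the
# package's options constructor; neither call is shown in this diff.
results = LightBenders.serial_benders_simulate(;
    state_variables_builder,
    first_stage_builder,
    second_stage_builder,
    second_stage_modifier,
    inputs,
    policy,
    simulation_options,
)
results["objective", 0]  # total simulated cost, as stored by the new code
```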