Biobjective hydro-thermal
using SDDP, HiGHS, Statistics, Test

function biobjective_example()
    model = SDDP.LinearPolicyGraph(
        stages = 3,
        lower_bound = 0.0,
        optimizer = HiGHS.Optimizer,
    ) do subproblem, _
        @variable(subproblem, 0 <= v <= 200, SDDP.State, initial_value = 50)
        @variables(subproblem, begin
            0 <= g[i = 1:2] <= 100
            0 <= u <= 150
            s >= 0
            shortage_cost >= 0
        end)
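        # In this hydro-thermal model, `v` is the reservoir volume (declared as
        # `SDDP.State`, so `v.in` and `v.out` are the incoming and outgoing
        # levels), `g[1]` and `g[2]` are the two thermal generators, `u` is the
        # water released through the hydro turbine, and `s` is spill.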
        @expressions(subproblem, begin
            objective_1, g[1] + 10 * g[2]
            objective_2, shortage_cost
        end)
        @constraints(subproblem, begin
            inflow_constraint, v.out == v.in - u - s
            g[1] + g[2] + u == 150
            shortage_cost >= 40 - v.out
            shortage_cost >= 60 - 2 * v.out
            shortage_cost >= 80 - 4 * v.out
        end)
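        # `inflow_constraint` is the water balance; its right-hand side is set
        # to the random inflow ω in `SDDP.parameterize` below. The second
        # constraint balances generation against a demand of 150 units, and the
        # three `shortage_cost` constraints define a convex piecewise-linear
        # penalty that grows as the end-of-stage volume `v.out` falls.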
        # You must call this for a biobjective problem!
        SDDP.initialize_biobjective_subproblem(subproblem)
        SDDP.parameterize(subproblem, 0.0:5:50.0) do ω
            JuMP.set_normalized_rhs(inflow_constraint, ω)
            # You must call `set_biobjective_functions` from within
            # `SDDP.parameterize`.
            return SDDP.set_biobjective_functions(
                subproblem,
                objective_1,
                objective_2,
            )
        end
    end
    pareto_weights =
        SDDP.train_biobjective(model, solution_limit = 10, iteration_limit = 10)
    solutions = [(k, v) for (k, v) in pareto_weights]
    sort!(solutions; by = x -> x[1])
    @test length(solutions) == 10
    # Test for convexity! The gradient must be decreasing as we move from left
    # to right.
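    # Why a decreasing gradient is expected: if each point is obtained by
    # minimizing the weighted sum λ * objective_1 + (1 - λ) * objective_2 (an
    # assumption about how `train_biobjective` scalarizes the two objectives),
    # then the optimal value is a pointwise minimum of functions affine in the
    # weight λ, so it is concave in λ and its slope decreases from left to
    # right.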
    gradient(a, b) = (b[2] - a[2]) / (b[1] - a[1])
    grad = Inf
    for i in 1:9
        new_grad = gradient(solutions[i], solutions[i+1])
        @test new_grad < grad
        grad = new_grad
    end
    return
end

biobjective_example()

------------------------------------------------------------------------------
SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23
Problem
Nodes : 3
State variables : 1
Scenarios : 1.33100e+03
Existing cuts : false
Subproblem structure : (min, max)
Variables : (9, 9)
VariableRef in MOI.LessThan{Float64} : (5, 6)
AffExpr in MOI.GreaterThan{Float64} : (3, 5)
AffExpr in MOI.EqualTo{Float64} : (2, 4)
VariableRef in MOI.GreaterThan{Float64} : (8, 8)
Options
Solver : serial mode
Risk measure : SDDP.Expectation()
Sampling scheme : SDDP.InSampleMonteCarlo
Iteration Simulation Bound Time (s) Proc. ID # Solves
1 0.000000e+00 0.000000e+00 8.445024e-03 1 36
2 0.000000e+00 0.000000e+00 1.066399e-02 1 72
3 0.000000e+00 0.000000e+00 1.285410e-02 1 108
4 0.000000e+00 0.000000e+00 1.498318e-02 1 144
5 0.000000e+00 0.000000e+00 1.710105e-02 1 180
6 0.000000e+00 0.000000e+00 1.926017e-02 1 216
7 0.000000e+00 0.000000e+00 2.138805e-02 1 252
8 0.000000e+00 0.000000e+00 2.354407e-02 1 288
9 0.000000e+00 0.000000e+00 2.571416e-02 1 324
10 0.000000e+00 0.000000e+00 2.798700e-02 1 360
Terminating training
Status : iteration_limit
Total time (s) : 2.798700e-02
Total solves : 360
Best bound : 0.000000e+00
Simulation CI : 0.000000e+00 ± 0.000000e+00
------------------------------------------------------------------------------
------------------------------------------------------------------------------
SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23
Problem
Nodes : 3
State variables : 1
Scenarios : 1.33100e+03
Existing cuts : true
Subproblem structure : (min, max)
Variables : (9, 9)
VariableRef in MOI.EqualTo{Float64} : (1, 1)
VariableRef in MOI.LessThan{Float64} : (5, 6)
AffExpr in MOI.GreaterThan{Float64} : (3, 7)
AffExpr in MOI.EqualTo{Float64} : (2, 4)
VariableRef in MOI.GreaterThan{Float64} : (8, 8)
Options
Solver : serial mode
Risk measure : SDDP.Expectation()
Sampling scheme : SDDP.InSampleMonteCarlo
Iteration Simulation Bound Time (s) Proc. ID # Solves
1 6.750000e+02 5.500000e+02 2.908945e-03 1 407
2 7.000000e+02 5.677498e+02 5.672932e-03 1 443
3 3.450000e+02 5.692712e+02 8.913994e-03 1 479
4 4.500000e+02 5.711983e+02 1.211691e-02 1 515
5 2.800000e+02 5.717055e+02 1.518893e-02 1 551
6 3.500000e+02 5.717055e+02 1.876497e-02 1 587
7 4.000000e+02 5.718745e+02 2.233505e-02 1 623
8 5.500000e+02 5.725169e+02 2.577901e-02 1 659
9 8.000000e+02 5.732607e+02 2.907991e-02 1 695
10 4.500000e+02 5.733959e+02 3.244495e-02 1 731
Terminating training
Status : iteration_limit
Total time (s) : 3.244495e-02
Total solves : 731
Best bound : 5.733959e+02
Simulation CI : 5.000000e+02 ± 1.079583e+02
------------------------------------------------------------------------------
------------------------------------------------------------------------------
SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23
Problem
Nodes : 3
State variables : 1
Scenarios : 1.33100e+03
Existing cuts : true
Subproblem structure : (min, max)
Variables : (9, 9)
VariableRef in MOI.EqualTo{Float64} : (1, 1)
VariableRef in MOI.LessThan{Float64} : (5, 6)
AffExpr in MOI.GreaterThan{Float64} : (3, 14)
AffExpr in MOI.EqualTo{Float64} : (2, 4)
VariableRef in MOI.GreaterThan{Float64} : (8, 8)
Options
Solver : serial mode
Risk measure : SDDP.Expectation()
Sampling scheme : SDDP.InSampleMonteCarlo
Iteration Simulation Bound Time (s) Proc. ID # Solves
1 4.850000e+02 3.349793e+02 3.315926e-03 1 778
2 5.600000e+02 3.392618e+02 6.495953e-03 1 814
3 1.525000e+02 3.433337e+02 1.000404e-02 1 850
4 3.817599e+02 3.457611e+02 1.356411e-02 1 886
5 3.650658e+02 3.460565e+02 1.730204e-02 1 922
6 3.837500e+02 3.463392e+02 2.089000e-02 1 958
7 2.500000e+02 3.465082e+02 2.470994e-02 1 994
8 5.571447e+02 3.465799e+02 2.794194e-02 1 1030
9 4.580882e+02 3.466980e+02 3.162313e-02 1 1066
10 3.550000e+02 3.468286e+02 3.514695e-02 1 1102
Terminating training
Status : iteration_limit
Total time (s) : 3.514695e-02
Total solves : 1102
Best bound : 3.468286e+02
Simulation CI : 3.948309e+02 ± 7.954180e+01
------------------------------------------------------------------------------
------------------------------------------------------------------------------
SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23
Problem
Nodes : 3
State variables : 1
Scenarios : 1.33100e+03
Existing cuts : true
Subproblem structure : (min, max)
Variables : (9, 9)
VariableRef in MOI.EqualTo{Float64} : (1, 1)
VariableRef in MOI.LessThan{Float64} : (5, 6)
AffExpr in MOI.GreaterThan{Float64} : (3, 19)
AffExpr in MOI.EqualTo{Float64} : (2, 4)
VariableRef in MOI.GreaterThan{Float64} : (8, 8)
Options
Solver : serial mode
Risk measure : SDDP.Expectation()
Sampling scheme : SDDP.InSampleMonteCarlo
Iteration Simulation Bound Time (s) Proc. ID # Solves
1 1.887500e+02 1.995243e+02 3.044844e-03 1 1149
2 1.753930e+02 1.999460e+02 6.107807e-03 1 1185
3 1.035576e+02 2.002992e+02 8.944988e-03 1 1221
4 1.575000e+02 2.036148e+02 1.232791e-02 1 1257
5 1.562500e+02 2.051277e+02 1.532292e-02 1 1293
6 1.962500e+02 2.052517e+02 1.878500e-02 1 1329
7 2.987500e+02 2.052517e+02 2.222681e-02 1 1365
8 2.212500e+02 2.052855e+02 2.577400e-02 1 1401
9 2.462500e+02 2.052855e+02 2.925181e-02 1 1437
10 2.962500e+02 2.052855e+02 3.276491e-02 1 1473
Terminating training
Status : iteration_limit
Total time (s) : 3.276491e-02
Total solves : 1473
Best bound : 2.052855e+02
Simulation CI : 2.040201e+02 ± 3.876873e+01
------------------------------------------------------------------------------
------------------------------------------------------------------------------
SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23
Problem
Nodes : 3
State variables : 1
Scenarios : 1.33100e+03
Existing cuts : true
Subproblem structure : (min, max)
Variables : (9, 9)
VariableRef in MOI.EqualTo{Float64} : (1, 1)
VariableRef in MOI.LessThan{Float64} : (5, 6)
AffExpr in MOI.GreaterThan{Float64} : (3, 25)
AffExpr in MOI.EqualTo{Float64} : (2, 4)
VariableRef in MOI.GreaterThan{Float64} : (8, 8)
Options
Solver : serial mode
Risk measure : SDDP.Expectation()
Sampling scheme : SDDP.InSampleMonteCarlo
Iteration Simulation Bound Time (s) Proc. ID # Solves
1 3.737500e+02 4.626061e+02 3.706932e-03 1 1520
2 3.333333e+02 4.628015e+02 7.384062e-03 1 1556
3 7.150000e+02 4.650660e+02 1.101494e-02 1 1592
4 2.950000e+02 4.652274e+02 1.453805e-02 1 1628
5 4.425000e+02 4.652614e+02 1.860595e-02 1 1664
6 2.950000e+02 4.652614e+02 2.276492e-02 1 1700
7 4.850000e+02 4.654624e+02 2.687597e-02 1 1736
8 2.387500e+02 4.656172e+02 3.072715e-02 1 1772
9 4.840423e+02 4.656602e+02 3.454804e-02 1 1808
10 2.450000e+02 4.658509e+02 3.839207e-02 1 1844
Terminating training
Status : iteration_limit
Total time (s) : 3.839207e-02
Total solves : 1844
Best bound : 4.658509e+02
Simulation CI : 3.907376e+02 ± 9.045105e+01
------------------------------------------------------------------------------
------------------------------------------------------------------------------
SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23
Problem
Nodes : 3
State variables : 1
Scenarios : 1.33100e+03
Existing cuts : true
Subproblem structure : (min, max)
Variables : (9, 9)
VariableRef in MOI.EqualTo{Float64} : (1, 1)
VariableRef in MOI.LessThan{Float64} : (5, 6)
AffExpr in MOI.GreaterThan{Float64} : (3, 33)
AffExpr in MOI.EqualTo{Float64} : (2, 4)
VariableRef in MOI.GreaterThan{Float64} : (8, 8)
Options
Solver : serial mode
Risk measure : SDDP.Expectation()
Sampling scheme : SDDP.InSampleMonteCarlo
Iteration Simulation Bound Time (s) Proc. ID # Solves
1 1.675000e+02 1.129545e+02 3.047943e-03 1 1891
2 1.070005e+02 1.129545e+02 6.089926e-03 1 1927
3 1.112500e+02 1.129545e+02 9.433985e-03 1 1963
4 1.237500e+02 1.129545e+02 1.276588e-02 1 1999
5 1.175000e+02 1.129715e+02 1.600385e-02 1 2035
6 9.875000e+01 1.129771e+02 1.955700e-02 1 2071
7 1.050000e+02 1.129771e+02 2.296996e-02 1 2107
8 9.250000e+01 1.129771e+02 2.631092e-02 1 2143
9 1.168750e+02 1.129771e+02 2.969599e-02 1 2179
10 1.362500e+02 1.129771e+02 3.311896e-02 1 2215
Terminating training
Status : iteration_limit
Total time (s) : 3.311896e-02
Total solves : 2215
Best bound : 1.129771e+02
Simulation CI : 1.176375e+02 ± 1.334615e+01
------------------------------------------------------------------------------
------------------------------------------------------------------------------
SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23
Problem
Nodes : 3
State variables : 1
Scenarios : 1.33100e+03
Existing cuts : true
Subproblem structure : (min, max)
Variables : (9, 9)
VariableRef in MOI.EqualTo{Float64} : (1, 1)
VariableRef in MOI.LessThan{Float64} : (5, 6)
AffExpr in MOI.GreaterThan{Float64} : (3, 36)
AffExpr in MOI.EqualTo{Float64} : (2, 4)
VariableRef in MOI.GreaterThan{Float64} : (8, 8)
Options
Solver : serial mode
Risk measure : SDDP.Expectation()
Sampling scheme : SDDP.InSampleMonteCarlo
Iteration Simulation Bound Time (s) Proc. ID # Solves
1 2.562500e+02 2.788373e+02 3.612041e-03 1 2262
2 3.437500e+02 2.788373e+02 7.180929e-03 1 2298
3 2.375000e+02 2.791872e+02 1.091194e-02 1 2334
4 2.375000e+02 2.791872e+02 1.447487e-02 1 2370
5 2.500000e+02 2.794614e+02 1.811886e-02 1 2406
6 2.312500e+02 2.794614e+02 2.194905e-02 1 2442
7 2.500000e+02 2.794614e+02 2.583289e-02 1 2478
8 1.562500e+02 2.794614e+02 2.972293e-02 1 2514
9 1.750000e+02 2.795671e+02 3.347397e-02 1 2550
10 2.375000e+02 2.795671e+02 3.748989e-02 1 2586
Terminating training
Status : iteration_limit
Total time (s) : 3.748989e-02
Total solves : 2586
Best bound : 2.795671e+02
Simulation CI : 2.375000e+02 ± 3.099032e+01
------------------------------------------------------------------------------
------------------------------------------------------------------------------
SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23
Problem
Nodes : 3
State variables : 1
Scenarios : 1.33100e+03
Existing cuts : true
Subproblem structure : (min, max)
Variables : (9, 9)
VariableRef in MOI.EqualTo{Float64} : (1, 1)
VariableRef in MOI.LessThan{Float64} : (5, 6)
AffExpr in MOI.GreaterThan{Float64} : (3, 41)
AffExpr in MOI.EqualTo{Float64} : (2, 4)
VariableRef in MOI.GreaterThan{Float64} : (8, 8)
Options
Solver : serial mode
Risk measure : SDDP.Expectation()
Sampling scheme : SDDP.InSampleMonteCarlo
Iteration Simulation Bound Time (s) Proc. ID # Solves
1 3.812500e+02 4.072952e+02 4.050970e-03 1 2633
2 3.593750e+02 4.073680e+02 7.680893e-03 1 2669
3 6.749006e+02 4.074882e+02 1.161289e-02 1 2705
4 4.825000e+02 4.075387e+02 1.574588e-02 1 2741
5 5.506250e+02 4.077605e+02 1.963401e-02 1 2777
6 1.825000e+02 4.079711e+02 2.373290e-02 1 2813
7 1.918750e+02 4.079711e+02 2.793193e-02 1 2849
8 5.212500e+02 4.080005e+02 3.184390e-02 1 2885
9 3.091729e+02 4.080290e+02 3.581691e-02 1 2921
10 5.818750e+02 4.080500e+02 4.010391e-02 1 2957
Terminating training
Status : iteration_limit
Total time (s) : 4.010391e-02
Total solves : 2957
Best bound : 4.080500e+02
Simulation CI : 4.235323e+02 ± 1.029245e+02
------------------------------------------------------------------------------
------------------------------------------------------------------------------
SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23
Problem
Nodes : 3
State variables : 1
Scenarios : 1.33100e+03
Existing cuts : true
Subproblem structure : (min, max)
Variables : (9, 9)
VariableRef in MOI.EqualTo{Float64} : (1, 1)
VariableRef in MOI.LessThan{Float64} : (5, 6)
AffExpr in MOI.GreaterThan{Float64} : (3, 47)
AffExpr in MOI.EqualTo{Float64} : (2, 4)
VariableRef in MOI.GreaterThan{Float64} : (8, 8)
Options
Solver : serial mode
Risk measure : SDDP.Expectation()
Sampling scheme : SDDP.InSampleMonteCarlo
Iteration Simulation Bound Time (s) Proc. ID # Solves
1 8.525000e+02 5.197742e+02 3.970861e-03 1 3004
2 4.125000e+02 5.207890e+02 7.678986e-03 1 3040
3 6.325000e+02 5.208831e+02 1.205993e-02 1 3076
4 5.887500e+02 5.208831e+02 1.667690e-02 1 3112
5 6.812500e+02 5.209321e+02 2.050900e-02 1 3148
6 3.287500e+02 5.209912e+02 2.459598e-02 1 3184
7 7.237500e+02 5.209912e+02 2.858305e-02 1 3220
8 2.375000e+02 5.211301e+02 3.271508e-02 1 3256
9 3.612500e+02 5.211567e+02 3.721595e-02 1 3292
10 4.493750e+02 5.211793e+02 4.177594e-02 1 3328
Terminating training
Status : iteration_limit
Total time (s) : 4.177594e-02
Total solves : 3328
Best bound : 5.211793e+02
Simulation CI : 5.268125e+02 ± 1.227709e+02
------------------------------------------------------------------------------
------------------------------------------------------------------------------
SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23
Problem
Nodes : 3
State variables : 1
Scenarios : 1.33100e+03
Existing cuts : true
Subproblem structure : (min, max)
Variables : (9, 9)
VariableRef in MOI.EqualTo{Float64} : (1, 1)
VariableRef in MOI.LessThan{Float64} : (5, 6)
AffExpr in MOI.GreaterThan{Float64} : (3, 53)
AffExpr in MOI.EqualTo{Float64} : (2, 4)
VariableRef in MOI.GreaterThan{Float64} : (8, 8)
Options
Solver : serial mode
Risk measure : SDDP.Expectation()
Sampling scheme : SDDP.InSampleMonteCarlo
Iteration Simulation Bound Time (s) Proc. ID # Solves
1 3.437500e+01 5.937500e+01 3.466129e-03 1 3375
2 6.875000e+01 5.937500e+01 6.617069e-03 1 3411
3 7.500000e+01 5.937500e+01 9.704113e-03 1 3447
4 1.062500e+02 5.938345e+01 1.303411e-02 1 3483
5 4.687500e+01 5.938557e+01 1.667905e-02 1 3519
6 5.625000e+01 5.938557e+01 2.030611e-02 1 3555
7 7.187500e+01 5.938557e+01 2.369809e-02 1 3591
8 5.000000e+01 5.938557e+01 2.708507e-02 1 3627
9 4.375000e+01 5.938557e+01 3.059316e-02 1 3663
10 3.750000e+01 5.938557e+01 3.414607e-02 1 3699
Terminating training
Status : iteration_limit
Total time (s) : 3.414607e-02
Total solves : 3699
Best bound : 5.938557e+01
Simulation CI : 5.906250e+01 ± 1.352595e+01
------------------------------------------------------------------------------
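The object returned by `SDDP.train_biobjective` is iterated above as (key,
value) pairs, sorted by key, and the test checks that the values trace out a
curve with decreasing gradient; a natural reading is that each key is a
scalarization weight and each value the bound obtained when training with that
weight. As a minimal sketch under that assumption, a hypothetical helper
`print_frontier` could list the frontier like this:

function print_frontier(pareto_weights)
    # Sort the (weight, bound) pairs by weight so the frontier reads from
    # left to right.
    points = sort!([(k, v) for (k, v) in pareto_weights]; by = first)
    for (weight, bound) in points
        println("weight = ", weight, "  bound = ", bound)
    end
    return points
end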