SLDP: example 1

This tutorial was generated using Literate.jl. Download the source as a .jl file. Download the source as a .ipynb file.

This example is derived from Section 4.2 of the paper: Ahmed, S., Cabral, F. G., & da Costa, B. F. P. (2019). Stochastic Lipschitz Dynamic Programming. Optimization Online. PDF

using SDDP, HiGHS, Test

"""
    sldp_example_one()

Build and solve the 8-stage stochastic Lipschitz dynamic programming example
from Section 4.2 of Ahmed, Cabral & da Costa (2019), then check that the
trained lower bound does not exceed 1.1675.
"""
function sldp_example_one()
    model = SDDP.LinearPolicyGraph(;
        stages = 8,
        lower_bound = 0.0,
        optimizer = HiGHS.Optimizer,
    ) do subproblem, stage
        # State variable with initial condition x₀ = 2.
        @variable(subproblem, x, SDDP.State, initial_value = 2.0)
        # x⁺ and x⁻ linearize |x.out|; u is a binary control; ω is the noise,
        # fixed per-scenario in SDDP.parameterize below.
        @variable(subproblem, x⁺ >= 0)
        @variable(subproblem, x⁻ >= 0)
        @variable(subproblem, 0 <= u <= 1, Bin)
        @variable(subproblem, ω)
        # Discounted absolute-value cost: 0.9^(t-1) * |x.out|.
        @stageobjective(subproblem, 0.9^(stage - 1) * (x⁺ + x⁻))
        # State transition and the two epigraph constraints for |x.out|.
        @constraint(subproblem, x.out == x.in + 2 * u - 1 + ω)
        @constraint(subproblem, x⁺ >= x.out)
        @constraint(subproblem, x⁻ >= -x.out)
        points = [
            -0.3089653673606697,
            -0.2718277412744214,
            -0.09611178608243474,
            0.24645863921577763,
            0.5204224537256875,
        ]
        # The sample space is symmetric: each point and its negation.
        return SDDP.parameterize(φ -> JuMP.fix(ω, φ), subproblem, [points; -points])
    end
    SDDP.train(model; log_frequency = 10)
    @test SDDP.calculate_bound(model) <= 1.1675
    return
end

# Run the example: trains the policy and asserts the bound via @test.
sldp_example_one()
-------------------------------------------------------------------
         SDDP.jl (c) Oscar Dowson and contributors, 2017-24
-------------------------------------------------------------------
problem
  nodes           : 8
  state variables : 1
  scenarios       : 1.00000e+08
  existing cuts   : false
options
  solver          : serial mode
  risk measure    : SDDP.Expectation()
  sampling scheme : SDDP.InSampleMonteCarlo
subproblem structure
  VariableRef                             : [7, 7]
  AffExpr in MOI.EqualTo{Float64}         : [1, 1]
  AffExpr in MOI.GreaterThan{Float64}     : [2, 2]
  VariableRef in MOI.GreaterThan{Float64} : [4, 4]
  VariableRef in MOI.LessThan{Float64}    : [1, 2]
  VariableRef in MOI.ZeroOne              : [1, 1]
numerical stability report
  matrix range     [1e+00, 2e+00]
  objective range  [5e-01, 1e+00]
  bounds range     [1e+00, 1e+00]
  rhs range        [1e+00, 1e+00]
-------------------------------------------------------------------
 iteration    simulation      bound        time (s)     solves  pid
-------------------------------------------------------------------
        10   4.150176e+00  1.164925e+00  3.860250e-01      1680   1
        20   3.176779e+00  1.167188e+00  4.810481e-01      2560   1
        30   3.621429e+00  1.167299e+00  8.818910e-01      4240   1
        40   3.437090e+00  1.167299e+00  9.780531e-01      5120   1
        50   3.238210e+00  1.167299e+00  1.378051e+00      6800   1
        60   3.224280e+00  1.167299e+00  1.479348e+00      7680   1
        65   3.839342e+00  1.167299e+00  1.531019e+00      8120   1
-------------------------------------------------------------------
status         : simulation_stopping
total time (s) : 1.531019e+00
total solves   : 8120
best bound     :  1.167299e+00
simulation ci  :  3.297366e+00 ± 1.174014e-01
numeric issues : 0
-------------------------------------------------------------------