StructDualDynProg: Problem 5.2, 2 stages

This example comes from StochasticDualDynamicProgramming.jl.

using SDDP, HiGHS, Test

function test_prob52_2stages()
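    # Build a two-stage linear policy graph. `direct_mode = true` constructs
    # each subproblem in JuMP's direct mode, attached to a HiGHS instance.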
    model = SDDP.LinearPolicyGraph(
        stages = 2,
        lower_bound = 0.0,
        optimizer = HiGHS.Optimizer,
        direct_mode = true,
    ) do subproblem, stage
        # ========== Problem data ==========
        n = 4  # Number of generation technologies.
        m = 3  # Number of demand blocks.
        ic = [16, 5, 32, 2]  # Investment cost per unit of capacity.
        C = [25, 80, 6.5, 160]  # Production cost per unit of output.
        T = [8760, 7000, 1500] / 8760  # Fraction of the year each block lasts.
        D2 = [diff([0, 3919, 7329, 10315]) diff([0, 7086, 9004, 11169])]
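        # `diff` converts the cumulative demand levels into per-block demands,
        # so D2[j, ω] is the demand of block j under scenario ω; column 1 is
        # [3919, 3410, 2986] and column 2 is [7086, 1918, 2165].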
        p2 = [0.9, 0.1]  # Probability of each demand scenario.
        # ========== State Variables ==========
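        # x[i] is the installed capacity of technology i, carried across stages.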
        @variable(subproblem, x[i = 1:n] >= 0, SDDP.State, initial_value = 0.0)
        # ========== Variables ==========
        @variables(subproblem, begin
            y[1:n, 1:m] >= 0  # y[i, j]: production of technology i in block j.
            v[1:n] >= 0  # v[i]: new capacity of technology i built this stage.
            penalty >= 0  # Unmet demand, charged at 1e6 in the objective.
            rhs_noise[1:m]  # Dummy variable for RHS noise term.
        end)
        # ========== Constraints ==========
        @constraints(
            subproblem,
            begin
                # Capacity accounting: new investment adds to the stock.
                [i = 1:n], x[i].out == x[i].in + v[i]
                # Production cannot exceed the capacity entering the stage.
                [i = 1:n], sum(y[i, :]) <= x[i].in
                # Meet demand in each block, with `penalty` as a slack.
                [j = 1:m], sum(y[:, j]) + penalty >= rhs_noise[j]
            end
        )
        if stage == 2
            # No investment in last stage.
            @constraint(subproblem, sum(v) == 0)
        end
        # ========== Uncertainty ==========
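        # `SDDP.parameterize` attaches the sample space `1:2` with probabilities
        # `p2` to this node. Before each subproblem solve, SDDP.jl samples a
        # scenario ω and calls the closure, which fixes the dummy `rhs_noise`
        # variables to that scenario's demands.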
        if stage != 1  # No uncertainty in the first stage.
            SDDP.parameterize(subproblem, 1:size(D2, 2), p2) do ω
                for j in 1:m
                    JuMP.fix(rhs_noise[j], D2[j, ω])
                end
            end
        end
        # ========== Stage objective ==========
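        # Total cost: investment (ic' * v), production cost weighted by block
        # duration (C' * y * T), and a large penalty for unmet demand.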
        @stageobjective(subproblem, ic' * v + C' * y * T + 1e6 * penalty)
        return
    end
    SDDP.train(model, iteration_limit = 50, log_frequency = 10)
    @test SDDP.calculate_bound(model) ≈ 340315.52 atol = 0.1
    return
end

test_prob52_2stages()
------------------------------------------------------------------------------
          SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23

Problem
  Nodes           : 2
  State variables : 4
  Scenarios       : 2.00000e+00
  Existing cuts   : false
  Subproblem structure                      : (min, max)
    Variables                               : (29, 29)
    VariableRef in MOI.LessThan{Float64}    : (1, 1)
    AffExpr in MOI.LessThan{Float64}        : (4, 4)
    VariableRef in MOI.GreaterThan{Float64} : (22, 22)
    AffExpr in MOI.EqualTo{Float64}         : (4, 5)
    AffExpr in MOI.GreaterThan{Float64}     : (3, 3)
Options
  Solver          : serial mode
  Risk measure    : SDDP.Expectation()
  Sampling scheme : SDDP.InSampleMonteCarlo

Numerical stability report
  Non-zero Matrix range     [1e+00, 1e+00]
  Non-zero Objective range  [1e+00, 1e+06]
  Non-zero Bounds range     [0e+00, 0e+00]
  Non-zero RHS range        [0e+00, 0e+00]
No problems detected

 Iteration    Simulation       Bound         Time (s)    Proc. ID   # Solves
       10    3.455904e+05   3.147347e+05   6.511927e-03          1         50
       20    3.336455e+05   3.402383e+05   1.321077e-02          1        100
       30    3.337559e+05   3.403155e+05   2.067399e-02          1        150
       40    3.337559e+05   3.403155e+05   2.826381e-02          1        200
       50    3.337559e+05   3.403155e+05   3.648996e-02          1        250

Terminating training
  Status         : iteration_limit
  Total time (s) : 3.648996e-02
  Total solves   : 250
  Best bound     :  3.403155e+05
  Simulation CI  :  1.246633e+08 ± 1.715174e+08
------------------------------------------------------------------------------
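
To inspect the policy beyond its bound, one can simulate it and read back the
recorded decisions. The following is a minimal sketch, not part of the original
test: it assumes the function is modified to end with `return model` so the
trained policy graph is available, and it uses `SDDP.simulate` to record the
primal values of the variables defined above.

model = test_prob52_2stages()  # Assumes the function now ends with `return model`.
simulations = SDDP.simulate(
    model,
    100,           # Number of Monte Carlo replications of the policy.
    [:x, :v, :y],  # Primal values to record at every stage.
)
# `simulations[i][t]` is a Dict for replication i, stage t. State variables
# are recorded as objects with `.in` and `.out` fields.
stage1 = simulations[1][1]
println("First-stage investment: ", stage1[:v])
println("Capacity after stage 1: ", [xi.out for xi in stage1[:x]])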
