Asset management

This tutorial was generated using Literate.jl.

This example is taken from the book: J.R. Birge and F. Louveaux, Introduction to Stochastic Programming, Springer Series in Operations Research and Financial Engineering, Springer, New York, NY, 2011.

An investor has an initial wealth of 55 to split between stocks and bonds. In each of the two following stages the portfolio can be rebalanced after observing the returns, which depend on a Markov chain with two equally likely states. In the final stage, wealth is compared with a target of 80: each unit of surplus is worth 1, and each unit of shortfall costs 4.
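In symbols, writing $x^s_t$ and $x^b_t$ for the amounts held in stocks and bonds at the end of stage $t$, and $r^s_t$, $r^b_t$ for the gross returns observed in stage $t$ (the notation here is ours, not the book's), a sketch of the formulation that the code below encodes is:

$$
\begin{aligned}
\min\; & \mathbb{E}\left[\, 4\,\text{short} - \text{over} \,\right] \\
\text{s.t.}\; & x^s_1 + x^b_1 = 55, \\
& r^s_t x^s_{t-1} + r^b_t x^b_{t-1} = x^s_t + x^b_t, \qquad t = 2, 3, \\
& r^s_4 x^s_3 + r^b_4 x^b_3 - \text{over} + \text{short} = 80, \\
& x^s_t,\ x^b_t,\ \text{over},\ \text{short} \ge 0.
\end{aligned}
$$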

using SDDP, HiGHS, Test

function asset_management_simple()
    model = SDDP.PolicyGraph(
        SDDP.MarkovianGraph(
            Array{Float64,2}[
                [1.0]',
                [0.5 0.5],
                [0.5 0.5; 0.5 0.5],
                [0.5 0.5; 0.5 0.5],
            ],
        );
        lower_bound = -1_000.0,
        optimizer = HiGHS.Optimizer,
    ) do subproblem, index
        (stage, markov_state) = index
        # Gross returns on stocks and bonds in each of the two Markov states.
        r_stock = [1.25, 1.06]
        r_bonds = [1.14, 1.12]
        @variable(subproblem, stocks >= 0, SDDP.State, initial_value = 0.0)
        @variable(subproblem, bonds >= 0, SDDP.State, initial_value = 0.0)
        if stage == 1
            # Split the initial wealth of 55 between stocks and bonds.
            @constraint(subproblem, stocks.out + bonds.out == 55)
            @stageobjective(subproblem, 0)
        elseif 1 < stage < 4
            # Reinvest the realized value of the portfolio in stocks and bonds.
            @constraint(
                subproblem,
                r_stock[markov_state] * stocks.in +
                r_bonds[markov_state] * bonds.in == stocks.out + bonds.out
            )
            @stageobjective(subproblem, 0)
        else
            # Compare final wealth with the target of 80: each unit of surplus
            # (over) is worth 1, and each unit of shortfall (short) costs 4.
            @variable(subproblem, over >= 0)
            @variable(subproblem, short >= 0)
            @constraint(
                subproblem,
                r_stock[markov_state] * stocks.in +
                r_bonds[markov_state] * bonds.in - over + short == 80
            )
            @stageobjective(subproblem, -over + 4 * short)
        end
    end
    # Train the policy, then check the converged bound against the known value.
    SDDP.train(model; log_frequency = 5)
    @test SDDP.calculate_bound(model) ≈ 1.514 atol = 1e-4
    return
end
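
The nodes of a Markovian policy graph are indexed by (stage, markov_state) tuples, which is what the do-block above destructures into stage and markov_state. As a standalone sketch (not part of the original example), the same graph can be built on its own:

using SDDP

# One Markov state in stage 1 and two equally likely states in each of
# stages 2-4, giving 1 + 2 + 2 + 2 = 7 nodes; this matches the "nodes : 7"
# line in the training log below.
graph = SDDP.MarkovianGraph(
    Array{Float64,2}[
        [1.0]',
        [0.5 0.5],
        [0.5 0.5; 0.5 0.5],
        [0.5 0.5; 0.5 0.5],
    ],
)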

asset_management_simple()
-------------------------------------------------------------------
         SDDP.jl (c) Oscar Dowson and contributors, 2017-24
-------------------------------------------------------------------
problem
  nodes           : 7
  state variables : 2
  scenarios       : 8.00000e+00
  existing cuts   : false
options
  solver          : serial mode
  risk measure    : SDDP.Expectation()
  sampling scheme : SDDP.InSampleMonteCarlo
subproblem structure
  VariableRef                             : [5, 7]
  AffExpr in MOI.EqualTo{Float64}         : [1, 1]
  VariableRef in MOI.GreaterThan{Float64} : [3, 5]
  VariableRef in MOI.LessThan{Float64}    : [1, 1]
numerical stability report
  matrix range     [1e+00, 1e+00]
  objective range  [1e+00, 4e+00]
  bounds range     [1e+03, 1e+03]
  rhs range        [6e+01, 8e+01]
-------------------------------------------------------------------
 iteration    simulation      bound        time (s)     solves  pid
-------------------------------------------------------------------
         5  -5.684342e-14  1.184830e+00  1.273203e-02        87   1
        10   5.012507e+01  1.508277e+00  1.931286e-02       142   1
        15  -1.428571e+00  1.514085e+00  2.632284e-02       197   1
        20   7.105427e-14  1.514085e+00  3.380990e-02       252   1
        25  -3.979039e-13  1.514085e+00  8.704400e-02       339   1
        30  -1.428571e+00  1.514085e+00  9.537506e-02       394   1
        35  -1.428571e+00  1.514085e+00  1.289730e-01       449   1
        40   0.000000e+00  1.514085e+00  1.384881e-01       504   1
-------------------------------------------------------------------
status         : simulation_stopping
total time (s) : 1.384881e-01
total solves   : 504
best bound     :  1.514085e+00
simulation ci  :  2.863132e+00 ± 6.778637e+00
numeric issues : 0
-------------------------------------------------------------------
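
To inspect the trained policy rather than just the bound, the model can be simulated. The function above discards the model, so the following is only a sketch: it assumes asset_management_simple is modified to end with return model instead of return.

# Assumes `asset_management_simple` has been modified to return the trained model.
model = asset_management_simple()

# Simulate 100 Monte Carlo replications, recording both state variables.
simulations = SDDP.simulate(model, 100, [:stocks, :bonds])

# Total cost (4 * shortfall - surplus) of each replication.
objectives = [sum(stage[:stage_objective] for stage in sim) for sim in simulations]
println("Average simulated cost: ", sum(objectives) / length(objectives))

For a converged policy, the average simulated cost should be close to the bound of roughly 1.51 reported above, although, as the wide simulation confidence interval in the log suggests, many replications are needed for a tight estimate.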