Asset management

This tutorial was generated using Literate.jl.

This example is taken from the book: J.R. Birge and F. Louveaux, Introduction to Stochastic Programming, Springer Series in Operations Research and Financial Engineering, Springer, New York, NY, 2011.
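In outline, the model allocates an initial wealth of 55 between stocks and bonds, rebalances the portfolio in stages two and three under Markovian gross returns, and in the final stage compares the terminal wealth with a target of 80. Writing over and short for the surplus and shortfall, the final-stage subproblem built in the code below can be sketched (with m the Markov state) as

    \min \; -\mathrm{over} + 4\,\mathrm{short}
    \quad \text{s.t.} \quad
    r^{\mathrm{stock}}_m \cdot \mathrm{stocks} + r^{\mathrm{bonds}}_m \cdot \mathrm{bonds} - \mathrm{over} + \mathrm{short} = 80,
    \qquad \mathrm{over}, \mathrm{short} \ge 0.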

using SDDP, HiGHS, Test

function asset_management_simple()
    model = SDDP.PolicyGraph(
        # A Markovian graph with four stages: a single Markov state in the
        # first stage and two equally likely Markov states in stages 2, 3, and 4.
        SDDP.MarkovianGraph(
            Array{Float64,2}[
                [1.0]',
                [0.5 0.5],
                [0.5 0.5; 0.5 0.5],
                [0.5 0.5; 0.5 0.5],
            ],
        );
        lower_bound = -1_000.0,
        optimizer = HiGHS.Optimizer,
    ) do subproblem, index
        (stage, markov_state) = index
        # Gross returns of stocks and bonds in each of the two Markov states.
        r_stock = [1.25, 1.06]
        r_bonds = [1.14, 1.12]
        @variable(subproblem, stocks >= 0, SDDP.State, initial_value = 0.0)
        @variable(subproblem, bonds >= 0, SDDP.State, initial_value = 0.0)
        if stage == 1
            # First stage: split the initial wealth of 55 between the two assets.
            @constraint(subproblem, stocks.out + bonds.out == 55)
            @stageobjective(subproblem, 0)
        elseif 1 < stage < 4
            # Intermediate stages: reinvest the realized returns, rebalancing
            # between stocks and bonds.
            @constraint(
                subproblem,
                r_stock[markov_state] * stocks.in +
                r_bonds[markov_state] * bonds.in == stocks.out + bonds.out
            )
            @stageobjective(subproblem, 0)
        else
            # Final stage: measure the surplus (`over`) or shortfall (`short`)
            # of the terminal wealth relative to the target of 80; shortfalls
            # are penalized four times as heavily as surpluses are rewarded.
            @variable(subproblem, over >= 0)
            @variable(subproblem, short >= 0)
            @constraint(
                subproblem,
                r_stock[markov_state] * stocks.in +
                r_bonds[markov_state] * bonds.in - over + short == 80
            )
            @stageobjective(subproblem, -over + 4 * short)
        end
    end
    # Train the policy; the bound converges to approximately 1.514.
    SDDP.train(model; log_frequency = 5)
    @test SDDP.calculate_bound(model) ≈ 1.514 atol = 1e-4
    return
end

asset_management_simple()
-------------------------------------------------------------------
         SDDP.jl (c) Oscar Dowson and contributors, 2017-24
-------------------------------------------------------------------
problem
  nodes           : 7
  state variables : 2
  scenarios       : 8.00000e+00
  existing cuts   : false
options
  solver          : serial mode
  risk measure    : SDDP.Expectation()
  sampling scheme : SDDP.InSampleMonteCarlo
subproblem structure
  VariableRef                             : [5, 7]
  AffExpr in MOI.EqualTo{Float64}         : [1, 1]
  VariableRef in MOI.GreaterThan{Float64} : [3, 5]
  VariableRef in MOI.LessThan{Float64}    : [1, 1]
numerical stability report
  matrix range     [1e+00, 1e+00]
  objective range  [1e+00, 4e+00]
  bounds range     [1e+03, 1e+03]
  rhs range        [6e+01, 8e+01]
-------------------------------------------------------------------
 iteration    simulation      bound        time (s)     solves  pid
-------------------------------------------------------------------
         5  -1.620000e+00 -8.522173e-01  1.301599e-02        87   1
        10  -1.847411e-13  1.392784e+00  1.991010e-02       142   1
        15  -6.963319e-13  1.514085e+00  2.743506e-02       197   1
        20   1.136868e-13  1.514085e+00  3.520012e-02       252   1
        25  -1.080025e-12  1.514085e+00  9.351707e-02       339   1
        30   1.136868e-13  1.514085e+00  1.022439e-01       394   1
        35  -2.479988e+01  1.514085e+00  1.113100e-01       449   1
        40   1.136868e-13  1.514085e+00  1.208129e-01       504   1
-------------------------------------------------------------------
status         : simulation_stopping
total time (s) : 1.208129e-01
total solves   : 504
best bound     :  1.514085e+00
simulation ci  :  3.429060e+00 ± 6.665883e+00
numeric issues : 0
-------------------------------------------------------------------
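
To inspect the policy itself, rather than only the bound, the trained model can be simulated. The following is a minimal sketch, assuming asset_management_simple is modified to end with return model (instead of return) so that the trained policy is available; SDDP.simulate records the listed variables at every node of each sampled scenario.

# A minimal sketch, assuming `asset_management_simple` is modified to
# `return model` so that the trained policy is available here.
model = asset_management_simple()

# Sample 100 scenarios from the trained policy, recording both state variables.
simulations = SDDP.simulate(model, 100, [:stocks, :bonds])

# Stage 1 has a single Markov state and no noise, so the first-stage
# allocation is the same in every simulated scenario.
first_stage = simulations[1][1]
println("stocks = ", first_stage[:stocks].out, ", bonds = ", first_stage[:bonds].out)

# Average total cost over the sampled scenarios; compare with the bound above.
objectives = [sum(node[:stage_objective] for node in sim) for sim in simulations]
println("average simulated cost = ", sum(objectives) / length(objectives))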