# Generation expansion

using SDDP
import HiGHS
import Test

"""
    generation_expansion(duality_handler)

Build and train a five-stage generation-expansion SDDP model with integer
investment state variables, then test the trained lower bound.

`duality_handler` selects how SDDP.jl computes duals of the integer
subproblems (e.g. `SDDP.ContinuousConicDuality()` or
`SDDP.LagrangianDuality()`).

If the enclosing script is run with a `--write` first argument, the model is
serialized to the benchmark model directory and the process exits instead of
training.
"""
function generation_expansion(duality_handler)
    build_cost = 1e4  # one-off cost of building a unit
    use_cost = 4      # marginal cost per unit of generation
    num_units = 5
    capacities = ones(num_units)
    # Rows are stages; columns are the equally likely demand realizations
    # sampled in `SDDP.parameterize` below.
    demand_vals =
        0.5 * [
            5 5 5 5 5 5 5 5
            4 3 1 3 0 9 8 17
            0 9 4 2 19 19 13 7
            25 11 4 14 4 6 15 12
            6 7 5 3 8 4 17 13
        ]
    # Cost of unmet demand
    penalty = 5e5
    # Discounting rate
    rho = 0.99
    model = SDDP.LinearPolicyGraph(
        stages = 5,
        lower_bound = 0.0,
        optimizer = HiGHS.Optimizer,
    ) do sp, stage
        # 0/1 investment decisions carried between stages as state variables.
        @variable(
            sp,
            0 <= invested[1:num_units] <= 1,
            SDDP.State,
            Int,
            initial_value = 0
        )
        @variables(sp, begin
            generation >= 0
            unmet >= 0
            demand
        end)

        @constraints(
            sp,
            begin
                # Can't un-invest
                investment[i in 1:num_units], invested[i].out >= invested[i].in
                # Generation capacity
                sum(capacities[i] * invested[i].out for i in 1:num_units) >=
                generation
                # Meet demand or pay a penalty
                unmet >= demand - generation
                # For fewer iterations order the units to break symmetry, units are identical (tougher numerically)
                [j in 1:(num_units-1)], invested[j].out <= invested[j+1].out
            end
        )
        # Demand is uncertain
        SDDP.parameterize(ω -> JuMP.fix(demand, ω), sp, demand_vals[stage, :])

        # Pay the build cost only when a unit switches from .in = 0 to .out = 1.
        @expression(
            sp,
            investment_cost,
            build_cost *
            sum(invested[i].out - invested[i].in for i in 1:num_units)
        )
        @stageobjective(
            sp,
            (investment_cost + generation * use_cost) * rho^(stage - 1) +
            penalty * unmet
        )
    end
    if get(ARGS, 1, "") == "--write"
        # Run `$ julia generation_expansion.jl --write` to update the benchmark
        # model directory
        model_dir = joinpath(@__DIR__, "..", "..", "..", "benchmarks", "models")
        SDDP.write_to_file(
            model,
            joinpath(model_dir, "generation_expansion.sof.json.gz");
            test_scenarios = 100,
        )
        exit(0)
    end
    SDDP.train(
        model,
        iteration_limit = 50,
        log_frequency = 10,
        duality_handler = duality_handler,
    )
    Test.@test SDDP.calculate_bound(model) ≈ 2.078860e6 atol = 1e3
    return
end

# Solve the model once with each supported duality handler, in order.
for handler in (SDDP.ContinuousConicDuality(), SDDP.LagrangianDuality())
    generation_expansion(handler)
end
------------------------------------------------------------------------------
          SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23

Problem
  Nodes           : 5
  State variables : 5
  Scenarios       : 3.27680e+04
  Existing cuts   : false
  Subproblem structure                      : (min, max)
    Variables                               : (14, 14)
    VariableRef in MOI.LessThan{Float64}    : (5, 6)
    AffExpr in MOI.LessThan{Float64}        : (4, 4)
    AffExpr in MOI.GreaterThan{Float64}     : (7, 7)
    VariableRef in MOI.GreaterThan{Float64} : (8, 8)
    VariableRef in MOI.Integer              : (5, 5)
Options
  Solver          : serial mode
  Risk measure    : SDDP.Expectation()
  Sampling scheme : SDDP.InSampleMonteCarlo

Numerical stability report
  Non-zero Matrix range     [1e+00, 1e+00]
  Non-zero Objective range  [1e+00, 5e+05]
  Non-zero Bounds range     [1e+00, 1e+00]
  Non-zero RHS range        [0e+00, 0e+00]
No problems detected

 Iteration    Simulation       Bound         Time (s)    Proc. ID   # Solves
       10    1.299460e+06   2.074407e+06   3.225350e-01          1        420
       20    5.494569e+05   2.078257e+06   6.449771e-01          1        840
       30    3.799664e+06   2.078257e+06   9.656630e-01          1       1260
       40    3.003323e+04   2.078257e+06   1.291656e+00          1       1680
       50    1.299670e+06   2.078257e+06   1.623750e+00          1       2100

Terminating training
  Status         : iteration_limit
  Total time (s) : 1.623750e+00
  Total solves   : 2100
  Best bound     :  2.078257e+06
  Simulation CI  :  1.637329e+06 ± 4.278137e+05
------------------------------------------------------------------------------
------------------------------------------------------------------------------
          SDDP.jl (c) Oscar Dowson and SDDP.jl contributors, 2017-23

Problem
  Nodes           : 5
  State variables : 5
  Scenarios       : 3.27680e+04
  Existing cuts   : false
  Subproblem structure                      : (min, max)
    Variables                               : (14, 14)
    VariableRef in MOI.LessThan{Float64}    : (5, 6)
    AffExpr in MOI.LessThan{Float64}        : (4, 4)
    AffExpr in MOI.GreaterThan{Float64}     : (7, 7)
    VariableRef in MOI.GreaterThan{Float64} : (8, 8)
    VariableRef in MOI.Integer              : (5, 5)
Options
  Solver          : serial mode
  Risk measure    : SDDP.Expectation()
  Sampling scheme : SDDP.InSampleMonteCarlo

Numerical stability report
  Non-zero Matrix range     [1e+00, 1e+00]
  Non-zero Objective range  [1e+00, 5e+05]
  Non-zero Bounds range     [1e+00, 1e+00]
  Non-zero RHS range        [0e+00, 0e+00]
No problems detected

 Iteration    Simulation       Bound         Time (s)    Proc. ID   # Solves
       10L   6.049668e+06   2.079056e+06   9.531000e-01          1        420
       20L   3.964699e+04   2.079373e+06   2.084185e+00          1        840
       30L   2.299861e+06   2.079457e+06   3.362212e+00          1       1260
       40L   2.549666e+06   2.079457e+06   4.698038e+00          1       1680
       50L   3.799867e+06   2.079457e+06   5.960328e+00          1       2100

Terminating training
  Status         : iteration_limit
  Total time (s) : 5.960328e+00
  Total solves   : 2100
  Best bound     :  2.079457e+06
  Simulation CI  :  2.279134e+06 ± 5.020451e+05
------------------------------------------------------------------------------

This page was generated using Literate.jl.