Add JuMP-like API #281

Open · wants to merge 4 commits into base: master
54 changes: 18 additions & 36 deletions README.md
@@ -33,15 +33,12 @@ examples, tutorials, and an API reference.

### DiffOpt-JuMP API with `Parameters`

Here is an example with a Parametric **Linear Program**:

```julia
using JuMP, DiffOpt, HiGHS

-model = Model(
-    () -> DiffOpt.diff_optimizer(
-        HiGHS.Optimizer;
-        with_parametric_opt_interface = true,
-    ),
-)
+model = DiffOpt.quadratic_diff_model(HiGHS.Optimizer)
set_silent(model)

p_val = 4.0
@@ -64,9 +61,9 @@ optimize!(model)

# differentiate w.r.t. p
direction_p = 3.0
-MOI.set(model, DiffOpt.ForwardConstraintSet(), ParameterRef(p), Parameter(direction_p))
+DiffOpt.set_forward_parameter(model, p, direction_p)
DiffOpt.forward_differentiate!(model)
-@show MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) == direction_p * 3 / pc_val
+@show DiffOpt.get_forward_variable(model, x) == direction_p * 3 / pc_val

# update p and pc
p_val = 2.0
@@ -82,45 +79,30 @@ optimize!(model)
DiffOpt.empty_input_sensitivities!(model)
# differentiate w.r.t. pc
direction_pc = 10.0
-MOI.set(model, DiffOpt.ForwardConstraintSet(), ParameterRef(pc), Parameter(direction_pc))
+DiffOpt.set_forward_parameter(model, pc, direction_pc)
DiffOpt.forward_differentiate!(model)
-@show abs(MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) -
+@show abs(DiffOpt.get_forward_variable(model, x) -
    -direction_pc * 3 * p_val / pc_val^2) < 1e-5

# always a good practice to clear previously set sensitivities
DiffOpt.empty_input_sensitivities!(model)
# Now, reverse mode AD
direction_x = 10.0
-MOI.set(model, DiffOpt.ReverseVariablePrimal(), x, direction_x)
+DiffOpt.set_reverse_variable(model, x, direction_x)
DiffOpt.reverse_differentiate!(model)
-@show MOI.get(model, DiffOpt.ReverseConstraintSet(), ParameterRef(p)) == MOI.Parameter(direction_x * 3 / pc_val)
-@show abs(MOI.get(model, DiffOpt.ReverseConstraintSet(), ParameterRef(pc)).value -
-    -direction_x * 3 * p_val / pc_val^2) < 1e-5
+@show DiffOpt.get_reverse_parameter(model, p) == direction_x * 3 / pc_val
+@show DiffOpt.get_reverse_parameter(model, pc) == -direction_x * 3 * p_val / pc_val^2
```
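
For reference, the expected values in the `@show` checks follow from the closed-form solution of this program (the same derivation appears as comments in the PR's test file): the optimal primal is

$$x(p, p_c) = \frac{3p}{p_c}, \qquad \frac{\partial x}{\partial p} = \frac{3}{p_c}, \qquad \frac{\partial x}{\partial p_c} = -\frac{3p}{p_c^2},$$

so forward mode scales these partials by the input direction, while reverse mode scales them by the seed `direction_x`.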

-### Low level DiffOpt-JuMP API:
-
-A brief example:
+Available models:
+* `DiffOpt.quadratic_diff_model`: Quadratic Programs (QP) and Linear Programs (LP)
+* `DiffOpt.conic_diff_model`: Conic Programs (CP) and Linear Programs (LP)
+* `DiffOpt.nonlinear_diff_model`: Nonlinear Programs (NLP), Quadratic Programs (QP) and Linear Programs (LP)
+* `DiffOpt.diff_model`: Nonlinear Programs (NLP), Conic Programs (CP), Quadratic Programs (QP) and Linear Programs (LP)
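
As an illustration (not part of this diff; a sketch mirroring the PR's test matrix, assuming HiGHS and Ipopt are installed), the constructors are drop-in replacements for one another on problem classes they both support:

```julia
using JuMP, DiffOpt, HiGHS, Ipopt

for (MODEL, SOLVER) in [
    (DiffOpt.quadratic_diff_model, HiGHS.Optimizer),  # LP/QP
    (DiffOpt.nonlinear_diff_model, Ipopt.Optimizer),  # NLP, QP, LP
]
    # The same modeling code runs against either wrapper.
    model = MODEL(SOLVER)
    set_silent(model)
    @variable(model, x)
    @variable(model, p in Parameter(4.0))
    @constraint(model, 2 * x >= 3 * p)
    @objective(model, Min, 2x)
    optimize!(model)
    @assert isapprox(value(x), 6.0; atol = 1e-4)  # x* = 3p / 2 = 6
end
```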

-```julia
-using JuMP, DiffOpt, HiGHS
-# Create a model using the wrapper
-model = Model(() -> DiffOpt.diff_optimizer(HiGHS.Optimizer))
-# Define your model and solve it
-@variable(model, x)
-@constraint(model, cons, x >= 3)
-@objective(model, Min, 2x)
-optimize!(model)
-# Choose the problem parameters to differentiate with respect to, and set their
-# perturbations.
-MOI.set(model, DiffOpt.ReverseVariablePrimal(), x, 1.0)
-# Differentiate the model
-DiffOpt.reverse_differentiate!(model)
-# fetch the gradients
-grad_exp = MOI.get(model, DiffOpt.ReverseConstraintFunction(), cons) # -3 x - 1
-constant(grad_exp) # -1
-coefficient(grad_exp, x) # -3
-```

## Citing DiffOpt.jl

2 changes: 2 additions & 0 deletions src/DiffOpt.jl
@@ -48,6 +48,8 @@ function add_default_factorization(model)
return
end

+include("jump_wrapper.jl")

export diff_optimizer

# TODO
141 changes: 141 additions & 0 deletions src/jump_wrapper.jl
@@ -0,0 +1,141 @@
"""
diff_model(optimizer_constructor; with_parametric_opt_interface::Bool = true, with_bridge_type = Float64, with_cache::Bool = true)

Create a JuMP model with a differentiable optimizer. The optimizer is created
using `optimizer_constructor`. This model will try to select the proper
differentiable optimization method based on the problem structure.

See also: [`nonlinear_diff_model`](@ref), [`conic_diff_model`](@ref), [`quadratic_diff_model`](@ref).
"""
function diff_model(
optimizer_constructor;
with_parametric_opt_interface::Bool = true,
with_bridge_type = Float64,
with_cache::Bool = true,
)
inner = diff_optimizer(
optimizer_constructor;
with_parametric_opt_interface = with_parametric_opt_interface,
with_bridge_type = with_bridge_type,
with_cache = with_cache,
)
return JuMP.direct_model(inner)
end
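
A minimal usage sketch (illustrative only, adapted from the PR's test matrix; assumes Ipopt is installed):

```julia
using JuMP, DiffOpt, Ipopt

# `diff_model` keeps every differentiation backend available and picks one
# based on the problem structure, so ordinary JuMP code follows.
model = DiffOpt.diff_model(Ipopt.Optimizer)
set_silent(model)
@variable(model, x)
@variable(model, p in Parameter(4.0))
@constraint(model, x >= 3 * p)
@objective(model, Min, 2x)
optimize!(model)
value(x)  # ≈ 12.0
```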

"""
nonlinear_diff_model(optimizer_constructor; with_bridge_type = Float64, with_cache::Bool = true)

Create a JuMP model with a differentiable optimizer for nonlinear programs.
The optimizer is created using `optimizer_constructor`.

See also: [`conic_diff_model`](@ref), [`quadratic_diff_model`](@ref), [`diff_model`](@ref).
"""
function nonlinear_diff_model(
optimizer_constructor;
with_bridge_type = Float64,
with_cache::Bool = true,
)
inner = diff_optimizer(
optimizer_constructor;
with_parametric_opt_interface = false,
[Inline review, Member] Add comment on why this is false
[Inline review, Author] Yup, I might rewrite some of the internals to try to get rid of this
        with_bridge_type = with_bridge_type,
        with_cache = with_cache,
    )
    MOI.set(inner, ModelConstructor(), NonLinearProgram.Model)
    return JuMP.direct_model(inner)
end

"""
conic_diff_model(optimizer_constructor; with_bridge_type = Float64, with_cache::Bool = true)

Create a JuMP model with a differentiable optimizer for conic programs.
The optimizer is created using `optimizer_constructor`.

See also: [`nonlinear_diff_model`](@ref), [`quadratic_diff_model`](@ref), [`diff_model`](@ref).
"""
function conic_diff_model(

Check warning on line 56 in src/jump_wrapper.jl

View check run for this annotation

Codecov / codecov/patch

src/jump_wrapper.jl#L56

Added line #L56 was not covered by tests
optimizer_constructor;
with_bridge_type = Float64,
with_cache::Bool = true,
)
inner = diff_optimizer(

Check warning on line 61 in src/jump_wrapper.jl

View check run for this annotation

Codecov / codecov/patch

src/jump_wrapper.jl#L61

Added line #L61 was not covered by tests
optimizer_constructor;
with_parametric_opt_interface = true,
with_bridge_type = with_bridge_type,
with_cache = with_cache,
)
MOI.set(inner, ModelConstructor(), ConicProgram.Model)
return JuMP.direct_model(inner)

Check warning on line 68 in src/jump_wrapper.jl

View check run for this annotation

Codecov / codecov/patch

src/jump_wrapper.jl#L67-L68

Added lines #L67 - L68 were not covered by tests
end

"""
quadratic_diff_model(optimizer_constructor; with_bridge_type = Float64, with_cache::Bool = true)

Create a JuMP model with a differentiable optimizer for quadratic programs.
The optimizer is created using `optimizer_constructor`.

See also: [`nonlinear_diff_model`](@ref), [`conic_diff_model`](@ref), [`diff_model`](@ref).
"""
function quadratic_diff_model(
optimizer_constructor;
with_bridge_type = Float64,
with_cache::Bool = true,
)
inner = diff_optimizer(
optimizer_constructor;
with_parametric_opt_interface = true,
with_bridge_type = with_bridge_type,
with_cache = with_cache,
)
MOI.set(inner, ModelConstructor(), QuadraticProgram.Model)
return JuMP.direct_model(inner)
end

"""
set_forward_parameter(model::JuMP.Model, variable::JuMP.VariableRef, value::Number)

Set the value of a parameter input sensitivity for forward mode.
"""
function set_forward_parameter(
model::JuMP.Model,
variable::JuMP.VariableRef,
value::Number,
)
return MOI.set(
model,
ForwardConstraintSet(),
ParameterRef(variable),
Parameter(value),
)
end
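
To make the mapping concrete, a short sketch (reusing `model`, `p`, and `x` from the README example above; the two `set` calls are equivalent, as the README diff in this PR shows):

```julia
# Raw attribute API...
MOI.set(model, DiffOpt.ForwardConstraintSet(), ParameterRef(p), Parameter(3.0))
# ...and the new helper that wraps it:
DiffOpt.set_forward_parameter(model, p, 3.0)

DiffOpt.forward_differentiate!(model)
dx = DiffOpt.get_forward_variable(model, x)  # directional derivative of x
```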

"""
get_reverse_parameter(model::JuMP.Model, variable::JuMP.VariableRef)

Get the value of a parameter output sensitivity for reverse mode.
"""
function get_reverse_parameter(model::JuMP.Model, variable::JuMP.VariableRef)
return MOI.get(model, ReverseConstraintSet(), ParameterRef(variable)).value
end

"""
set_reverse_variable(model::JuMP.Model, variable::JuMP.VariableRef, value::Number)

Set the value of a variable input sensitivity for reverse mode.
"""
function set_reverse_variable(
model::JuMP.Model,
variable::JuMP.VariableRef,
value::Number,
)
return MOI.set(model, ReverseVariablePrimal(), variable, value)
end

"""
get_forward_variable(model::JuMP.Model, variable::JuMP.VariableRef)

Get the value of a variable output sensitivity for forward mode.
"""
function get_forward_variable(model::JuMP.Model, variable::JuMP.VariableRef)
return MOI.get(model, ForwardVariablePrimal(), variable)
end
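
And the reverse-mode round trip with the remaining helpers (again a sketch, reusing `model`, `x`, `p`, and `pc` from the README example):

```julia
DiffOpt.set_reverse_variable(model, x, 1.0)    # seed the output sensitivity
DiffOpt.reverse_differentiate!(model)
dp = DiffOpt.get_reverse_parameter(model, p)   # sensitivity w.r.t. p
dpc = DiffOpt.get_reverse_parameter(model, pc) # sensitivity w.r.t. pc
```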
2 changes: 1 addition & 1 deletion src/moi_wrapper.jl
@@ -863,7 +863,7 @@ function MOI.get(model::Optimizer, attr::DifferentiateTimeSec)
end

function MOI.supports(
-    model::Optimizer,
+    ::Optimizer,
    ::NonLinearKKTJacobianFactorization,
    ::Function,
)
124 changes: 124 additions & 0 deletions test/jump_wrapper.jl
@@ -0,0 +1,124 @@
# Copyright (c) 2020: Akshay Sharma and contributors
#
# Use of this source code is governed by an MIT-style license that can be found
# in the LICENSE.md file or at https://opensource.org/licenses/MIT.

module TestJuMPWrapper

using Test
using JuMP
import DiffOpt
import HiGHS
import Ipopt
import SCS
import MathOptInterface as MOI

const ATOL = 1e-3
const RTOL = 1e-3

function runtests()
    for name in names(@__MODULE__; all = true)
        if startswith("$name", "test_")
            @testset "$(name)" begin
                getfield(@__MODULE__, name)()
            end
        end
    end
    return
end

function test_jump_api()
    for (MODEL, SOLVER) in [
            (DiffOpt.diff_model, Ipopt.Optimizer),
            (DiffOpt.quadratic_diff_model, HiGHS.Optimizer),
            (DiffOpt.quadratic_diff_model, SCS.Optimizer),
            (DiffOpt.quadratic_diff_model, Ipopt.Optimizer),
            # (DiffOpt.conic_diff_model, HiGHS.Optimizer),
            # (DiffOpt.conic_diff_model, SCS.Optimizer), # conic model has an issue with sign

[Inline review, Member] We need at least one of the three to be enabled, otherwise it's not tested
[Inline review, Author] Yup, but there is a bug: conic returns the wrong sign in some cases.

            # (DiffOpt.conic_diff_model, Ipopt.Optimizer),
            # (DiffOpt.nonlinear_diff_model, HiGHS.Optimizer), # SQF ctr not supported?
            # (DiffOpt.nonlinear_diff_model, SCS.Optimizer), # returns zero for sensitivity
            (DiffOpt.nonlinear_diff_model, Ipopt.Optimizer),
        ],
        ineq in [true, false],
        min in [true, false],
        flip in [true, false]

        @testset "$(MODEL) with: $(SOLVER), $(ineq ? "ineqs" : "eqs"), $(min ? "Min" : "Max"), $(flip ? "leq" : "geq")" begin
            model = MODEL(SOLVER)
            set_silent(model)

            p_val = 4.0
            pc_val = 2.0
            @variable(model, x)
            @variable(model, p in Parameter(p_val))
            @variable(model, pc in Parameter(pc_val))
            if ineq
                if !flip
                    cons = @constraint(model, pc * x >= 3 * p)
                else
                    cons = @constraint(model, pc * x <= 3 * p)
                end
            else
                @constraint(model, cons, pc * x == 3 * p)
            end
            sign = flip ? -1 : 1
            if min
                @objective(model, Min, 2x * sign)
            else
                @objective(model, Max, -2x * sign)
            end
            optimize!(model)
            @test value(x) ≈ 3 * p_val / pc_val atol = ATOL rtol = RTOL

            # the function is
            # x(p, pc) = 3p / pc
            # hence,
            # dx/dp = 3 / pc
            # dx/dpc = -3p / pc^2

            # First, try forward mode AD

            # differentiate w.r.t. p
            direction_p = 3.0
            DiffOpt.set_forward_parameter(model, p, direction_p)
            DiffOpt.forward_differentiate!(model)
            @test DiffOpt.get_forward_variable(model, x) ≈
                  direction_p * 3 / pc_val atol = ATOL rtol = RTOL

            # update p and pc
            p_val = 2.0
            pc_val = 6.0
            set_parameter_value(p, p_val)
            set_parameter_value(pc, pc_val)
            # re-optimize
            optimize!(model)
            # check solution
            @test value(x) ≈ 3 * p_val / pc_val atol = ATOL rtol = RTOL

            # stop differentiating with respect to p
            DiffOpt.empty_input_sensitivities!(model)
            # differentiate w.r.t. pc
            direction_pc = 10.0
            DiffOpt.set_forward_parameter(model, pc, direction_pc)
            DiffOpt.forward_differentiate!(model)
            @test DiffOpt.get_forward_variable(model, x) ≈
                  -direction_pc * 3 * p_val / pc_val^2 atol = ATOL rtol = RTOL

            # always a good practice to clear previously set sensitivities
            DiffOpt.empty_input_sensitivities!(model)
            # Now, reverse mode AD
            direction_x = 10.0
            DiffOpt.set_reverse_variable(model, x, direction_x)
            DiffOpt.reverse_differentiate!(model)
            @test DiffOpt.get_reverse_parameter(model, p) ≈
                  direction_x * 3 / pc_val atol = ATOL rtol = RTOL
            @test DiffOpt.get_reverse_parameter(model, pc) ≈
                  -direction_x * 3 * p_val / pc_val^2 atol = ATOL rtol = RTOL
        end
    end
end

end # module

TestJuMPWrapper.runtests()