13 changes: 12 additions & 1 deletion src/controller/construct.jl
@@ -439,7 +439,12 @@ function setconstraint!(
JuMP.delete(optim, optim[:linconstraint])
JuMP.unregister(optim, :linconstraint)
@constraint(optim, linconstraint, A*Z̃var .≤ b)
set_nonlincon!(mpc, model, transcription, optim)
if JuMP.solver_name(optim) ≠ "Ipopt"
set_nonlincon!(mpc, model, transcription, optim)
else
g_oracle, geq_oracle = get_nonlinops(mpc, optim)
set_nonlincon_exp!(mpc, transcription, g_oracle, geq_oracle)
end
else
i_b, i_g = init_matconstraint_mpc(
model, transcription, nc,
@@ -453,6 +458,12 @@
return mpc
end

"By default, no nonlinear operators, return 4 nothing"
get_nonlinops(::PredictiveController, _ ) = (nothing, nothing, nothing, nothing)

"By default, no nonlinear constraints, return nothing."
set_nonlincon_exp!(::PredictiveController, ::TranscriptionMethod, _ , _) = nothing
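These fallbacks let `setconstraint!` take the new branch on any controller type without first checking whether oracles exist; only `NonLinMPC` overrides them, further down in `src/controller/nonlinmpc.jl`. A minimal sketch of the dispatch pattern, with a hypothetical controller type standing in for the package's own hierarchy:

```julia
# Sketch only: `DummyMPC` is hypothetical, and the abstract type here stands
# in for the package's own `PredictiveController`.
abstract type PredictiveController end
struct DummyMPC <: PredictiveController end

get_nonlinops(::PredictiveController, _) = (nothing, nothing, nothing, nothing)

# Call sites may destructure only the oracles, as setconstraint! does above:
g_oracle, geq_oracle = get_nonlinops(DummyMPC(), nothing)
@assert g_oracle === nothing && geq_oracle === nothing
```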

"""
default_Hp(model::LinModel)

229 changes: 201 additions & 28 deletions src/controller/nonlinmpc.jl
@@ -514,7 +514,9 @@ end

Init the nonlinear optimization for [`NonLinMPC`](@ref) controllers.
"""
function init_optimization!(mpc::NonLinMPC, model::SimModel, optim::JuMP.GenericModel)
function init_optimization!(
mpc::NonLinMPC, model::SimModel, optim::JuMP.GenericModel{JNT}
) where JNT<:Real
# --- variables and linear constraints ---
con, transcription = mpc.con, mpc.transcription
nZ̃ = length(mpc.Z̃)
@@ -538,27 +540,37 @@ function init_optimization!(mpc::NonLinMPC, model::SimModel, optim::JuMP.Generic
JuMP.set_attribute(optim, "nlp_scaling_max_gradient", 10.0/C)
end
end
Jfunc, ∇Jfunc!, gfuncs, ∇gfuncs!, geqfuncs, ∇geqfuncs! = get_optim_functions(
mpc, optim
)
@operator(optim, J, nZ̃, Jfunc, ∇Jfunc!)
@objective(optim, Min, J(Z̃var...))
init_nonlincon!(mpc, model, transcription, gfuncs, ∇gfuncs!, geqfuncs, ∇geqfuncs!)
set_nonlincon!(mpc, model, transcription, optim)
if JuMP.solver_name(optim) ≠ "Ipopt"
# everything with the splatting syntax:
J_func, ∇J_func!, g_funcs, ∇g_funcs!, geq_funcs, ∇geq_funcs! = get_optim_functions(
mpc, optim
)
else
# constraints with vector nonlinear oracle, objective function with splatting:
g_oracle, geq_oracle, J_func, ∇J_func! = get_nonlinops(mpc, optim)
end
@operator(optim, J_op, nZ̃, J_func, ∇J_func!)
@objective(optim, Min, J_op(Z̃var...))
if JuMP.solver_name(optim) ≠ "Ipopt"
init_nonlincon!(mpc, model, transcription, g_funcs, ∇g_funcs!, geq_funcs, ∇geq_funcs!)
set_nonlincon!(mpc, model, transcription, optim)
else
set_nonlincon_exp!(mpc, transcription, g_oracle, geq_oracle)
end
return nothing
end
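For context, a hedged usage sketch of the new branch, reusing the toy model from the test diff at the bottom of this PR; it assumes Ipopt stays the default optimizer of `NonLinMPC`:

```julia
using ModelPredictiveControl, JuMP
# Toy SISO model lifted from test/3_test_predictive_control.jl below:
f! = (ẋ, x, u, _, _) -> ẋ .= -0.001x .+ u
h! = (y, x, _, _) -> y .= x
model = NonLinModel(f!, h!, 500, 1, 1, 1)
mpc = NonLinMPC(model, Hp=10, Hc=2)
# With the default Ipopt optimizer, init_optimization! builds the vector
# nonlinear oracles; any other solver keeps the splatting operators:
@assert JuMP.solver_name(mpc.optim) == "Ipopt"
```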

"""
get_optim_functions(
mpc::NonLinMPC, optim::JuMP.GenericModel
) -> Jfunc, ∇Jfunc!, gfuncs, ∇gfuncs!, geqfuncs, ∇geqfuncs!
) -> J_func, ∇J_func!, g_funcs, ∇g_funcs!, geq_funcs, ∇geq_funcs!

Return the functions for the nonlinear optimization of the `mpc` [`NonLinMPC`](@ref) controller.

Return the nonlinear objective `Jfunc` function, and `∇Jfunc!`, to compute its gradient.
Also return vectors with the nonlinear inequality constraint functions `gfuncs`, and
`∇gfuncs!`, for the associated gradients. Lastly, also return vectors with the nonlinear
equality constraint functions `geqfuncs` and gradients `∇geqfuncs!`.
Return the nonlinear objective `J_func` function, and `∇J_func!`, to compute its gradient.
Also return vectors with the nonlinear inequality constraint functions `g_funcs`, and
`∇g_funcs!`, for the associated gradients. Lastly, also return vectors with the nonlinear
equality constraint functions `geq_funcs` and gradients `∇geq_funcs!`.

This method is really intricate and I'm not proud of it. That's because of 3 elements:

@@ -615,11 +627,11 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
J[], _ = value_and_gradient!(Jfunc!, ∇J, ∇J_prep, grad, Z̃_∇J, ∇J_context...)
end
end
function Jfunc(Z̃arg::Vararg{T, N}) where {N, T<:Real}
function J_func(Z̃arg::Vararg{T, N}) where {N, T<:Real}
update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
return J[]::T
end
∇Jfunc! = if nZ̃ == 1 # univariate syntax (see JuMP.@operator doc):
∇J_func! = if nZ̃ == 1 # univariate syntax (see JuMP.@operator doc):
J_func! = if nZ̃ == 1 # univariate syntax (see JuMP.@operator doc):
function (Z̃arg)
update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
return ∇J[begin]
@@ -652,16 +664,16 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
value_and_jacobian!(gfunc!, g, ∇g, ∇g_prep, jac, Z̃_∇g, ∇g_context...)
end
end
gfuncs = Vector{Function}(undef, ng)
for i in eachindex(gfuncs)
g_funcs = Vector{Function}(undef, ng)
for i in eachindex(g_funcs)
gfunc_i = function (Z̃arg::Vararg{T, N}) where {N, T<:Real}
update_con!(g, ∇g, Z̃_∇g, Z̃arg)
return g[i]::T
end
gfuncs[i] = gfunc_i
g_funcs[i] = gfunc_i
end
∇gfuncs! = Vector{Function}(undef, ng)
for i in eachindex(∇gfuncs!)
∇g_funcs! = Vector{Function}(undef, ng)
for i in eachindex(∇g_funcs!)
∇gfuncs_i! = if nZ̃ == 1 # univariate syntax (see JuMP.@operator doc):
function (Z̃arg::T) where T<:Real
update_con!(g, ∇g, Z̃_∇g, Z̃arg)
@@ -673,7 +685,7 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
return ∇g_i .= @views ∇g[i, :]
end
end
∇gfuncs![i] = ∇gfuncs_i!
∇g_funcs![i] = ∇gfuncs_i!
end
# --------------------- equality constraint functions ---------------------------------
function geqfunc!(geq, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g)
@@ -694,28 +706,175 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
value_and_jacobian!(geqfunc!, geq, ∇geq, ∇geq_prep, jac, Z̃_∇geq, ∇geq_context...)
end
end
geqfuncs = Vector{Function}(undef, neq)
for i in eachindex(geqfuncs)
geq_funcs = Vector{Function}(undef, neq)
for i in eachindex(geq_funcs)
geqfunc_i = function (Z̃arg::Vararg{T, N}) where {N, T<:Real}
update_con_eq!(geq, ∇geq, Z̃_∇geq, Z̃arg)
return geq[i]::T
end
geqfuncs[i] = geqfunc_i
geq_funcs[i] = geqfunc_i
end
∇geqfuncs! = Vector{Function}(undef, neq)
for i in eachindex(∇geqfuncs!)
∇geq_funcs! = Vector{Function}(undef, neq)
for i in eachindex(∇geq_funcs!)
# only multivariate syntax, univariate is impossible since nonlinear equality
# constraints imply MultipleShooting, thus input increment ΔU and state X̂0 in Z̃:
∇geqfuncs_i! =
function (∇geq_i, Z̃arg::Vararg{T, N}) where {N, T<:Real}
update_con_eq!(geq, ∇geq, Z̃_∇geq, Z̃arg)
return ∇geq_i .= @views ∇geq[i, :]
end
∇geqfuncs![i] = ∇geqfuncs_i!
∇geq_funcs![i] = ∇geqfuncs_i!
end
return Jfunc, ∇Jfunc!, gfuncs, ∇gfuncs!, geqfuncs, ∇geqfuncs!
return J_func, ∇J_func!, g_funcs, ∇g_funcs!, geq_funcs, ∇geq_funcs!
end
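The memoization trick shared by all the closures above is worth a standalone illustration: the value and the derivative share one cache, recomputed only when the incoming argument differs from the previous one, with a `NaN` seed forcing the first computation. A minimal sketch with DifferentiationInterface.jl, independent of the package internals:

```julia
using DifferentiationInterface
import ForwardDiff

f(x) = sum(abs2, x)
backend = AutoForwardDiff()
x_last = fill(NaN, 2)             # NaN ≠ NaN, so the first call always updates
fval, ∇f = Ref(0.0), zeros(2)
prep = prepare_gradient(f, backend, x_last)
function update!(x)               # plays the role of update_objective! above
    if any(x .!= x_last)          # the package uses `isdifferent` for this test
        x_last .= x
        fval[], _ = value_and_gradient!(f, ∇f, prep, backend, x_last)
    end
    return nothing
end
J_op(x...)      = (update!(collect(x)); fval[])   # value, splatting syntax
∇J_op!(g, x...) = (update!(collect(x)); g .= ∇f)  # gradient, multivariate form
```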

# TODO: move the docstring of the method above here and rework it
function get_nonlinops(mpc::NonLinMPC, optim::JuMP.GenericModel{JNT}) where JNT<:Real
# ----------- common cache for all functions ----------------------------------------
model = mpc.estim.model
transcription = mpc.transcription
grad, jac = mpc.gradient, mpc.jacobian
nu, ny, nx̂, nϵ = model.nu, model.ny, mpc.estim.nx̂, mpc.nϵ
nk = get_nk(model, transcription)
Hp, Hc = mpc.Hp, mpc.Hc
ng, nc, neq = length(mpc.con.i_g), mpc.con.nc, mpc.con.neq
nZ̃, nU, nŶ, nX̂, nK = length(mpc.Z̃), Hp*nu, Hp*ny, Hp*nx̂, Hp*nk
nΔŨ, nUe, nŶe = nu*Hc + nϵ, nU + nu, nŶ + ny
strict = Val(true)
myNaN, myInf = convert(JNT, NaN), convert(JNT, Inf)
J::Vector{JNT} = zeros(JNT, 1)
ΔŨ::Vector{JNT} = zeros(JNT, nΔŨ)
x̂0end::Vector{JNT} = zeros(JNT, nx̂)
K0::Vector{JNT} = zeros(JNT, nK)
Ue::Vector{JNT}, Ŷe::Vector{JNT} = zeros(JNT, nUe), zeros(JNT, nŶe)
U0::Vector{JNT}, Ŷ0::Vector{JNT} = zeros(JNT, nU), zeros(JNT, nŶ)
Û0::Vector{JNT}, X̂0::Vector{JNT} = zeros(JNT, nU), zeros(JNT, nX̂)
gc::Vector{JNT}, g::Vector{JNT} = zeros(JNT, nc), zeros(JNT, ng)
geq::Vector{JNT} = zeros(JNT, neq)
# -------------- inequality constraint: nonlinear oracle -----------------------------
function g!(g, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, geq)
update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃)
return nothing
end
Z̃_∇g = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call
∇g_context = (
Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0),
Cache(Û0), Cache(K0), Cache(X̂0),
Cache(gc), Cache(geq),
)
## temporarily enable all the inequality constraints for sparsity detection:
# mpc.con.i_g[1:end-nc] .= true
∇g_prep = prepare_jacobian(g!, g, jac, Z̃_∇g, ∇g_context...; strict)
# mpc.con.i_g[1:end-nc] .= false
∇g = init_diffmat(JNT, jac, ∇g_prep, nZ̃, ng)
function update_con!(g, ∇g, Z̃_∇g, Z̃_arg)
if isdifferent(Z̃_arg, Z̃_∇g)
Z̃_∇g .= Z̃_arg
value_and_jacobian!(g!, g, ∇g, ∇g_prep, jac, Z̃_∇g, ∇g_context...)
end
return nothing
end
function gfunc_oracle!(g_arg, Z̃_arg)
update_con!(g, ∇g, Z̃_∇g, Z̃_arg)
g_arg .= @views g[mpc.con.i_g]
return nothing
end
∇g_i_g = ∇g[mpc.con.i_g, :]
function ∇gfunc_oracle!(∇g_arg, Z̃_arg)
update_con!(g, ∇g, Z̃_∇g, Z̃_arg)
∇g_i_g .= @views ∇g[mpc.con.i_g, :]
diffmat2vec!(∇g_arg, ∇g_i_g)
return nothing
end
g_min = fill(-myInf, sum(mpc.con.i_g))
g_max = zeros(JNT, sum(mpc.con.i_g))
∇g_structure = init_diffstructure(∇g[mpc.con.i_g, :])
g_oracle = Ipopt._VectorNonlinearOracle(;
dimension = nZ̃,
l = g_min,
u = g_max,
eval_f = gfunc_oracle!,
jacobian_structure = ∇g_structure,
eval_jacobian = ∇gfunc_oracle!
)
# ------------- equality constraints : nonlinear oracle ------------------------------
function geq!(geq, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g)
update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃)
return nothing
end
Z̃_∇geq = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call
∇geq_context = (
Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0),
Cache(Û0), Cache(K0), Cache(X̂0),
Cache(gc), Cache(g)
)
∇geq_prep = prepare_jacobian(geq!, geq, jac, Z̃_∇geq, ∇geq_context...; strict)
∇geq = init_diffmat(JNT, jac, ∇geq_prep, nZ̃, neq)
function update_con_eq!(geq, ∇geq, Z̃_∇geq, Z̃_arg)
if isdifferent(Z̃_arg, Z̃_∇geq)
Z̃_∇geq .= Z̃_arg
value_and_jacobian!(geq!, geq, ∇geq, ∇geq_prep, jac, Z̃_∇geq, ∇geq_context...)
end
return nothing
end
function geq_oracle!(geq_arg, Z̃_arg)
update_con_eq!(geq, ∇geq, Z̃_∇geq, Z̃_arg)
geq_arg .= geq
return nothing
end
function ∇geq_oracle!(∇geq_arg, Z̃_arg)
update_con_eq!(geq, ∇geq, Z̃_∇geq, Z̃_arg)
diffmat2vec!(∇geq_arg, ∇geq)
return nothing
end
geq_min = geq_max = zeros(JNT, neq)
∇geq_structure = init_diffstructure(∇geq)
geq_oracle = Ipopt._VectorNonlinearOracle(;
dimension = nZ̃,
l = geq_min,
u = geq_max,
eval_f = geq_oracle!,
jacobian_structure = ∇geq_structure,
eval_jacobian = ∇geq_oracle!
)
# ------------- objective function: splatting syntax ---------------------------------
function J!(Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq)
update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃)
return obj_nonlinprog!(Ŷ0, U0, mpc, model, Ue, Ŷe, ΔŨ)
end
Z̃_∇J = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call
∇J_context = (
Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0),
Cache(Û0), Cache(K0), Cache(X̂0),
Cache(gc), Cache(g), Cache(geq),
)
∇J_prep = prepare_gradient(J!, grad, Z̃_∇J, ∇J_context...; strict)
∇J = Vector{JNT}(undef, nZ̃)
function update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
if isdifferent(Z̃arg, Z̃_∇J)
Z̃_∇J .= Z̃arg
J[], _ = value_and_gradient!(J!, ∇J, ∇J_prep, grad, Z̃_∇J, ∇J_context...)
end
end
function J_func(Z̃arg::Vararg{T, N}) where {N, T<:Real}
update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
return J[]::T
end
∇J_func! = if nZ̃ == 1 # univariate syntax (see JuMP.@operator doc):
function (Z̃arg)
update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
return ∇J[]
end
else # multivariate syntax (see JuMP.@operator doc):
function (∇Jarg::AbstractVector{T}, Z̃arg::Vararg{T, N}) where {N, T<:Real}
update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
return ∇Jarg .= ∇J
end
end
return g_oracle, geq_oracle, J_func, ∇J_func!
end
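`Ipopt._VectorNonlinearOracle` is a private, underscore-prefixed API that may change between Ipopt.jl releases, so a self-contained sketch of the construct is useful. It mirrors how `g_oracle` is built above: one callback fills the constraint values, the other fills the Jacobian nonzeros in the declared `(row, col)` order, and the keyword names are the ones used in this diff:

```julia
using JuMP, Ipopt

# One inequality g(x) = x₁² + x₂² - 1 ≤ 0 on a 2-element decision vector:
oracle = Ipopt._VectorNonlinearOracle(;
    dimension = 2,
    l = [-Inf],
    u = [0.0],
    eval_f = (g, x) -> (g[1] = x[1]^2 + x[2]^2 - 1),
    jacobian_structure = [(1, 1), (1, 2)],
    eval_jacobian = (J, x) -> (J[1] = 2x[1]; J[2] = 2x[2]),
)
model = Model(Ipopt.Optimizer)
@variable(model, x[1:2])
@objective(model, Min, x[1] + x[2])
@constraint(model, x in oracle)    # same `Z̃var in g_oracle` form as below
optimize!(model)                   # minimizer ≈ (-√2/2, -√2/2)
```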


"""
update_predictions!(
ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq,
@@ -741,6 +900,20 @@ function update_predictions!(
return nothing
end

function set_nonlincon_exp!(
mpc::NonLinMPC, ::TranscriptionMethod, g_oracle, geq_oracle
)
optim = mpc.optim
Z̃var = optim[:Z̃var]
nonlin_constraints = JuMP.all_constraints(
optim, JuMP.Vector{JuMP.VariableRef}, Ipopt._VectorNonlinearOracle
)
map(con_ref -> JuMP.delete(optim, con_ref), nonlin_constraints)
@constraint(optim, Z̃var in g_oracle)
mpc.con.neq > 0 && @constraint(optim, Z̃var in geq_oracle)
return nothing
end

@doc raw"""
con_custom!(gc, mpc::NonLinMPC, Ue, Ŷe, ϵ) -> gc

3 changes: 0 additions & 3 deletions src/controller/transcription.jl
@@ -1016,9 +1016,6 @@ function linconstraint!(mpc::PredictiveController, ::NonLinModel, ::SingleShooti
return nothing
end




@doc raw"""
linconstrainteq!(
mpc::PredictiveController, model::LinModel, transcription::MultipleShooting
18 changes: 16 additions & 2 deletions src/general.jl
@@ -58,8 +58,22 @@ function limit_solve_time(optim::GenericModel, Ts)
end

"Init a differentiation result matrix as dense or sparse matrix, as required by `backend`."
init_diffmat(T, backend::AbstractADType, _ , nx , ny) = Matrix{T}(undef, ny, nx)
init_diffmat(T, backend::AutoSparse ,prep , _ , _ ) = similar(sparsity_pattern(prep), T)
init_diffmat(T, ::AbstractADType, _ , nx, ny) = zeros(T, ny, nx)
function init_diffmat(T, ::AutoSparse, prep , _ , _ )
A = similar(sparsity_pattern(prep), T)
return A .= 0
end

"Init the sparsity structure of matrix `A` as required by `JuMP.jl`."
function init_diffstructure(A::AbstractSparseMatrix)
I, J = findnz(A)
return collect(zip(I, J))
end
init_diffstructure(A::AbstractMatrix) = Tuple.(CartesianIndices(A))[:]

"Store the differentiation matrix `A` in the vector `v` as required by `JuMP.jl.`"
diffmat2vec!(v::AbstractVector, A::AbstractSparseMatrix) = v .= nonzeros(A)
diffmat2vec!(v::AbstractVector, A::AbstractMatrix) = v[:] = A
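A quick illustration of these two package-internal helpers on a small sparse matrix; both `findnz` and `nonzeros` traverse the stored entries in the same column-major order, so the structure tuples and the packed values stay aligned:

```julia
using SparseArrays

A = sparse([1, 2], [1, 2], [10.0, 20.0], 2, 3)  # 2×3 with two stored entries
init_diffstructure(A)   # → [(1, 1), (2, 2)], as required by JuMP.jl
v = zeros(nnz(A))
diffmat2vec!(v, A)      # → v == [10.0, 20.0], same order as the structure
```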

backend_str(backend::AbstractADType) = string(nameof(typeof(backend)))
function backend_str(backend::AutoSparse)
6 changes: 0 additions & 6 deletions test/3_test_predictive_control.jl
@@ -805,12 +805,6 @@ end
linmodel3 = LinModel{Float32}(0.5*ones(1,1), ones(1,1), ones(1,1), 0, 0, 3000.0)
nmpc5 = NonLinMPC(nonlinmodel, Hp=1, Hc=1, Cwt=Inf, transcription=MultipleShooting())
nmpc5 = setconstraint!(nmpc5, ymin=[1])
# execute update_predictions! branch in `gfunc_i` for coverage:
g_Y0min_end = nmpc5.optim[:g_Y0min_1].func
@test_nowarn g_Y0min_end(10.0, 9.0, 8.0, 7.0)
# execute update_predictions! branch in `geqfunc_i` for coverage:
geq_end = nmpc5.optim[:geq_2].func
@test_nowarn geq_end(5.0, 4.0, 3.0, 2.0)
f! = (ẋ,x,u,_,_) -> ẋ .= -0.001x .+ u
h! = (y,x,_,_) -> y .= x
nonlinmodel_c = NonLinModel(f!, h!, 500, 1, 1, 1)