diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index f244a4726..49c57a06e 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -3,11 +3,6 @@ on:
   pull_request_target:
     branches: [ main ]
     types: [labeled, opened, synchronize, reopened]
-concurrency:
-  # Skip intermediate builds: always.
-  # Cancel intermediate builds: only if it is a pull request build.
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
 permissions:
   pull-requests: write # needed to post comments
 jobs:
diff --git a/docs/src/manual/nonlinmpc.md b/docs/src/manual/nonlinmpc.md
index 3ac196096..8ec651fe4 100644
--- a/docs/src/manual/nonlinmpc.md
+++ b/docs/src/manual/nonlinmpc.md
@@ -331,7 +331,7 @@ Nonlinear MPC is more computationally expensive than [`LinMPC`](@ref). Solving t
 should always be faster than the sampling time ``T_s = 0.1`` s for real-time operation. This
 requirement is sometimes hard to meet on electronics or mechanical systems because of the
 fast dynamics. To ease the design and comparison with [`LinMPC`](@ref), the [`linearize`](@ref)
-function allows automatic linearization of [`NonLinModel`](@ref) based on [`ForwardDiff`](@extref ForwardDiff).
+function allows automatic linearization of [`NonLinModel`](@ref) defaulting to [`ForwardDiff`](@extref ForwardDiff).
 We first linearize `model` at the point ``θ = π`` rad and ``ω = τ = 0`` (inverted position):
 
 ```@example man_nonlin
@@ -341,7 +341,6 @@ linmodel = linearize(model, x=[π, 0], u=[0])
 A [`SteadyKalmanFilter`](@ref) and a [`LinMPC`](@ref) are designed from `linmodel`:
 
 ```@example man_nonlin
-
 skf = SteadyKalmanFilter(linmodel; σQ, σR, nint_u, σQint_u)
 mpc = LinMPC(skf; Hp, Hc, Mwt, Nwt, Cwt=Inf)
 mpc = setconstraint!(mpc, umin=[-1.5], umax=[+1.5])
diff --git a/src/controller/construct.jl b/src/controller/construct.jl
index 46db1d19f..61a5d29f0 100644
--- a/src/controller/construct.jl
+++ b/src/controller/construct.jl
@@ -564,7 +564,10 @@ function validate_args(mpc::PredictiveController, ry, d, D̂, R̂y, R̂u)
 end
 
 @doc raw"""
-    init_quadprog(model::LinModel, weights::ControllerWeights, Ẽ, P̃Δu, P̃u) -> H̃
+    init_quadprog(
+        model::LinModel, transcription::TranscriptionMethod, weights::ControllerWeights,
+        Ẽ, P̃Δu, P̃u; warn_cond=1e6
+    ) -> H̃
 
 Init the quadratic programming Hessian `H̃` for MPC.
 
@@ -582,19 +585,43 @@ in which ``\mathbf{Ẽ}``, ``\mathbf{P̃_{Δu}}`` and ``\mathbf{P̃_{u}}`` matri
 at [`relaxŶ`](@ref), [`relaxΔU`](@ref) and [`relaxU`](@ref) documentation, respectively.
 The vector ``\mathbf{q̃}`` and scalar ``r`` need recalculation each control period ``k``, see
 [`initpred!`](@ref). ``r`` does not impact the minima position. It is thus useless at
-optimization but required to evaluate the minimal ``J`` value.
+optimization but required to evaluate the minimal ``J`` value. A `@warn` is displayed if
+the condition number `cond(H̃) > warn_cond` and `transcription` is a `SingleShooting`
+(pass `warn_cond=Inf` to disable the warning).
 """
-function init_quadprog(::LinModel, weights::ControllerWeights, Ẽ, P̃Δu, P̃u)
+function init_quadprog(
+    ::LinModel, transcription::TranscriptionMethod, weights::ControllerWeights,
+    Ẽ, P̃Δu, P̃u; warn_cond=1e6
+)
     M_Hp, Ñ_Hc, L_Hp = weights.M_Hp, weights.Ñ_Hc, weights.L_Hp
     H̃ = Hermitian(2*(Ẽ'*M_Hp*Ẽ + P̃Δu'*Ñ_Hc*P̃Δu + P̃u'*L_Hp*P̃u), :L)
+    verify_cond(transcription, H̃, warn_cond)
     return H̃
 end
 
 "Return empty matrix if `model` is not a [`LinModel`](@ref)."
-function init_quadprog(::SimModel{NT}, weights::ControllerWeights, _, _, _) where {NT<:Real}
+function init_quadprog(
+    ::SimModel{NT}, ::TranscriptionMethod, ::ControllerWeights, args...; kwargs...
+) where {NT<:Real}
     H̃ = Hermitian(zeros(NT, 0, 0), :L)
     return H̃
 end
+"Check the condition number of `H̃` and display a `@warn` if `cond(H̃) > warn_cond`."
+function verify_cond(::SingleShooting, H̃, warn_cond)
+    if !isinf(warn_cond)
+        cond_H̃ = cond(H̃)
+        cond_H̃ > warn_cond && @warn(
+            "The Hessian condition number cond_H̃ exceeds $warn_cond. The optimization "*
+            "problem may be ill-conditioned.\n Consider changing the tunings, using "*
+            "MultipleShooting, or an optimizer robust to ill-conditioning, like DAQP.",
+            cond_H̃,
+        )
+    end
+    return nothing
+end
+"Do not check the condition number if `transcription` is not a `SingleShooting`."
+verify_cond(::TranscriptionMethod, _ , _ ) = nothing
+
 """
     init_defaultcon_mpc(
         estim::StateEstimator,
diff --git a/src/controller/execute.jl b/src/controller/execute.jl
index 2e4bd283c..bf8fd110e 100644
--- a/src/controller/execute.jl
+++ b/src/controller/execute.jl
@@ -609,6 +609,7 @@ end
 "Update the prediction matrices, linear constraints and JuMP optimization."
 function setmodel_controller!(mpc::PredictiveController, uop_old, x̂op_old)
     model, estim, transcription = mpc.estim.model, mpc.estim, mpc.transcription
+    weights = mpc.weights
     nu, ny, nd, Hp, Hc = model.nu, model.ny, model.nd, mpc.Hp, mpc.Hc
     optim, con = mpc.optim, mpc.con
     # --- prediction matrices ---
@@ -676,7 +677,8 @@ function setmodel_controller!(mpc::PredictiveController, uop_old, x̂op_old)
     con.x̂0min .-= estim.x̂op # convert x̂ to x̂0 with the new operating point
     con.x̂0max .-= estim.x̂op # convert x̂ to x̂0 with the new operating point
     # --- quadratic programming Hessian matrix ---
-    H̃ = init_quadprog(model, mpc.weights, mpc.Ẽ, mpc.P̃Δu, mpc.P̃u)
+    # do not verify the condition number of the Hessian here:
+    H̃ = init_quadprog(model, transcription, weights, mpc.Ẽ, mpc.P̃Δu, mpc.P̃u; warn_cond=Inf)
     mpc.H̃ .= H̃
     # --- JuMP optimization ---
     Z̃var::Vector{JuMP.VariableRef} = optim[:Z̃var]
diff --git a/src/controller/explicitmpc.jl b/src/controller/explicitmpc.jl
index c5eaa2722..370a8d319 100644
--- a/src/controller/explicitmpc.jl
+++ b/src/controller/explicitmpc.jl
@@ -57,7 +57,7 @@ struct ExplicitMPC{
         # dummy val (updated just before optimization):
         F = zeros(NT, ny*Hp)
         P̃Δu, P̃u, Ẽ = PΔu, Pu, E # no slack variable ϵ for ExplicitMPC
-        H̃ = init_quadprog(model, weights, Ẽ, P̃Δu, P̃u)
+        H̃ = init_quadprog(model, transcription, weights, Ẽ, P̃Δu, P̃u)
         # dummy vals (updated just before optimization):
         q̃, r = zeros(NT, size(H̃, 1)), zeros(NT, 1)
         H̃_chol = cholesky(H̃)
@@ -225,6 +225,7 @@ addinfo!(info, mpc::ExplicitMPC) = info
 "Update the prediction matrices and Cholesky factorization."
 function setmodel_controller!(mpc::ExplicitMPC, uop_old, _ )
     model, estim, transcription = mpc.estim.model, mpc.estim, mpc.transcription
+    weights = mpc.weights
     nu, ny, nd, Hp, Hc = model.nu, model.ny, model.nd, mpc.Hp, mpc.Hc
     # --- predictions matrices ---
     E, G, J, K, V, B = init_predmat(model, estim, transcription, Hp, Hc)
@@ -236,7 +237,8 @@ function setmodel_controller!(mpc::ExplicitMPC, uop_old, _ )
     mpc.V .= V
     mpc.B .= B
     # --- quadratic programming Hessian matrix ---
-    H̃ = init_quadprog(model, mpc.weights, mpc.Ẽ, mpc.P̃Δu, mpc.P̃u)
+    # do not verify the condition number of the Hessian here:
+    H̃ = init_quadprog(model, transcription, weights, mpc.Ẽ, mpc.P̃Δu, mpc.P̃u; warn_cond=Inf)
     mpc.H̃ .= H̃
     set_objective_hessian!(mpc)
     # --- operating points ---
diff --git a/src/controller/linmpc.jl b/src/controller/linmpc.jl
index 6eec082a3..b176e1ee0 100644
--- a/src/controller/linmpc.jl
+++ b/src/controller/linmpc.jl
@@ -79,7 +79,7 @@ struct LinMPC{
             ex̂, fx̂, gx̂, jx̂, kx̂, vx̂, bx̂, Eŝ, Fŝ, Gŝ, Jŝ, Kŝ, Vŝ, Bŝ
         )
 
-        H̃ = init_quadprog(model, weights, Ẽ, P̃Δu, P̃u)
+        H̃ = init_quadprog(model, transcription, weights, Ẽ, P̃Δu, P̃u)
         # dummy vals (updated just before optimization):
         q̃, r = zeros(NT, size(H̃, 1)), zeros(NT, 1)
         Ks, Ps = init_stochpred(estim, Hp)
diff --git a/src/controller/nonlinmpc.jl b/src/controller/nonlinmpc.jl
index bf8b15976..25e9cfc3e 100644
--- a/src/controller/nonlinmpc.jl
+++ b/src/controller/nonlinmpc.jl
@@ -103,7 +103,7 @@ struct NonLinMPC{
             Eŝ, Fŝ, Gŝ, Jŝ, Kŝ, Vŝ, Bŝ, gc!, nc
         )
 
-        H̃ = init_quadprog(model, weights, Ẽ, P̃Δu, P̃u)
+        H̃ = init_quadprog(model, transcription, weights, Ẽ, P̃Δu, P̃u)
         # dummy vals (updated just before optimization):
         q̃, r = zeros(NT, size(H̃, 1)), zeros(NT, 1)
         Ks, Ps = init_stochpred(estim, Hp)
diff --git a/src/precompile.jl b/src/precompile.jl
index 0baaaa70d..cd3b1254e 100644
--- a/src/precompile.jl
+++ b/src/precompile.jl
@@ -20,7 +20,8 @@ mpc_kf.estim()
 u = mpc_kf([55, 30])
 sim!(mpc_kf, 2, [55, 30])
 
-mpc_lo = setconstraint!(LinMPC(Luenberger(model)), ymin=[45, -Inf])
+transcription = MultipleShooting()
+mpc_lo = setconstraint!(LinMPC(Luenberger(model); transcription), ymin=[45, -Inf])
 initstate!(mpc_lo, model.uop, model())
 preparestate!(mpc_lo, [55, 30])
 mpc_lo.estim()
@@ -79,18 +80,21 @@ exmpc.estim()
 u = exmpc([55, 30])
 sim!(exmpc, 2, [55, 30])
 
-function f!(xnext, x, u, _, model)
-    mul!(xnext, model.A , x)
-    mul!(xnext, model.Bu, u, 1, 1)
+function f!(xnext, x, u, _ , p)
+    A, B, _ = p
+    mul!(xnext, A , x)
+    mul!(xnext, B, u, 1, 1)
     return nothing
 end
-function h!(y, x, _, model)
-    mul!(y, model.C, x)
+function h!(y, x, _ , p)
+    _, _, C = p
+    mul!(y, C, x)
     return nothing
 end
 
+sys2 = minreal(ss(sys))
 nlmodel = setop!(
-    NonLinModel(f!, h!, Ts, 2, 2, 2, solver=nothing, p=model),
+    NonLinModel(f!, h!, Ts, 2, 2, 2, solver=RungeKutta(4), p=(sys2.A, sys2.B, sys2.C)),
     uop=[10, 10], yop=[50, 30]
 )
 y = nlmodel()
@@ -101,8 +105,9 @@ nmpc_im.estim()
 u = nmpc_im([55, 30])
 sim!(nmpc_im, 2, [55, 30])
 
-nmpc_ukf = setconstraint!(
-    NonLinMPC(UnscentedKalmanFilter(nlmodel), Hp=10, Cwt=1e3), ymin=[45, -Inf]
+transcription = MultipleShooting(f_threads=true)
+nmpc_ukf = setconstraint!(NonLinMPC(
+    UnscentedKalmanFilter(nlmodel); Hp=10, transcription, Cwt=1e3), ymin=[45, -Inf]
 )
 initstate!(nmpc_ukf, nlmodel.uop, y)
 preparestate!(nmpc_ukf, [55, 30])
@@ -115,8 +120,9 @@ preparestate!(nmpc_ekf, [55, 30])
 u = nmpc_ekf([55, 30])
 sim!(nmpc_ekf, 2, [55, 30])
 
-nmpc_mhe = setconstraint!(
-    NonLinMPC(MovingHorizonEstimator(nlmodel, He=2), Hp=10, Cwt=Inf), ymin=[45, -Inf]
+transcription = TrapezoidalCollocation()
+nmpc_mhe = setconstraint!(NonLinMPC(
+    MovingHorizonEstimator(nlmodel, He=2); transcription, Hp=10, Cwt=Inf), ymin=[45, -Inf]
 )
 setconstraint!(nmpc_mhe.estim, x̂min=[-50,-50,-50,-50], x̂max=[50,50,50,50])
 initstate!(nmpc_mhe, nlmodel.uop, y)
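Note on the new `warn_cond` check: the snippet below is a minimal, self-contained sketch of the condition-number test that `verify_cond` runs when the transcription is a `SingleShooting`. The weight and prediction matrices are made up solely to produce an ill-conditioned `H̃`; they are not taken from the package.

```julia
using LinearAlgebra

# Hypothetical stand-ins for the M_Hp and Ñ_Hc weights and the Ẽ, P̃Δu prediction
# matrices, chosen only to give a large eigenvalue spread (ill-conditioned Hessian):
M_Hp = Diagonal([1e6, 1e-3])
Ñ_Hc = Diagonal([1e-3, 1e-3])
Ẽ, P̃Δu = Matrix(1.0I, 2, 2), Matrix(1.0I, 2, 2)
H̃ = Hermitian(2*(Ẽ'*M_Hp*Ẽ + P̃Δu'*Ñ_Hc*P̃Δu), :L)

# Same test as verify_cond applies for a SingleShooting transcription:
warn_cond = 1e6
cond_H̃ = cond(H̃)
cond_H̃ > warn_cond && @warn(
    "The Hessian condition number cond_H̃ exceeds $warn_cond. The optimization "*
    "problem may be ill-conditioned.", cond_H̃
)
```

The `!isinf(warn_cond)` guard in `verify_cond` skips the `cond(H̃)` computation entirely, which is why the `setmodel_controller!` methods pass `warn_cond=Inf`: the Hessian was already checked once at controller construction.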