@@ -19,75 +19,55 @@ Summary of an `NLopt` optimization
 * `feval`: the number of function evaluations
 * `optimizer`: the name of the optimizer used, as a `Symbol`
 * `returnvalue`: the return value, as a `Symbol`
+* `xtol_zero_abs`: the tolerance for a near zero parameter to be considered practically zero
+* `ftol_zero_abs`: the tolerance for change in the objective for setting a near zero parameter to zero
+* `fitlog`: a vector of tuples of parameter and objective values from steps in the optimization
 * `nAGQ`: number of adaptive Gauss-Hermite quadrature points in deviance evaluation for GLMMs
 * `REML`: use the REML criterion for LMM fits
 * `sigma`: a priori value for the residual standard deviation for LMM
-* `fitlog`: A vector of tuples of parameter and objectives values from steps in the optimization
 
-The latter four fields are MixedModels functionality and not related directly to the `NLopt` package or algorithms.
+The last three fields are MixedModels functionality and not related directly to the `NLopt` package or algorithms.
 
 !!! note
     The internal storage of the parameter values within `fitlog` may change in
     the future to use a different subtype of `AbstractVector` (e.g., `StaticArrays.SVector`)
     for each snapshot without being considered a breaking change.
 """
-mutable struct OptSummary{T<:AbstractFloat}
+Base.@kwdef mutable struct OptSummary{T<:AbstractFloat}
     initial::Vector{T}
     lowerbd::Vector{T}
-    finitial::T
-    ftol_rel::T
-    ftol_abs::T
-    xtol_rel::T
-    xtol_abs::Vector{T}
-    initial_step::Vector{T}
-    maxfeval::Int
-    maxtime::T
-    feval::Int
-    final::Vector{T}
-    fmin::T
-    optimizer::Symbol
-    returnvalue::Symbol
-    nAGQ::Integer  # don't really belong here but I needed a place to store them
-    REML::Bool
-    sigma::Union{T,Nothing}
-    fitlog::Vector{Tuple{Vector{T},T}}  # not SVector because we would need to parameterize on size (which breaks GLMM)
+    # the @kwdef macro isn't quite smart enough for us to use the type parameter
+    # for the default values, but we can fake it
+    finitial::T = Inf * one(eltype(initial))
+    ftol_rel::T = eltype(initial)(1.0e-12)
+    ftol_abs::T = eltype(initial)(1.0e-8)
+    xtol_rel::T = zero(eltype(initial))
+    xtol_abs::Vector{T} = zero(initial) .+ 1e-10
+    initial_step::Vector{T} = empty(initial)
+    maxfeval::Int = -1
+    maxtime::T = -one(eltype(initial))
+    feval::Int = -1
+    final::Vector{T} = copy(initial)
+    fmin::T = Inf * one(eltype(initial))
+    optimizer::Symbol = :LN_BOBYQA
+    returnvalue::Symbol = :FAILURE
+    xtol_zero_abs::T = eltype(initial)(0.001)
+    ftol_zero_abs::T = eltype(initial)(1.e-5)
+    # not SVector because we would need to parameterize on size (which breaks GLMM)
+    fitlog::Vector{Tuple{Vector{T},T}} = [(initial, fmin)]
+    # don't really belong here but I needed a place to store them
+    nAGQ::Int = 1
+    REML::Bool = false
+    sigma::Union{T,Nothing} = nothing
 end
 
 function OptSummary(
     initial::Vector{T},
-    lowerbd::Vector{T},
-    optimizer::Symbol;
-    ftol_rel::T=zero(T),
-    ftol_abs::T=zero(T),
-    xtol_rel::T=zero(T),
-    xtol_abs::Vector{T}=zero(initial) .+ 1e-10,
-    initial_step::Vector{T}=T[],
-    maxfeval=-1,
-    maxtime=T(-1),
-) where {T<:AbstractFloat}
-    fitlog = [(initial, T(Inf))]
-
-    return OptSummary(
-        initial,
-        lowerbd,
-        T(Inf),
-        ftol_rel,
-        ftol_abs,
-        xtol_rel,
-        xtol_abs,
-        initial_step,
-        maxfeval,
-        maxtime,
-        -1,
-        copy(initial),
-        T(Inf),
-        optimizer,
-        :FAILURE,
-        1,
-        false,
-        nothing,
-        fitlog,
-    )
+    lowerbd::Vector{S},
+    optimizer::Symbol=:LN_BOBYQA; kwargs...,
+) where {T<:AbstractFloat,S<:AbstractFloat}
+    TS = promote_type(T, S)
+    return OptSummary{TS}(; initial, lowerbd, optimizer, kwargs...)
 end
 
 """
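
For orientation, here is a minimal usage sketch of the constructors after this change, assuming the `OptSummary` definition in the diff above; the example vectors and the `ftol_rel` value are made up for illustration.

    using MixedModels: OptSummary   # assumes the struct as defined in this commit

    initial = [1.0, 0.0, 1.0]       # hypothetical starting values
    lowerbd = [0.0, -Inf, 0.0]      # hypothetical lower bounds

    # The positional convenience constructor promotes the element types of
    # `initial` and `lowerbd` and forwards keyword arguments to the
    # @kwdef-generated keyword constructor.
    opt = OptSummary(initial, lowerbd; ftol_rel=1e-10)

    opt.optimizer  # :LN_BOBYQA, the keyword default
    opt.ftol_rel   # 1.0e-10, the value passed above
    opt.fitlog     # [(initial, Inf)], because the default refers to `initial` and `fmin`

Because `Base.@kwdef` lets later field defaults refer to earlier fields, defaults such as `final = copy(initial)` and `fitlog = [(initial, fmin)]` take over the work previously done by the hand-written positional constructor.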