diff --git a/NEWS.md b/NEWS.md index ece040c1a..728e40fd7 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,6 +1,16 @@ +MixedModels v4.25 Release Notes +============================== +- Add type notations in `pwrss(::LinearMixedModel)` and `logdet(::LinearMixedModel)` to enhance type inference. [#773] +- Take advantage of type parameter for `StatsAPI.weights(::LinearMixedModel{T})`. [#772] +- Fix use of kwargs in `fit!(::LinearMixedModel)`: [#772] + - user-specified `σ` is actually used, defaulting to existing value + - `REML` defaults to model's already specified REML value. +- Clean up code of keyword convenience constructor for `OptSummary`. [#772] +- Refactor thresholding parameters for forcing near-zero parameter values into `OptSummary`. [#772] + MixedModels v4.24.1 Release Notes ============================== -Add type notations in `pwrss(::LinearMixedModel)` and `logdet(::LinearMixedModel)` to enhance type inference. [#773] +- Re-export accidentally dropped export `lrtest`. [#769] MixedModels v4.24.0 Release Notes ============================== @@ -525,4 +535,6 @@ Package dependencies [#755]: https://github.com/JuliaStats/MixedModels.jl/issues/755 [#756]: https://github.com/JuliaStats/MixedModels.jl/issues/756 [#767]: https://github.com/JuliaStats/MixedModels.jl/issues/767 +[#769]: https://github.com/JuliaStats/MixedModels.jl/issues/769 +[#772]: https://github.com/JuliaStats/MixedModels.jl/issues/772 [#773]: https://github.com/JuliaStats/MixedModels.jl/issues/773 diff --git a/Project.toml b/Project.toml index 6d31df627..9e02e7138 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "MixedModels" uuid = "ff71e718-51f3-5ec2-a782-8ffcbfa3c316" author = ["Phillip Alday ", "Douglas Bates ", "Jose Bayoan Santiago Calderon "] -version = "4.24.1" +version = "4.25.0" [deps] Arrow = "69666777-d1a9-59fb-9406-91d4454c9d45" diff --git a/src/MixedModels.jl b/src/MixedModels.jl index 735b18581..231fdfcc5 100644 --- a/src/MixedModels.jl +++ b/src/MixedModels.jl @@ 
-19,7 +19,7 @@ using LinearAlgebra: ldiv!, lmul!, logdet, mul!, norm, normalize, normalize!, qr using LinearAlgebra: rank, rdiv!, rmul!, svd, tril! using Markdown: Markdown using MixedModelsDatasets: dataset, datasets -using NLopt: NLopt, Opt, ftol_abs, ftol_rel, initial_step, xtol_abs, xtol_rel +using NLopt: NLopt, Opt using PooledArrays: PooledArrays, PooledArray using PrecompileTools: PrecompileTools, @setup_workload, @compile_workload using ProgressMeter: ProgressMeter, Progress, ProgressUnknown, finish!, next! diff --git a/src/generalizedlinearmixedmodel.jl b/src/generalizedlinearmixedmodel.jl index 3e2125499..cfc06a778 100644 --- a/src/generalizedlinearmixedmodel.jl +++ b/src/generalizedlinearmixedmodel.jl @@ -309,13 +309,13 @@ function StatsAPI.fit!( ## check if very small parameter values bounded below by zero can be set to zero xmin_ = copy(xmin) for i in eachindex(xmin_) - if iszero(optsum.lowerbd[i]) && zero(T) < xmin_[i] < T(0.001) + if iszero(optsum.lowerbd[i]) && zero(T) < xmin_[i] < optsum.xtol_zero_abs xmin_[i] = zero(T) end end loglength = length(fitlog) if xmin ≠ xmin_ - if (zeroobj = obj(xmin_, T[])) ≤ (fmin + 1.e-5) + if (zeroobj = obj(xmin_, T[])) ≤ (fmin + optsum.ftol_zero_abs) fmin = zeroobj copyto!(xmin, xmin_) elseif length(fitlog) > loglength diff --git a/src/linearmixedmodel.jl b/src/linearmixedmodel.jl index 875b54c92..14f297835 100644 --- a/src/linearmixedmodel.jl +++ b/src/linearmixedmodel.jl @@ -36,8 +36,8 @@ struct LinearMixedModel{T<:AbstractFloat} <: MixedModel{T} sqrtwts::Vector{T} parmap::Vector{NTuple{3,Int}} dims::NamedTuple{(:n, :p, :nretrms),NTuple{3,Int}} - A::Vector{AbstractMatrix{T}} # cross-product blocks - L::Vector{AbstractMatrix{T}} + A::Vector{<:AbstractMatrix{T}} # cross-product blocks + L::Vector{<:AbstractMatrix{T}} optsum::OptSummary{T} end @@ -175,7 +175,7 @@ function LinearMixedModel( A, L = createAL(reterms, Xy) lbd = foldl(vcat, lowerbd(c) for c in reterms) θ = foldl(vcat, getθ(c) for c in reterms) - optsum = 
OptSummary(θ, lbd, :LN_BOBYQA; ftol_rel=T(1.0e-12), ftol_abs=T(1.0e-8)) + optsum = OptSummary(θ, lbd) optsum.sigma = isnothing(σ) ? nothing : T(σ) fill!(optsum.xtol_abs, 1.0e-10) return LinearMixedModel( @@ -408,7 +408,7 @@ function createAL(reterms::Vector{<:AbstractReMat{T}}, Xy::FeMat{T}) where {T} end end end - return A, L + return identity.(A), identity.(L) end StatsAPI.deviance(m::LinearMixedModel) = objective(m) @@ -431,8 +431,8 @@ function feL(m::LinearMixedModel) end """ - fit!(m::LinearMixedModel; progress::Bool=true, REML::Bool=false, - σ::Union{Real, Nothing}=nothing, + fit!(m::LinearMixedModel; progress::Bool=true, REML::Bool=m.optsum.REML, + σ::Union{Real, Nothing}=m.optsum.sigma, thin::Int=typemax(Int)) Optimize the objective of a `LinearMixedModel`. When `progress` is `true` a @@ -445,8 +445,8 @@ saved in `m.optsum.fitlog`. function StatsAPI.fit!( m::LinearMixedModel{T}; progress::Bool=true, - REML::Bool=false, - σ::Union{Real,Nothing}=nothing, + REML::Bool=m.optsum.REML, + σ::Union{Real,Nothing}=m.optsum.sigma, thin::Int=typemax(Int), ) where {T} optsum = m.optsum @@ -461,6 +461,7 @@ function StatsAPI.fit!( end opt = Opt(optsum) optsum.REML = REML + optsum.sigma = σ prog = ProgressUnknown(; desc="Minimizing", showspeed=true) # start from zero for the initial call to obj before optimization iter = 0 @@ -511,13 +512,13 @@ function StatsAPI.fit!( xmin_ = copy(xmin) lb = optsum.lowerbd for i in eachindex(xmin_) - if iszero(lb[i]) && zero(T) < xmin_[i] < T(0.001) + if iszero(lb[i]) && zero(T) < xmin_[i] < optsum.xtol_zero_abs xmin_[i] = zero(T) end end loglength = length(fitlog) if xmin ≠ xmin_ - if (zeroobj = obj(xmin_, T[])) ≤ (fmin + 1.e-5) + if (zeroobj = obj(xmin_, T[])) ≤ (fmin + optsum.ftol_zero_abs) fmin = zeroobj copyto!(xmin, xmin_) elseif length(fitlog) > loglength diff --git a/src/optsummary.jl b/src/optsummary.jl index e79fb4557..2ee69144c 100644 --- a/src/optsummary.jl +++ b/src/optsummary.jl @@ -19,75 +19,55 @@ Summary of an `NLopt` 
optimization * `feval`: the number of function evaluations * `optimizer`: the name of the optimizer used, as a `Symbol` * `returnvalue`: the return value, as a `Symbol` +* `xtol_zero_abs`: the tolerance for a near zero parameter to be considered practically zero +* `ftol_zero_abs`: the tolerance for change in the objective for setting a near zero parameter to zero +* `fitlog`: A vector of tuples of parameter and objectives values from steps in the optimization * `nAGQ`: number of adaptive Gauss-Hermite quadrature points in deviance evaluation for GLMMs * `REML`: use the REML criterion for LMM fits * `sigma`: a priori value for the residual standard deviation for LMM -* `fitlog`: A vector of tuples of parameter and objectives values from steps in the optimization -The latter four fields are MixedModels functionality and not related directly to the `NLopt` package or algorithms. +The last three fields are MixedModels functionality and not related directly to the `NLopt` package or algorithms. !!! note The internal storage of the parameter values within `fitlog` may change in the future to use a different subtype of `AbstractVector` (e.g., `StaticArrays.SVector`) for each snapshot without being considered a breaking change. 
""" -mutable struct OptSummary{T<:AbstractFloat} +Base.@kwdef mutable struct OptSummary{T<:AbstractFloat} initial::Vector{T} lowerbd::Vector{T} - finitial::T - ftol_rel::T - ftol_abs::T - xtol_rel::T - xtol_abs::Vector{T} - initial_step::Vector{T} - maxfeval::Int - maxtime::T - feval::Int - final::Vector{T} - fmin::T - optimizer::Symbol - returnvalue::Symbol - nAGQ::Integer # don't really belong here but I needed a place to store them - REML::Bool - sigma::Union{T,Nothing} - fitlog::Vector{Tuple{Vector{T},T}} # not SVector because we would need to parameterize on size (which breaks GLMM) + # the @kwdef macro isn't quite smart enough for us to use the type parameter + # for the default values, but we can fake it + finitial::T = Inf * one(eltype(initial)) + ftol_rel::T = eltype(initial)(1.0e-12) + ftol_abs::T = eltype(initial)(1.0e-8) + xtol_rel::T = zero(eltype(initial)) + xtol_abs::Vector{T} = zero(initial) .+ 1e-10 + initial_step::Vector{T} = empty(initial) + maxfeval::Int = -1 + maxtime::T = -one(eltype(initial)) + feval::Int = -1 + final::Vector{T} = copy(initial) + fmin::T = Inf * one(eltype(initial)) + optimizer::Symbol = :LN_BOBYQA + returnvalue::Symbol = :FAILURE + xtol_zero_abs::T = eltype(initial)(0.001) + ftol_zero_abs::T = eltype(initial)(1.e-5) + # not SVector because we would need to parameterize on size (which breaks GLMM) + fitlog::Vector{Tuple{Vector{T},T}} = [(initial, fmin)] + # don't really belong here but I needed a place to store them + nAGQ::Int = 1 + REML::Bool = false + sigma::Union{T,Nothing} = nothing end function OptSummary( initial::Vector{T}, - lowerbd::Vector{T}, - optimizer::Symbol; - ftol_rel::T=zero(T), - ftol_abs::T=zero(T), - xtol_rel::T=zero(T), - xtol_abs::Vector{T}=zero(initial) .+ 1e-10, - initial_step::Vector{T}=T[], - maxfeval=-1, - maxtime=T(-1), -) where {T<:AbstractFloat} - fitlog = [(initial, T(Inf))] - - return OptSummary( - initial, - lowerbd, - T(Inf), - ftol_rel, - ftol_abs, - xtol_rel, - xtol_abs, - initial_step, - 
maxfeval, - maxtime, - -1, - copy(initial), - T(Inf), - optimizer, - :FAILURE, - 1, - false, - nothing, - fitlog, - ) + lowerbd::Vector{S}, + optimizer::Symbol=:LN_BOBYQA; kwargs..., +) where {T<:AbstractFloat,S<:AbstractFloat} + TS = promote_type(T, S) + return OptSummary{TS}(; initial, lowerbd, optimizer, kwargs...) end """