Expose backtracking factor, rename increase factor #92

Merged · 1 commit · Mar 9, 2024
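In short: this PR renames the `regret_gamma` keyword to `increase_gamma` and exposes the previously hard-coded backtracking factor (`gamma /= 2` becomes `gamma *= reduce_gamma`) in `ForwardBackward`, `FastForwardBackward`, and the underlying `backtrack_stepsize!` helper. The sketch below is a minimal standalone re-implementation of the resulting adaptive-stepsize recipe on a toy LASSO problem, not the library code; the objective, the soft-thresholding prox, and the factor values are illustrative assumptions.

```julia
# Standalone sketch of the adaptive stepsize recipe (illustrative, not the
# library code). Each iteration first enlarges gamma by `increase_gamma`,
# then backtracks by `reduce_gamma` until the descent-lemma bound holds.
soft_threshold(y, t) = sign.(y) .* max.(abs.(y) .- t, 0)

function prox_grad_lasso(A, b, lambda; gamma = 1.0, increase_gamma = 1.01,
                         reduce_gamma = 0.5, minimum_gamma = 1e-7, iters = 500)
    x = zeros(size(A, 2))
    f(u) = sum(abs2, A * u - b) / 2                    # smooth part
    for _ in 1:iters
        gamma *= increase_gamma                        # optimistic enlargement
        grad = A' * (A * x - b)
        z = soft_threshold(x .- gamma .* grad, gamma * lambda)
        # shrink gamma while f(z) > f(x) + <grad, z-x> + ||z-x||^2 / (2*gamma)
        while f(z) > f(x) + grad' * (z - x) + sum(abs2, z - x) / (2 * gamma) &&
              gamma >= minimum_gamma
            gamma *= reduce_gamma
            z = soft_threshold(x .- gamma .* grad, gamma * lambda)
        end
        x = z
    end
    return x
end

x_hat = prox_grad_lasso(randn(20, 5), randn(20), 0.1)
```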
9 changes: 6 additions & 3 deletions src/algorithms/fast_forward_backward.jl
@@ -33,7 +33,8 @@ See also: [`FastForwardBackward`](@ref).
- `gamma=nothing`: stepsize, defaults to `1/Lf` if `Lf` is set, and `nothing` otherwise.
- `adaptive=true`: makes `gamma` adaptively adjust during the iterations; this is by default `gamma === nothing`.
- `minimum_gamma=1e-7`: lower bound to `gamma` in case `adaptive == true`.
- - `regret_gamma=1.0`: factor to enlarge `gamma` in case `adaptive == true`, before backtracking.
+ - `reduce_gamma=0.5`: factor by which to reduce `gamma` in case `adaptive == true`, during backtracking.
+ - `increase_gamma=1.0`: factor by which to increase `gamma` in case `adaptive == true`, before backtracking.
- `extrapolation_sequence=nothing`: sequence (iterator) of extrapolation coefficients to use for acceleration.

# References
@@ -49,7 +50,8 @@ Base.@kwdef struct FastForwardBackwardIteration{R,Tx,Tf,Tg,TLf,Tgamma,Textr}
gamma::Tgamma = Lf === nothing ? nothing : (1 / Lf)
adaptive::Bool = gamma === nothing
minimum_gamma::R = real(eltype(x0))(1e-7)
- regret_gamma::R = real(eltype(x0))(1.0)
+ reduce_gamma::R = real(eltype(x0))(0.5)
+ increase_gamma::R = real(eltype(x0))(1.0)
extrapolation_sequence::Textr = nothing
end

@@ -107,7 +109,7 @@ function Base.iterate(
state::FastForwardBackwardState{R,Tx},
) where {R,Tx}
state.gamma = if iter.adaptive == true
- state.gamma *= iter.regret_gamma
+ state.gamma *= iter.increase_gamma
gamma, state.g_z = backtrack_stepsize!(
state.gamma,
iter.f,
@@ -123,6 +125,7 @@
state.z,
nothing,
minimum_gamma = iter.minimum_gamma,
+ reduce_gamma = iter.reduce_gamma,
)
gamma
else
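For reference, a hedged usage sketch of the renamed keywords, mirroring the calls in the test suite further down; `f`, `g`, and `x0` are stand-ins for a smooth term, a proximable term, and a starting point set up elsewhere (e.g. as in the LASSO tests).

```julia
# Hypothetical usage of the exposed factors (f, g, x0 assumed defined):
solver = ProximalAlgorithms.FastForwardBackward(
    tol = 1e-6,
    adaptive = true,
    increase_gamma = 1.01,  # was `regret_gamma`
    reduce_gamma = 0.5,     # previously hard-coded as `gamma /= 2`
)
x, it = solver(x0 = x0, f = f, g = g)
```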
9 changes: 6 additions & 3 deletions src/algorithms/forward_backward.jl
@@ -28,7 +28,8 @@ See also: [`ForwardBackward`](@ref).
- `gamma=nothing`: stepsize to use, defaults to `1/Lf` if not set (but `Lf` is).
- `adaptive=false`: forces the method stepsize to be adaptively adjusted.
- `minimum_gamma=1e-7`: lower bound to `gamma` in case `adaptive == true`.
- - `regret_gamma=1.0`: factor to enlarge `gamma` in case `adaptive == true`, before backtracking.
+ - `reduce_gamma=0.5`: factor by which to reduce `gamma` in case `adaptive == true`, during backtracking.
+ - `increase_gamma=1.0`: factor by which to increase `gamma` in case `adaptive == true`, before backtracking.

# References
1. Lions, Mercier, “Splitting algorithms for the sum of two nonlinear operators,” SIAM Journal on Numerical Analysis, vol. 16, pp. 964–979 (1979).
@@ -42,7 +43,8 @@ Base.@kwdef struct ForwardBackwardIteration{R,Tx,Tf,Tg,TLf,Tgamma}
gamma::Tgamma = Lf === nothing ? nothing : (1 / Lf)
adaptive::Bool = gamma === nothing
minimum_gamma::R = real(eltype(x0))(1e-7)
- regret_gamma::R = real(eltype(x0))(1.0)
+ reduce_gamma::R = real(eltype(x0))(0.5)
+ increase_gamma::R = real(eltype(x0))(1.0)
end

Base.IteratorSize(::Type{<:ForwardBackwardIteration}) = Base.IsInfinite()
@@ -87,7 +89,7 @@ function Base.iterate(
state::ForwardBackwardState{R,Tx},
) where {R,Tx}
if iter.adaptive == true
- state.gamma *= iter.regret_gamma
+ state.gamma *= iter.increase_gamma
state.gamma, state.g_z, state.f_x = backtrack_stepsize!(
state.gamma,
iter.f,
@@ -103,6 +105,7 @@
state.z,
state.grad_f_z,
minimum_gamma = iter.minimum_gamma,
+ reduce_gamma = iter.reduce_gamma,
)
state.x, state.z = state.z, state.x
state.grad_f_x, state.grad_f_z = state.grad_f_z, state.grad_f_x
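To get a feel for how the two factors interact: with `increase_gamma = 1.01` the stepsize creeps up by 1% per iteration while the sufficient-decrease check passes, and each failed check pulls it down by `reduce_gamma = 0.5`, so one halving undoes roughly 70 iterations of growth (1.01^70 ≈ 2). A toy trace, with a fixed cap standing in for the real backtracking test:

```julia
# Toy stepsize dynamics (a fixed cap stands in for the sufficient-decrease
# test; purely illustrative, not the library's backtracking logic).
function stepsize_trace(; increase_gamma = 1.01, reduce_gamma = 0.5,
                        cap = 1.5, iters = 200)
    gamma = 1.0
    history = Float64[]
    for _ in 1:iters
        gamma *= increase_gamma    # optimistic growth each iteration
        while gamma > cap          # stand-in for a failed backtracking check
            gamma *= reduce_gamma  # backtrack
        end
        push!(history, gamma)
    end
    return history
end

extrema(stepsize_trace())  # sawtooths roughly between cap/2 and cap
```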
19 changes: 15 additions & 4 deletions src/utilities/fb_tools.jl
@@ -37,15 +37,16 @@ function backtrack_stepsize!(
res,
Az,
grad_f_Az = nothing;
- alpha = 1,
- minimum_gamma = 1e-7,
+ alpha = R(1),
+ minimum_gamma = R(1e-7),
+ reduce_gamma = R(0.5),
) where {R}
f_Az_upp = f_model(f_Ax, At_grad_f_Ax, res, alpha / gamma)
_mul!(Az, A, z)
f_Az, cl = value_and_gradient_closure(f, Az)
tol = 10 * eps(R) * (1 + abs(f_Az))
while f_Az > f_Az_upp + tol && gamma >= minimum_gamma
- gamma /= 2
+ gamma *= reduce_gamma
y .= x .- gamma .* At_grad_f_Ax
g_z = prox!(z, g, y, gamma)
res .= x .- z
@@ -63,7 +64,16 @@
return gamma, g_z, f_Az, f_Az_upp
end

- function backtrack_stepsize!(gamma, f, A, g, x; alpha = 1, minimum_gamma = 1e-7)
+ function backtrack_stepsize!(
+ gamma::R,
+ f,
+ A,
+ g,
+ x;
+ alpha = R(1),
+ minimum_gamma = R(1e-7),
+ reduce_gamma = R(0.5),
+ ) where {R}
Ax = A * x
f_Ax, cl = value_and_gradient_closure(f, Ax)
grad_f_Ax = cl()
@@ -86,5 +96,6 @@ function backtrack_stepsize!(gamma, f, A, g, x; alpha = 1, minimum_gamma = 1e-7)
grad_f_Ax;
alpha = alpha,
minimum_gamma = minimum_gamma,
+ reduce_gamma = reduce_gamma,
)
end
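Both backtracking methods accept a stepsize once the descent-lemma bound holds; `reduce_gamma` now controls how fast `gamma` shrinks when it fails. A minimal version of the test, assuming `alpha = 1` as in the defaults (`f_model` plays this role in the library):

```julia
# Sketch of the sufficient-decrease test behind the backtracking loop:
# accept gamma once f(z) <= f(x) + <grad_f_x, z - x> + alpha/(2*gamma) * ||z - x||^2.
function sufficient_decrease(f_z, f_x, grad_f_x, z, x, gamma; alpha = 1.0)
    return f_z <= f_x + grad_f_x' * (z - x) + alpha * sum(abs2, z - x) / (2 * gamma)
end
```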
12 changes: 10 additions & 2 deletions test/problems/test_lasso_small.jl
@@ -68,7 +68,11 @@ using ProximalAlgorithms:
@testset "ForwardBackward (adaptive step, regret)" begin
x0 = zeros(T, n)
x0_backup = copy(x0)
- solver = ProximalAlgorithms.ForwardBackward(tol = TOL, adaptive = true, regret_gamma=R(1.01))
+ solver = ProximalAlgorithms.ForwardBackward(
+ tol = TOL,
+ adaptive = true,
+ increase_gamma = R(1.01),
+ )
x, it = @inferred solver(x0 = x0, f = fA_autodiff, g = g)
@test eltype(x) == T
@test norm(x - x_star, Inf) <= TOL
@@ -101,7 +105,11 @@
@testset "FastForwardBackward (adaptive step, regret)" begin
x0 = zeros(T, n)
x0_backup = copy(x0)
- solver = ProximalAlgorithms.FastForwardBackward(tol = TOL, adaptive = true, regret_gamma=R(1.01))
+ solver = ProximalAlgorithms.FastForwardBackward(
+ tol = TOL,
+ adaptive = true,
+ increase_gamma = R(1.01),
+ )
x, it = @inferred solver(x0 = x0, f = fA_autodiff, g = g)
@test eltype(x) == T
@test norm(x - x_star, Inf) <= TOL
14 changes: 11 additions & 3 deletions test/problems/test_lasso_small_strongly_convex.jl
@@ -70,7 +70,7 @@ using ProximalAlgorithms
@test it < 110
@test x0 == x0_backup
end

@testset "ForwardBackward (adaptive step)" begin
solver = ProximalAlgorithms.ForwardBackward(tol = TOL, adaptive = true)
y, it = solver(x0 = x0, f = fA_autodiff, g = g)
@@ -81,7 +81,11 @@
end

@testset "ForwardBackward (adaptive step, regret)" begin
- solver = ProximalAlgorithms.ForwardBackward(tol = TOL, adaptive = true, regret_gamma=T(1.01))
+ solver = ProximalAlgorithms.ForwardBackward(
+ tol = TOL,
+ adaptive = true,
+ increase_gamma = T(1.01),
+ )
y, it = solver(x0 = x0, f = fA_autodiff, g = g)
@test eltype(y) == T
@test norm(y - x_star, Inf) <= TOL
@@ -108,7 +112,11 @@
end

@testset "FastForwardBackward (adaptive step, regret)" begin
- solver = ProximalAlgorithms.FastForwardBackward(tol = TOL, adaptive = true, regret_gamma=T(1.01))
+ solver = ProximalAlgorithms.FastForwardBackward(
+ tol = TOL,
+ adaptive = true,
+ increase_gamma = T(1.01),
+ )
y, it = solver(x0 = x0, f = fA_autodiff, g = g)
@test eltype(y) == T
@test norm(y - x_star, Inf) <= TOL
12 changes: 10 additions & 2 deletions test/problems/test_sparse_logistic_small.jl
@@ -49,7 +49,11 @@ using LinearAlgebra
@testset "ForwardBackward (adaptive step, regret)" begin
x0 = zeros(T, n)
x0_backup = copy(x0)
- solver = ProximalAlgorithms.ForwardBackward(tol = TOL, adaptive = true, regret_gamma=R(1.01))
+ solver = ProximalAlgorithms.ForwardBackward(
+ tol = TOL,
+ adaptive = true,
+ increase_gamma = R(1.01),
+ )
x, it = solver(x0 = x0, f = fA_autodiff, g = g)
@test eltype(x) == T
@test norm(x - x_star, Inf) <= 1e-4
@@ -71,7 +75,11 @@
@testset "FastForwardBackward (adaptive step, regret)" begin
x0 = zeros(T, n)
x0_backup = copy(x0)
- solver = ProximalAlgorithms.FastForwardBackward(tol = TOL, adaptive = true, regret_gamma=R(1.01))
+ solver = ProximalAlgorithms.FastForwardBackward(
+ tol = TOL,
+ adaptive = true,
+ increase_gamma = R(1.01),
+ )
x, it = solver(x0 = x0, f = fA_autodiff, g = g)
@test eltype(x) == T
@test norm(x - x_star, Inf) <= 1e-4