Skip to content

Commit

Permalink
Add multi start strategy (#138)
Browse files Browse the repository at this point in the history
  • Loading branch information
tmigot authored Jun 1, 2024
1 parent 9c4edac commit bb11e8b
Show file tree
Hide file tree
Showing 4 changed files with 153 additions and 2 deletions.
3 changes: 3 additions & 0 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,10 @@ NLPModelsJuMP = "792afdf1-32c1-5681-94e0-d7bf7a5df49e"
NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f"
Percival = "01435c0c-c90d-11e9-3788-63660f8fbccc"
QuadraticModels = "f468eda6-eac5-11e8-05a5-ff9e497bcd19"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"
SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"
SolverParameters = "d076d87d-d1f9-4ea3-a44b-64b4cdd1e470"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"

[compat]
Expand All @@ -35,4 +37,5 @@ Percival = "0.6, 0.7"
QuadraticModels = "0.9.2"
Requires = "1"
SolverCore = "0.3"
SolverParameters = "0.1"
julia = "^1.6.0"
5 changes: 3 additions & 2 deletions src/JSOSuite.jl
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@ module JSOSuite
# other dependencies
using DataFrames, JuMP, Requires
# stdlib
using LinearAlgebra, Logging, SparseArrays
using LinearAlgebra, Logging, Random, SparseArrays
# JSO
using ADNLPModels, LLSModels, NLPModels, NLPModelsJuMP, QuadraticModels
using LinearOperators, NLPModelsModifiers, SolverCore
using LinearOperators, NLPModelsModifiers, SolverCore, SolverParameters
# JSO solvers
using JSOSolvers, Percival

Expand All @@ -17,5 +17,6 @@ include("solve.jl")
include("load-solvers.jl")
include("bmark-solvers.jl")
include("feasible-point.jl")
include("multi-start.jl")

end # module
123 changes: 123 additions & 0 deletions src/multi-start.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
export multi_start

"""
    multi_start(nlp::AbstractNLPModel; kwargs...)

Run a simple multi-start optimization strategy: local optimizers are launched
from the model's initial guess and from several additional initial guesses,
and the best point found (w.r.t. the objective, among first-order solutions)
is returned.

# Arguments
- `nlp::AbstractNLPModel{T, V}`: the model to solve, see `NLPModels.jl`.

# Keywords
- `multi_solvers::Bool = false`: if `true`, run all the available solvers for each
  initial guess; otherwise run only the first selected solver;
- `skip_solvers::Vector{String} = String[]`: if `multi_solvers` is `true`, the solvers
  in this list are skipped;
- `N::Integer = get_nvar(nlp)`: number of additional initial guesses considered;
- `max_time::Float64 = 60.0`: maximum time limit in seconds;
- `verbose::Integer = 0`: if `> 0`, display iteration details every `verbose` iteration;
- `solver_verbose::Integer = 0`: verbosity passed on to the solvers;
- `strategy::Symbol = :random`: strategy used to compute the next initial guess,
  among `[:random]`.

Other keyword arguments are passed to the solvers.
"""
function multi_start end

function multi_start(
    nlp::AbstractNLPModel{T, S};
    multi_solvers::Bool = false,
    skip_solvers::Vector{String} = String[],
    N::Integer = get_nvar(nlp),
    max_time::Float64 = 60.0,
    verbose::Integer = 0,
    solver_verbose::Integer = 0,
    strategy::Symbol = :random,
    kwargs...,
) where {T, S}
    # Sampling domain for new initial guesses: one interval per variable.
    # Parametrized on T instead of hard-coded Float64 so that models whose
    # bounds are not Float64 also work: RealInterval(lvar[i], uvar[i]) has
    # type RealInterval{T} and would not fit in a Vector{RealInterval{Float64}}.
    dom = Vector{RealInterval{T}}(undef, get_nvar(nlp))
    for i in 1:get_nvar(nlp)
        dom[i] = RealInterval(nlp.meta.lvar[i], nlp.meta.uvar[i])
    end

    # Current candidate starting point and the incumbent best point/value.
    new_x0, best_x = S(undef, get_nvar(nlp)), S(undef, get_nvar(nlp))
    new_x0 .= get_x0(nlp)
    best_x .= get_x0(nlp)
    best_obj = obj(nlp, get_x0(nlp))

    # Select the solver(s) applicable to this model.
    select = select_optimizers(nlp)
    if !multi_solvers
        solvers = first(select.name)
    else
        solvers = select.name
        for skip in skip_solvers
            ind_skip = findfirst(x -> x == skip, solvers)
            if !isnothing(ind_skip)
                deleteat!(solvers, ind_skip)
            end
        end
    end
    # Diagnostic output gated by `verbose` (was unconditional debug spam).
    verbose > 0 && @info solvers

    start_time = time()
    verbose > 0 && @info log_header(
        [:i, :f, :normx, :normx0, :status],
        [Int, T, T, T, Symbol],
        hdr_override = Dict(:f => "f(x)", :normx => "‖x‖", :normx0 => "‖x₀‖"),
    )
    # First pass from the model's own initial guess.
    best_obj =
        run_solver!(best_x, best_obj, solvers, nlp, new_x0, verbose, solver_verbose, max_time; kwargs...)

    # N additional starts, each from a new initial guess drawn according to
    # `strategy`, with the remaining time budget passed down.
    for i in 1:N
        get_next_x0!(Val(strategy), new_x0, i, dom, best_x)
        el_time = time() - start_time
        best_obj = run_solver!(
            best_x,
            best_obj,
            solvers,
            nlp,
            new_x0,
            verbose,
            solver_verbose,
            max_time - el_time;
            kwargs...,
        )
    end
    return best_x
end

# Run each solver in `solvers` from `new_x0`; update `best_x`/`best_obj` in
# place whenever a solver reaches a first-order point with a better objective.
# Returns the (possibly improved) best objective value.
function run_solver!(best_x, best_obj, solvers::Vector{String}, nlp, new_x0, verbose, solver_verbose, max_time; kwargs...)
    # Respect the time budget: the original accepted `max_time` but never
    # acted on it, so solvers kept running after the budget was spent.
    max_time ≤ 0 && return best_obj
    for solver_name in solvers
        # NOTE(review): `max_time` is not forwarded to `minimize` — each solver
        # runs with its own default time limit; confirm whether the remaining
        # budget should be passed down.
        stats = minimize(solver_name, nlp, x = new_x0, verbose = solver_verbose; kwargs...)
        if (stats.status == :first_order) && (stats.objective < best_obj)
            best_obj = stats.objective
            best_x .= stats.solution
        end
        verbose > 0 && @info log_row(Any[0, best_obj, norm(best_x), norm(new_x0), stats.status])
    end
    return best_obj
end

# Single-solver variant: run `solver_name` from `new_x0`; update
# `best_x`/`best_obj` in place on improvement and return the best objective.
function run_solver!(best_x, best_obj, solver_name::String, nlp, new_x0, verbose, solver_verbose, max_time; kwargs...)
    # Respect the time budget: the original accepted `max_time` but never
    # acted on it (consistent with the Vector{String} method above).
    max_time ≤ 0 && return best_obj
    # NOTE(review): `max_time` is not forwarded to `minimize` — the solver runs
    # with its own default time limit; confirm whether it should receive the
    # remaining budget.
    stats = minimize(solver_name, nlp, x = new_x0, verbose = solver_verbose; kwargs...)
    if (stats.status == :first_order) && (stats.objective < best_obj)
        best_obj = stats.objective
        best_x .= stats.solution
    end
    verbose > 0 && @info log_row(Any[0, best_obj, norm(best_x), norm(new_x0), stats.status])
    return best_obj
end

# Fill `next_x0` in place with one draw per coordinate, sampled from the
# corresponding interval of `dom`. Returns `next_x0`.
function Random.rand!(rng::AbstractRNG, next_x0::AbstractArray{T}, dom::Vector{RealInterval{T}}) where {T}
    # The original left this check as a comment; enforce it so a mismatch
    # fails loudly instead of silently sampling only part of the point or
    # erroring with an opaque BoundsError.
    length(next_x0) == length(dom) || throw(
        DimensionMismatch("next_x0 has length $(length(next_x0)) but dom has length $(length(dom))"),
    )
    for i in eachindex(dom)
        next_x0[i] = rand(rng, dom[i])
    end
    return next_x0
end

# Compute the next initial guess in place for the `:random` strategy by
# sampling `new_x0` uniformly from the per-variable intervals in `dom`.
# `iter` is the outer multi-start iteration counter; `x` (the incumbent best
# point) is unused by this strategy. Returns `new_x0`.
function get_next_x0!(
    ::Val{:random},
    new_x0::S,
    iter::Integer,
    dom::Vector{RealInterval{T}},
    x::S,
    args...;
    random_seed::Integer = 1234,
    # `Random.default_rng` takes no seed argument (its Int-arg method is an
    # internal thread-id accessor), so the previous default
    # `Random.default_rng(random_seed)` did not actually seed anything and
    # could error. Seeding a fresh RNG with `random_seed + iter` gives a
    # distinct, reproducible draw at every outer iteration; callers may still
    # pass their own `rng` to override.
    rng = Random.MersenneTwister(random_seed + iter),
) where {S, T}
    rand!(rng, new_x0, dom)
    return new_x0
end
24 changes: 24 additions & 0 deletions test/multi-start-test.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
using ADNLPModels, NLPModels, NLPModelsTest, JSOSuite, LinearAlgebra

# Test 1: multi_start with default options on BROWNDEN, a standard
# unconstrained test problem from NLPModelsTest.
@info "Test 1"
nlp = BROWNDEN()
JSOSuite.multi_start(nlp, verbose = 1)

# Test 2: Griewank-type objective in dimension d (many local minima), so a
# single local solve from x0 = 300*ones is unlikely to find the best point.
d = 5
function f(x; d = d)
return sum(x[i]^2 / 4000 - prod(cos(x[i] / sqrt(i)) for i=1:d) + 1 for i=1:d)
end
T = Float64

@info "Test 2"
# Bound-constrained model: new initial guesses are sampled in [-600, 600]^d.
nlp = ADNLPModel(f, 300 * ones(T, d), -600 * ones(T, d), 600 * ones(T, d))
ultimate_x = JSOSuite.multi_start(nlp, N = 50, verbose = 10)

# Gradient norm and objective at the returned point, for manual inspection.
norm(grad(nlp, ultimate_x)), obj(nlp, ultimate_x)

@info "Test 3"
# Unbounded model, running all available solvers except Percival.
nlp = ADNLPModel(f, 300 * ones(T, d))
ultimate_x = JSOSuite.multi_start(nlp, N = 10, verbose = 1, solver_verbose = 0, multi_solvers = true, skip_solvers = ["Percival"])

norm(grad(nlp, ultimate_x)), obj(nlp, ultimate_x)

0 comments on commit bb11e8b

Please sign in to comment.