diff --git a/Project.toml b/Project.toml
index aba32a8..6a3ac7a 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,10 +1,9 @@
 name = "NLLSsolver"
 uuid = "50b5cf9e-bf7a-4e29-8981-4280cbba70cb"
 authors = ["Oliver Woodford"]
-version = "3.3.0"
+version = "3.3.1"
 
 [deps]
-Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
 DiffResults = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 HybridArrays = "1baab800-613f-4b0a-84e4-9cd3431bfbb9"
@@ -23,9 +22,14 @@
 ForwardDiff = "0.10"
 HybridArrays = "0.4"
 IfElse = "0.1.1"
 LDLFactorizations = "0.10"
+LinearAlgebra = "1.6"
 LoopVectorization = "0.12.165"
+Printf = "1.6"
+Random = "1.6"
+SparseArrays = "1.6"
 Static = "0.8.7"
 StaticArrays = "1"
+Test = "1.6"
 VisualGeometryDatasets = "0.2.1"
 julia = "1.6"
diff --git a/src/callbacks.jl b/src/callbacks.jl
index 2f041e7..9fdb200 100644
--- a/src/callbacks.jl
+++ b/src/callbacks.jl
@@ -31,14 +31,16 @@ end
 struct CostTrajectory
     costs::Vector{Float64}
+    times_ns::Vector{UInt64}
     trajectory::Vector{Vector{Float64}}
 
     function CostTrajectory()
-        return new(sizehint!(Vector{Float64}(), 50), sizehint!(Vector{Vector{Float64}}(), 50))
+        return new(sizehint!(Vector{Float64}(), 50), sizehint!(Vector{UInt64}(), 50), sizehint!(Vector{Vector{Float64}}(), 50))
     end
 end
 
 function Base.empty!(ct::CostTrajectory)
     empty!(ct.costs)
+    empty!(ct.times_ns)
     empty!(ct.trajectory)
     return ct
 end
@@ -46,6 +48,7 @@ end
 # Store per-iteration costs and trajectory
 function storecostscallback(store::CostTrajectory, cost, problem, data, unusedargs...)
     push!(store.costs, cost)
+    push!(store.times_ns, Base.time_ns() - data.starttime)
     push!(store.trajectory, Vector{Float64}(data.linsystem.x))
     return (cost, 0)
 end
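Reviewer note: the new `times_ns` field on `CostTrajectory` records, for each outer iteration, the wall-clock time elapsed since `data.starttime` (the start-time field added to `NLLSInternal` in src/structs.jl further down). A minimal usage sketch, assuming a `problem` has already been built (names here are illustrative, not part of the diff):

```julia
ct = NLLSsolver.CostTrajectory()
NLLSsolver.optimize!(problem, NLLSsolver.NLLSOptions(), nothing, NLLSsolver.storecostscallback(ct))

# times_ns holds nanoseconds since the optimization started; scale to seconds for plotting
for (t, c) in zip(ct.times_ns, ct.costs)
    println(t * 1e-9, " s: cost = ", c)
end
```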
diff --git a/src/iterators.jl b/src/iterators.jl
index 03c9271..fda20fc 100644
--- a/src/iterators.jl
+++ b/src/iterators.jl
@@ -11,8 +11,8 @@
 NewtonData(::NLLSProblem, ::NLLSInternal) = NewtonData()
 reset!(nd::NewtonData, ::NLLSProblem, ::NLLSInternal) = nd
 function iterate!(::NewtonData, data, problem::NLLSProblem, options::NLLSOptions)::Float64
-    hessian, gradient = gethessgrad(data.linsystem)
     # Compute the step
+    gethessian(data.linsystem)
     data.timesolver += @elapsed_ns negate!(solve!(data.linsystem, options))
     data.linearsolvers += 1
     # Update the new variables
@@ -119,14 +119,17 @@
 mutable struct LevMarData
     lambda::Float64
 
     function LevMarData(::NLLSProblem, ::NLLSInternal)
-        return new(1.0)
+        return new(0.0)
     end
 end
-reset!(lmd::LevMarData, ::NLLSProblem, ::NLLSInternal) = lmd.lambda = 1.0
+reset!(lmd::LevMarData, ::NLLSProblem, ::NLLSInternal) = lmd.lambda = 0.0
 
 function iterate!(levmardata::LevMarData, data, problem::NLLSProblem, options::NLLSOptions)::Float64
     @assert levmardata.lambda >= 0.
     hessian, gradient = gethessgrad(data.linsystem)
+    if levmardata.lambda == 0
+        levmardata.lambda = tr(hessian) ./ (size(hessian, 1) * 1e6)
+    end
     lastlambda = 0.
     mu = 2.
     while true
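The Levenberg-Marquardt damping is no longer hard-coded to 1.0: a `lambda` of zero now means "uninitialized", and the first call to `iterate!` seeds it as `tr(H) / (n * 1e6)`, i.e. the mean diagonal entry of the Hessian scaled by 1e-6, so the initial damping matches the scale of the problem. A small worked example of the seeding rule (toy matrix, for illustration only):

```julia
using LinearAlgebra

H = [4.0 1.0;
     1.0 3.0]
lambda = tr(H) / (size(H, 1) * 1e6)  # (4.0 + 3.0) / (2 * 1e6) = 3.5e-6
```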
diff --git a/src/linearsystem.jl b/src/linearsystem.jl
index 1d98a6a..45b59b2 100644
--- a/src/linearsystem.jl
+++ b/src/linearsystem.jl
@@ -9,7 +9,7 @@ function getresblocksizes!(resblocksizes, residuals::Vector, ind::Int)::Int
 end
 
 # Uni-variate linear system
-struct UniVariateLSdynamic
+mutable struct UniVariateLSdynamic
     A::Matrix{Float64}
     b::Vector{Float64}
     x::Vector{Float64}
@@ -18,22 +18,9 @@ struct UniVariateLSdynamic
     function UniVariateLSdynamic(unfixed, varlen)
         return new(zeros(Float64, varlen, varlen), zeros(Float64, varlen), Vector{Float64}(undef, varlen), UInt(unfixed))
     end
-
-    function UniVariateLSdynamic(prev::UniVariateLSdynamic, unfixed, varlen)
-        A = prev.A
-        if varlen == length(prev.b)
-            zero!(prev)
-        else
-            A = zeros(Float64, varlen, varlen)
-            resize!(prev.b, varlen)
-            fill!(prev.b, 0)
-            resize!(prev.x, varlen)
-        end
-        return new(A, prev.b, prev.x, UInt(unfixed))
-    end
 end
 
-struct UniVariateLSstatic{N, N2}
+mutable struct UniVariateLSstatic{N, N2}
     A::MMatrix{N, N, Float64, N2}
     b::MVector{N, Float64}
     x::MVector{N, Float64}
@@ -42,11 +29,6 @@ struct UniVariateLSstatic{N, N2}
     function UniVariateLSstatic{N, N2}(unfixed) where {N, N2}
         return new(zeros(MMatrix{N, N, Float64, N2}), zeros(MVector{N, Float64}), MVector{N, Float64}(undef), UInt(unfixed))
     end
-
-    function UniVariateLSstatic{N, N2}(prev::UniVariateLSstatic{N, N2}, unfixed, ::Any) where {N, N2}
-        zero!(prev)
-        return new(prev.A, prev.b, prev.x, UInt(unfixed))
-    end
 end
 
 UniVariateLS = Union{UniVariateLSdynamic, UniVariateLSstatic{N, N2}} where {N, N2}
@@ -69,13 +51,22 @@ struct MultiVariateLSsparse
     sparseindices::Vector{Int}
     ldlfac::LDLFactorizations.LDLFactorization{Float64, Int, Int, Int}
 
-    function MultiVariateLSsparse(A::BlockSparseMatrix, blockindices)
+    function MultiVariateLSsparse(A::BlockSparseMatrix, blockindices, storehessian=true)
         @assert A.rowblocksizes==A.columnblocksizes
         boffsets = computestartindices(A.rowblocksizes)
         blen = boffsets[end] + A.rowblocksizes[end] - 1
-        sparseindices = makesparseindices(A, true)
-        hessian = SparseMatrixCSC{Float64, Int}(sparseindices.m, sparseindices.n, sparseindices.colptr, sparseindices.rowval, Vector{Float64}(undef, length(sparseindices.nzval)))
-        return new(A, zeros(Float64, blen), Vector{Float64}(undef, blen), blockindices, boffsets, hessian, sparseindices.nzval, ldl_analyze(hessian))
+        if storehessian
+            x = Vector{Float64}(undef, blen)
+            sparseindices = makesparseindices(A, true)
+            hessian = SparseMatrixCSC{Float64, Int}(sparseindices.m, sparseindices.n, sparseindices.colptr, sparseindices.rowval, Vector{Float64}(undef, length(sparseindices.nzval)))
+            sparseindices = sparseindices.nzval
+        else
+            x = Vector{Float64}()
+            sparseindices = Vector{Int}()
+            hessian = spzeros(0, 0)
+        end
+        ldlfac = ldl_analyze(hessian)
+        return new(A, zeros(Float64, blen), x, blockindices, boffsets, hessian, sparseindices, ldlfac)
     end
 end
 
@@ -93,18 +84,11 @@ struct MultiVariateLSdense
         blen -= 1
         return new(BlockDenseMatrix{Float64}(boffsets), zeros(Float64, blen), Vector{Float64}(undef, blen), blockindices, boffsets)
     end
-
-    function MultiVariateLSdense(from::MultiVariateLSdense, fromblock::Integer)
-        # Crop an existing linear system
-        boffsets = view(from.A.rowblockoffsets, 1:fromblock)
-        blen = boffsets[end] - 1
-        return new(BlockDenseMatrix{Float64}(boffsets), zeros(Float64, blen), Vector{Float64}(undef, blen), blockindices, boffsets)
-    end
 end
 
 MultiVariateLS = Union{MultiVariateLSsparse, MultiVariateLSdense}
 
-function makesymmvls(problem, unfixed, nblocks)
+function makesymmvls(problem, unfixed, nblocks, formarginalization=false)
     # Multiple variables. Use a block sparse matrix
     blockindices = zeros(UInt, length(problem.variables))
     blocksizes = zeros(UInt, nblocks)
@@ -118,7 +102,7 @@ function makesymmvls(problem, unfixed, nblocks)
     end
 
     # Decide whether to have a sparse or a dense system
-    len = sum(blocksizes)
+    len = formarginalization ? 40 : sum(blocksizes)
     if len >= 40
         # Compute the block sparsity
         sparsity = getvarcostmap(problem)
@@ -126,12 +110,12 @@ function makesymmvls(problem, unfixed, nblocks)
         sparsity = triu(sparse(sparsity * sparsity' .> 0))
 
         # Check sparsity level
-        if sparse_dense_decision(len, block_sparse_nnz(sparsity, blocksizes))
+        if formarginalization || sparse_dense_decision(len, block_sparse_nnz(sparsity, blocksizes))
            # Construct the BSM
            bsm = BlockSparseMatrix{Float64}(sparsity, blocksizes, blocksizes)
 
            # Construct the sparse MultiVariateLS
-            return MultiVariateLSsparse(bsm, blockindices)
+            return MultiVariateLSsparse(bsm, blockindices, !formarginalization)
         end
     end
 
@@ -192,16 +176,18 @@ end
 
 uniformscaling!(linsystem, k) = uniformscaling!(linsystem.A, k)
+
 getgrad(linsystem) = linsystem.b
 
-gethessgrad(linsystem::UniVariateLS) = (linsystem.A, linsystem.b)
-function gethessgrad(linsystem::MultiVariateLSsparse)
+gethessian(linsystem::UniVariateLS) = linsystem.A
+function gethessian(linsystem::MultiVariateLSsparse)
     # Fill sparse hessian
     @inbounds for (i, si) in enumerate(linsystem.sparseindices)
         linsystem.hessian.nzval[i] = linsystem.A.data[si]
     end
-    return linsystem.hessian, linsystem.b
+    return linsystem.hessian
 end
-gethessgrad(linsystem::MultiVariateLSdense) = symmetrifyfull(linsystem.A), linsystem.b
+gethessian(linsystem::MultiVariateLSdense) = symmetrifyfull(linsystem.A)
+gethessgrad(linsystem) = gethessian(linsystem), linsystem.b
 
 function zero!(linsystem)
     fill!(linsystem.b, 0)
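`gethessgrad` is now a thin wrapper around the new `gethessian`, which does the real work (for `MultiVariateLSsparse` that includes copying the block data into the assembled sparse Hessian). Callers that only need the Hessian, such as the damping seeding above, no longer build the tuple. Hypothetical call sites, assuming a constructed linear system `ls`:

```julia
H, g = NLLSsolver.gethessgrad(ls)  # Hessian and gradient together, as before
H = NLLSsolver.gethessian(ls)      # new: Hessian only
g = NLLSsolver.getgrad(ls)         # gradient only (simply ls.b)
```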
diff --git a/src/marginalize.jl b/src/marginalize.jl
index dd2735f..663a056 100644
--- a/src/marginalize.jl
+++ b/src/marginalize.jl
@@ -60,13 +60,13 @@ function marginalize!(to::MultiVariateLS, from::MultiVariateLSsparse, blockind::
     end
 end
 
-function marginalize!(to::MultiVariateLS, from::MultiVariateLS, blocks::AbstractRange, blocksz)
+function marginalize!(to::MultiVariateLS, from::MultiVariateLSsparse, blocks::AbstractRange, blocksz)
     for block in blocks
         marginalize!(to, from, block, blocksz)
     end
 end
 
-function marginalize!(to::MultiVariateLS, from::MultiVariateLS, fromblock = isa(to, MultiVariateLSsparse) ? length(to.A.rowblocksizes)+1 : length(to.A.rowblockoffsets))
+function marginalize!(to::MultiVariateLS, from::MultiVariateLSsparse, fromblock = isa(to, MultiVariateLSsparse) ? length(to.A.rowblocksizes)+1 : length(to.A.rowblockoffsets))
     last = fromblock
     finish = length(from.A.rowblocksizes)
     while last <= finish
@@ -145,12 +145,10 @@ function constructcrop(from::MultiVariateLSsparse, fromblock, forcesparse=false)
             A = BlockSparseMatrix{Float64}(start-1, cropsparsity, blocksizes, blocksizes)
 
             # Construct the sparse linear system
-            return MultiVariateLSsparse(A, from.blockindices)
+            return MultiVariateLSsparse(A, from.blockindices[1:findfirst(isequal(fromblock), from.blockindices)])
         end
     end
 
     # Construct a dense linear system
     return MultiVariateLSdense(toblocksizes, from.blockindices)
 end
-
-constructcrop(from::MultiVariateLSdense, fromblock) = MultiVariateLSdense(from, fromblock)
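`marginalize!` is now restricted to sparse source systems, matching how `makesymmvls` (in src/linearsystem.jl above) builds them: `formarginalization=true` forces the sparse path and passes `storehessian=false`, so the assembled Hessian, the `x` vector and the sparse-index cache stay empty for a system that is only ever marginalized, never solved. A hedged sketch of requesting such a system (arguments as in `makesymmvls`):

```julia
# Sparse system intended only for marginalize!; no assembled Hessian is allocated
ls = NLLSsolver.makesymmvls(problem, unfixed, nblocks, true)
```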
diff --git a/src/optimize.jl b/src/optimize.jl
index 5ac0b0a..5ed51ca 100644
--- a/src/optimize.jl
+++ b/src/optimize.jl
@@ -1,20 +1,19 @@
-
 # Uni-variate optimization (single unfixed variable)
-optimize!(problem::NLLSProblem, options::NLLSOptions, unfixed::Integer, callback=nullcallback, starttimens=Base.time_ns())::NLLSResult = getresult(setupiterator(optimizeinternal!, problem, options, NLLSInternal(UInt(unfixed), nvars(problem.variables[unfixed])), checkcallback(options, callback), starttimens))
+optimize!(problem::NLLSProblem, options::NLLSOptions, unfixed::Integer, callback=nullcallback, starttimens=Base.time_ns())::NLLSResult = getresult(setupiterator(optimizeinternal!, problem, options, NLLSInternal(UInt(unfixed), nvars(problem.variables[unfixed]), starttimens), checkcallback(options, callback)))
 
 # Multi-variate optimization
 function optimize!(problem::NLLSProblem, options::NLLSOptions, unfixed::AbstractVector, callback)::NLLSResult
-    starttime = Base.time_ns()
+    starttimens = Base.time_ns()
     @assert length(problem.variables) > 0
     # Compute the number of free variables (nblocks)
     nblocks = sum(unfixed)
     if nblocks == 1
         # One unfixed variable
         unfixed = findfirst(unfixed)
-        return optimize!(problem, options, unfixed, callback, starttime)
+        return optimize!(problem, options, unfixed, callback, starttimens)
     end
     # Multiple variables
-    return getresult(setupiterator(optimizeinternal!, problem, options, NLLSInternal(makesymmvls(problem, unfixed, nblocks)), checkcallback(options, callback), starttime))
+    return getresult(setupiterator(optimizeinternal!, problem, options, NLLSInternal(makesymmvls(problem, unfixed, nblocks), starttimens), checkcallback(options, callback)))
 end
 
 # Conversions for different types of "unfixed"
@@ -26,12 +25,33 @@ convertunfixed(unfixed, problem) = unfixed
 optimize!(problem::NLLSProblem, options::NLLSOptions=NLLSOptions(), unfixed=nothing, callback=nullcallback) = optimize!(problem, options, convertunfixed(unfixed, problem), callback)
 
 # Optimize one variable at a time
-optimizesingles!(problem::NLLSProblem, options::NLLSOptions, type::DataType, starttime=Base.time_ns())::NLLSResult = getresult(setupiterator(optimizesinglesinternal!, problem, options, NLLSInternal(UInt(1), nvars(type())), type, starttime))
+optimizesingles!(problem::NLLSProblem, options::NLLSOptions, type::DataType) = optimizesingles!(problem, options, findall(v->isa(v, type), problem.variables))
+function optimizesingles!(problem::NLLSProblem{VT, CT}, options::NLLSOptions, indices) where {VT, CT}
+    # Get the indices per cost
+    costindices = sparse(getvarcostmap(problem)')
+    # Put the costs aside
+    allcosts = problem.costs
+    problem.costs = CostStruct{CT}()
+    # Go over the indices, sorted in order of variable size
+    indices = indices[sortperm([dynamic(nvars(problem.variables[i])) for i in indices])]
+    first = 1
+    while first <= length(indices)
+        # Optimize all variables of the same size
+        first = setupiterator(optimizesinglesinternal!, problem, options, NLLSInternal(UInt(1), nvars(problem.variables[indices[first]]), UInt64(0)), allcosts, costindices, indices, first)
+    end
+    problem.costs = allcosts
+    return
+end
 
 checkcallback(::NLLSOptions{Nothing}, callback) = callback
 checkcallback(options::NLLSOptions, ::Any) = options.callback
 
 function setupiterator(func, problem::NLLSProblem, options::NLLSOptions, data::NLLSInternal, trailingargs...)
+    # Copy the variables, if need be
+    if length(problem.variables) != length(problem.varnext)
+        problem.varnext = deepcopy(problem.variables)
+    end
+
     # Call the optimizer with the required iterator struct
     if options.iterator == newton || options.iterator == gaussnewton
         # Newton's method, using Gauss' approximation to the Hessian (optimizing Hessian form)
@@ -57,13 +77,10 @@
 end
 
 # The meat of an optimization
-function optimizeinternal!(problem::NLLSProblem, options::NLLSOptions, data, iteratedata, callback, starttime::UInt64)
-    # Copy the variables
-    if length(problem.variables) != length(problem.varnext)
-        problem.varnext = copy(problem.variables)
-    end
-    stoptime = starttime + options.maxtime
-    data.timeinit += Base.time_ns() - starttime
+function optimizeinternal!(problem::NLLSProblem, options::NLLSOptions, data, iteratedata, callback)
+    # Timing initializations
+    stoptime = data.starttime + options.maxtime
+    data.timeinit += Base.time_ns() - data.starttime
     # Initialize the linear problem
     data.timegradient += @elapsed_ns data.bestcost = costgradhess!(data.linsystem, problem.variables, problem.costs)
     data.gradientcomputations += 1
@@ -136,48 +153,35 @@
         updatefrombest!(problem, data)
     end
     # Return the data to produce the final result
-    data.timetotal += Base.time_ns() - starttime
+    data.timetotal += Base.time_ns() - data.starttime
     return data
 end
 
 # Optimizing variables one at a time (e.g. in alternation)
-function optimizesinglesinternal!(problem::NLLSProblem{VT, CT}, options::NLLSOptions, data::NLLSInternal{LST}, iteratedata, ::Type{type}, starttime::UInt) where {VT, CT, LST<:UniVariateLS, type}
-    # Initialize stats
-    iternum = 0
-    data.costcomputations = 2 # Count first and final cost computation here
-    subprob = NLLSProblem{VT, CT}(problem.variables, CostStruct{CT}())
-    costindices = sparse(getvarcostmap(problem)')
-    data.timeinit = Base.time_ns() - starttime
-    # Compute initial cost
-    data.timecost = @elapsed_ns startcost = cost(problem)
-    # Optimize each variable of the given type, in sequence
-    indices = findall(v->isa(v, type), problem.variables)
-    for ind in indices
-        starttime_ = Base.time_ns()
+function optimizesinglesinternal!(problem::NLLSProblem, options::NLLSOptions, data::NLLSInternal{LST}, iteratedata, allcosts::CostStruct, costindices, indices, first) where {LST<:UniVariateLS}
+    iternum = data.iternum
+    while first <= length(indices)
+        # Bail out if the variable size changes
+        ind = indices[first]
+        if nvars(problem.variables[ind]) != length(data.linsystem.b)
+            break
+        end
+        data.starttime = Base.time_ns()
+        data.linsystem.varindex = UInt(ind)
         # Construct the subset of residuals that depend on this variable
-        subproblem!(subprob, problem, @inbounds(view(costindices.rowval, costindices.colptr[ind]:costindices.colptr[ind+1]-1)))
-        # Update the linear system
-        data.linsystem = LST(data.linsystem, UInt(ind), nvars(problem.variables[ind]::type))
+        selectcosts!(problem.costs, allcosts, @inbounds(view(costindices.rowval, costindices.colptr[ind]:costindices.colptr[ind+1]-1)))
         # Reset the iterator data
         reset!(iteratedata, problem, data)
-        stoptime = Base.time_ns()
-        data.timeinit += stoptime - starttime_
         # Optimize the subproblem
-        optimizeinternal!(subprob, options, data, iteratedata, nullcallback, stoptime)
-        # Accumulate stats
+        optimizeinternal!(problem, options, data, iteratedata, nullcallback)
+        # Increment
         iternum += data.iternum
+        first += 1
     end
-    # Compute final cost
-    data.timecost += @elapsed_ns data.bestcost = cost(problem)
-    # Correct some stats
-    data.startcost = startcost
     data.iternum = iternum
-    data.converged = 0
-    data.timetotal = Base.time_ns() - starttime
-    return data
+    return first
 end
-
 function updatefromnext!(problem::NLLSProblem, ::NLLSInternalMultiVar)
     problem.variables, problem.varnext = problem.varnext, problem.variables
 end
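`optimizesingles!` has been restructured: it sorts the requested variables by size, swaps the relevant cost subset into `problem.costs` for each one, and reuses a single uni-variate linear system per size group, instead of rebuilding the system per variable. Note that it no longer returns an `NLLSResult`, so callers should query the problem afterwards, as the updated test in test/optimizeba.jl below does:

```julia
# Optimize each Point3D variable independently, one at a time
NLLSsolver.optimizesingles!(problem, NLLSsolver.NLLSOptions(), Point3D{Float64})
finalcost = NLLSsolver.cost(problem)
```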
diff --git a/src/problem.jl b/src/problem.jl
index 6d700e8..58fdc77 100644
--- a/src/problem.jl
+++ b/src/problem.jl
@@ -67,13 +67,17 @@ function selectcosts!(outcosts::CostStruct, incosts::Vector{T}, costindices, las
     return lastcost, index
 end
 
-function subproblem!(to::NLLSProblem{VT, CT}, from::NLLSProblem{VT, CT}, costindices::AbstractVector) where {VT, CT}
-    # Copy costs that have unfixed inputs
+function selectcosts!(outcosts::CostStruct, incosts::CostStruct, costindices)
     lastcost = 0
     index = 1
-    for costs in values(from.costs)
-        lastcost, index = selectcosts!(to.costs, costs, costindices, lastcost, index)
+    for costs in values(incosts)
+        lastcost, index = selectcosts!(outcosts, costs, costindices, lastcost, index)
     end
+end
+
+function subproblem!(to::NLLSProblem{VT, CT}, from::NLLSProblem{VT, CT}, costindices::AbstractVector) where {VT, CT}
+    # Copy costs that have unfixed inputs
+    selectcosts!(to.costs, from.costs, costindices)
     # Return the sub-problem
     return to
 end
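The new `CostStruct`-level `selectcosts!` method is what lets `optimizesinglesinternal!` above reuse one problem object, refilling `problem.costs` with just the costs that touch the current variable instead of allocating a fresh sub-problem per variable. Roughly (an internal, unexported call; `allcosts` and `indicesforvar` are illustrative names):

```julia
# Populate problem.costs with only the costs involving the current variable
NLLSsolver.selectcosts!(problem.costs, allcosts, indicesforvar)
```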
diff --git a/src/structs.jl b/src/structs.jl
index 1928bf4..d9eb208 100644
--- a/src/structs.jl
+++ b/src/structs.jl
@@ -1,4 +1,4 @@
-using SparseArrays, Dates, Static
+using SparseArrays, Static
 import IfElse.ifelse
 import Printf.@printf
 
@@ -28,10 +28,11 @@ struct NLLSOptions{T}
     maxtime::UInt64 # Maximum optimization time allowed, in nano-seconds (converted from seconds in the constructor)
     iterator::NLLSIterator # Inner iterator (see above for options)
     callback::T # Callback called every outer iteration - (cost, problem, data) -> (newcost, terminate::Bool) where terminate == true ends the optimization
+    iteratordata # Iterator-specific data, to be passed to the iterator
     storecosts::Bool # Indicates whether the cost per outer iteration should be stored
     storetrajectory::Bool # Indicates whether the step per outer iteration should be stored
 end
-function NLLSOptions(; maxiters=100, reldcost=1.e-15, absdcost=1.e-15, dstep=1.e-15, maxfails=3, maxtime=30.0, iterator=levenbergmarquardt, callback::T=nothing, storecosts=false, storetrajectory=false) where T
+function NLLSOptions(; maxiters=100, reldcost=1.e-15, absdcost=1.e-15, dstep=1.e-15, maxfails=3, maxtime=30.0, iterator=levenbergmarquardt, callback::T=nothing, iteratordata=nothing, storecosts=false, storetrajectory=false) where T
     if iterator == gaussnewton
         Base.depwarn("gaussnewton is deprecated. Use newton instead", :NLLSOptions)
     end
@@ -41,7 +42,7 @@ function NLLSOptions(; maxiters=100, reldcost=1.e-15, absdcost=1.e-15, dstep=1.e
     if !isnothing(callback)
         Base.depwarn("Setting callback in options is deprecated. Pass the callback directly to optimize!() instead", :NLLSOptions)
     end
-    NLLSOptions{T}(reldcost, absdcost, dstep, maxfails, maxiters, UInt64(round(maxtime * 1e9)), iterator, callback, storecosts, storetrajectory)
+    NLLSOptions{T}(reldcost, absdcost, dstep, maxfails, maxiters, UInt64(round(maxtime * 1e9)), iterator, callback, iteratordata, storecosts, storetrajectory)
 end
 
 struct NLLSResult
@@ -95,6 +96,7 @@ mutable struct NLLSInternal{LSType}
     startcost::Float64
     bestcost::Float64
     # Times (nano-seconds)
+    starttime::UInt64
     timetotal::UInt64
     timeinit::UInt64
     timecost::UInt64
@@ -112,11 +114,11 @@ mutable struct NLLSInternal{LSType}
     costs::Vector{Float64}
     trajectory::Vector{Vector{Float64}}
 
-    function NLLSInternal(linsystem::LSType) where LSType
-        return new{LSType}(0., 0., 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, linsystem, Vector{Float64}(), Vector{Vector{Float64}}())
+    function NLLSInternal(linsystem::LSType, starttimens) where LSType
+        return new{LSType}(0., 0., starttimens, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, linsystem, Vector{Float64}(), Vector{Vector{Float64}}())
     end
 end
 
-NLLSInternal(unfixed::UInt, varlen) = NLLSInternal(ifelse(is_static(varlen), UniVariateLSstatic{dynamic(varlen), dynamic(varlen*varlen)}(unfixed), UniVariateLSdynamic(unfixed, dynamic(varlen))))
+NLLSInternal(unfixed::UInt, varlen, starttimens) = NLLSInternal(ifelse(is_static(varlen), UniVariateLSstatic{dynamic(varlen), dynamic(varlen*varlen)}(unfixed), UniVariateLSdynamic(unfixed, dynamic(varlen))), starttimens)
 
 getresult(data::NLLSInternal) = NLLSResult(data.startcost, data.bestcost, data.timetotal*1.e-9, data.timeinit*1.e-9, data.timecost*1.e-9, data.timegradient*1.e-9, data.timesolver*1.e-9, data.converged, data.iternum, data.costcomputations, data.gradientcomputations, data.linearsolvers, data.costs, data.trajectory)
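`NLLSOptions` gains an `iteratordata` field (default `nothing`) for threading iterator-specific state through without further keyword plumbing; note that nothing in this diff consumes it yet. Run timing is now anchored at `NLLSInternal.starttime` rather than a `starttime` argument passed alongside the data; the timings reported in `NLLSResult` are unchanged (seconds, converted from nanoseconds in `getresult`). A sketch:

```julia
result = NLLSsolver.optimize!(problem, NLLSsolver.NLLSOptions(iteratordata=nothing))
println("total: ", result.timetotal, " s, of which init: ", result.timeinit, " s")
```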
diff --git a/test/functional.jl b/test/functional.jl
index 8d2c923..ac27b47 100644
--- a/test/functional.jl
+++ b/test/functional.jl
@@ -49,11 +49,13 @@ Base.eltype(::RosenbrockB) = Float64
 
     # Check callback and max time termination
     result = NLLSsolver.optimize!(problem, NLLSsolver.NLLSOptions(maxtime=0.0), nothing, (cost, unusedargs...)->(cost, 13))
+    @test NLLSsolver.cost(problem) == result.bestcost
     @test result.termination == (1 << 9) | (13 << 16)
     @test result.niterations == 1
 
     # Optimize using Newton
     result = NLLSsolver.optimize!(problem, NLLSsolver.NLLSOptions(iterator=NLLSsolver.newton))
+    @test NLLSsolver.cost(problem) == result.bestcost
     @test isapprox(problem.variables[1], 1.0; rtol=1.e-10)
     @test isapprox(problem.variables[2], 1.0; rtol=1.e-10)
 
@@ -62,15 +64,23 @@ Base.eltype(::RosenbrockB) = Float64
     problem.variables[2] = 2.5
     ct = NLLSsolver.CostTrajectory()
     result = NLLSsolver.optimize!(problem, NLLSsolver.NLLSOptions(iterator=NLLSsolver.levenbergmarquardt), nothing, NLLSsolver.storecostscallback(ct))
+    @test NLLSsolver.cost(problem) == result.bestcost
     @test isapprox(problem.variables[1], 1.0; rtol=1.e-10)
     @test isapprox(problem.variables[2], 1.0; rtol=1.e-10)
+    # Check callback results
+    len = length(ct.costs)
+    @test length(ct.times_ns) == len
+    @test length(ct.trajectory) == len
     @test all(diff(ct.costs) .<= 0.0) # Check costs decrease
+    @test all(diff(ct.times_ns) .>= 0.0) # Check times increase
+    @test all(x -> length(x) == 2, ct.trajectory) # Check the trajectory lengths
 
     # Optimize using dogleg
     problem.variables[1] = -0.5
     problem.variables[2] = 2.5
     empty!(ct)
     result = NLLSsolver.optimize!(problem, NLLSsolver.NLLSOptions(iterator=NLLSsolver.dogleg), nothing, NLLSsolver.storecostscallback(ct.costs))
+    @test NLLSsolver.cost(problem) == result.bestcost
     @test isapprox(problem.variables[1], 1.0; rtol=1.e-10)
     @test isapprox(problem.variables[2], 1.0; rtol=1.e-10)
     @test all(diff(ct.costs) .<= 0.0) # Check costs decrease
@@ -81,6 +91,7 @@ Base.eltype(::RosenbrockB) = Float64
     display(problem)
     result = NLLSsolver.optimize!(problem, NLLSsolver.NLLSOptions(iterator=NLLSsolver.gradientdescent), nothing, NLLSsolver.printoutcallback)
     display(result)
+    @test NLLSsolver.cost(problem) == result.bestcost
     @test isapprox(problem.variables[1], 1.0; rtol=1.e-5)
     @test isapprox(problem.variables[2], 1.0; rtol=1.e-5)
 end
diff --git a/test/optimizeba.jl b/test/optimizeba.jl
index 3b90245..257bc01 100644
--- a/test/optimizeba.jl
+++ b/test/optimizeba.jl
@@ -67,17 +67,19 @@ end
     @test cost(problem) ≈ costbefore
 
     # Optimze just the landmarks
-    result = NLLSsolver.optimizesingles!(problem, NLLSOptions(), Point3D{Float64})
-    @test result.bestcost < 1.e-15
+    NLLSsolver.optimizesingles!(problem, NLLSOptions(), Point3D{Float64})
+    @test NLLSsolver.cost(problem) < 1.e-15
 
     # Optimize problem
     problem = perturb_ba_problem(problem, 0.001, 0.001)
     result = optimize!(problem)
+    @test NLLSsolver.cost(problem) == result.bestcost
     @test result.bestcost < 1.e-15
 
     # Generate & optimize a sparse problem
     problem = create_ba_problem(10, 50, 0.3)
     problem = perturb_ba_problem(problem, 0.001, 0.001)
     result = optimize!(problem)
+    @test NLLSsolver.cost(problem) == result.bestcost
     @test result.bestcost < 1.e-15
 end