From d2680857745a2f90f751e3f90ed99cdbc2de4b74 Mon Sep 17 00:00:00 2001
From: "Oliver J. Woodford"
Date: Thu, 21 Sep 2023 19:21:41 +0100
Subject: [PATCH] Ver 3.2.1

* Performance improvements
* Fewer memory allocations
* Greater test coverage
---
 Project.toml                   |   2 +-
 src/NLLSsolver.jl              |   2 +-
 src/VectorRepo.jl              |  12 ++-
 src/iterators.jl               |  29 +++++--
 src/linearsolver.jl            |   9 ++-
 src/linearsystem.jl            |  22 ++++-
 src/optimize.jl                | 142 ++++++++++++++++-----------------
 src/structs.jl                 |  26 ++++--
 src/visualgeometry/geometry.jl |  11 ++-
 test/optimizeba.jl             |  76 ++++++++++++++++++
 test/runtests.jl               |   1 +
 11 files changed, 234 insertions(+), 98 deletions(-)
 create mode 100644 test/optimizeba.jl

diff --git a/Project.toml b/Project.toml
index 421fab33..f8264f3c 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "NLLSsolver"
 uuid = "50b5cf9e-bf7a-4e29-8981-4280cbba70cb"
 authors = ["Oliver Woodford"]
-version = "3.2.0"
+version = "3.2.1"
 
 [deps]
 Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
diff --git a/src/NLLSsolver.jl b/src/NLLSsolver.jl
index 80ef63e9..1fa8e035 100644
--- a/src/NLLSsolver.jl
+++ b/src/NLLSsolver.jl
@@ -6,7 +6,7 @@ export NLLSProblem, NLLSOptions, NLLSResult, NLLSIterator # Concrete problem & s
 export AbstractRobustifier, NoRobust, Scaled, HuberKernel, Huber2oKernel, GemanMcclureKernel # Robustifiers
 export ContaminatedGaussian # Adaptive robustifiers
 export EuclideanVector, ZeroToInfScalar, ZeroToOneScalar # Concrete general variables
-export Rotation3DR, Rotation3DL, Point3D, Pose3D, EffPose3D, UnitPose3D # Concrete 3D geometry variable types
+export Rotation3DR, Rotation3DL, Point3D, Pose3D, EffPose3D, UnitVec3D, UnitPose3D # Concrete 3D geometry variable types
 export SimpleCamera, NoDistortionCamera, ExtendedUnifiedCamera, BarrelDistortion, EULensDistortion # Concrete camera sensor & lens variable types
 export CostTrajectory # Object for storing opimization trajectory and costs
 # Functions
diff --git a/src/VectorRepo.jl b/src/VectorRepo.jl
index c7644aa3..188e19aa 100644
--- a/src/VectorRepo.jl
+++ b/src/VectorRepo.jl
@@ -8,14 +8,20 @@ struct VectorRepo{T}
 end
 VectorRepo() = VectorRepo{Any}()
 
-@inline function Base.get(vr::VectorRepo{T}, type::DataType)::Vector{type} where T
+function Base.get(vr::VectorRepo{T}, ::Type{type})::Vector{type} where {T, type}
     @assert type<:T "Invalid type"
     return haskey(vr.data, type) ? vr.data[type]::Vector{type} : Vector{type}()
 end
 
-@inline function Base.get!(vr::VectorRepo{T}, type::DataType)::Vector{type} where T
+function Base.get!(vr::VectorRepo{T}, ::Type{type})::Vector{type} where {T, type}
     @assert type<:T "Invalid type"
-    return get!(vr.data, type, Vector{type}())::Vector{type}
+    if haskey(vr.data, type)
+        vec = vr.data[type]::Vector{type}
+    else
+        vec = Vector{type}()
+        vr.data[type] = vec
+    end
+    return vec
 end
 
 @inline Base.push!(vr::VectorRepo, v::T) where T = push!(get!(vr, T), v)
diff --git a/src/iterators.jl b/src/iterators.jl
index 414e9d68..03c92714 100644
--- a/src/iterators.jl
+++ b/src/iterators.jl
@@ -7,6 +7,8 @@ negate!(x) = @.(x = -x)
 
 # Newton optimization (undamped-Hessian form)
 struct NewtonData end
+NewtonData(::NLLSProblem, ::NLLSInternal) = NewtonData()
+reset!(nd::NewtonData, ::NLLSProblem, ::NLLSInternal) = nd
 
 function iterate!(::NewtonData, data, problem::NLLSProblem, options::NLLSOptions)::Float64
     hessian, gradient = gethessgrad(data.linsystem)
@@ -22,14 +24,23 @@ function iterate!(::NewtonData, data, problem::NLLSProblem, options::NLLSOptions
 end
 
 # Dogleg optimization
-mutable struct DoglegData
+mutable struct DoglegData{T}
     trustradius::Float64
-    cauchy::Vector{Float64}
+    cauchy::T
 
-    function DoglegData(varlen)
-        return new(0.0, Vector{Float64}(undef, varlen))
+    function DoglegData(::NLLSProblem, data::NLLSInternal)
+        return new{typeof(data.linsystem.x)}(0.0, similar(data.linsystem.x))
     end
 end
+function reset!(dd::DoglegData{T}, ::NLLSProblem, data::NLLSInternal) where T<:Vector
+    dd.trustradius = 0.0
+    resize!(dd.cauchy, length(data.linsystem.x))
+    return
+end
+function reset!(dd::DoglegData{T}, ::NLLSProblem, data::NLLSInternal) where T<:StaticVector
+    dd.trustradius = 0.0
+    return
+end
 
 function iterate!(doglegdata::DoglegData, data, problem::NLLSProblem, options::NLLSOptions)::Float64
     hessian, gradient = gethessgrad(data.linsystem)
@@ -37,7 +48,7 @@ function iterate!(doglegdata::DoglegData, data, problem::NLLSProblem, options::N
     # Compute the Cauchy step
     gnorm2 = gradient' * gradient
     a = gnorm2 / (fast_bAb(hessian, gradient) + floatmin(eltype(gradient)))
-    doglegdata.cauchy = -a * gradient
+    doglegdata.cauchy .= -a * gradient
    alpha2 = a * a * gnorm2
     alpha = sqrt(alpha2)
     if doglegdata.trustradius == 0
@@ -107,10 +118,11 @@ printoutcallback(cost, problem, data, iteratedata::DoglegData) = printoutcallbac
 mutable struct LevMarData
     lambda::Float64
 
-    function LevMarData()
+    function LevMarData(::NLLSProblem, ::NLLSInternal)
         return new(1.0)
     end
 end
+reset!(lmd::LevMarData, ::NLLSProblem, ::NLLSInternal) = lmd.lambda = 1.0
 
 function iterate!(levmardata::LevMarData, data, problem::NLLSProblem, options::NLLSOptions)::Float64
     @assert levmardata.lambda >= 0.
@@ -149,7 +161,12 @@ printoutcallback(cost, problem, data, iteratedata::LevMarData) = printoutcallbac
 # Gradient descent optimization
 mutable struct GradientDescentData
     stepsize::Float64
+
+    function GradientDescentData(::NLLSProblem, ::NLLSInternal)
+        return new(1.0)
+    end
 end
+reset!(gdd::GradientDescentData, ::NLLSProblem, ::NLLSInternal) = gdd.stepsize = 1.0
 
 function iterate!(gddata::GradientDescentData, data, problem::NLLSProblem, options::NLLSOptions)::Float64
     gradient = getgrad(data.linsystem)
diff --git a/src/linearsolver.jl b/src/linearsolver.jl
index d6892151..17198daf 100644
--- a/src/linearsolver.jl
+++ b/src/linearsolver.jl
@@ -5,13 +5,16 @@ import LinearAlgebra.ldiv!
ldiv!(x, A, b) = x .= ldiv(A, b) function try_cholesky!(x::StaticVector, A::StaticMatrix, b::StaticVector) + # Convert to immutable type, to avoid allocations + A_ = SMatrix(A) + b_ = SVector(b) # Handle this issue with StaticArrays cholesky: https://github.com/JuliaArrays/StaticArrays.jl/issues/1194 - cholfac = StaticArrays._cholesky(Size(A), A, false) + cholfac = StaticArrays._cholesky(Size(A_), A_, false) if issuccess(cholfac) - return x .= cholfac \ b + return x .= cholfac \ b_ end # Handle this issue with StaticArrays qr: https://github.com/JuliaArrays/StaticArrays.jl/issues/1192 - return x .= A \ b + return x .= A_ \ b_ end function try_cholesky!(x, A, b) diff --git a/src/linearsystem.jl b/src/linearsystem.jl index b85897e6..7c8a0bd3 100644 --- a/src/linearsystem.jl +++ b/src/linearsystem.jl @@ -18,6 +18,19 @@ struct UniVariateLSdynamic function UniVariateLSdynamic(unfixed, varlen) return new(zeros(Float64, varlen, varlen), zeros(Float64, varlen), Vector{Float64}(undef, varlen), UInt(unfixed)) end + + function UniVariateLSdynamic(prev::UniVariateLSdynamic, unfixed, varlen) + A = prev.A + if varlen == length(prev.b) + zero!(prev) + else + A = zeros(Float64, varlen, varlen) + resize!(prev.b, varlen) + fill!(prev.b, 0) + resize!(prev.x, varlen) + end + return new(A, prev.b, prev.x, UInt(unfixed)) + end end struct UniVariateLSstatic{N, N2} @@ -29,6 +42,11 @@ struct UniVariateLSstatic{N, N2} function UniVariateLSstatic{N, N2}(unfixed) where {N, N2} return new(zeros(MMatrix{N, N, Float64, N2}), zeros(MVector{N, Float64}), MVector{N, Float64}(undef), UInt(unfixed)) end + + function UniVariateLSstatic{N, N2}(prev::UniVariateLSstatic{N, N2}, unfixed, ::Any) where {N, N2} + zero!(prev) + return new(prev.A, prev.b, prev.x, UInt(unfixed)) + end end UniVariateLS = Union{UniVariateLSdynamic, UniVariateLSstatic{N, N2}} where {N, N2} @@ -177,8 +195,8 @@ getgrad(linsystem) = linsystem.b gethessgrad(linsystem::UniVariateLS) = (linsystem.A, linsystem.b) function gethessgrad(linsystem::MultiVariateLSsparse) # Fill sparse hessian - for i in eachindex(linsystem.sparseindices) - @inbounds linsystem.hessian.nzval[i] = linsystem.A.data[linsystem.sparseindices[i]] + @inbounds for (i, si) in enumerate(linsystem.sparseindices) + linsystem.hessian.nzval[i] = linsystem.A.data[si] end return linsystem.hessian, linsystem.b end diff --git a/src/optimize.jl b/src/optimize.jl index 49102389..9fc9fd81 100644 --- a/src/optimize.jl +++ b/src/optimize.jl @@ -1,6 +1,6 @@ # Uni-variate optimization (single unfixed variable) -optimize!(problem::NLLSProblem, options::NLLSOptions, unfixed::Integer, callback=nullcallback, starttimens=Base.time_ns())::NLLSResult = optimizeinternal_checkcallback!(problem, options, NLLSInternal(UInt(unfixed), nvars(problem.variables[unfixed])), callback, starttimens) +optimize!(problem::NLLSProblem, options::NLLSOptions, unfixed::Integer, callback=nullcallback, starttimens=Base.time_ns())::NLLSResult = getresult(setupiterator(optimizeinternal!, problem, options, NLLSInternal(UInt(unfixed), nvars(problem.variables[unfixed])), checkcallback(options, callback), starttimens)) # Multi-variate optimization function optimize!(problem::NLLSProblem, options::NLLSOptions, unfixed::AbstractVector, callback)::NLLSResult @@ -13,8 +13,8 @@ function optimize!(problem::NLLSProblem, options::NLLSOptions, unfixed::Abstract unfixed = findfirst(unfixed) return optimize!(problem, options, unfixed, callback, starttime) end - # Multiple variables. 
Use a block sparse matrix - return optimizeinternal_checkcallback!(problem, options, NLLSInternal(makesymmvls(problem, unfixed, nblocks)), callback, starttime) + # Multiple variables + return getresult(setupiterator(optimizeinternal!, problem, options, NLLSInternal(makesymmvls(problem, unfixed, nblocks)), checkcallback(options, callback), starttime)) end # Conversions for different types of "unfixed" @@ -26,98 +26,52 @@ convertunfixed(unfixed, problem) = unfixed optimize!(problem::NLLSProblem, options::NLLSOptions=NLLSOptions(), unfixed=nothing, callback=nullcallback) = optimize!(problem, options, convertunfixed(unfixed, problem), callback) # Optimize one variable at a time -function optimizesingles!(problem::NLLSProblem{VT, CT}, options::NLLSOptions, type::DataType)::NLLSResult where {VT, CT} - starttime = Base.time_ns() - # Compute initial cost - timecost = @elapsed startcost = cost(problem) - # Initialize stats - subprobinit = @elapsed_ns begin - timeinit = 0.0 - timegradient = 0.0 - timesolver = 0.0 - iternum = 0 - costcomputations = 2 # Count final cost computation here - gradientcomputations = 0 - linearsolvers = 0 - subprob = NLLSProblem{VT, CT}(problem.variables, CostStruct{CT}()) - costindices = sparse(getvarcostmap(problem)') - end - # Optimize each variable of the given type, in sequence - for (ind, var) in enumerate(problem.variables) - # Skip variables of a different type - if !isa(var, type) - continue - end - # Construct the subset of residuals that depend on this variable - subprobinit += @elapsed_ns subproblem!(subprob, problem, @inbounds(view(costindices.rowval, costindices.colptr[ind]:costindices.colptr[ind+1]-1))) - # Optimize the subproblem - result = optimize!(subprob, options, ind) - # Accumulate stats - timeinit += result.timeinit - timecost += result.timecost - timegradient += result.timegradient - timesolver += result.timesolver - iternum += result.niterations - costcomputations += result.costcomputations - gradientcomputations += result.gradientcomputations - linearsolvers += result.linearsolvers - end - # Compute final cost - timecost += @elapsed endcost = cost(problem) - return NLLSResult(startcost, endcost, (Base.time_ns() - starttime)*1.e-9, timeinit + subprobinit*1.e-9, timecost, timegradient, timesolver, 0, iternum, costcomputations, gradientcomputations, linearsolvers, Vector{Float64}(), Vector{Vector{Float64}}()) -end +optimizesingles!(problem::NLLSProblem, options::NLLSOptions, type::DataType, starttime=Base.time_ns())::NLLSResult = getresult(setupiterator(optimizesinglesinternal!, problem, options, NLLSInternal(UInt(1), nvars(type())), type, starttime)) -function optimizeinternal_checkcallback!(problem::NLLSProblem, options::NLLSOptions, data::NLLSInternal, callback, starttimens::UInt64)::NLLSResult - if isnothing(options.callback) - return optimizeinternal!(problem, options, data, callback, starttimens) - else - Base.depwarn("Setting callback in options is deprecated. 
Pass the callback directly to optimize!() instead", :NLLSOptions) - return optimizeinternal!(problem, options, data, options.callback, starttimens) - end -end +checkcallback(::NLLSOptions{Nothing}, callback) = callback +checkcallback(options::NLLSOptions, ::Any) = options.callback -function optimizeinternal!(problem::NLLSProblem, options::NLLSOptions, data::NLLSInternal, callback, starttimens::UInt64)::NLLSResult - # Copy the variables - if length(problem.variables) != length(problem.varnext) - problem.varnext = copy(problem.variables) - end +function setupiterator(func, problem::NLLSProblem, options::NLLSOptions, data::NLLSInternal, trailingargs...) # Call the optimizer with the required iterator struct if options.iterator == newton || options.iterator == gaussnewton # Newton's method, using Gauss' approximation to the Hessian (optimizing Hessian form) - newtondata = NewtonData() - return optimizeinternal!(problem, options, data, newtondata, callback, starttimens) + newtondata = NewtonData(problem, data) + return func(problem, options, data, newtondata, trailingargs...) end if options.iterator == levenbergmarquardt # Levenberg-Marquardt - levmardata = LevMarData() - return optimizeinternal!(problem, options, data, levmardata, callback, starttimens) + levmardata = LevMarData(problem, data) + return func(problem, options, data, levmardata, trailingargs...) end if options.iterator == dogleg # Dogleg - doglegdata = DoglegData(length(data.linsystem.x)) - return optimizeinternal!(problem, options, data, doglegdata, callback, starttimens) + doglegdata = DoglegData(problem, data) + return func(problem, options, data, doglegdata, trailingargs...) end if options.iterator == gradientdescent # Gradient descent - gddata = GradientDescentData(1.0) - return optimizeinternal!(problem, options, data, gddata, callback, starttimens) + gddata = GradientDescentData(problem, data) + return func(problem, options, data, gddata, trailingargs...) 
end error("Iterator not recognized") end -function optimizeinternal!(problem::NLLSProblem, options::NLLSOptions, data, iteratedata, callback, starttime::UInt64)::NLLSResult - timeinit = Base.time_ns() - starttime +# The meat of an optimization +function optimizeinternal!(problem::NLLSProblem, options::NLLSOptions, data, iteratedata, callback, starttime::UInt64) + # Copy the variables + if length(problem.variables) != length(problem.varnext) + problem.varnext = copy(problem.variables) + end + data.timeinit += Base.time_ns() - starttime # Initialize the linear problem data.timegradient += @elapsed_ns data.bestcost = costgradhess!(data.linsystem, problem.variables, problem.costs) data.gradientcomputations += 1 - # Initialize the results - startcost = data.bestcost - costs = Vector{Float64}() - trajectory = Vector{Vector{Float64}}() + data.startcost = data.bestcost # Do the iterations fails = 0 cost = data.bestcost converged = 0 + data.iternum = 0 while true data.iternum += 1 # Call the per iteration solver @@ -126,7 +80,7 @@ function optimizeinternal!(problem::NLLSProblem, options::NLLSOptions, data, ite cost, terminate = callback(cost, problem, data, iteratedata)::Tuple{Float64, Int} # Store the cost if necessary if options.storecosts - push!(costs, cost) + push!(data.costs, cost) end # Check for cost increase (only some iterators will do this) dcost = data.bestcost - cost @@ -149,7 +103,7 @@ function optimizeinternal!(problem::NLLSProblem, options::NLLSOptions, data, ite updatefromnext!(problem, data) if options.storetrajectory # Store the variable trajectory (as update vectors) - push!(trajectory, copy(data.linsystem.x)) + push!(data.trajectory, copy(data.linsystem.x)) end # Check for termination maxstep = maximum(abs, data.linsystem.x) @@ -164,6 +118,7 @@ function optimizeinternal!(problem::NLLSProblem, options::NLLSOptions, data, ite converged |= (fails > options.maxfails) << 7 # Max number of consecutive failed iterations reach converged |= (data.iternum >= options.maxiters) << 8 # Max number of iterations reached converged |= terminate << 9 # Terminated by the user-defined callback + data.converged = converged if converged != 0 break end @@ -178,10 +133,49 @@ function optimizeinternal!(problem::NLLSProblem, options::NLLSOptions, data, ite # Update the problem variables to the best ones found updatefrombest!(problem, data) end - # Return the result - return NLLSResult(startcost, data.bestcost, (Base.time_ns()-starttime)*1.e-9, timeinit*1.e-9, data.timecost*1.e-9, data.timegradient*1.e-9, data.timesolver*1.e-9, converged, data.iternum, data.costcomputations, data.gradientcomputations, data.linearsolvers, costs, trajectory) + # Return the data to produce the final result + data.timetotal += Base.time_ns() - starttime + return data +end + +# Optimizing variables one at a time (e.g. 
in alternation) +function optimizesinglesinternal!(problem::NLLSProblem{VT, CT}, options::NLLSOptions, data::NLLSInternal{LST}, iteratedata, ::Type{type}, starttime::UInt) where {VT, CT, LST<:UniVariateLS, type} + # Initialize stats + iternum = 0 + data.costcomputations = 2 # Count first and final cost computation here + subprob = NLLSProblem{VT, CT}(problem.variables, CostStruct{CT}()) + costindices = sparse(getvarcostmap(problem)') + data.timeinit = Base.time_ns() - starttime + # Compute initial cost + data.timecost = @elapsed_ns startcost = cost(problem) + # Optimize each variable of the given type, in sequence + indices = findall(v->isa(v, type), problem.variables) + for ind in indices + starttime_ = Base.time_ns() + # Construct the subset of residuals that depend on this variable + subproblem!(subprob, problem, @inbounds(view(costindices.rowval, costindices.colptr[ind]:costindices.colptr[ind+1]-1))) + # Update the linear system + data.linsystem = LST(data.linsystem, UInt(ind), nvars(problem.variables[ind]::type)) + # Reset the iterator data + reset!(iteratedata, problem, data) + stoptime = Base.time_ns() + data.timeinit += stoptime - starttime_ + # Optimize the subproblem + optimizeinternal!(subprob, options, data, iteratedata, nullcallback, stoptime) + # Accumulate stats + iternum += data.iternum + end + # Compute final cost + data.timecost += @elapsed_ns data.bestcost = cost(problem) + # Correct some stats + data.startcost = startcost + data.iternum = iternum + data.converged = 0 + data.timetotal = Base.time_ns() - starttime + return data end + function updatefromnext!(problem::NLLSProblem, ::NLLSInternalMultiVar) problem.variables, problem.varnext = problem.varnext, problem.variables end diff --git a/src/structs.jl b/src/structs.jl index 9fb51152..988a4fe9 100644 --- a/src/structs.jl +++ b/src/structs.jl @@ -19,25 +19,28 @@ function Base.String(iterator::NLLSIterator) return "Unknown iterator" end -struct NLLSOptions +struct NLLSOptions{T} reldcost::Float64 # Minimum relative reduction in cost required to avoid termination absdcost::Float64 # Minimum absolute reduction in cost required to avoid termination dstep::Float64 # Minimum L-infinity norm of the update vector required to avoid termination maxfails::Int # Maximum number of consecutive iterations that have a higher cost than the current best before termination maxiters::Int # Maximum number of outer iterations iterator::NLLSIterator # Inner iterator (see above for options) - callback # Callback called every outer iteration - (cost, problem, data) -> (newcost, terminate::Bool) where terminate == true ends the optimization + callback::T # Callback called every outer iteration - (cost, problem, data) -> (newcost, terminate::Bool) where terminate == true ends the optimization storecosts::Bool # Indicates whether the cost per outer iteration should be stored storetrajectory::Bool # Indicates whether the step per outer iteration should be stored end -function NLLSOptions(; maxiters=100, reldcost=1.e-15, absdcost=1.e-15, dstep=1.e-15, maxfails=3, iterator=levenbergmarquardt, callback=nothing, storecosts=false, storetrajectory=false) +function NLLSOptions(; maxiters=100, reldcost=1.e-15, absdcost=1.e-15, dstep=1.e-15, maxfails=3, iterator=levenbergmarquardt, callback::T=nothing, storecosts=false, storetrajectory=false) where T if iterator == gaussnewton Base.depwarn("gaussnewton is deprecated. Use newton instead", :NLLSOptions) end if storecosts || storetrajectory Base.depwarn("storecosts and storetrajectory are deprecated. 
Use storecostscallback instead", :NLLSOptions) end - NLLSOptions(reldcost, absdcost, dstep, maxfails, maxiters, iterator, callback, storecosts, storetrajectory) + if !isnothing(callback) + Base.depwarn("Setting callback in options is deprecated. Pass the callback directly to optimize!() instead", :NLLSOptions) + end + NLLSOptions{T}(reldcost, absdcost, dstep, maxfails, maxiters, iterator, callback, storecosts, storetrajectory) end struct NLLSResult @@ -86,21 +89,34 @@ function Base.show(io::IO, x::NLLSResult) end mutable struct NLLSInternal{LSType} + # Costs + startcost::Float64 bestcost::Float64 + # Times (nano-seconds) + timetotal::UInt64 + timeinit::UInt64 timecost::UInt64 timegradient::UInt64 timesolver::UInt64 + # Counts iternum::Int costcomputations::Int gradientcomputations::Int linearsolvers::Int + converged::Int + # Linear system linsystem::LSType + # Intermediate cost / trajectory storage (deprecated) + costs::Vector{Float64} + trajectory::Vector{Vector{Float64}} function NLLSInternal(linsystem::LSType) where LSType - return new{LSType}(0., 0, 0, 0, 0, 0, 0, 0, linsystem) + return new{LSType}(0., 0., 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, linsystem, Vector{Float64}(), Vector{Vector{Float64}}()) end end NLLSInternal(unfixed::UInt, varlen) = NLLSInternal(ifelse(is_static(varlen), UniVariateLSstatic{dynamic(varlen), dynamic(varlen*varlen)}(unfixed), UniVariateLSdynamic(unfixed, dynamic(varlen)))) +getresult(data::NLLSInternal) = NLLSResult(data.startcost, data.bestcost, data.timetotal*1.e-9, data.timeinit*1.e-9, data.timecost*1.e-9, data.timegradient*1.e-9, data.timesolver*1.e-9, data.converged, data.iternum, data.costcomputations, data.gradientcomputations, data.linearsolvers, data.costs, data.trajectory) + NLLSInternalMultiVar = Union{NLLSInternal{MultiVariateLSdense}, NLLSInternal{MultiVariateLSsparse}} NLLSInternalSingleVar = Union{NLLSInternal{UniVariateLSstatic{N, N2}}, NLLSInternal{UniVariateLSdynamic}} where {N, N2} diff --git a/src/visualgeometry/geometry.jl b/src/visualgeometry/geometry.jl index 65d9d48d..8c84de6d 100644 --- a/src/visualgeometry/geometry.jl +++ b/src/visualgeometry/geometry.jl @@ -43,8 +43,9 @@ abstract type AbstractPoint3D end struct Point3D{T<:Real} <: AbstractPoint3D v::SVector{3, T} end -Point3D(x::T, y::T, z::T) where T = Point3D(SVector{3, T}(x, y, z)) -Point3D() = Point3D(SVector{3, Float64}(0., 0., 0.)) +Point3D(x::T, y::T, z::T) where T = Point3D{T}(SVector{3, T}(x, y, z)) +Point3D() = Point3D{Float64}() +Point3D{T}() where T = Point3D{T}(SVector{3, T}(0, 0, 0)) nvars(::Point3D) = static(3) update(var::Point3D, updatevec, start=1) = Point3D(var.v + view(updatevec, SR(0, 2) .+ start)) @inline getvec(x::Point3D) = x.v @@ -111,13 +112,17 @@ struct EffPose3D{T<:Real} <: AbstractPose3D camcenter::Point3D{T} end EffPose3D(rx, ry, rz, cx, cy, cz) = EffPose3D(Rotation3DL(rx, ry, rz), Point3D(cx, cy, cz)) -EffPose3D(pose::Pose3D) = EffPose3D(Rotation3DL(pose.rot.m), NLLSsolver.Point3D(pose.rot.m' * -pose.trans.v)) EffPose3D() = EffPose3D(Rotation3DL(), Point3D()) nvars(::EffPose3D) = static(6) update(var::EffPose3D, updatevec, start=1) = EffPose3D(update(var.rot, updatevec, start), update(var.camcenter, updatevec, start+3)) inverse(var::EffPose3D) = Pose3D(Rotation3DR(var.rot.m'), var.camcenter) transform(pose::EffPose3D, point::AbstractPoint3D) = Point3D(pose.rot.m * (getvec(point) - pose.camcenter.v)) +# Pose conversions +Pose3D(pose::EffPose3D) = Pose3D(Rotation3DR(pose.rot.m), Point3D(pose.rot.m * -pose.camcenter.v)) +Pose3D(pose::Pose3D) = pose 
+EffPose3D(pose::Pose3D) = EffPose3D(Rotation3DL(pose.rot.m), Point3D(pose.rot.m' * -pose.trans.v))
+EffPose3D(pose::EffPose3D) = pose
 
 # Transformations on abstract types
 project(x::AbstractPoint3D) = SVector(getind(x, 1), getind(x, 2)) ./ getind(x, 3)
diff --git a/test/optimizeba.jl b/test/optimizeba.jl
new file mode 100644
index 00000000..07f2d594
--- /dev/null
+++ b/test/optimizeba.jl
@@ -0,0 +1,76 @@
+using NLLSsolver, Test, Static, StaticArrays, SparseArrays, Random, LinearAlgebra
+
+# Simple reprojection residual
+struct ProjError <: AbstractResidual
+    measurement::SVector{2, Float64} # Landmark image coordinates (x, y)
+    varind::SVector{2, Int} # Variable indices for the residual (image, landmark)
+end
+ProjError(meas, cam, lndm) = ProjError(SVector{2}(meas[1], meas[2]), SVector{2, Int}(cam, lndm))
+NLLSsolver.ndeps(::ProjError) = static(2) # Residual depends on 2 variables
+NLLSsolver.nres(::ProjError) = static(2) # Residual vector has length 2
+NLLSsolver.varindices(res::ProjError) = res.varind
+NLLSsolver.getvars(res::ProjError, vars::Vector) = vars[res.varind[1]]::EffPose3D{Float64}, vars[res.varind[2]]::Point3D{Float64}
+NLLSsolver.computeresidual(res::ProjError, pose::EffPose3D, X::Point3D) = project(pose * X) - res.measurement
+Base.eltype(::ProjError) = Float64
+
+function create_ba_problem(ncameras, nlandmarks, propvisible)
+    problem = NLLSProblem(Union{EffPose3D{Float64}, Point3D{Float64}}, ProjError)
+
+    # Generate the landmarks in a unit cube centered on the origin
+    for i = 1:nlandmarks
+        addvariable!(problem, Point3D{Float64}(rand(SVector{3, Float64}) .- 0.5))
+    end
+
+    # Generate the cameras on a unit sphere, pointing to the origin
+    for i = 1:ncameras
+        camcenter = normalize(randn(SVector{3, Float64}))
+        addvariable!(problem, EffPose3D(Rotation3DL(UnitVec3D(-camcenter).v.m), Point3D(camcenter)))
+    end
+
+    # Generate the measurements
+    visibility = sprand(ncameras, nlandmarks, propvisible)
+    for landmark = 1:nlandmarks
+        point = problem.variables[landmark]::Point3D{Float64}
+        for camera = view(visibility.rowval, visibility.colptr[landmark]:visibility.colptr[landmark+1]-1)
+            @assert camera <= ncameras
+            cam = camera + nlandmarks
+            addcost!(problem, ProjError(project(problem.variables[cam]::EffPose3D{Float64} * point), cam, landmark))
+        end
+    end
+
+    # Return the NLLSProblem
+    return problem
+end
+
+function perturb_ba_problem(problem, pointnoise, posenoise)
+    for ind in 1:lastindex(problem.variables)
+        if isa(problem.variables[ind], Point3D)
+            problem.variables[ind]::Point3D{Float64} = update(problem.variables[ind]::Point3D{Float64}, randn(SVector{3, Float64}) * pointnoise)
+        else
+            problem.variables[ind]::EffPose3D{Float64} = update(problem.variables[ind]::EffPose3D{Float64}, randn(SVector{6, Float64}) * posenoise)
+        end
+    end
+    return problem
+end
+
+@testset "optimizeba.jl" begin
+    # Generate some test data for a dense problem
+    Random.seed!(1)
+    problem = create_ba_problem(10, 100, 0.3)
+
+    # Optimize just the landmarks
+    problem = perturb_ba_problem(problem, 0.003, 0.0)
+    result = NLLSsolver.optimizesingles!(problem, NLLSOptions(), Point3D{Float64})
+    @test result.bestcost < 1.e-15
+
+    # Optimize problem
+    problem = perturb_ba_problem(problem, 0.001, 0.001)
+    result = optimize!(problem)
+    @test result.bestcost < 1.e-15
+
+    # Generate & optimize a sparse problem
+    problem = create_ba_problem(10, 400, 0.3)
+    problem = perturb_ba_problem(problem, 1.e-7, 1.e-7)
+    result = optimize!(problem)
+    @test result.bestcost < result.startcost * 1e-10
+end
diff --git a/test/runtests.jl b/test/runtests.jl
index a5e796c6..b9787617 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -8,6 +8,7 @@ using Test
     include("robust.jl")
     include("linearsolve.jl")
    include("functional.jl")
+    include("optimizeba.jl")
     include("dynamicvars.jl")
     include("nonsquaredcost.jl")
     include("adaptivecost.jl")
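
For reference, a brief usage sketch of the two entry points this patch reworks: the per-iteration callback is now passed directly to optimize!() (setting it via NLLSOptions still works but triggers the new deprecation warning), and NLLSsolver.optimizesingles!() optimizes each variable of a given type in sequence while reusing a single NLLSInternal. The snippet assumes the create_ba_problem and perturb_ba_problem helpers from test/optimizeba.jl above are in scope; the callback body, problem sizes and noise levels are purely illustrative.

    using NLLSsolver, Random
    Random.seed!(1)
    problem = perturb_ba_problem(create_ba_problem(5, 50, 0.5), 0.003, 0.003)

    # Per-iteration callback: (cost, problem, data, iteratedata) -> (cost, terminate),
    # where a non-zero terminate value ends the optimization (bit 9 of the termination flags).
    mycallback(cost, problem, data, iteratedata) = (cost, data.iternum >= 5 ? 1 : 0)
    result = optimize!(problem, NLLSOptions(), nothing, mycallback)

    # Optimize only the Point3D (landmark) variables, one variable at a time
    result = NLLSsolver.optimizesingles!(problem, NLLSOptions(), Point3D{Float64})
    println(result.bestcost)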