diff --git a/.travis.yml b/.travis.yml
index 8f85fda..a69481c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,7 @@
 language: julia
 julia:
-  - 0.4
   - 0.5
+  - 0.6
   - nightly
 notifications:
   email: false
diff --git a/REQUIRE b/REQUIRE
index 4fdfc69..78134e4 100644
--- a/REQUIRE
+++ b/REQUIRE
@@ -1,5 +1,5 @@
-julia 0.4
+julia 0.5
 Calculus
 Distributions
-Optim 0.7.5
 Compat 0.9.1
+OptimBase
diff --git a/src/LsqFit.jl b/src/LsqFit.jl
index 66dfb51..a7bd47e 100644
--- a/src/LsqFit.jl
+++ b/src/LsqFit.jl
@@ -4,10 +4,12 @@ module LsqFit
            estimate_errors,
            estimate_covar
 
-    using Optim
     using Calculus
     using Distributions
     using Compat
+    using OptimBase
+
+    import Base.summary
 
     include("levenberg_marquardt.jl")
     include("curve_fit.jl")
diff --git a/src/curve_fit.jl b/src/curve_fit.jl
index 31740c3..0227a8f 100755
--- a/src/curve_fit.jl
+++ b/src/curve_fit.jl
@@ -11,10 +11,10 @@ end
 # provide a method for those who have their own Jacobian function
 function lmfit(f::Function, g::Function, p0, wt; kwargs...)
     results = levenberg_marquardt(f, g, p0; kwargs...)
-    p = Optim.minimizer(results)
+    p = minimizer(results)
     resid = f(p)
     dof = length(resid) - length(p)
-    return LsqFitResult(dof, p, f(p), g(p), Optim.converged(results), wt)
+    return LsqFitResult(dof, p, f(p), g(p), converged(results), wt)
 end
 
 function lmfit(f::Function, p0, wt; kwargs...)
diff --git a/src/levenberg_marquardt.jl b/src/levenberg_marquardt.jl
index beddf6e..7e4420c 100644
--- a/src/levenberg_marquardt.jl
+++ b/src/levenberg_marquardt.jl
@@ -1,5 +1,5 @@
-immutable LevenbergMarquardt <: Optim.Optimizer end
-
+immutable LevenbergMarquardt <: Optimizer end
+Base.summary(::LevenbergMarquardt) = "Levenberg-Marquardt"
 """
 `levenberg_marquardt(f, g, initial_x; `
 
@@ -69,10 +69,10 @@ function levenberg_marquardt{T}(f::Function, g::Function, initial_x::AbstractVec
     n_buffer = Vector{T}(n)
 
     # Maintain a trace of the system.
-    tr = Optim.OptimizationTrace{LevenbergMarquardt}()
+    tr = OptimizationTrace{LevenbergMarquardt}()
     if show_trace
         d = Dict("lambda" => lambda)
-        os = Optim.OptimizationState{LevenbergMarquardt}(iterCt, sum(abs2, fcur), NaN, d)
+        os = OptimizationState{LevenbergMarquardt}(iterCt, sum(abs2, fcur), NaN, d)
         push!(tr, os)
         println(os)
     end
@@ -145,7 +145,7 @@ function levenberg_marquardt{T}(f::Function, g::Function, initial_x::AbstractVec
         if show_trace
             g_norm = norm(J' * fcur, Inf)
             d = Dict("g(x)" => g_norm, "dx" => delta_x, "lambda" => lambda)
-            os = Optim.OptimizationState{LevenbergMarquardt}(iterCt, sum(abs2, fcur), g_norm, d)
+            os = OptimizationState{LevenbergMarquardt}(iterCt, sum(abs2, fcur), g_norm, d)
             push!(tr, os)
             println(os)
         end
@@ -161,8 +161,8 @@ function levenberg_marquardt{T}(f::Function, g::Function, initial_x::AbstractVec
         converged = g_converged | x_converged
     end
 
-    Optim.MultivariateOptimizationResults(
-        "Levenberg-Marquardt", # method
+    MultivariateOptimizationResults(
+        LevenbergMarquardt(),  # method
         initial_x,             # initial_x
         x,                     # minimizer
         sum(abs2, fcur),       # minimum
@@ -170,10 +170,13 @@ function levenberg_marquardt{T}(f::Function, g::Function, initial_x::AbstractVec
         !converged,            # iteration_converged
         x_converged,           # x_converged
         0.0,                   # x_tol
+        0.0,
         false,                 # f_converged
         0.0,                   # f_tol
+        0.0,
         g_converged,           # g_converged
         tolG,                  # g_tol
+        0.0,
         false,                 # f_increased
         tr,                    # trace
         f_calls,               # f_calls
diff --git a/test/levenberg_marquardt.jl b/test/levenberg_marquardt.jl
index 7ce777f..aa554e6 100644
--- a/test/levenberg_marquardt.jl
+++ b/test/levenberg_marquardt.jl
@@ -9,7 +9,7 @@ let
     initial_x = [100.0, 100.0]
 
     results = LsqFit.levenberg_marquardt(f_lm, g_lm, initial_x)
-    @assert norm(Optim.minimizer(results) - [0.0, 2.0]) < 0.01
+    @assert norm(OptimBase.minimizer(results) - [0.0, 2.0]) < 0.01
 
 
     function rosenbrock_res(x, r)
@@ -36,12 +36,12 @@ let
 
     results = LsqFit.levenberg_marquardt(frb, grb, initial_xrb)
 
-    @assert norm(Optim.minimizer(results) - [1.0, 1.0]) < 0.01
+    @assert norm(OptimBase.minimizer(results) - [1.0, 1.0]) < 0.01
 
 
     # check estimate is within the bound PR #278
    result = LsqFit.levenberg_marquardt(frb, grb, [150.0, 150.0]; lower = [10.0, 10.0], upper = [200.0, 200.0])
-    @test Optim.minimizer(result)[1] >= 10.0
-    @test Optim.minimizer(result)[2] >= 10.0
+    @test OptimBase.minimizer(result)[1] >= 10.0
+    @test OptimBase.minimizer(result)[2] >= 10.0
 
 
@@ -60,7 +60,7 @@ let
     g_lsq = Calculus.jacobian(f_lsq)
 
     results = LsqFit.levenberg_marquardt(f_lsq, g_lsq, [0.5, 0.5])
-    @assert norm(Optim.minimizer(results) - [1.0, 2.0]) < 0.05
+    @assert norm(OptimBase.minimizer(results) - [1.0, 2.0]) < 0.05
 end
 
 let
@@ -84,15 +84,15 @@ let
 
     lower=[5.0, 11.0, 5.0]
     results = LsqFit.levenberg_marquardt(f_lsq, g_lsq, [15.0, 15.0, 15.0], lower=lower)
-    Optim.minimizer(results)
-    @test Optim.converged(results)
-    @test all(Optim.minimizer(results) .>= lower)
+    OptimBase.minimizer(results)
+    @test OptimBase.converged(results)
+    @test all(OptimBase.minimizer(results) .>= lower)
 
     upper=[15.0, 9.0, 15.0]
     results = LsqFit.levenberg_marquardt(f_lsq, g_lsq, [5.0, 5.0, 5.0], upper=upper)
-    Optim.minimizer(results)
-    @test Optim.converged(results)
-    @test all(Optim.minimizer(results) .<= upper)
+    OptimBase.minimizer(results)
+    @test OptimBase.converged(results)
+    @test all(OptimBase.minimizer(results) .<= upper)
 
     # tests for PR #267
     LsqFit.levenberg_marquardt(f_lsq, g_lsq, [15.0, 15.0, 15.0], show_trace=true)
diff --git a/test/runtests.jl b/test/runtests.jl
index d91792a..1ef5eef 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -2,7 +2,7 @@
 # Correctness Tests
 #
 
-using LsqFit, Optim, Base.Test, Compat
+using LsqFit, Base.Test, Compat
 
 my_tests = [ "curve_fit.jl",
              "levenberg_marquardt.jl"]
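
For reference, and not part of the patch itself: a minimal sketch of how the renamed accessors are expected to be exercised once `Optim` is replaced by `OptimBase`. The residual function, Jacobian, starting point, and tolerance below are illustrative assumptions only; `minimizer`, `converged`, and `Calculus.jacobian` mirror their use in the diff above.

    # Sketch only: assumes OptimBase provides `minimizer`/`converged` for the
    # result returned by LsqFit.levenberg_marquardt, as exercised in the tests above.
    using LsqFit, OptimBase, Calculus

    # Illustrative residual with root at p = [1.0, 1.0] (Rosenbrock-style); not from the patch.
    f_demo(p) = [p[1] - 1.0, 10.0 * (p[2] - p[1]^2)]
    g_demo = Calculus.jacobian(f_demo)   # finite-difference Jacobian, as in the tests

    results = LsqFit.levenberg_marquardt(f_demo, g_demo, [0.5, 0.5])

    @assert OptimBase.converged(results)                            # convergence flag via OptimBase
    @assert norm(OptimBase.minimizer(results) - [1.0, 1.0]) < 0.01  # minimizer via OptimBase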