
Commit

fix example
tjdiamandis committed Dec 30, 2023
1 parent f211703 commit f8dfaa5
Showing 5 changed files with 64 additions and 56 deletions.
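
The change is the same across all three documentation examples: problem data (`A`, `b`, `λ`, `F`, `d`, and scratch buffers) moves out of closures and into a NamedTuple handed to `GeNIOS.GenericSolver` via the `params` keyword, and every callback (`f`, `grad_f!`, `g`, `prox_g!`) receives that tuple as a trailing argument `p`. Below is a minimal, GeNIOS-free sketch of the pattern; the sizes and data are made up for illustration only.

```julia
using LinearAlgebra

m, n, λ = 5, 3, 0.1                      # toy sizes and data, for illustration only
A, b = randn(m, n), randn(m)
params = (; A=A, b=b, tmp=zeros(m), λ=λ)

# each callback reads its data from the trailing argument `p`
# instead of closing over globals
function f(x, p)                          # 0.5 * ||Ax - b||^2
    mul!(p.tmp, p.A, x)
    p.tmp .-= p.b
    return 0.5 * sum(abs2, p.tmp)
end
g(z, p) = p.λ * sum(abs, z)               # λ||z||₁

x = randn(n)
f(x, params), g(x, params)                # the solver forwards `params` the same way
```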
3 changes: 2 additions & 1 deletion .gitignore
@@ -10,4 +10,5 @@ dev/
/benchmarks/test*
/benchmarks/figures/
/docs/build/
/docs/src/examples/
/docs/src/examples/
/docs/src/advanced/
32 changes: 19 additions & 13 deletions docs/src/advanced/lasso.md
@@ -1,6 +1,6 @@
The source files for all examples can be found in [/examples](https://github.com/tjdiamandis/GeNIOS.jl/tree/main/examples).
```@meta
EditURL = "<unknown>/examples/advanced/lasso.jl"
EditURL = "../../../examples/advanced/lasso.jl"
```

# Lasso, Three Ways
@@ -143,23 +143,27 @@ end
Now, we define $f$, its gradient, $g$, and its proximal operator.

````@example lasso
function f(x, A, b, tmp)
params = (; A=A, b=b, tmp=zeros(m), λ=λ)
function f(x, p)
A, b, tmp = p.A, p.b, p.tmp
mul!(tmp, A, x)
@. tmp -= b
return 0.5 * sum(w->w^2, tmp)
end
f(x) = f(x, A, b, zeros(m))
function grad_f!(g, x, A, b, tmp)
function grad_f!(g, x, p)
A, b, tmp = p.A, p.b, p.tmp
mul!(tmp, A, x)
@. tmp -= b
mul!(g, A', tmp)
return nothing
end
grad_f!(g, x) = grad_f!(g, x, A, b, zeros(m))
Hf = HessianLasso(A, zeros(m))
g(z, λ) = λ*sum(x->abs(x), z)
g(z) = g(z, λ)
function prox_g!(v, z, ρ)
g(z, p) = p.λ*sum(x->abs(x), z)
function prox_g!(v, z, ρ, p)
λ = p.λ
@inline soft_threshold(x::T, κ::T) where {T <: Real} = sign(x) * max(zero(T), abs(x) - κ)
v .= soft_threshold.(z, λ/ρ)
end
@@ -171,7 +175,8 @@ Finally, we can solve the problem.
solver = GeNIOS.GenericSolver(
f, grad_f!, Hf, # f(x)
g, prox_g!, # g(z)
I, zeros(n) # M, c: Mx + z = c
I, zeros(n); # M, c: Mx + z = c
params=params
)
res = solve!(solver; options=GeNIOS.SolverOptions(relax=true, verbose=true))
rmse = sqrt(1/m*norm(A*solver.zk - b, 2)^2)
@@ -185,14 +190,15 @@ for g:
````@example lasso
using ProximalOperators
prox_func = NormL1(λ)
gp(x) = prox_func(x)
prox_gp!(v, z, ρ) = prox!(v, prox_func, z, ρ)
gp(x, p) = prox_func(x)
prox_gp!(v, z, ρ, p) = prox!(v, prox_func, z, ρ)
# We see that this gives the same result
solver = GeNIOS.GenericSolver(
f, grad_f!, Hf, # f(x)
gp, prox_gp!, # g(z)
I, zeros(n); # M, c: Mx + z = c
gp, prox_gp!, # g(z)
I, zeros(n); # M, c: Mx + z = c
params=params
)
res = solve!(solver; options=GeNIOS.SolverOptions(relax=true, verbose=true))
rmse = sqrt(1/m*norm(A*solver.zk - b, 2)^2)
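
As a stand-alone check of the two prox definitions in the lasso example above: for the ℓ1 norm, the prox with step 1/ρ is soft-thresholding at λ/ρ, so the hand-written `prox_g!` and the `ProximalOperators.jl` `NormL1` version compute the same map when given the same step. The values below are made up for illustration only.

```julia
using ProximalOperators

soft_threshold(x, κ) = sign(x) * max(zero(x), abs(x) - κ)

λ, ρ = 0.1, 2.0                           # made-up values, for illustration only
z = randn(10)

v1 = soft_threshold.(z, λ / ρ)            # prox of λ||·||₁ with step 1/ρ, by hand
v2 = similar(z)
prox!(v2, NormL1(λ), z, 1 / ρ)            # the same prox via ProximalOperators.jl

maximum(abs, v1 .- v2)                    # ≈ 0
```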
25 changes: 14 additions & 11 deletions docs/src/advanced/portfolio.md
@@ -1,6 +1,6 @@
The source files for all examples can be found in [/examples](https://github.com/tjdiamandis/GeNIOS.jl/tree/main/examples).
```@meta
EditURL = "<unknown>/examples/advanced/portfolio.jl"
EditURL = "../../../examples/advanced/portfolio.jl"
```

# Markowitz Portfolio Optimization, Three Ways
@@ -153,26 +153,28 @@ which can be solved via a one-dimensional root-finding problem (see
the appendix of [our paper]()).

````@example portfolio
params = (; F=F, d=d, μ=μ, γ=γ, tmp=zeros(k))
# f(x) = γ/2 xᵀ(FFᵀ + D)x - μᵀx
function f(x, F, d, μ, γ, tmp)
function f(x, p)
F, d, μ, γ, tmp = p.F, p.d, p.μ, p.γ, p.tmp
mul!(tmp, F', x)
qf = sum(w->w^2, tmp)
qf += sum(i->d[i]*x[i]^2, 1:n)
qf = sum(abs2, tmp)
qf += sum(i->d[i]*x[i]^2, 1:length(x))
return γ/2 * qf - dot(μ, x)
end
f(x) = f(x, F, d, μ, γ, zeros(k))
# ∇f(x) = γ(FFᵀ + D)x - μ
function grad_f!(g, x, F, d, μ, γ, tmp)
function grad_f!(g, x, p)
F, d, μ, γ, tmp = p.F, p.d, p.μ, p.γ, p.tmp
mul!(tmp, F', x)
mul!(g, F, tmp)
@. g += d*x
@. g *= γ
@. g -= μ
return nothing
end
grad_f!(g, x) = grad_f!(g, x, F, d, μ, γ, zeros(k))
# ∇²f(x) = γ(FFᵀ + D)
struct HessianMarkowitz{T, S <: AbstractMatrix{T}} <: HessianOperator
Expand All @@ -189,11 +191,11 @@ end
Hf = HessianMarkowitz(F, d, zeros(k))
# g(z) = I(z)
function g(z)
function g(z, p)
T = eltype(z)
return all(z .>= zero(T)) && abs(sum(z) - one(T)) < 1e-6 ? 0 : Inf
return all(z .>= zero(T)) && abs(sum(z) - one(T)) < 1e-6 ? zero(T) : Inf
end
function prox_g!(v, z, ρ)
function prox_g!(v, z, ρ, p)
z_max = maximum(w->abs(w), z)
l = -z_max - 1
u = z_max
@@ -215,7 +217,8 @@ end
solver = GeNIOS.GenericSolver(
f, grad_f!, Hf, # f(x)
g, prox_g!, # g(z)
I, zeros(n) # M, c: Mx + z = c
I, zeros(n); # M, c: Mx - z = c
params=params
)
res = solve!(solver)
println("Optimal value: $(round(solver.obj_val, digits=4))")
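
The portfolio example's `prox_g!` projects onto the probability simplex {z ≥ 0, 1ᵀz = 1}, which, as the text above notes, reduces to one-dimensional root-finding. Here is a minimal stand-alone sketch of that idea using bisection; it is not the package code, but the bracket mirrors the `l`, `u` initialization visible in the diff.

```julia
# Project z onto {v : v .>= 0, sum(v) == 1} by finding ν with sum(max.(z .- ν, 0)) == 1.
function project_simplex!(v, z; tol=1e-10, maxiter=200)
    z_max = maximum(abs, z)
    l, u = -z_max - 1, z_max              # bracket: h(l) ≥ 0 and h(u) = -1
    h(ν) = sum(w -> max(w - ν, zero(ν)), z) - 1
    for _ in 1:maxiter
        ν = (l + u) / 2
        h(ν) > 0 ? (l = ν) : (u = ν)      # h is nonincreasing in ν
        u - l < tol && break
    end
    @. v = max(z - (l + u) / 2, 0)
    return v
end

z = randn(6)
v = project_simplex!(similar(z), z)
sum(v), minimum(v)                        # ≈ 1.0 and ≥ 0
```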
58 changes: 28 additions & 30 deletions docs/src/advanced/signal.md
@@ -1,6 +1,6 @@
The source files for all examples can be found in [/examples](https://github.com/tjdiamandis/GeNIOS.jl/tree/main/examples).
```@meta
EditURL = "<unknown>/examples/advanced/signal.jl"
EditURL = "../../../examples/advanced/signal.jl"
```

# Signal Decomposition
@@ -101,15 +101,15 @@ parallelized.
First we define $f(x)$, which is just a quadratic.

````@example signal
f(x, T) = sum(w->w^2, x[1:T] ) / T
f(x) = f(x, T)
params = (; T=T)
function grad_f!(g, x, T)
@. g[1:T] = 2/T * x[1:T]
g[T+1:end] .= zero(eltype(x))
f(x, p) = sum(abs2, x[1:p.T] ) / p.T
function grad_f!(g, x, p)
@. g[1:p.T] = 2/p.T * x[1:p.T]
g[p.T+1:end] .= zero(eltype(x))
return nothing
end
grad_f!(g, x) = grad_f!(g, x, T)
````

The `HessianOperator` here is block diagonal, with a $T \times T$ identity block,
@@ -136,20 +136,19 @@ avoid for simplicity.
We use `ProximalOperators.jl` to help construct the proximal operator.

````@example signal
function g(z, T)
@views z1, z2, z3 = z[1:T], z[T+1:2T], z[2T+1:end]
function g(z, p)
@views z1, z2, z3 = z[1:p.T], z[p.T+1:2p.T], z[2p.T+1:end]
any(.!iszero.(z1)) && return Inf
gz2 = sum(t->(z2[t+1] - 2z[t] + z[t-1])^2, 2:T-1)
gz3 = sum(abs, diff(z3)) / (T-1)
gz2 = sum(t->(z2[t+1] - 2z[t] + z[t-1])^2, 2:p.T-1)
gz3 = sum(abs, diff(z3)) / (p.T-1)
return gz2 + gz3
end
g(z) = g(z, T)
# the prox operator for g, using ProximalOperators.jl
function prox_g!(v, z, ρ, T)
@views z1, z2, z3 = z[1:T], z[T+1:2T], z[2T+1:end]
@views v1, v2, v3 = v[1:T], v[T+1:2T], v[2T+1:end]
function prox_g!(v, z, ρ, p)
@views z1, z2, z3 = z[1:p.T], z[p.T+1:2p.T], z[2p.T+1:end]
@views v1, v2, v3 = v[1:p.T], v[p.T+1:2p.T], v[2p.T+1:end]
Threads.@threads for k in 1:3
if k == 1
@@ -158,38 +157,36 @@ function prox_g!(v, z, ρ, T)
elseif k == 2
# Prox for z2
# g²(z²) = γ₂/T * ||Az||²
du = vcat(zeros(1), ones(T-2))
d = vcat(zeros(1), -2*ones(T-2), zeros(1))
dl = vcat(ones(T-2), zeros(1))
du = vcat(zeros(1), ones(p.T-2))
d = vcat(zeros(1), -2*ones(p.T-2), zeros(1))
dl = vcat(ones(p.T-2), zeros(1))
# Use banded matrix for O(T) solve time
A = BandedMatrix(-1 => dl, 0 => d, 1 => du)
F = cholesky(I + 1e3/(ρ * T) * A'*A)
F = cholesky(I + 1e3/(ρ * p.T) * A'*A)
ldiv!(v2, F, z2)
else
# Prox for z3
ϕ³ = TotalVariation1D(1/T)
ϕ³ = TotalVariation1D(1/p.T)
prox!(v3, ϕ³, z3, 1/ρ)
end
end
return nothing
end
prox_g!(v, z, ρ) = prox_g!(v, z, ρ, T)
````

### The constraints
Note that $M$ is a highly structured matrix. We could use this fact to speed up
the operators $M$ and $M^T$, but we do not in this example.
Note that $M$ is a highly structured matrix. We use `LinearMaps.jl` to
implement this more efficiently.

````@example signal
IT = sparse(Matrix(1.0I, T, T))
_0 = spzeros(T, T)
_0 = LinearMap(spzeros(T, T))
M = [
IT IT IT;
_0 IT _0;
_0 _0 IT
]
I I I;
_0 I _0;
_0 _0 I
]
c = vcat(y, zeros(T), zeros(T));
nothing #hide
````
@@ -200,7 +197,8 @@ nothing #hide
solver = GeNIOS.GenericSolver(
f, grad_f!, Hf, # f(x)
g, prox_g!, # g(z)
M, c # M, c: Mx + z = c
M, c; # M, c: Mx + z = c
params=params
)
res = solve!(solver, options=GeNIOS.SolverOptions(eps_abs=1e-5, print_iter=100))
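
The updated constraint block in `signal.md` assembles M lazily with `LinearMaps.jl`. Below is a small stand-alone check, with a made-up T and assuming that LinearMaps.jl's block concatenation of `I` and `LinearMap` entries behaves as the example expects, that the lazy operator applies like the dense block matrix.

```julia
using LinearMaps, SparseArrays, LinearAlgebra

T = 4                                     # made-up size, for illustration only
_0 = LinearMap(spzeros(T, T))
M = [I I I; _0 I _0; _0 _0 I]             # lazy 3T × 3T block operator, as in the example

x = randn(3T)
y = M * x                                 # applied block-wise; M is never materialized

Id = Matrix(1.0I, T, T)
Md = [Id Id Id; zeros(T, T) Id zeros(T, T); zeros(T, T) zeros(T, T) Id]
maximum(abs, y - Md * x)                  # ≈ 0
```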
2 changes: 1 addition & 1 deletion examples/advanced/lasso.jl
@@ -182,7 +182,7 @@ for g:
=#
using ProximalOperators
prox_func = NormL1(λ)
gp(x) = prox_func(x)
gp(x, p) = prox_func(x)
prox_gp!(v, z, ρ, p) = prox!(v, prox_func, z, ρ)

## We see that this gives the same result
Expand Down
