From b0d6a4e9d7e531f558bd2a669ef7342170b87771 Mon Sep 17 00:00:00 2001
From: Matthijs Arnoldus
Date: Wed, 22 May 2024 16:59:52 +0200
Subject: [PATCH 1/8] Implement normal flow metaheuristic alternative
 generation

---
 Project.toml                      |   1 +
 src/MGA.jl                        |   2 +
 src/alternative-metaheuristics.jl | 325 ++++++++++++++++++++++++++++++
 src/generate-alternatives.jl      |  55 ++++-
 4 files changed, 380 insertions(+), 3 deletions(-)
 create mode 100644 src/alternative-metaheuristics.jl

diff --git a/Project.toml b/Project.toml
index 130dab0..5eee808 100644
--- a/Project.toml
+++ b/Project.toml
@@ -7,6 +7,7 @@ version = "0.1.0"
 Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
 JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
 MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
+Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"

 [compat]
 julia = "1.6"
diff --git a/src/MGA.jl b/src/MGA.jl
index 7e07bba..00feec8 100644
--- a/src/MGA.jl
+++ b/src/MGA.jl
@@ -5,9 +5,11 @@ module MGA
 using JuMP
 using Distances
 using MathOptInterface
+using Metaheuristics

 include("results.jl")
 include("alternative-optimisation.jl")
 include("generate-alternatives.jl")
+include("alternative-metaheuristics.jl")

 end
diff --git a/src/alternative-metaheuristics.jl b/src/alternative-metaheuristics.jl
new file mode 100644
index 0000000..7965fde
--- /dev/null
+++ b/src/alternative-metaheuristics.jl
@@ -0,0 +1,325 @@
+"""
+    Structure representing a problem that can be solved by Metaheuristics.jl and the algorithm to solve it.
+"""
+mutable struct MetaheuristicProblem
+  objective::Function
+  bounds::Matrix{Float64}
+  algorithm::Metaheuristics.Algorithm
+end
+
+"""
+    constraint = extract_constraint(
+      constraint::MOI.ConstraintFunction,
+      x::Vector{Float64},
+      index_map::Dict{Int64, Int64},
+      fixed_variables::Dict{VariableRef, Float64}
+    )
+
+Convert a constraint from a MathOptInterface function into a julia function of x. Supports only ScalarAffineFunction and VariableIndex constraints.
+
+# Arguments
+- `constraint::MOI.ConstraintFunction`: constraint to transform into a julia function.
+- `x::Vector{Float64}`: a vector representing an individual in the metaheuristic population.
+- `index_map::Dict{Int64, Int64}`: a dictionary mapping indices in the MathOptInterface model to indices of `x`.
+- `fixed_variables::Dict{VariableRef, Float64}`: a dictionary containing the values of the fixed variables.
+"""
+function extract_constraint(
+  constraint::MOI.ConstraintFunction,
+  x::Vector{Float64},
+  index_map::Dict{Int64, Int64},
+  fixed_variables::Dict{VariableRef, Float64},
+)
+  result = 0
+  for t in constraint.terms
+    if haskey(index_map, t.variable.value)
+      result += t.coefficient * x[index_map[t.variable.value]]
+    else
+      result += t.coefficient * fixed_variables[t]
+    end
+  end
+  return result
+end
+
+"""
+    objective = extract_objective(
+      objective::MOI.AbstractFunction,
+      x::Vector{Float64},
+      index_map::Dict{Int64, Int64},
+      fixed_variables::Dict{VariableRef, Float64}
+    )
+
+Convert the objective from a MathOptInterface function into a julia function of x. Supports only linear single-objective functions.
+
+# Arguments
+- `objective::MOI.AbstractFunction`: the objective function to transform into a julia function.
+- `x::Vector{Float64}`: a vector representing an individual in the metaheuristic population.
+- `index_map::Dict{Int64, Int64}`: a dictionary mapping indices in the MathOptInterface model to indices of `x`.
+- `fixed_variables::Dict{VariableRef, Float64}`: a dictionary containing the values of the fixed variables.
+"""
+function extract_objective(
+  objective::MOI.AbstractFunction,
+  x::Vector{Float64},
+  index_map::Dict{Int64, Int64},
+  fixed_variables::Dict{VariableRef, Float64},
+)
+  result = 0
+  if typeof(objective) == MOI.VariableIndex
+    results += x[index_map[objective.value]]
+  elseif typeof(objective) == ScalarAffineFunction
+    # Add the constant in the objective function.
+    result += MOI.constant(objective)
+    # Add all terms in the objective function with variables iteratively.
+    for t in objective.terms
+      # If variable in `index_map`, add corresponding value to the result. Else, the variable is fixed and add the original resulting variable.
+      if haskey(index_map, t.variable.value)
+        result += MOI.coefficient(t) * x[index_map[t.variable.value]]
+      else
+        result += MOI.coefficient(t) * fixed_variables[t]
+      end
+    end
+  else
+    throw(ArgumentError("Only linear single-objective functions are supported."))
+  end
+  return result
+end
+
+"""
+    objective = create_objective(
+      model::JuMP.Model,
+      solution::OrderedDict{JuMP.VariableRef, Float64},
+      optimality_gap::Float64,
+      metric::Distances.SemiMetric,
+      index_map::Dict{Int64, Int64},
+      fixed_variables::Dict{VariableRef, Float64}
+    )
+
+Create an objective function supported by Metaheuristics.jl for the alternative generating problem.
+
+# Arguments
+- `model::JuMP.Model`: solved JuMP model of the original lp problem.
+- `solution::OrderedDict{JuMP.VariableRef, Float64}`: solution value of the original lp problem excluding fixed variables.
+- `optimality_gap::Float64`: maximum difference between objective value of optimal solution and alternative solutions.
+- `metric::Distances.SemiMetric`: distance metric used to measure distance between solutions.
+- `index_map::Dict{Int64, Int64}`: dictionary mapping indices in the JuMP/MathOptInterface model to indices of `x`.
+- `fixed_variables::Dict{VariableRef, Float64}`: dictionary containing the values of the fixed variables.
+"""
+function create_objective(
+  model::JuMP.Model,
+  solution::OrderedDict{JuMP.VariableRef, Float64},
+  optimality_gap::Float64,
+  metric::Distances.SemiMetric,
+  index_map::Dict{Int64, Int64},
+  fixed_variables::Dict{VariableRef, Float64},
+)
+  original_objective = JuMP.objective_function(model)
+  solution_values = collect(Float64, values(solution))
+  # Compute objective value of original LP.
+  original_objective_value =
+    extract_objective(original_objective, solution_values, index_map, fixed_variables)
+  # Obtain all constraints from model (including variable bounds).
+  constraints = JuMP.all_constraints(model, include_variable_in_set_constraints = true)
+  constraint_functions = map(c -> MOI.get(m, MOI.ConstraintFunction(), c), constraints)
+  constraint_sets = map(c -> MOI.get(m, MOI.ConstraintSet(), c), constraints)
+
+  function f(x)
+    # Objective function for metaheuristic (= distance between individual x and solution values of original LP). Solution_values does not contain fixed_variables; these are not required in the objective as the distance for these variables is zero.
+    fx = [-Distances.evaluate(metric, x, solution_values)]
+    # Initialise set of inequality constraints and add objective gap constraint.
+    gx = Vector{Float64}(undef, 0)
+    # Add objective gap constraint depending on whether original LP is maximised or minimised.
+    if JuMP.objective_sense(model) == JuMP.MAX_SENSE
+      push!(
+        gx,
+        original_objective_value * (1 - optimality_gap * sign(original_objective_value)) -
+        extract_objective(original_objective, x, index_map, fixed_variables),
+      )
+    else
+      push!(
+        gx,
+        extract_objective(original_objective, x, index_map, fixed_variables) -
+        original_objective_value * (1 + optimality_gap * sign(original_objective_value)),
+      )
+    end
+    # Initialise set of equality constraints.
+    hx = Vector{Float64}(undef, 0)
+
+    for i in eachindex(constraint_functions)
+      c_fun = constraint_functions[i]
+      c_set = constraint_sets[i]
+
+      # Check if constraint involves multiple variables or if it is a bound.
+      if isa(c_fun, MathOptInterface.ScalarAffineFunction)
+        # Add constraint to gx or hx, depending on equality or inequality.
+        if isa(c_set, MOI.LessThan)
+          resulting_constraint =
+            extract_constraint(c_fun, x, index_map, fixed_variables) - MOI.constant(c_set)
+          push!(gx, resulting_constraint)
+        elseif isa(c_set, MOI.GreaterThan)
+          resulting_constraint =
+            MOI.constant(c_set) - extract_constraint(c_fun, x, index_map, fixed_variables)
+          push!(gx, resulting_constraint)
+        elseif isa(c_set, MOI.EqualTo)
+          resulting_constraint =
+            extract_constraint(c_fun, x, index_map, fixed_variables) - MOI.constant(c_set)
+          push!(hx, resulting_constraint)
+        elseif isa(c_set, MOI.Interval)
+          constraint = extract_constraint(c_fun, x, index_map, fixed_variables)
+          push!(gx, constraint - c_set.upper)
+          push!(gx, c_set.lower - constraint)
+        end
+      elseif isa(c_fun, MathOptInterface.VariableIndex)
+        # Skip variable if it is fixed.
+        if !haskey(index_map, c_fun.value)
+          continue
+        end
+        # Add bounds to gx or hx, depending on equality or inequality.
+        if isa(c_set, MOI.LessThan)
+          push!(gx, x[index_map[c_fun.value]] - MOI.constant(c_set))
+        elseif isa(c_set, MOI.GreaterThan)
+          push!(gx, MOI.constant(c_set) - x[index_map[c_fun.value]])
+        elseif isa(c_set, MOI.EqualTo)
+          push!(hx, x[index_map[c_fun.value]] - MOI.constant(c_set))
+        elseif isa(c_set, MOI.Interval)
+          push!(gx, x[index_map[c_fun.value]] - c_set.upper)
+          push!(gx, c_set.lower - x[index_map[c_fun.value]])
+        end
+      else
+        throw(ArgumentError("Only linear non-vector constraints are supported."))
+      end
+    end
+
+    return fx, gx, hx
+  end
+
+  return f
+end
+
+"""
+    bounds = extract_bounds(
+      model::JuMP.Model,
+      index_map::Dict{Int64, Int64}
+    )
+
+Transform the bounds from a JuMP Model into a matrix of bounds readable by Metaheuristics.jl.
+
+# Arguments
+- `model::JuMP.Model`: solved JuMP model of the original lp problem.
+- `index_map::Dict{Int64, Int64}`: dictionary mapping indices in the JuMP/MathOptInterface model to indices of `x`.
+"""
+function extract_bounds(model::JuMP.Model, index_map::Dict{Int64, Int64})
+  # Initialise bound matrix with all variables between -Inf and Inf.
+  bounds = zeros((2, n_variables))
+  for i = 1:n_variables
+    bounds[1, i] = -Inf
+    bounds[2, i] = Inf
+  end
+
+  # Obtain all constraints from the model.
+  constraints = all_constraints(model, include_variable_in_set_constraints = true)
+
+  for c in constraints
+    c_fun = MOI.get(model, MOI.ConstraintFunction(), c)
+    # Check if constraint is a bound. In that case add upper and lower bounds depending on type of constraint.
+    if isa(c_fun, MOI.VariableIndex) && haskey(index_map, c_fun.value)
+      c_set = MOI.get(model, MOI.ConstraintSet(), c)
+      if isa(c_set, MOI.LessThan)
+        bounds[2, index_map[c_fun.value]] = MOI.constant(c_set)
+      elseif isa(c_set, MOI.GreaterThan)
+        bounds[1, index_map[c_fun.value]] = MOI.constant(c_set)
+      elseif isa(c_set, MOI.EqualTo)
+        bounds[1, index_map[c_fun.value]] = MOI.constant(c_set)
+        bounds[2, index_map[c_fun.value]] = MOI.constant(c_set)
+
+      elseif isa(c_set, MOI.Interval)
+        bounds[1, index_map[c_fun.value]] = c_set.lower
+        bounds[2, index_map[c_fun.value]] = c_set.upper
+      end
+    end
+  end
+
+  return bounds
+end
+
+"""
+    problem = create_alternative_generating_problem(
+      model::JuMP.Model,
+      algorithm::Metaheuristics.Algorithm,
+      initial_solution::OrderedDict{VariableRef, Float64},
+      optimality_gap::Float64,
+      metric::Distances.SemiMetric,
+      fixed_variables::Dict{VariableRef, Float64}
+    )
+
+Create the Metaheuristic problem representing the alternative generating problem for the original LP.
+
+# Arguments:
+- `model::JuMP.Model`: JuMP model representing the original LP.
+- `algorithm::Metaheuristics.Algorithm`: Metaheuristic algorithm to solve the alternative generating problem.
+- `initial_solution::OrderedDict{VariableRef, Float64}`: (near-)optimal solution to `model`, for which alternatives are sought.
+- `optimality_gap::Float64`: maximum gap in objective value between `initial_solution` and alternative solutions.
+- `metric::Distances.SemiMetric`: distance metric used to compute distance between alternative solutions and `initial_solution`.
+- `fixed_variables::Dict{VariableRef, Float64}`: solution values for fixed variables of the original problem.
+"""
+function create_alternative_generating_problem(
+  model::JuMP.Model,
+  algorithm::Metaheuristics.Algorithm,
+  initial_solution::OrderedDict{VariableRef, Float64},
+  optimality_gap::Float64,
+  metric::Distances.SemiMetric,
+  fixed_variables::Dict{VariableRef, Float64},
+)
+  # Create map between variable indices in JuMP model and indices in individuals of Metaheuristic problem.
+  index_map = Dict{Int64, Int64}()
+  for i in eachindex(collect(keys(sol)))
+    index_map[collect(keys(sol))[i].index.value] = i
+  end
+
+  objective =
+    create_objective(model, initial_solution, optimality_gap, metric, index_map, fixed_variables)
+  bounds = extract_bounds(model, index_map)
+  # TODO: Initialise initial_population
+
+  return MetaheuristicProblem(objective, bounds, algorithm)
+end
+
+"""
+    add_solution!(
+      problem::MetaheuristicProblem,
+      result::Metaheuristics.State,
+      metric::Distances.SemiMetric
+    )
+
+Modify a Metaheuristic problem representing the alternative generating problem for the original LP using a newly found alternative solution. This function can be used when one wants to iteratively run a metaheuristic to find alternative solutions one by one.
+
+# Arguments:
+- `problem::MetaheuristicProblem`: problem to be modified by adding a solution.
+- `result::Metaheuristics.State`: result containing the optimal solution to add to the objective function.
+- `metric::Distances.SemiMetric`: metric used to evaluate distance between alternatives.
+"""
+function add_solution!(
+  problem::MetaheuristicProblem,
+  result::Metaheuristics.State,
+  metric::Distances.SemiMetric,
+)
+  # Create new objective function using old objective function.
+  objective = problem.objective
+  solution = minimizer(result)
+  function f(x)
+    f_old, gx, hx = objective(x)
+    fx = [f_old[1] - Distances.evaluate(metric, solution, x)]
+    return fx, gx, hx
+  end
+  problem.objective = f
+end
+
+"""
+    result = run_alternative_generating_problem!(
+      problem::MetaheuristicProblem
+    )
+
+Optimise the `problem` using the specified metaheuristic algorithm and return the result.
+"""
+function run_alternative_generating_problem!(problem::MetaheuristicProblem)
+  result = Metaheuristics.optimize(problem.objective, problem.bounds, problem.algorithm)
+  return result
+end
diff --git a/src/generate-alternatives.jl b/src/generate-alternatives.jl
index acd8354..b994f48 100644
--- a/src/generate-alternatives.jl
+++ b/src/generate-alternatives.jl
@@ -1,4 +1,4 @@
-export generate_alternatives
+export generate_alternatives!, generate_alternatives

 """
     result = generate_alternatives!(
@@ -38,7 +38,7 @@ function generate_alternatives!(
   @info "Creating model for generating alternatives."
   create_alternative_generating_problem!(model, optimality_gap, metric, fixed_variables)
   @info "Solving model."
-  optimize!(model)
+  JuMP.optimize!(model)
   @info "Solution #1/$n_alternatives found." solution_summary(model)
   update_solutions!(result, model)

@@ -47,10 +47,59 @@ function generate_alternatives!(
     @info "Reconfiguring model for generating alternatives."
     add_solution!(model, metric)
     @info "Solving model."
-    optimize!(model)
+    JuMP.optimize!(model)
     @info "Solution #$i/$n_alternatives found." solution_summary(model)
     update_solutions!(result, model)
   end
   return result
 end
+
+function generate_alternatives(
+  model::JuMP.Model,
+  optimality_gap::Float64,
+  n_alternatives::Int64,
+  metaheuristic_algorithm::Metaheuristics.Algorithm;
+  metric::Distances.SemiMetric = SqEuclidean(),
+  fixed_variables::Vector{VariableRef} = VariableRef[],
+)
+  if !is_solved_and_feasible(model)
+    throw(ArgumentError("JuMP model has not been solved."))
+  elseif optimality_gap < 0
+    throw(ArgumentError("Optimality gap (= $optimality_gap) should be at least 0."))
+  elseif n_alternatives < 1
+    throw(ArgumentError("Number of alternatives (= $n_alternatives) should be at least 1."))
+  end
+
+  @info "Setting up MGA problem and solver."
+  # Obtain the solution values for all variables, separated in fixed and non-fixed variables.
+  initial_solution = OrderedDict()
+  fixed_variable_solutions = Dict()
+  for v in all_variables(model)
+    if v in fixed_variables
+      fixed_variable_solutions[v] = value(v)
+    else
+      initial_solution[v] = value(v)
+    end
+  end
+
+  problem = create_alternative_generating_problem(
+    model,
+    metaheuristic_algorithm,
+    initial_solution,
+    optimality_gap,
+    metric,
+    fixed_variables,
+  )
+  @info "Solving MGA problem."
+  result = run_alternative_generating_problem!(problem)
+  @info "Solution #1/$n_solutions found." result minimizer(result)
+
+  for i = 2:n_solutions
+    @info "Reconfiguring MGA problem with new solution."
+    add_solution!(problem, result, metric)
+    @info "Solving MGA problem."
+    result = run_alternative_generating_problem!(problem)
+    @info "Solution #$i/$n_solutions found." result minimizer(result)
+  end
+end

From 5e38332a66bdf902b5cd21446ecb7781763f350f Mon Sep 17 00:00:00 2001
From: Matthijs Arnoldus
Date: Fri, 24 May 2024 16:00:44 +0200
Subject: [PATCH 2/8] Test and polish mga using metaheuristics

---
 Project.toml                            |   1 +
 src/MGA.jl                              |   1 +
 src/alternative-metaheuristics.jl       |  71 +++---
 src/generate-alternatives.jl            |  48 +++-
 src/results.jl                          |  44 ++++
 test/Project.toml                       |   2 +
 test/runtests.jl                        |   2 +
 test/test-alternative-metaheuristics.jl | 324 ++++++++++++++++++++++++
 test/test-alternative-optimisation.jl   |   8 +-
 test/test-generate-alternatives.jl      | 164 +++++++++++-
 10 files changed, 603 insertions(+), 62 deletions(-)
 create mode 100644 test/test-alternative-metaheuristics.jl

diff --git a/Project.toml b/Project.toml
index 5eee808..5e756d5 100644
--- a/Project.toml
+++ b/Project.toml
@@ -4,6 +4,7 @@ authors = ["Matthijs Arnoldus and contributors"]
 version = "0.1.0"

 [deps]
+DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
 Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
 JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
 MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
diff --git a/src/MGA.jl b/src/MGA.jl
index 00feec8..cfe39b1 100644
--- a/src/MGA.jl
+++ b/src/MGA.jl
@@ -6,6 +6,7 @@ using JuMP
 using Distances
 using MathOptInterface
 using Metaheuristics
+using DataStructures

 include("results.jl")
 include("alternative-optimisation.jl")
diff --git a/src/alternative-metaheuristics.jl b/src/alternative-metaheuristics.jl
index 7965fde..3e7ccce 100644
--- a/src/alternative-metaheuristics.jl
+++ b/src/alternative-metaheuristics.jl
@@ -12,7 +12,7 @@ end
     constraint::MOI.ConstraintFunction,
     x::Vector{Float64},
     index_map::Dict{Int64, Int64},
-    fixed_variables::Dict{VariableRef, Float64}
+    fixed_variables::Dict{MOI.VariableIndex, Float64}
   )

 Convert a constraint from a MathOptInterface function into a julia function of x. Supports only ScalarAffineFunction and VariableIndex constraints.
@@ -21,20 +21,20 @@ Convert a constraint from a MathOptInterface function into a julia function of x
 - `constraint::MOI.ConstraintFunction`: constraint to transform into a julia function.
 - `x::Vector{Float64}`: a vector representing an individual in the metaheuristic population.
 - `index_map::Dict{Int64, Int64}`: a dictionary mapping indices in the MathOptInterface model to indices of `x`.
-- `fixed_variables::Dict{VariableRef, Float64}`: a dictionary containing the values of the fixed variables.
+- `fixed_variables::Dict{MOI.VariableIndex, Float64}`: a dictionary containing the values of the fixed variables.
 """
 function extract_constraint(
-  constraint::MOI.ConstraintFunction,
+  constraint::MOI.ScalarAffineFunction,
   x::Vector{Float64},
   index_map::Dict{Int64, Int64},
-  fixed_variables::Dict{VariableRef, Float64},
+  fixed_variables::Dict{MOI.VariableIndex, Float64},
 )
   result = 0
   for t in constraint.terms
     if haskey(index_map, t.variable.value)
       result += t.coefficient * x[index_map[t.variable.value]]
     else
-      result += t.coefficient * fixed_variables[t]
+      result += t.coefficient * fixed_variables[t.variable]
     end
   end
   return result
@@ -42,43 +42,37 @@ end

 """
     objective = extract_objective(
-      objective::MOI.AbstractFunction,
+      objective::JuMP.AffExpr,
       x::Vector{Float64},
       index_map::Dict{Int64, Int64},
-      fixed_variables::Dict{VariableRef, Float64}
+      fixed_variables::Dict{MOI.VariableIndex, Float64}
     )

 Convert the objective from a MathOptInterface function into a julia function of x. Supports only linear single-objective functions.

 # Arguments
-- `objective::MOI.AbstractFunction`: the objective function to transform into a julia function.
+- `objective::JuMP.AffExpr`: the objective function to transform into a julia function.
 - `x::Vector{Float64}`: a vector representing an individual in the metaheuristic population.
 - `index_map::Dict{Int64, Int64}`: a dictionary mapping indices in the MathOptInterface model to indices of `x`.
-- `fixed_variables::Dict{VariableRef, Float64}`: a dictionary containing the values of the fixed variables.
+- `fixed_variables::Dict{MOI.VariableIndex, Float64}`: a dictionary containing the values of the fixed variables.
 """
 function extract_objective(
-  objective::MOI.AbstractFunction,
+  objective::JuMP.AffExpr,
   x::Vector{Float64},
   index_map::Dict{Int64, Int64},
-  fixed_variables::Dict{VariableRef, Float64},
+  fixed_variables::Dict{MOI.VariableIndex, Float64},
 )
   result = 0
-  if typeof(objective) == MOI.VariableIndex
-    results += x[index_map[objective.value]]
-  elseif typeof(objective) == ScalarAffineFunction
-    # Add the constant in the objective function.
-    result += MOI.constant(objective)
-    # Add all terms in the objective function with variables iteratively.
-    for t in objective.terms
-      # If variable in `index_map`, add corresponding value to the result. Else, the variable is fixed and add the original resulting variable.
-      if haskey(index_map, t.variable.value)
-        result += MOI.coefficient(t) * x[index_map[t.variable.value]]
-      else
-        result += MOI.coefficient(t) * fixed_variables[t]
-      end
+  # Add the constant in the objective function.
+  result += objective.constant
+  # Add all terms in the objective function with variables iteratively.
+  for (var, coef) in objective.terms
+    # If variable in `index_map`, add corresponding value to the result. Else, the variable is fixed and add the original resulting variable.
+    if haskey(index_map, var.index.value)
+      result += coef * x[index_map[var.index.value]]
+    else
+      result += coef * fixed_variables[var.index]
     end
-  else
-    throw(ArgumentError("Only linear single-objective functions are supported."))
   end
   return result
 end
@@ -109,7 +103,7 @@ function create_objective(
   optimality_gap::Float64,
   metric::Distances.SemiMetric,
   index_map::Dict{Int64, Int64},
-  fixed_variables::Dict{VariableRef, Float64},
+  fixed_variables::Dict{MOI.VariableIndex, Float64},
 )
   original_objective = JuMP.objective_function(model)
   solution_values = collect(Float64, values(solution))
@@ -118,8 +112,8 @@ function create_objective(
     extract_objective(original_objective, solution_values, index_map, fixed_variables)
   # Obtain all constraints from model (including variable bounds).
   constraints = JuMP.all_constraints(model, include_variable_in_set_constraints = true)
-  constraint_functions = map(c -> MOI.get(m, MOI.ConstraintFunction(), c), constraints)
-  constraint_sets = map(c -> MOI.get(m, MOI.ConstraintSet(), c), constraints)
+  constraint_functions = map(c -> MOI.get(model, MOI.ConstraintFunction(), c), constraints)
+  constraint_sets = map(c -> MOI.get(model, MOI.ConstraintSet(), c), constraints)

   function f(x)
     # Objective function for metaheuristic (= distance between individual x and solution values of original LP). Solution_values does not contain fixed_variables; these are not required in the objective as the distance for these variables is zero.
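For readers not familiar with the convention that `create_objective` targets here: Metaheuristics.jl treats an objective returning a triple (fx, gx, hx) as a constrained problem, feasible when all entries of gx are <= 0 and all entries of hx are == 0. A minimal standalone sketch of that convention — not part of this patch, using the same "square" model and PSO settings as the tests below — looks roughly like this:

using Metaheuristics

# Maximise the squared distance to the optimum (1.0, 1.0) of the square model,
# subject to x_1 + x_2 >= 1.8 (a 10% objective gap on the optimum 2.0).
function f(x)
  fx = [-((x[1] - 1.0)^2 + (x[2] - 1.0)^2)] # Minimise the negative distance.
  gx = [1.8 - (x[1] + x[2])]                # Inequality constraint: feasible when gx .<= 0.
  hx = [0.0]                                # No equality constraints; kept non-empty.
  return fx, gx, hx
end

bounds = [0.0 0.0; 1.0 1.0] # Row 1: lower bounds, row 2: upper bounds.
result = Metaheuristics.optimize(f, bounds, Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8))
minimizer(result) # Expected near (1.0, 0.8) or (0.8, 1.0).

The hunk below adds exactly the `hx` non-emptiness workaround sketched above to the generated objective.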
@@ -188,6 +182,11 @@ function create_objective(
       end
     end

+    # hx should contain 0.0 if there are no equality constraints.
+    if isempty(hx)
+      push!(hx, 0.0)
+    end
+
     return fx, gx, hx
   end

@@ -208,7 +207,8 @@ Transform the bounds from a JuMP Model into a matrix of bounds readable by Metah
 """
 function extract_bounds(model::JuMP.Model, index_map::Dict{Int64, Int64})
   # Initialise bound matrix with all variables between -Inf and Inf.
-  bounds = zeros((2, n_variables))
+  n_variables = length(index_map)
+  bounds = zeros(Float64, (2, n_variables))
   for i = 1:n_variables
     bounds[1, i] = -Inf
     bounds[2, i] = Inf
@@ -258,7 +258,7 @@ Create the Metaheuristic problem representing the alternative generating problem
 - `initial_solution::OrderedDict{VariableRef, Float64}`: (near-)optimal solution to `model`, for which alternatives are sought.
 - `optimality_gap::Float64`: maximum gap in objective value between `initial_solution` and alternative solutions.
 - `metric::Distances.SemiMetric`: distance metric used to compute distance between alternative solutions and `initial_solution`.
-- `fixed_variables::Dict{VariableRef, Float64}`: solution values for fixed variables of the original problem.
+- `fixed_variables::Dict{MOI.VariableIndex, Float64}`: solution values for fixed variables of the original problem.
 """
 function create_alternative_generating_problem(
   model::JuMP.Model,
@@ -266,18 +266,19 @@ function create_alternative_generating_problem(
   initial_solution::OrderedDict{VariableRef, Float64},
   optimality_gap::Float64,
   metric::Distances.SemiMetric,
-  fixed_variables::Dict{VariableRef, Float64},
+  fixed_variables::Dict{MOI.VariableIndex, Float64},
 )
   # Create map between variable indices in JuMP model and indices in individuals of Metaheuristic problem.
   index_map = Dict{Int64, Int64}()
-  for i in eachindex(collect(keys(sol)))
-    index_map[collect(keys(sol))[i].index.value] = i
+  k = collect(keys(initial_solution))
+  for i in eachindex(k)
+    index_map[k[i].index.value] = i
   end

   objective =
     create_objective(model, initial_solution, optimality_gap, metric, index_map, fixed_variables)
   bounds = extract_bounds(model, index_map)
-  # TODO: Initialise initial_population
+  # Possible TODO: Initialise initial_population

   return MetaheuristicProblem(objective, bounds, algorithm)
 end
diff --git a/src/generate-alternatives.jl b/src/generate-alternatives.jl
index b994f48..b91be80 100644
--- a/src/generate-alternatives.jl
+++ b/src/generate-alternatives.jl
@@ -9,7 +9,7 @@ export generate_alternatives!, generate_alternatives
     selected_variables::Vector{VariableRef} = []
   )

-Generate `n_alternatives` solutions to `model` which are as distant from the optimum and each other, but with a maximum `optimality_gap`.
+Generate `n_alternatives` solutions to `model` which are as distant as possible from the optimum and from each other, but with a maximum `optimality_gap`, using optimisation.

 # Arguments
 - `model::JuMP.Model`: a solved JuMP model for which alternatives are generated.
@@ -55,6 +55,26 @@ function generate_alternatives!(
   return result
 end

+"""
+    result = generate_alternatives(
+      model::JuMP.Model,
+      optimality_gap::Float64,
+      n_alternatives::Int64,
+      metaheuristic_algorithm::Metaheuristics.Algorithm;
+      metric::Distances.Metric = SqEuclidean(),
+      fixed_variables::Vector{VariableRef} = []
+    )
+
+Generate `n_alternatives` solutions to `model` which are as distant as possible from the optimum and from each other, but with a maximum `optimality_gap`, using a metaheuristic algorithm.
+
+# Arguments
+- `model::JuMP.Model`: a solved JuMP model for which alternatives are generated.
+- `optimality_gap::Float64`: the maximum percentage deviation (>=0) an alternative may have compared to the optimal solution.
+- `n_alternatives`: the number of alternative solutions sought.
+- `metaheuristic_algorithm::Metaheuristics.Algorithm`: algorithm used to search for alternative solutions.
+- `metric::Distances.Metric=SqEuclidean()`: the metric used to maximise the difference between alternatives and the optimal solution.
+- `fixed_variables::Vector{VariableRef}=[]`: a subset of all variables of `model` that are not allowed to be changed when seeking alternatives.
+"""
 function generate_alternatives(
   model::JuMP.Model,
   optimality_gap::Float64,
@@ -71,13 +91,15 @@ function generate_alternatives(
     throw(ArgumentError("Number of alternatives (= $n_alternatives) should be at least 1."))
   end

+  result = AlternativeSolutions([], [])
+
   @info "Setting up MGA problem and solver."
   # Obtain the solution values for all variables, separated in fixed and non-fixed variables.
-  initial_solution = OrderedDict()
-  fixed_variable_solutions = Dict()
+  initial_solution = OrderedDict{VariableRef, Float64}()
+  fixed_variable_solutions = Dict{MOI.VariableIndex, Float64}()
   for v in all_variables(model)
     if v in fixed_variables
-      fixed_variable_solutions[v] = value(v)
+      fixed_variable_solutions[v.index] = value(v)
     else
       initial_solution[v] = value(v)
     end
@@ -89,17 +111,21 @@ function generate_alternatives(
     initial_solution,
     optimality_gap,
     metric,
-    fixed_variables,
+    fixed_variable_solutions,
   )
   @info "Solving MGA problem."
-  result = run_alternative_generating_problem!(problem)
-  @info "Solution #1/$n_solutions found." result minimizer(result)
+  state = run_alternative_generating_problem!(problem)
+  @info "Solution #1/$n_alternatives found." state minimizer(state)
+  update_solutions!(result, state, initial_solution, fixed_variable_solutions, model)

-  for i = 2:n_solutions
+  for i = 2:n_alternatives
     @info "Reconfiguring MGA problem with new solution."
-    add_solution!(problem, result, metric)
+    add_solution!(problem, state, metric)
     @info "Solving MGA problem."
-    result = run_alternative_generating_problem!(problem)
-    @info "Solution #$i/$n_solutions found." result minimizer(result)
+    state = run_alternative_generating_problem!(problem)
+    @info "Solution #$i/$n_alternatives found." state minimizer(state)
+    update_solutions!(result, state, initial_solution, fixed_variable_solutions, model)
   end
+
+  return result
 end
diff --git a/src/results.jl b/src/results.jl
index b31918d..8c9395a 100644
--- a/src/results.jl
+++ b/src/results.jl
@@ -30,3 +30,47 @@ function update_solutions!(results::AlternativeSolutions, model::JuMP.Model)
   end
   push!(results.objective_values, value(original_objective))
 end
+
+"""
+    update_solutions!(results::AlternativeSolutions, state::Metaheuristics.State, initial_solution::OrderedDict{VariableRef, Float64}, fixed_variables::Dict{MOI.VariableIndex, Float64}, model::JuMP.Model)
+
+Update the set of results `AlternativeSolutions` with the variable values obtained when solving using Metaheuristics.
+
+# Arguments
+- `results::AlternativeSolutions`: set of solutions to add a new solution to.
+- `state::Metaheuristics.State`: contains the results of the metaheuristic solve.
+- `initial_solution::OrderedDict{VariableRef, Float64}`: used to identify the indices of the metaheuristic solution in the JuMP model.
+- `fixed_variables::Dict{MOI.VariableIndex, Float64}`: set of fixed variables and their solution values.
+- `model::JuMP.Model`: original model for which alternative solutions are found.
+""" +function update_solutions!( + results::AlternativeSolutions, + state::Metaheuristics.State, + initial_solution::OrderedDict{VariableRef, Float64}, + fixed_variables::Dict{MOI.VariableIndex, Float64}, + model::JuMP.Model, +) + if !state.stop + throw(ErrorException("Metaheuristic state `state` not terminated when trying to read results.")) + end + + best_solution = minimizer(state) + solution = Dict{VariableRef, Float64}() + index_map = Dict{Int64, Int64}() + # Add all new results + for (i, (k, _)) in enumerate(initial_solution) + solution[k] = best_solution[i] + index_map[k.index.value] = i + end + # Add values of fixed variables. + for (k, v) in fixed_variables + solution[JuMP.VariableRef(model, k)] = v + end + + push!(results.solutions, solution) + + # Retrieve objective value to original problem. + objective_value = + extract_objective(JuMP.objective_function(model), best_solution, index_map, fixed_variables) + push!(results.objective_values, objective_value) +end diff --git a/test/Project.toml b/test/Project.toml index 4e76032..d355773 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -1,6 +1,8 @@ [deps] +DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" JuMP = "4076af6c-e467-56ae-b986-b466b2749572" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" +Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/test/runtests.jl b/test/runtests.jl index f10faaa..30edf0c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -4,6 +4,8 @@ using Ipopt using JuMP using Distances using MathOptInterface +using Metaheuristics +using DataStructures for file in readdir(@__DIR__) if !startswith("test-")(file) diff --git a/test/test-alternative-metaheuristics.jl b/test/test-alternative-metaheuristics.jl new file mode 100644 index 0000000..fb277d9 --- /dev/null +++ b/test/test-alternative-metaheuristics.jl @@ -0,0 +1,324 @@ +@testset "Test constraint extraction" begin + @testset "Test that regular constraint is converted correctly" begin + # Initialise model + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + @variable(model, 0 ≤ x_1 ≤ 1) + @variable(model, 0 ≤ x_2 ≤ 1) + @constraint(model, x_1 - 2 * x_2 ≥ 1) + @objective(model, Max, x_1 + x_2) + # Obtain constraint + constraint = JuMP.all_constraints(model, include_variable_in_set_constraints = true)[1] + index_map = Dict{Int64, Int64}() + index_map[1] = 1 + index_map[2] = 2 + # Result should be x_1 - 2 * x_2, which is equal to -3.0 for x_1 = 1.0 and x_2 = 2.0. + @test MGA.extract_constraint( + MOI.get(model, MOI.ConstraintFunction(), constraint), + [1.0, 2.0], + index_map, + Dict{MOI.VariableIndex, Float64}(), + ) == -3.0 + end + + @testset "Test that constraint with fixed variable is converted correctly" begin + # Initialise model + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + @variable(model, 0 ≤ x_1 ≤ 1) + @variable(model, 0 ≤ x_2 ≤ 1) + @constraint(model, x_1 - 2 * x_2 ≥ 1) + @objective(model, Max, x_1 + x_2) + # Obtain constraint + constraint = JuMP.all_constraints(model, include_variable_in_set_constraints = true)[1] + index_map = Dict{Int64, Int64}() + index_map[1] = 1 + fixed_variables = Dict{MOI.VariableIndex, Float64}() + fixed_variables[x_2.index] = 2.0 + # Result should be x_1 - 2 * x_2, which is equal to -3.0 for x_1 = 1.0 and x_2 = 2.0. 
+ @test MGA.extract_constraint( + MOI.get(model, MOI.ConstraintFunction(), constraint), + [1.0], + index_map, + fixed_variables, + ) == -3.0 + end +end + +@testset "Test objective extraction" begin + @testset "Test that regular objective is converted correctly" begin + # Initialise model + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + @variable(model, 0 ≤ x_1 ≤ 1) + @variable(model, 0 ≤ x_2 ≤ 1) + @objective(model, Max, x_1 + x_2) + # Obtain objective + objective = JuMP.objective_function(model, AffExpr) + + index_map = Dict{Int64, Int64}() + index_map[1] = 1 + index_map[2] = 2 + # Result should be x_1 + x_2, which is equal to 3.0 for x_1 = 1.0 and x_2 = 2.0. + @test MGA.extract_objective( + objective, + [1.0, 2.0], + index_map, + Dict{MOI.VariableIndex, Float64}(), + ) == 3.0 + end + + @testset "Test that objective with fixed variable is converted correctly" begin + # Initialise model + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + @variable(model, 0 ≤ x_1 ≤ 1) + @variable(model, 0 ≤ x_2 ≤ 1) + @objective(model, Max, x_1 + x_2) + # Obtain objective + objective = JuMP.objective_function(model, AffExpr) + index_map = Dict{Int64, Int64}() + index_map[1] = 1 + fixed_variables = Dict{MOI.VariableIndex, Float64}() + fixed_variables[x_2.index] = 2.0 + # Result should be x_1 + x_2, which is equal to 3.0 for x_1 = 1.0 and x_2 = 2.0. + @test MGA.extract_objective(objective, [1.0, 2.0], index_map, fixed_variables) == 3.0 + end +end + +@testset "Test bound extraction" begin + @testset "Test less than bounds" begin + # Initialise model + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + @variable(model, x_1 ≤ 1) + @objective(model, Max, x_1) + index_map = Dict{Int64, Int64}() + index_map[1] = 1 + # Result should be a matrix with lower bound -Inf and upper bound 1. + result = zeros(Float64, (2, 1)) + result[1, 1] = -Inf + result[2, 1] = 1 + @test MGA.extract_bounds(model, index_map) == result + end + @testset "Test greater than bounds" begin + # Initialise model + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + @variable(model, x_1 ≥ 1) + @objective(model, Max, x_1) + index_map = Dict{Int64, Int64}() + index_map[1] = 1 + # Result should be a matrix with lower bound -Inf and upper bound 1. + result = zeros(Float64, (2, 1)) + result[1, 1] = 1 + result[2, 1] = Inf + @test MGA.extract_bounds(model, index_map) == result + end + @testset "Test interval bounds" begin + # Initialise model + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + @variable(model, 0 ≤ x_1 ≤ 1) + @objective(model, Max, x_1) + index_map = Dict{Int64, Int64}() + index_map[1] = 1 + # Result should be a matrix with lower bound -Inf and upper bound 1. + result = zeros(Float64, (2, 1)) + result[1, 1] = 0 + result[2, 1] = 1 + @test MGA.extract_bounds(model, index_map) == result + end +end + +@testset "Test creating objective function for metaheuristic" begin + @testset "Test simple problem with all types of variable bounds" begin + # Initialise model. + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + @variable(model, x_1 ≤ 1) + @variable(model, 0 ≤ x_2) + @variable(model, 0 ≤ x_3 ≤ 1) + @objective(model, Max, x_1 - x_2 + x_3) + JuMP.optimize!(model) + # Initialise other required structures. + solution = OrderedDict{VariableRef, Float64}() + solution[x_1] = 1.0 + solution[x_2] = 0.0 + solution[x_3] = 1.0 + index_map = Dict{Int64, Int64}() + index_map[1] = 1 + index_map[2] = 2 + index_map[3] = 3 + # Run function and test results. 
We cannot test function equality so we evaluate the functions in two points and compare. + f = MGA.create_objective( + model, + solution, + 0.5, + Distances.SqEuclidean(), + index_map, + Dict{MOI.VariableIndex, Float64}(), + ) + f0, g0, h0 = f([0.0, 0.0, 0.0]) + f1, g1, h1 = f([1.0, 1.0, 1.0]) + @test f0 == [-2.0] && # -1 * ((x_1 - 1)^2 + (x_2 - 0)^2 + (x_3 - 1)^2) = -(1 + 0 + 1) = -2 + f1 == [-1.0] && # -1 * ((x_1 - 1)^2 + (x_2 - 0)^2 + (x_3 - 1)^2) = (0 + 1 + 0) = -1 + length(g0) == 5 && # Objective gap constraint + 4 variable bounds + g0[1] == 1.0 && # x_1 - x_2 + x_3 >= 1.0 (1.0 <= 0) + count(i -> (i == 0.0), g0) == 2 && # Constraints except objective gap constraint can be in arbitrary order, so we count the occurences per value. + count(i -> (i == -1.0), g0) == 2 && # -1 occurs for x_1 <= 1 and x_3 <= 1, for the others it's 0.0. + length(g1) == 5 && + g1[1] == 0.0 && + count(i -> (i == 0.0), g1) == 3 && # Objective gap, x_1 <= 1 and x_3 <= 1 + count(i -> (i == -1.0), g1) == 2 && # x_2 >= 0, x_3 >= 0. + h0 == [0.0] && # No equality constraints included + h1 == [0.0] + end + + @testset "Test simple problem with all types of constraints" begin + # Initialise model. + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + @variable(model, x_1) + @variable(model, x_2) + @constraint(model, x_1 + x_2 ≤ 3.0) + @constraint(model, x_1 - x_2 ≥ 0.0) + @constraint(model, x_2 == 0.0) + @objective(model, Max, 2 * x_1 + x_2) + JuMP.optimize!(model) + # Initialise other required structures. + solution = OrderedDict{VariableRef, Float64}() + solution[x_1] = 3.0 + solution[x_2] = 0.0 + index_map = Dict{Int64, Int64}() + index_map[1] = 1 + index_map[2] = 2 + # Run function and test results. We cannot test function equality so we evaluate the functions in two points and compare. + f = MGA.create_objective( + model, + solution, + 0.5, + Distances.SqEuclidean(), + index_map, + Dict{MOI.VariableIndex, Float64}(), + ) + f0, g0, h0 = f([0.0, 0.0]) + f1, g1, h1 = f([1.0, 1.0]) + @test f0 == [-9.0] && # -1 * ((x_1 - 1)^2 + (x_2 - 0)^2 + (x_3 - 1)^2) = -(1 + 0 + 1) = -2 + f1 == [-5.0] && # -1 * ((x_1 - 1)^2 + (x_2 - 0)^2 + (x_3 - 1)^2) = (0 + 1 + 0) = -1 + length(g0) == 3 && # Objective gap constraint + 2 inequality constraints + g0[1] == 3.0 && # 2 * x_1 + x_2 >= 3.0 (3.0 <= 0) + count(i -> (i == -3.0), g0) == 1 && # Constraints except objective gap constraint can be in arbitrary order, so we count the occurences per value. + count(i -> (i == 0.0), g0) == 1 && # -3 occurs for x_1 + x_2 <= 3, 0 for x_1 - x_2 >= 0. + length(g1) == 3 && + g1[1] == 0.0 && + count(i -> (i == -1.0), g1) == 1 && # x_1 + x_2 <= 3. + count(i -> (i == 0.0), g1) == 2 && # Objective gap and x_1 - x_2 >= 0. 
+          h0 == [0.0] && # x_2 == 0.0
+          h1 == [1.0] # x_2 == 0.0
+  end
+end
+
+@testset "Test creating metaheuristic alternative problem" begin
+  optimizer = Ipopt.Optimizer
+  model = JuMP.Model(optimizer)
+
+  # Initialise simple `square` JuMP model
+  @variable(model, 0 ≤ x_1 ≤ 1)
+  @variable(model, 0 ≤ x_2 ≤ 1)
+  @objective(model, Max, x_1 + x_2)
+  JuMP.optimize!(model)
+
+  # Initialise other parameters
+  solution = OrderedDict{VariableRef, Float64}()
+  solution[x_1] = 1.0
+  solution[x_2] = 1.0
+
+  algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
+  metric = Distances.SqEuclidean()
+
+  problem = MGA.create_alternative_generating_problem(
+    model,
+    algorithm,
+    solution,
+    0.5,
+    metric,
+    Dict{MOI.VariableIndex, Float64}(),
+  )
+  f0, g0, h0 = problem.objective([0.0, 0.0])
+  @test problem.algorithm == algorithm &&
+        problem.bounds == [0.0 0.0; 1.0 1.0] &&
+        f0 == [-2.0] && # -((x_1 - 1)^2 + (x_2 - 1)^2) = -(1 + 1)
+        length(g0) == 5 &&
+        h0 == [0.0]
+end
+
+@testset "Test running metaheuristic" begin
+  optimizer = Ipopt.Optimizer
+  model = JuMP.Model(optimizer)
+
+  # Initialise simple `square` JuMP model
+  @variable(model, 0 ≤ x_1 ≤ 1)
+  @variable(model, 0 ≤ x_2 ≤ 1)
+  @objective(model, Max, x_1 + x_2)
+  JuMP.optimize!(model)
+
+  # Initialise other parameters
+  solution = OrderedDict{VariableRef, Float64}()
+  solution[x_1] = 1.0
+  solution[x_2] = 1.0
+
+  algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
+  metric = Distances.SqEuclidean()
+
+  problem = MGA.create_alternative_generating_problem(
+    model,
+    algorithm,
+    solution,
+    0.5,
+    metric,
+    Dict{MOI.VariableIndex, Float64}(),
+  )
+  result = MGA.run_alternative_generating_problem!(problem)
+  solution = minimizer(result)
+
+  @test solution[1] ≥ 0 && solution[1] ≤ 1 && solution[2] ≥ 0 && solution[2] ≤ 1
+end
+
+@testset "Test adding solution to metaheuristic problem" begin
+  optimizer = Ipopt.Optimizer
+  model = JuMP.Model(optimizer)
+
+  # Initialise simple `square` JuMP model
+  @variable(model, 0 ≤ x_1 ≤ 1)
+  @variable(model, 0 ≤ x_2 ≤ 1)
+  @objective(model, Max, x_1 + x_2)
+  JuMP.optimize!(model)
+
+  # Initialise other parameters
+  solution = OrderedDict{VariableRef, Float64}()
+  solution[x_1] = 1.0
+  solution[x_2] = 1.0
+
+  algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
+  metric = Distances.SqEuclidean()
+
+  problem = MGA.create_alternative_generating_problem(
+    model,
+    algorithm,
+    solution,
+    0.5,
+    metric,
+    Dict{MOI.VariableIndex, Float64}(),
+  )
+  result = MGA.run_alternative_generating_problem!(problem)
+  sol = minimizer(result)
+  MGA.add_solution!(problem, result, metric)
+  f0, g0, h0 = problem.objective([0.0, 0.0])
+
+  @test problem.algorithm == algorithm &&
+        problem.bounds == [0.0 0.0; 1.0 1.0] &&
+        f0 ≤ [-2.0 - sol[1]^2 - sol[2]^2] && # -((x_1 - 1)^2 + (x_2 - 1)^2 + (x_1 - res1)^2 + (x_2 - res2)^2) = -(1 + 1 + res1^2 + res2^2)
+        length(g0) == 5 &&
+        h0 == [0.0]
+end
diff --git a/test/test-alternative-optimisation.jl b/test/test-alternative-optimisation.jl
index 3db11c5..c8cc713 100644
--- a/test/test-alternative-optimisation.jl
+++ b/test/test-alternative-optimisation.jl
@@ -7,7 +7,7 @@
   @variable(model, 0 ≤ x_1 ≤ 1)
   @variable(model, 0 ≤ x_2 ≤ 1)
   @objective(model, Max, x_1 + x_2)
-  optimize!(model)
+  JuMP.optimize!(model)
   # Store the values of `x_1` and `x_2` to test that the correct values are used in the created alternative generation problem.
   x_1_res = value(x_1)
   x_2_res = value(x_2)
@@ -27,7 +27,7 @@
   @variable(model, 1 ≤ x_1 ≤ 2)
   @variable(model, 1 ≤ x_2 ≤ 2)
   @objective(model, Min, x_1 + x_2)
-  optimize!(model)
+  JuMP.optimize!(model)
   # Store the values of `x_1` and `x_2` to test that the correct values are used in the created alternative generation problem.
   x_1_res = value(x_1)
   x_2_res = value(x_2)
@@ -53,7 +53,7 @@
   @variable(model, 0 ≤ x_1 ≤ 1)
   @variable(model, 0 ≤ x_2 ≤ 1)
   @objective(model, Max, x_1 + x_2)
-  optimize!(model)
+  JuMP.optimize!(model)
   # Store the values of `x_1` and `x_2` to test that the correct values are used in the created alternative generation problem.
   x_1_res = value(x_1)
   x_2_res = value(x_2)
@@ -82,7 +82,7 @@ end
   @variable(model, 0 ≤ x_2 ≤ 1)
   @objective(model, Max, (x_1 - 1)^2 + (x_2 - 1)^2)
   @constraint(model, original_objective, x_1 + x_2 ≥ 1.8)
-  optimize!(model)
+  JuMP.optimize!(model)
   # Store the values of `x_1` and `x_2` to test that the correct values are used in the created alternative generation problem.
   x_1_res = value(x_1)
   x_2_res = value(x_2)
diff --git a/test/test-generate-alternatives.jl b/test/test-generate-alternatives.jl
index 46bf888..a168f2c 100644
--- a/test/test-generate-alternatives.jl
+++ b/test/test-generate-alternatives.jl
@@ -19,7 +19,7 @@
   @variable(model, 0 ≤ x_1 ≤ 1)
   @variable(model, 0 ≤ x_2 ≤ 1)
   @objective(model, Max, x_1 + x_2)
-  optimize!(model)
+  JuMP.optimize!(model)

   @test_throws ArgumentError MGA.generate_alternatives!(model, -0.1, 5)
 end
@@ -32,7 +32,7 @@
   @variable(model, 0 ≤ x_1 ≤ 1)
   @variable(model, 0 ≤ x_2 ≤ 1)
   @objective(model, Max, x_1 + x_2)
-  optimize!(model)
+  JuMP.optimize!(model)

   @test_throws ArgumentError MGA.generate_alternatives!(model, 0.1, 0)
 end
@@ -45,7 +45,7 @@
   @variable(model, 0 ≤ x_1 ≤ 1)
   @variable(model, 0 ≤ x_2 ≤ 1)
   @objective(model, Max, x_1 + x_2)
-  optimize!(model)
+  JuMP.optimize!(model)

   results = MGA.generate_alternatives!(model, 0.1, 1)

@@ -57,7 +57,7 @@
         (results.objective_values[1] ≤ 2.0 || isapprox(results.objective_values[1], 2.0))
 end

-  @testset "Test regular run with one alternative with one selected variable." begin
+  @testset "Test regular run with one alternative with one fixed variable." begin
     optimizer = Ipopt.Optimizer
     model = JuMP.Model(optimizer)

@@ -65,7 +65,7 @@
   @variable(model, 0 ≤ x_1 ≤ 1)
   @variable(model, 0 ≤ x_2 ≤ 1)
   @objective(model, Max, x_1 + x_2)
-  optimize!(model)
+  JuMP.optimize!(model)

   results = MGA.generate_alternatives!(model, 0.1, 1, fixed_variables = [x_2])

@@ -88,10 +88,9 @@
   @variable(model, 0 ≤ x_1 ≤ 1)
   @variable(model, 0 ≤ x_2 ≤ 1)
   @objective(model, Max, x_1 + x_2)
-  optimize!(model)
+  JuMP.optimize!(model)

   results = MGA.generate_alternatives!(model, 0.1, 2)
-  println(results)

   # Test that `results` contains 2 solutions with two variables each, where the objective values of both solutions are between 1.8 and 2.0.
   @test length(results.solutions) == 2 &&
@@ -111,17 +110,158 @@
   @variable(model, 0 ≤ x_1 ≤ 1)
   @variable(model, 0 ≤ x_2 ≤ 1)
   @objective(model, Max, x_1 + x_2)
-  optimize!(model)
+  JuMP.optimize!(model)
+
+  results = MGA.generate_alternatives!(model, 0.1, 1, metric = WeightedSqEuclidean([0.5, 10]))
+
+  # Test that `results` contains one solution with two variables. Logically, due to the weights this solution should return around 0.8 for `x_2` and 1.0 for `x_1`.
+  @test length(results.solutions) == 1 &&
+        length(results.solutions[1]) == 2 &&
+        length(results.objective_values) == 1 &&
+        isapprox(results.objective_values[1], 1.8, atol = 0.01) &&
+        isapprox(results.solutions[1][x_2], 0.8, atol = 0.01) &&
+        isapprox(results.solutions[1][x_1], 1.0, atol = 0.01)
+  end
+end
+
+@testset "Test generate alternatives using metaheuristics." begin
+  @testset "Make sure error is thrown when JuMP model is not solved." begin
+    optimizer = Ipopt.Optimizer
+    model = JuMP.Model(optimizer)
+
+    # Initialise simple `square` JuMP model
+    @variable(model, 0 ≤ x_1 ≤ 1)
+    @variable(model, 0 ≤ x_2 ≤ 1)
+    @objective(model, Max, x_1 + x_2)
+
+    algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
+
+    @test_throws ArgumentError MGA.generate_alternatives(model, 0.1, 5, algorithm)
+  end
+
+  @testset "Make sure error is thrown when incorrect optimality_gap." begin
+    optimizer = Ipopt.Optimizer
+    model = JuMP.Model(optimizer)
+
+    # Initialise simple `square` JuMP model
+    @variable(model, 0 ≤ x_1 ≤ 1)
+    @variable(model, 0 ≤ x_2 ≤ 1)
+    @objective(model, Max, x_1 + x_2)
+    JuMP.optimize!(model)
+
+    algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
+
+    @test_throws ArgumentError MGA.generate_alternatives(model, -0.1, 5, algorithm)
+  end
+
+  @testset "Make sure error is thrown when incorrect n_alternatives." begin
+    optimizer = Ipopt.Optimizer
+    model = JuMP.Model(optimizer)
+
+    # Initialise simple `square` JuMP model
+    @variable(model, 0 ≤ x_1 ≤ 1)
+    @variable(model, 0 ≤ x_2 ≤ 1)
+    @objective(model, Max, x_1 + x_2)
+    JuMP.optimize!(model)
+
+    algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
+
+    @test_throws ArgumentError MGA.generate_alternatives(model, 0.1, 0, algorithm)
+  end
+
+  @testset "Test regular run with one alternative." begin
+    optimizer = Ipopt.Optimizer
+    model = JuMP.Model(optimizer)
+
+    # Initialise simple `square` JuMP model
+    @variable(model, 0 ≤ x_1 ≤ 1)
+    @variable(model, 0 ≤ x_2 ≤ 1)
+    @objective(model, Max, x_1 + x_2)
+    JuMP.optimize!(model)
+
+    algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
+
+    results = MGA.generate_alternatives(model, 0.1, 1, algorithm)
+
+    # Test that `results` contains one solution with 2 variables, and an objective value between 1.8 and 2.0.
+    @test length(results.solutions) == 1 &&
+          length(results.solutions[1]) == 2 &&
+          length(results.objective_values) == 1 &&
+          (results.objective_values[1] ≥ 1.8 || isapprox(results.objective_values[1], 1.8)) &&
+          (results.objective_values[1] ≤ 2.0 || isapprox(results.objective_values[1], 2.0))
+  end
+
+  @testset "Test regular run with one alternative with one fixed variable." begin
+    optimizer = Ipopt.Optimizer
+    model = JuMP.Model(optimizer)
+
+    # Initialise simple `square` JuMP model
+    @variable(model, 0 ≤ x_1 ≤ 1)
+    @variable(model, 0 ≤ x_2 ≤ 1)
+    @objective(model, Max, x_1 + x_2)
+    JuMP.optimize!(model)
+
+    algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
+
+    results = MGA.generate_alternatives(model, 0.1, 1, algorithm, fixed_variables = [x_2])
+
+    # Test that `results` contains one solution with 2 variables, and an objective value between 1.8 and 2.0. Also, `x_2` should remain around 1.0 and `x_1` should be between 0.8 and 1.0.
+    @test length(results.solutions) == 1 &&
+          length(results.solutions[1]) == 2 &&
+          length(results.objective_values) == 1 &&
+          (results.objective_values[1] ≥ 1.8 || isapprox(results.objective_values[1], 1.8)) &&
+          (results.objective_values[1] ≤ 2.0 || isapprox(results.objective_values[1], 2.0)) &&
+          (results.solutions[1][x_1] ≥ 0.8 || isapprox(results.solutions[1][x_1], 0.8)) &&
+          (results.solutions[1][x_1] ≤ 1.0 || isapprox(results.solutions[1][x_1], 1.0)) &&
+          isapprox(results.solutions[1][x_2], 1.0)
+  end
+
+  @testset "Test regular run with two alternatives." begin
+    optimizer = Ipopt.Optimizer
+    model = JuMP.Model(optimizer)
+
+    # Initialise simple `square` JuMP model
+    @variable(model, 0 ≤ x_1 ≤ 1)
+    @variable(model, 0 ≤ x_2 ≤ 1)
+    @objective(model, Max, x_1 + x_2)
+    JuMP.optimize!(model)
+
+    algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
+
+    results = MGA.generate_alternatives(model, 0.1, 2, algorithm)
+
+    # Test that `results` contains 2 solutions with two variables each, where the objective values of both solutions are between 1.8 and 2.0.
+    @test length(results.solutions) == 2 &&
+          length(results.solutions[2]) == 2 &&
+          length(results.objective_values) == 2 &&
+          (results.objective_values[1] ≥ 1.8 || isapprox(results.objective_values[1], 1.8)) &&
+          (results.objective_values[1] ≤ 2.0 || isapprox(results.objective_values[1], 2.0)) &&
+          (results.objective_values[2] ≥ 1.8 || isapprox(results.objective_values[2], 1.8)) &&
+          (results.objective_values[2] ≤ 2.0 || isapprox(results.objective_values[2], 2.0))
+  end
+
+  @testset "Test regular run with one alternative and a weighted metric." begin
+    optimizer = Ipopt.Optimizer
+    model = JuMP.Model(optimizer)
+
+    # Initialise simple `square` JuMP model
+    @variable(model, 0 ≤ x_1 ≤ 1)
+    @variable(model, 0 ≤ x_2 ≤ 1)
+    @objective(model, Max, x_1 + x_2)
+    JuMP.optimize!(model)
+
+    algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)

-  results = MGA.generate_alternatives!(model, 0.1, 1, metric = WeightedSqEuclidean([0.5, 1]))
+    results =
+      MGA.generate_alternatives(model, 0.1, 1, algorithm, metric = WeightedSqEuclidean([0.5, 10]))
     println(results)
     # Test that `results` contains one solution with two variables. Logically, due to the weights this solution should return around 0.8 for `x_2` and 1.0 for `x_1`.
     @test length(results.solutions) == 1 &&
           length(results.solutions[1]) == 2 &&
           length(results.objective_values) == 1 &&
-          isapprox(results.objective_values[1], 1.8) &&
-          isapprox(results.solutions[1][x_2], 0.8) &&
-          isapprox(results.solutions[1][x_1], 1.0)
+          isapprox(results.objective_values[1], 1.8, atol = 0.01) &&
+          isapprox(results.solutions[1][x_2], 0.8, atol = 0.01) &&
+          isapprox(results.solutions[1][x_1], 1.0, atol = 0.01)
   end
 end

From f7c4eeb2d87a573601b4102edd85965964ef6a13 Mon Sep 17 00:00:00 2001
From: Matthijs Arnoldus
Date: Mon, 27 May 2024 09:45:48 +0200
Subject: [PATCH 3/8] Specify Metaheuristics version

---
 Project.toml      | 1 +
 test/Project.toml | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/Project.toml b/Project.toml
index 5e756d5..96c31da 100644
--- a/Project.toml
+++ b/Project.toml
@@ -12,3 +12,4 @@ Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"

 [compat]
 julia = "1.6"
+Metaheuristics = "3.3.5"
diff --git a/test/Project.toml b/test/Project.toml
index d355773..233d615 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -6,3 +6,6 @@ JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
 MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
 Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+
+[compat]
+Metaheuristics = "3.3.5"

From 3f5ee41f5f5effb621a5b02107eb3791818e99bf Mon Sep 17 00:00:00 2001
From: Matthijs Arnoldus
Date: Mon, 27 May 2024 10:45:49 +0200
Subject: [PATCH 4/8] Change Metaheuristics version

---
 Project.toml      | 2 +-
 test/Project.toml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Project.toml b/Project.toml
index 96c31da..9f05aae 100644
--- a/Project.toml
+++ b/Project.toml
@@ -12,4 +12,4 @@ Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"

 [compat]
 julia = "1.6"
-Metaheuristics = "3.3.5"
+Metaheuristics = "3.3"
diff --git a/test/Project.toml b/test/Project.toml
index 233d615..a3614d8 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -8,4 +8,4 @@ Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

 [compat]
-Metaheuristics = "3.3.5"
+Metaheuristics = "3.3"

From e8073e818c6b516046429c32c2907659ef0beb93 Mon Sep 17 00:00:00 2001
From: Matthijs Arnoldus
Date: Mon, 27 May 2024 10:52:52 +0200
Subject: [PATCH 5/8] Change Metaheuristics version again

---
 Project.toml      | 2 +-
 test/Project.toml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Project.toml b/Project.toml
index 9f05aae..8d7eb9d 100644
--- a/Project.toml
+++ b/Project.toml
@@ -12,4 +12,4 @@ Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"

 [compat]
 julia = "1.6"
-Metaheuristics = "3.3"
+Metaheuristics = "3.2"
diff --git a/test/Project.toml b/test/Project.toml
index a3614d8..6285d3b 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -8,4 +8,4 @@ Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

 [compat]
-Metaheuristics = "3.3"
+Metaheuristics = "3.2"

From 20598dd6461e87a0a97a00e2b20a480a302109d6 Mon Sep 17 00:00:00 2001
From: Matthijs Arnoldus
Date: Tue, 28 May 2024 15:20:27 +0200
Subject: [PATCH 6/8] Resolve change from MGA to NearOptimalAlternatives

---
 src/generate-alternatives.jl            |  8 +++---
 test/test-alternative-metaheuristics.jl | 35 ++++++++++++++----------
 test/test-generate-alternatives.jl      |  9 +++++--
 3 files changed, 31 insertions(+), 21 deletions(-)

diff --git a/src/generate-alternatives.jl b/src/generate-alternatives.jl
index b91be80..b9476fc 100644
--- a/src/generate-alternatives.jl
+++ b/src/generate-alternatives.jl
@@ -93,7 +93,7 @@ function generate_alternatives(

   result = AlternativeSolutions([], [])

-  @info "Setting up MGA problem and solver."
+  @info "Setting up NearOptimalAlternatives problem and solver."
   # Obtain the solution values for all variables, separated in fixed and non-fixed variables.
   initial_solution = OrderedDict{VariableRef, Float64}()
   fixed_variable_solutions = Dict{MOI.VariableIndex, Float64}()
@@ -113,15 +113,15 @@ function generate_alternatives(
     metric,
     fixed_variable_solutions,
   )
-  @info "Solving MGA problem."
+  @info "Solving NearOptimalAlternatives problem."
   state = run_alternative_generating_problem!(problem)
   @info "Solution #1/$n_alternatives found." state minimizer(state)
   update_solutions!(result, state, initial_solution, fixed_variable_solutions, model)

   for i = 2:n_alternatives
-    @info "Reconfiguring MGA problem with new solution."
+    @info "Reconfiguring NearOptimalAlternatives problem with new solution."
     add_solution!(problem, state, metric)
-    @info "Solving MGA problem."
+    @info "Solving NearOptimalAlternatives problem."
     state = run_alternative_generating_problem!(problem)
     @info "Solution #$i/$n_alternatives found." state minimizer(state)
     update_solutions!(result, state, initial_solution, fixed_variable_solutions, model)
diff --git a/test/test-alternative-metaheuristics.jl b/test/test-alternative-metaheuristics.jl
index fb277d9..7ca9d41 100644
--- a/test/test-alternative-metaheuristics.jl
+++ b/test/test-alternative-metaheuristics.jl
@@ -13,7 +13,7 @@
     index_map[1] = 1
     index_map[2] = 2
     # Result should be x_1 - 2 * x_2, which is equal to -3.0 for x_1 = 1.0 and x_2 = 2.0.
-    @test MGA.extract_constraint(
+    @test NearOptimalAlternatives.extract_constraint(
       MOI.get(model, MOI.ConstraintFunction(), constraint),
       [1.0, 2.0],
       index_map,
@@ -36,7 +36,7 @@
     fixed_variables = Dict{MOI.VariableIndex, Float64}()
     fixed_variables[x_2.index] = 2.0
     # Result should be x_1 - 2 * x_2, which is equal to -3.0 for x_1 = 1.0 and x_2 = 2.0.
-    @test MGA.extract_constraint(
+    @test NearOptimalAlternatives.extract_constraint(
       MOI.get(model, MOI.ConstraintFunction(), constraint),
       [1.0],
       index_map,
@@ -60,7 +60,7 @@ end
     index_map[1] = 1
     index_map[2] = 2
     # Result should be x_1 + x_2, which is equal to 3.0 for x_1 = 1.0 and x_2 = 2.0.
-    @test MGA.extract_objective(
+    @test NearOptimalAlternatives.extract_objective(
       objective,
       [1.0, 2.0],
       index_map,
@@ -82,7 +82,12 @@ end
     fixed_variables = Dict{MOI.VariableIndex, Float64}()
     fixed_variables[x_2.index] = 2.0
     # Result should be x_1 + x_2, which is equal to 3.0 for x_1 = 1.0 and x_2 = 2.0.
-    @test MGA.extract_objective(objective, [1.0, 2.0], index_map, fixed_variables) == 3.0
+    @test NearOptimalAlternatives.extract_objective(
+      objective,
+      [1.0, 2.0],
+      index_map,
+      fixed_variables,
+    ) == 3.0
   end
 end
@@ -99,7 +104,7 @@ end
     result = zeros(Float64, (2, 1))
     result[1, 1] = -Inf
     result[2, 1] = 1
-    @test MGA.extract_bounds(model, index_map) == result
+    @test NearOptimalAlternatives.extract_bounds(model, index_map) == result
   end
   @testset "Test greater than bounds" begin
@@ -113,7 +118,7 @@ end
     result = zeros(Float64, (2, 1))
     result[1, 1] = 1
     result[2, 1] = Inf
-    @test MGA.extract_bounds(model, index_map) == result
+    @test NearOptimalAlternatives.extract_bounds(model, index_map) == result
   end
   @testset "Test interval bounds" begin
@@ -127,7 +132,7 @@ end
     result = zeros(Float64, (2, 1))
     result[1, 1] = 0
     result[2, 1] = 1
-    @test MGA.extract_bounds(model, index_map) == result
+    @test NearOptimalAlternatives.extract_bounds(model, index_map) == result
   end
 end
@@ -151,7 +156,7 @@ end
     index_map[2] = 2
     index_map[3] = 3
     # Run function and test results. We cannot test function equality so we evaluate the functions in two points and compare.
-    f = MGA.create_objective(
+    f = NearOptimalAlternatives.create_objective(
       model,
       solution,
       0.5,
@@ -194,7 +199,7 @@ end
     index_map[1] = 1
     index_map[2] = 2
     # Run function and test results. We cannot test function equality so we evaluate the functions in two points and compare.
-    f = MGA.create_objective(
+    f = NearOptimalAlternatives.create_objective(
       model,
       solution,
       0.5,
@@ -237,7 +242,7 @@ end
   algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
   metric = Distances.SqEuclidean()

-  problem = MGA.create_alternative_generating_problem(
+  problem = NearOptimalAlternatives.create_alternative_generating_problem(
     model,
     algorithm,
     solution,
@@ -271,7 +276,7 @@ end
   algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
   metric = Distances.SqEuclidean()

-  problem = MGA.create_alternative_generating_problem(
+  problem = NearOptimalAlternatives.create_alternative_generating_problem(
     model,
     algorithm,
     solution,
@@ -279,7 +284,7 @@ end
     metric,
     Dict{MOI.VariableIndex, Float64}(),
   )
-  result = MGA.run_alternative_generating_problem!(problem)
+  result = NearOptimalAlternatives.run_alternative_generating_problem!(problem)
   solution = minimizer(result)

   @test solution[1] ≥ 0 && solution[1] ≤ 1 && solution[2] ≥ 0 && solution[2] ≤ 1
@@ -303,7 +308,7 @@ end
   algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)
   metric = Distances.SqEuclidean()

-  problem = MGA.create_alternative_generating_problem(
+  problem = NearOptimalAlternatives.create_alternative_generating_problem(
     model,
     algorithm,
     solution,
@@ -311,9 +316,9 @@ end
     metric,
     Dict{MOI.VariableIndex, Float64}(),
   )
-  result = MGA.run_alternative_generating_problem!(problem)
+  result = NearOptimalAlternatives.run_alternative_generating_problem!(problem)
   sol = minimizer(result)
-  MGA.add_solution!(problem, result, metric)
+  NearOptimalAlternatives.add_solution!(problem, result, metric)
   f0, g0, h0 = problem.objective([0.0, 0.0])

   @test problem.algorithm == algorithm &&
diff --git a/test/test-generate-alternatives.jl b/test/test-generate-alternatives.jl
index 797ff3f..c782902 100644
--- a/test/test-generate-alternatives.jl
+++ b/test/test-generate-alternatives.jl
@@ -278,8 +278,13 @@ end
     algorithm = Metaheuristics.PSO(N = 100, C1 = 2.0, C2 = 2.0, ω = 0.8)

-    results =
-      MGA.generate_alternatives(model, 0.1, 1, algorithm, metric = WeightedSqEuclidean([0.5, 10]))
+    results = NearOptimalAlternatives.generate_alternatives(
+      model,
+      0.1,
+      1,
+      algorithm,
+      metric = WeightedSqEuclidean([0.5, 10]),
+    )
     results = NearOptimalAlternatives.generate_alternatives!(
       model,
       0.1,

From ccbf95827bf90875d923e033dfb015db311ccadd Mon Sep 17 00:00:00 2001
From: Matthijs Arnoldus
Date: Tue, 28 May 2024 15:33:25 +0200
Subject: [PATCH 7/8] Update Julia version to 1.7

---
 .github/workflows/Test.yml | 2 +-
 Project.toml               | 4 ++--
 test/Project.toml          | 3 ---
 3 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/Test.yml b/.github/workflows/Test.yml
index 49a5fe0..f0c1add 100644
--- a/.github/workflows/Test.yml
+++ b/.github/workflows/Test.yml
@@ -32,7 +32,7 @@ jobs:
       fail-fast: false
       matrix:
         version:
-          - "1.6"
+          - "1.7"
          - "1"
         os:
          - ubuntu-latest
diff --git a/Project.toml b/Project.toml
index 1d94f89..0f4f57b 100644
--- a/Project.toml
+++ b/Project.toml
@@ -14,5 +14,5 @@ Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"
 JuMP = "1"
 MathOptInterface = "1"
 Distances = "0.10"
-julia = "1.6"
-Metaheuristics = "3.2"
+julia = "1.7"
+Metaheuristics = "3.3"
diff --git a/test/Project.toml b/test/Project.toml
index 6285d3b..d355773 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -6,6 +6,3 @@ JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
 MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
 Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
-
-[compat]
-Metaheuristics = "3.2"

From 70d280a549f7dc1ebd671c23da8ad8e9b7d48c07 Mon Sep 17 00:00:00 2001
From: Matthijs Arnoldus
Date: Wed, 29 May 2024 17:05:32 +0200
Subject: [PATCH 8/8] Implement Particle Swarm Optimisation for Generating
 Alternatives

---
 Project.toml                      |   5 +-
 src/NearOptimalAlternatives.jl    |   2 +
 src/algorithms/PSOGA/PSOGA.jl     | 233 ++++++++++++++++++++++++++++++
 src/algorithms/PSOGA/is_better.jl |  59 ++++++++
 src/generate-alternatives.jl      |  31 +++-
 src/results.jl                    |  48 ++++++
 test/Project.toml                 |   1 +
 test/test-psoga.jl                | 129 +++++++++++++++
 8 files changed, 498 insertions(+), 10 deletions(-)
 create mode 100644 src/algorithms/PSOGA/PSOGA.jl
 create mode 100644 src/algorithms/PSOGA/is_better.jl
 create mode 100644 test/test-psoga.jl

diff --git a/Project.toml b/Project.toml
index 0f4f57b..30be612 100644
--- a/Project.toml
+++ b/Project.toml
@@ -9,10 +9,11 @@ Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
 JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
 MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
 Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

 [compat]
+Distances = "0.10"
 JuMP = "1"
 MathOptInterface = "1"
-Distances = "0.10"
-julia = "1.7"
 Metaheuristics = "3.3"
+julia = "1.7"
diff --git a/src/NearOptimalAlternatives.jl b/src/NearOptimalAlternatives.jl
index 98e0a4e..3cf26c8 100644
--- a/src/NearOptimalAlternatives.jl
+++ b/src/NearOptimalAlternatives.jl
@@ -7,10 +7,12 @@ using JuMP
 using Distances
 using MathOptInterface
 using Metaheuristics
 using DataStructures
+using Statistics

 include("results.jl")
 include("alternative-optimisation.jl")
 include("generate-alternatives.jl")
 include("alternative-metaheuristics.jl")
+include("algorithms/PSOGA/PSOGA.jl")

 end
diff --git a/src/algorithms/PSOGA/PSOGA.jl b/src/algorithms/PSOGA/PSOGA.jl
new file mode 100644
index 0000000..8fbefb4
--- /dev/null
+++ b/src/algorithms/PSOGA/PSOGA.jl
@@ -0,0 +1,233 @@
+import Metaheuristics: initialize!, update_state!, final_stage!
+import Metaheuristics: AbstractParameters, gen_initial_state, Algorithm, get_position
+# genetic operators
+import Metaheuristics: SBX_crossover, polynomial_mutation!, create_solution, is_better
+import Metaheuristics: reset_to_violated_bounds!
+import Metaheuristics: velocity
+include("is_better.jl")
+
+"""
+    Structure holding all parameters for PSOGA (Particle Swarm Optimisation for Generating Alternatives).
+"""
+mutable struct PSOGA <: AbstractParameters
+  N::Int # Total population size.
+  N_solutions::Int # Number of solutions sought. This is the same as the number of subpopulations searching for a solution.
+  C1::Float64 # Cognitive parameter. Used to compute velocity based on an individual's own best solution.
+  C2::Float64 # Social parameter. Used to compute velocity based on the best solution in the subpopulation.
+  ω::Float64 # Inertia parameter. Used to compute velocity to ensure changes are not too large.
+  v::Array{Float64} # Array of velocities per individual.
+  flock::Array # Array of all current positions of each of the individuals.
+  subBest::Array # Array of best solutions per subpopulation.
+  maximise_total::Bool # If true, we maximise the sum of distances between a point and all centroids of other subpopulations; else, we maximise the minimum distance between a point and the centroids of other subpopulations.
+end
+
+"""
+    PSOGA(;
+      N = 100,
+      N_solutions = 1,
+      C1 = 2.0,
+      C2 = 2.0,
+      ω = 0.8,
+      v = Float64[],
+      flock = Metaheuristics.xf_indiv[],
+      subBest = Metaheuristics.xf_indiv[],
+      maximise_total = true,
+      information = Information(),
+      options = Options(),
+    )
+
+Construct a PSOGA Metaheuristics algorithm.
+
+# Arguments
+- `N::Int`: total population size.
+- `N_solutions::Int`: number of solutions sought. This is the same as the number of subpopulations searching for a solution.
+- `C1::Float64`: cognitive parameter. Used to compute velocity based on an individual's own best solution.
+- `C2::Float64`: social parameter. Used to compute velocity based on the best solution in the subpopulation.
+- `ω::Float64`: inertia parameter. Used to compute velocity to ensure changes are not too large.
+- `v::Array{Float64}`: array of velocities per individual.
+- `flock::Array`: array of all current positions of each of the individuals.
+- `subBest::Array`: array of best solutions per subpopulation.
+- `maximise_total::Bool`: if true, we maximise the sum of distances between a point and all centroids of other subpopulations; else, we maximise the minimum distance between a point and the centroids of other subpopulations.
+"""
+function PSOGA(;
+  N::Int = 100,
+  N_solutions::Int = 1,
+  C1::Float64 = 2.0,
+  C2::Float64 = 2.0,
+  ω::Float64 = 0.8,
+  v::Array{Float64} = Float64[],
+  flock::Array = Metaheuristics.xf_indiv[],
+  subBest::Array = Metaheuristics.xf_indiv[],
+  maximise_total::Bool = true,
+  information = Information(),
+  options = Options(),
+)
+  parameters =
+    PSOGA(N, N_solutions, promote(Float64(C1), C2, ω)..., v, flock, subBest, maximise_total)
+
+  return Algorithm(parameters, information = information, options = options)
+end
+
+"""
+    initialize!(
+      status,
+      parameters::PSOGA,
+      problem,
+      information,
+      options,
+      args...;
+      kwargs...
+    )
+
+Initialise all parameters used when solving a problem using PSOGA. Called by the main loop of Metaheuristics.
+"""
+function initialize!(status, parameters::PSOGA, problem, information, options, args...; kwargs...)
+  # Get problem dimensions.
+  D = Metaheuristics.getdim(problem)
+
+  # Fix parameters if their values do not work for this problem size.
+  if options.f_calls_limit == 0
+    options.f_calls_limit = 10000 * D
+    options.debug && @warn("f_calls_limit increased to $(options.f_calls_limit)")
+  end
+  if options.iterations == 0
+    options.iterations = div(options.f_calls_limit, parameters.N) + 1
+  end
+  if mod(parameters.N, parameters.N_solutions) != 0
+    # Round the population size up to the next multiple of `N_solutions` so all subpopulations have equal size.
+    parameters.N += parameters.N_solutions - mod(parameters.N, parameters.N_solutions)
+    options.debug &&
+      @warn("Population size increased to $(parameters.N) to ensure equal size subpopulations.")
+  end
+
+  # Initialise velocity and population parameters.
+  parameters.v = zeros(parameters.N, D)
+  status = gen_initial_state(problem, parameters, information, options, status)
+
+  # Initialise parameter for best values per subpopulation and populate this array with bests in initial population.
+  parameters.subBest = Array{Any}(undef, parameters.N_solutions)
+  fill!(parameters.subBest, status.population[1])
+  for (i, sol) in enumerate(status.population)
+    if Metaheuristics.is_better(
+      sol,
+      parameters.subBest[Int(1 + div(i - 1, (parameters.N / parameters.N_solutions)))],
+    )
+      parameters.subBest[Int(1 + div(i - 1, (parameters.N / parameters.N_solutions)))] = sol
+    end
+  end
+
+  # Initialise flock (set of all previous populations).
+  parameters.flock = status.population
+
+  return status
+end
+
+"""
+    update_state!(
+      status,
+      parameters::PSOGA,
+      problem,
+      information,
+      options,
+      args...;
+      kwargs...
+    )
+
+Perform one iteration of PSOGA. Called by the main loop of Metaheuristics.
+"""
+function update_state!(
+  status,
+  parameters::PSOGA,
+  problem::Metaheuristics.AbstractProblem,
+  information::Information,
+  options::Options,
+  args...;
+  kwargs...,
+)
+  # Initialise matrix holding the new generation of individuals.
+  X_new = zeros(parameters.N, Metaheuristics.getdim(problem))
+
+  # Update all individuals' positions by adding their velocity.
+  for i = 1:(parameters.N)
+    # Obtain the best position in the individual's subpopulation, its current position and its all-time best position (see the indexing sketch after this patch).
+    xSPBest =
+      get_position(parameters.subBest[Int(1 + div(i - 1, (parameters.N / parameters.N_solutions)))])
+    x = get_position(parameters.flock[i])
+    xPBest = get_position(status.population[i])
+    # Generate new velocity (standard PSO update rule; see the sketch after this patch).
+    parameters.v[i, :] = velocity(x, parameters.v[i, :], xPBest, xSPBest, parameters, options.rng)
+    # Update position and reset to its bounds if it violates any.
+    x += parameters.v[i, :]
+    reset_to_violated_bounds!(x, problem.search_space)
+    X_new[i, :] = x
+  end
+
+  # Compute the centroids of each subpopulation.
+  centroids = ones(parameters.N_solutions, Metaheuristics.getdim(problem))
+  for i = 1:(parameters.N_solutions)
+    centroids[i, :] = Statistics.mean(
+      X_new[
+        ((i - 1) * div(parameters.N, parameters.N_solutions) + 1):(i * div(
+          parameters.N,
+          parameters.N_solutions,
+        )),
+        :,
+      ],
+      dims = 1,
+    )
+  end
+
+  # Update local bests (in population) and bests per subpopulation (subBest).
+  for (i, sol) in enumerate(Metaheuristics.create_solutions(X_new, problem; ε = options.h_tol))
+    if is_better_psoga(
+      sol,
+      status.population[i],
+      centroids,
+      Int(1 + div(i - 1, (parameters.N / parameters.N_solutions))),
+      parameters.maximise_total,
+    )
+      status.population[i] = sol
+      if is_better_psoga(
+        sol,
+        parameters.subBest[Int(1 + div(i - 1, (parameters.N / parameters.N_solutions)))],
+        centroids,
+        Int(1 + div(i - 1, (parameters.N / parameters.N_solutions))),
+        parameters.maximise_total,
+      )
+        parameters.subBest[Int(1 + div(i - 1, (parameters.N / parameters.N_solutions)))] = sol
+      end
+    end
+
+    # Update current generation.
+    parameters.flock[i] = sol
+
+    # Check if stop criteria are met.
+    Metaheuristics.stop_criteria!(status, parameters, problem, information, options)
+    status.stop && break
+  end
+end
+
+"""
+    final_stage!(
+      status,
+      parameters::PSOGA,
+      problem,
+      information,
+      options,
+      args...;
+      kwargs...
+    )
+
+Perform concluding operations after solving a problem using PSOGA. Called by the main loop of Metaheuristics.
+"""
+function final_stage!(
+  status,
+  parameters::PSOGA,
+  problem::Metaheuristics.AbstractProblem,
+  information::Information,
+  options::Options,
+  args...;
+  kwargs...,
+)
+  # Set end time of algorithm.
+  status.final_time = time()
+end
diff --git a/src/algorithms/PSOGA/is_better.jl b/src/algorithms/PSOGA/is_better.jl
new file mode 100644
index 0000000..f1cee2d
--- /dev/null
+++ b/src/algorithms/PSOGA/is_better.jl
@@ -0,0 +1,59 @@
+"""
+    is_better_psoga(
+      A::T,
+      B::T,
+      centroids::Matrix{Float64},
+      subpop::Int64,
+      maximise_total::Bool
+    ) where {T <: Metaheuristics.xFgh_solution}
+
+Compare two solutions of the PSOGA algorithm with respect to their distance to the optimal solution and other alternatives.
+
+# Arguments
+- `A`: solution in PSOGA to be compared.
+- `B`: solution in PSOGA to be compared.
+- `centroids::Matrix{Float64}`: matrix of centroids, with one row per subpopulation. A centroid is the average point of all solutions in a subpopulation.
+- `subpop::Int64`: index of the subpopulation solutions A and B are in. Note that they are always in the same subpopulation, since we only compare within subpopulations or with themselves.
+- `maximise_total::Bool`: if true, we maximise the sum of distances between a point and all centroids of other subpopulations; else, we maximise the minimum distance between a point and the centroids of other subpopulations.
+"""
+function is_better_psoga(
+  A::T,
+  B::T,
+  centroids::Matrix{Float64},
+  subpop::Int64,
+  maximise_total::Bool,
+) where {T <: Metaheuristics.xFgh_solution}
+  A_vio = A.sum_violations
+  B_vio = B.sum_violations
+
+  # If either A or B violates the constraints, the one with the smaller violation is better.
+  if A_vio < B_vio
+    return true
+  elseif B_vio < A_vio
+    return false
+  end
+
+  # Set distances for A and B equal to the negative objective value, which is equivalent to the distance between the point and the initial optimal solution.
+  A_dist = -A.f[1]
+  B_dist = -B.f[1]
+
+  # For each subpopulation compute the distance between both points and the centroid of that subpopulation.
+  for i in eachindex(centroids[:, 1])
+    # Skip the subpopulation A and B are in.
+    if i == subpop
+      continue
+    end
+    # Update distance based on whether we aim for maximising the total distance or the minimum distance (see the worked example after this patch).
+    if maximise_total
+      A_dist += sum((A.x[j] - centroids[i, j])^2 for j in eachindex(A.x))
+      B_dist += sum((B.x[j] - centroids[i, j])^2 for j in eachindex(B.x))
+    else
+      A_dist = min(A_dist, sum((A.x[j] - centroids[i, j])^2 for j in eachindex(A.x)))
+      B_dist = min(B_dist, sum((B.x[j] - centroids[i, j])^2 for j in eachindex(B.x)))
+    end
+  end
+
+  # If the total or minimum distance of A is bigger than that of B, A is better, so return true.
+  return A_dist > B_dist
+end
diff --git a/src/generate-alternatives.jl b/src/generate-alternatives.jl
index b9476fc..9fcad78 100644
--- a/src/generate-alternatives.jl
+++ b/src/generate-alternatives.jl
@@ -116,16 +116,31 @@ function generate_alternatives(
   @info "Solving NearOptimalAlternatives problem."
   state = run_alternative_generating_problem!(problem)
   @info "Solution #1/$n_alternatives found." state minimizer(state)
-  update_solutions!(result, state, initial_solution, fixed_variable_solutions, model)
-
-  for i = 2:n_alternatives
-    @info "Reconfiguring NearOptimalAlternatives problem with new solution."
-    add_solution!(problem, state, metric)
-    @info "Solving NearOptimalAlternatives problem."
-    state = run_alternative_generating_problem!(problem)
-    @info "Solution #$i/$n_alternatives found." state minimizer(state)
+  # Update the solutions based on whether PSOGA is used or not.
+  if typeof(metaheuristic_algorithm) == Metaheuristics.Algorithm{NearOptimalAlternatives.PSOGA}
+    update_solutions!(
+      result,
+      state,
+      metaheuristic_algorithm.parameters.subBest,
+      initial_solution,
+      fixed_variable_solutions,
+      model,
+    )
+  else
     update_solutions!(result, state, initial_solution, fixed_variable_solutions, model)
   end
+  # Only run iteratively if PSOGA is not used.
+  if typeof(metaheuristic_algorithm) != Metaheuristics.Algorithm{NearOptimalAlternatives.PSOGA}
+    for i = 2:n_alternatives
+      @info "Reconfiguring NearOptimalAlternatives problem with new solution."
+      add_solution!(problem, state, metric)
+      @info "Solving NearOptimalAlternatives problem."
+      state = run_alternative_generating_problem!(problem)
+      @info "Solution #$i/$n_alternatives found." state minimizer(state)
+      update_solutions!(result, state, initial_solution, fixed_variable_solutions, model)
+    end
+  end
+
   return result
 end
diff --git a/src/results.jl b/src/results.jl
index 8c9395a..f895d56 100644
--- a/src/results.jl
+++ b/src/results.jl
@@ -74,3 +74,51 @@ function update_solutions!(
     extract_objective(JuMP.objective_function(model), best_solution, index_map, fixed_variables)
   push!(results.objective_values, objective_value)
 end
+
+"""
+    update_solutions!(
+      results::AlternativeSolutions,
+      state::Metaheuristics.State,
+      subBest::Vector{Any},
+      initial_solution::OrderedDict{VariableRef, Float64},
+      fixed_variables::Dict{MOI.VariableIndex, Float64},
+      model::JuMP.Model,
+    )
+
+Update the set of results `AlternativeSolutions` with the variable values obtained when solving using PSOGA.
+
+# Arguments
+- `results::AlternativeSolutions`: set of solutions to add a new solution to.
+- `state::Metaheuristics.State`: contains the results of the metaheuristic solve.
+- `subBest::Vector{Any}`: contains the best results per subpopulation of PSOGA, which are the actual results of solving.
+- `initial_solution::OrderedDict{VariableRef, Float64}`: used to identify the indices of the metaheuristic solution in the JuMP model.
+- `fixed_variables::Dict{MOI.VariableIndex, Float64}`: set of fixed variables and their solution values.
+- `model::JuMP.Model`: original model for which alternative solutions are found.
+""" +function update_solutions!( + results::AlternativeSolutions, + state::Metaheuristics.State, + subBest::Vector{Any}, + initial_solution::OrderedDict{VariableRef, Float64}, + fixed_variables::Dict{MOI.VariableIndex, Float64}, + model::JuMP.Model, +) + if !state.stop + throw(ErrorException("Metaheuristic state `state` not terminated when trying to read results.")) + end + + # Loop over the best solutions of each subpopulation. + for best_solution in subBest + solution = Dict{VariableRef, Float64}() + index_map = Dict{Int64, Int64}() + # Add all new results + for (i, (k, _)) in enumerate(initial_solution) + solution[k] = best_solution.x[i] + index_map[k.index.value] = i + end + # Add values of fixed variables. + for (k, v) in fixed_variables + solution[JuMP.VariableRef(model, k)] = v + end + + push!(results.solutions, solution) + + # Retrieve objective value to original problem. + objective_value = + extract_objective(JuMP.objective_function(model), best_solution.x, index_map, fixed_variables) + push!(results.objective_values, objective_value) + end +end diff --git a/test/Project.toml b/test/Project.toml index d355773..2dc5cbf 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -5,4 +5,5 @@ Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" JuMP = "4076af6c-e467-56ae-b986-b466b2749572" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898" +Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/test/test-psoga.jl b/test/test-psoga.jl new file mode 100644 index 0000000..9daa732 --- /dev/null +++ b/test/test-psoga.jl @@ -0,0 +1,129 @@ +@testset "Test generate alternatives using PSOGA." begin + @testset "Test regular run with one alternative." begin + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + + # Initialise simple `square` JuMP model + @variable(model, 0 ≤ x_1 ≤ 1) + @variable(model, 0 ≤ x_2 ≤ 1) + @objective(model, Max, x_1 + x_2) + JuMP.optimize!(model) + + algorithm = NearOptimalAlternatives.PSOGA() + + results = NearOptimalAlternatives.generate_alternatives(model, 0.1, 1, algorithm) + println(results) + + # Test that `results` contains one solution with 2 variables, and an objective value between 1.8 and 2.0. + @test length(results.solutions) == 1 && + length(results.solutions[1]) == 2 && + length(results.objective_values) == 1 && + (results.objective_values[1] ≥ 1.8 || isapprox(results.objective_values[1], 1.8)) && + (results.objective_values[1] ≤ 2.0 || isapprox(results.objective_values[1], 2.0)) + end + + @testset "Test regular run with one alternative with one fixed variable." begin + optimizer = Ipopt.Optimizer + model = JuMP.Model(optimizer) + + # Initialise simple `square` JuMP model + @variable(model, 0 ≤ x_1 ≤ 1) + @variable(model, 0 ≤ x_2 ≤ 1) + @objective(model, Max, x_1 + x_2) + JuMP.optimize!(model) + + algorithm = NearOptimalAlternatives.PSOGA() + + results = NearOptimalAlternatives.generate_alternatives( + model, + 0.1, + 1, + algorithm, + fixed_variables = [x_2], + ) + + # Test that `results` contains one solution with 2 variables, and an objective value between 1.8 and 2.0. Also, `x_2` should remain around 1.0 and `x_1` should be between 0.8 and 1.0. 
+    @test length(results.solutions) == 1 &&
+          length(results.solutions[1]) == 2 &&
+          length(results.objective_values) == 1 &&
+          (results.objective_values[1] ≥ 1.8 || isapprox(results.objective_values[1], 1.8)) &&
+          (results.objective_values[1] ≤ 2.0 || isapprox(results.objective_values[1], 2.0)) &&
+          (results.solutions[1][x_1] ≥ 0.8 || isapprox(results.solutions[1][x_1], 0.8)) &&
+          (results.solutions[1][x_1] ≤ 1.0 || isapprox(results.solutions[1][x_1], 1.0)) &&
+          isapprox(results.solutions[1][x_2], 1.0)
+  end
+
+  @testset "Test regular run with two alternatives." begin
+    optimizer = Ipopt.Optimizer
+    model = JuMP.Model(optimizer)
+
+    # Initialise simple `square` JuMP model
+    @variable(model, 0 ≤ x_1 ≤ 1)
+    @variable(model, 0 ≤ x_2 ≤ 1)
+    @objective(model, Max, x_1 + x_2)
+    JuMP.optimize!(model)
+
+    algorithm = NearOptimalAlternatives.PSOGA(N_solutions = 2, N = 200)
+
+    results = NearOptimalAlternatives.generate_alternatives(model, 0.1, 2, algorithm)
+
+    # Test that `results` contains 2 solutions with two variables each, where the objective values of both solutions are between 1.8 and 2.0.
+    @test length(results.solutions) == 2 &&
+          length(results.solutions[2]) == 2 &&
+          length(results.objective_values) == 2 &&
+          (results.objective_values[1] ≥ 1.8 || isapprox(results.objective_values[1], 1.8)) &&
+          (results.objective_values[1] ≤ 2.0 || isapprox(results.objective_values[1], 2.0)) &&
+          (results.objective_values[2] ≥ 1.8 || isapprox(results.objective_values[2], 1.8)) &&
+          (results.objective_values[2] ≤ 2.0 || isapprox(results.objective_values[2], 2.0))
+  end
+
+  @testset "Test regular run with three alternatives." begin
+    optimizer = Ipopt.Optimizer
+    model = JuMP.Model(optimizer)
+
+    # Initialise simple `square` JuMP model
+    @variable(model, 0 ≤ x_1 ≤ 1)
+    @variable(model, 0 ≤ x_2 ≤ 1)
+    @objective(model, Max, x_1 + x_2)
+    JuMP.optimize!(model)
+
+    algorithm = NearOptimalAlternatives.PSOGA(N_solutions = 3, N = 300)
+
+    results = NearOptimalAlternatives.generate_alternatives(model, 0.1, 3, algorithm)
+
+    # Test that `results` contains 3 solutions with two variables each, where the objective values of all three solutions are between 1.8 and 2.0.
+    @test length(results.solutions) == 3 &&
+          length(results.solutions[2]) == 2 &&
+          length(results.objective_values) == 3 &&
+          (results.objective_values[1] ≥ 1.8 || isapprox(results.objective_values[1], 1.8)) &&
+          (results.objective_values[1] ≤ 2.0 || isapprox(results.objective_values[1], 2.0)) &&
+          (results.objective_values[2] ≥ 1.8 || isapprox(results.objective_values[2], 1.8)) &&
+          (results.objective_values[2] ≤ 2.0 || isapprox(results.objective_values[2], 2.0)) &&
+          (results.objective_values[3] ≥ 1.8 || isapprox(results.objective_values[3], 1.8)) &&
+          (results.objective_values[3] ≤ 2.0 || isapprox(results.objective_values[3], 2.0))
+  end
+
+  @testset "Test regular run with two alternatives and maximise minimum distance." begin
+    optimizer = Ipopt.Optimizer
+    model = JuMP.Model(optimizer)
+
+    # Initialise simple `square` JuMP model
+    @variable(model, 0 ≤ x_1 ≤ 1)
+    @variable(model, 0 ≤ x_2 ≤ 1)
+    @objective(model, Max, x_1 + x_2)
+    JuMP.optimize!(model)
+
+    algorithm = NearOptimalAlternatives.PSOGA(N_solutions = 2, N = 200, maximise_total = false)
+
+    results = NearOptimalAlternatives.generate_alternatives(model, 0.1, 2, algorithm)
+
+    # Test that `results` contains 2 solutions with two variables each, where the objective values of both solutions are between 1.8 and 2.0.
+ @test length(results.solutions) == 2 && + length(results.solutions[2]) == 2 && + length(results.objective_values) == 2 && + (results.objective_values[1] ≥ 1.8 || isapprox(results.objective_values[1], 1.8)) && + (results.objective_values[1] ≤ 2.0 || isapprox(results.objective_values[1], 2.0)) && + (results.objective_values[2] ≥ 1.8 || isapprox(results.objective_values[2], 1.8)) && + (results.objective_values[2] ≤ 2.0 || isapprox(results.objective_values[2], 2.0)) + end +end
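
The PSOGA code in patch 8/8 repeatedly maps an individual `i` to its subpopulation via
`Int(1 + div(i - 1, parameters.N / parameters.N_solutions))`. A minimal sketch of that
arithmetic, using a hypothetical helper name (`subpop_index` is not part of the patch);
it assumes `N` is a multiple of `N_solutions`, which `initialize!` enforces:

    # Map individual i to its subpopulation, assuming N % N_solutions == 0.
    subpop_index(i::Int, N::Int, N_solutions::Int) = (i - 1) ÷ (N ÷ N_solutions) + 1

    # With N = 6 and N_solutions = 3, individuals 1-2 belong to subpopulation 1,
    # individuals 3-4 to subpopulation 2, and individuals 5-6 to subpopulation 3.
    @assert [subpop_index(i, 6, 3) for i in 1:6] == [1, 1, 2, 2, 3, 3]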
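`update_state!` delegates the velocity computation to `Metaheuristics.velocity`. Given the
parameter names (`ω`, `C1`, `C2`), this is expected to be the textbook particle-swarm
update; the sketch below is an assumption about that rule, not the verified
Metaheuristics.jl implementation:

    # v' = ω v + C1 r1 (xPBest - x) + C2 r2 (xSPBest - x), with r1, r2 ~ U(0, 1).
    function pso_velocity(x, v, x_pbest, x_spbest, C1, C2, ω, rng)
        r1 = rand(rng, length(x))  # per-component randomisation of the cognitive pull
        r2 = rand(rng, length(x))  # per-component randomisation of the social pull
        return ω .* v .+ C1 .* r1 .* (x_pbest .- x) .+ C2 .* r2 .* (x_spbest .- x)
    end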
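A worked example of the distance bookkeeping in `is_better_psoga`, with plain vectors in
place of `xFgh_solution` objects (all names and numbers below are illustrative only). The
score starts from `-f[1]`, the distance to the original optimum, and is then combined with
the squared distances to the centroids of the other subpopulations:

    centroids = [0.0 0.0; 1.0 1.0; 0.0 1.0]  # one row per subpopulation
    x = [0.9, 0.1]                           # candidate particle in subpopulation 1
    base = 0.5                               # stands in for -f[1]

    # Squared distances to the centroids of the other subpopulations (2 and 3).
    dists = [sum((x .- centroids[i, :]) .^ 2) for i in 2:3]

    total_score = base + sum(dists)        # maximise_total = true: 0.5 + 0.82 + 1.62
    min_score = min(base, minimum(dists))  # maximise_total = false: min(0.5, 0.82, 1.62)

Under `maximise_total = true` a particle is rewarded for being far from all other
subpopulations on aggregate; under `maximise_total = false` only its worst-case
separation counts, which pushes subpopulations apart more evenly.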