Improve parallelization efficiency (#4)

timmens authored Apr 23, 2023
1 parent f1daa80 commit 4bce6be
Showing 8 changed files with 426 additions and 38 deletions.
235 changes: 235 additions & 0 deletions src/tranquilo/acceptance_decision.py
@@ -7,6 +7,7 @@
from typing import NamedTuple

import numpy as np
import pandas as pd

from tranquilo.acceptance_sample_size import (
get_acceptance_sample_sizes,
@@ -20,6 +21,7 @@ def get_acceptance_decider(acceptance_decider, acceptance_options):
"classic": _accept_classic,
"naive_noisy": accept_naive_noisy,
"noisy": accept_noisy,
"classic_line_search": accept_classic_line_search,
}

out = get_component(
@@ -85,6 +87,142 @@ def accept_naive_noisy(
return out


def accept_classic_line_search(
subproblem_solution,
state,
history,
*,
wrapped_criterion,
min_improvement,
batch_size,
sample_points,
search_radius_factor,
rng,
):
# ==================================================================================
# Quick return if batch_size is 1

if batch_size == 1:
return _accept_classic(
subproblem_solution=subproblem_solution,
state=state,
history=history,
wrapped_criterion=wrapped_criterion,
min_improvement=min_improvement,
)

# ==================================================================================
# Add candidate to history

candidate_x = subproblem_solution.x
candidate_index = history.add_xs(candidate_x)

eval_info = {candidate_index: 1}

# ==================================================================================
    # Determine whether the candidate is sufficiently close to the border of the
    # trust region, in which case we perform a line search

perform_line_search = _is_on_border(state.trustregion, x=candidate_x, rtol=1e-1)

if perform_line_search:
alpha_grid = _generate_alpha_grid(batch_size)

line_search_xs = _sample_on_line(
start_point=state.x, direction_point=candidate_x, alpha_grid=alpha_grid
)
else:
line_search_xs = None

# ==================================================================================
    # Check whether there are any unallocated evaluations left, and if so, perform
    # speculative sampling

n_evals_line_search = 0 if line_search_xs is None else len(line_search_xs)
n_unallocated_evals = batch_size - 1 - n_evals_line_search

if n_unallocated_evals > 0:
speculative_xs = _generate_speculative_sample(
new_center=candidate_x,
search_radius_factor=search_radius_factor,
trustregion=state.trustregion,
sample_points=sample_points,
n_points=n_unallocated_evals,
history=history,
rng=rng,
)
else:
speculative_xs = None

# ==================================================================================
# Consolidate newly sampled points

if line_search_xs is not None and speculative_xs is not None:
new_xs = np.vstack([line_search_xs, speculative_xs])
elif line_search_xs is not None:
new_xs = line_search_xs
elif speculative_xs is not None:
new_xs = speculative_xs

# ==================================================================================
# Add new points to history and evaluate criterion

new_indices = history.add_xs(new_xs)

for idx in new_indices:
eval_info[idx] = 1

wrapped_criterion(eval_info)

# ==================================================================================
# Calculate rho

candidate_fval = np.mean(history.get_fvals(candidate_index))

actual_improvement = -(candidate_fval - state.fval)

rho = calculate_rho(
actual_improvement=actual_improvement,
expected_improvement=subproblem_solution.expected_improvement,
)

# ==================================================================================
# Check if there are any better points

new_fvals = history.get_fvals(new_indices)
new_fvals = pd.Series({i: np.mean(fvals) for i, fvals in new_fvals.items()})
new_fval_argmin = new_fvals.idxmin()

found_better_candidate = new_fvals.loc[new_fval_argmin] < candidate_fval

# If a better point was found, update the candidates
if found_better_candidate:
candidate_x = history.get_xs(new_fval_argmin)
candidate_fval = new_fvals.loc[new_fval_argmin]
candidate_index = new_fval_argmin

# ==================================================================================
# Calculate the overall improvement using a potentially updated candidate and draw
# the acceptance conclusions based on that.

overall_improvement = -(candidate_fval - state.fval)
is_accepted = overall_improvement >= min_improvement

# ==================================================================================
# Return results

res = _get_acceptance_result(
candidate_x=candidate_x,
candidate_fval=candidate_fval,
candidate_index=candidate_index,
rho=rho,
is_accepted=is_accepted,
old_state=state,
n_evals=1,
)
return res
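To make the budget split above concrete, here is an illustrative calculation (the batch size is hypothetical, not from the source): with batch_size=8 and a candidate on the trust-region border, the line search takes three evaluations and the remainder becomes speculative points.

batch_size = 8
alpha_grid = _generate_alpha_grid(batch_size)  # array([2., 4., 8.])
n_evals_line_search = len(alpha_grid)  # 3 line-search points
n_unallocated_evals = batch_size - 1 - n_evals_line_search  # the "- 1" is the candidate's evaluation
assert n_unallocated_evals == 4  # 4 speculative points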


def _accept_simple(
subproblem_solution,
state,
@@ -247,3 +385,100 @@ def calculate_rho(actual_improvement, expected_improvement):
else:
rho = actual_improvement / expected_improvement
return rho


# ======================================================================================
# Helper functions for line search
# ======================================================================================


def _generate_speculative_sample(
new_center, trustregion, sample_points, n_points, history, search_radius_factor, rng
):
"""Generative a speculative sample.
Args:
new_center (np.ndarray): New center of the trust region.
trustregion (Region): Current trust region.
sample_points (callable): Function to sample points.
n_points (int): Number of points to sample.
history (History): Tranquilo history.
search_radius_factor (float): Factor to multiply the trust region radius by to
get the search radius.
rng (np.random.Generator): Random number generator.
Returns:
np.ndarray: Speculative sample.
"""
search_region = trustregion._replace(
center=new_center, radius=search_radius_factor * trustregion.radius
)

old_indices = history.get_x_indices_in_region(search_region)

old_xs = history.get_xs(old_indices)

model_xs = old_xs

new_xs = sample_points(
search_region,
n_points=n_points,
existing_xs=model_xs,
rng=rng,
)
return new_xs
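A minimal sketch of how the search region is widened around the candidate, using a hypothetical Region stand-in (tranquilo's actual Region class has more machinery):

from typing import NamedTuple

import numpy as np

class Region(NamedTuple):  # hypothetical stand-in for tranquilo's Region
    center: np.ndarray
    radius: float

trustregion = Region(center=np.zeros(2), radius=0.5)
# recenter on the candidate and inflate the radius by search_radius_factor
search_region = trustregion._replace(center=np.ones(2), radius=1.25 * trustregion.radius)
# speculative points are then sampled inside search_region, passing any
# history points that already lie in it as existing_xs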


def _sample_on_line(start_point, direction_point, alpha_grid):
"""Sample points on a line defined by startind and direction points.
Args:
start_point (np.ndarray): Starting point of the line.
direction_point (np.ndarray): Direction point of the line.
alpha_grid (np.ndarray): Grid of alphas to sample points on the line. 0
corresponds to the starting point and 1 corresponds to the direction point.
Points larger than 1 are beyond the direction point.
Returns:
np.ndarray: Sampled points on the line.
"""
xs = start_point + alpha_grid.reshape(-1, 1) * (direction_point - start_point)
return xs
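As a quick sanity check of the parametrization (values are illustrative):

import numpy as np

start = np.zeros(2)
direction = np.array([1.0, 0.5])
alphas = np.array([2.0, 4.0, 8.0])
xs = start + alphas.reshape(-1, 1) * (direction - start)
# xs == array([[2., 1.], [4., 2.], [8., 4.]]); all points lie beyond the
# direction point because every alpha exceeds 1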


def _is_on_border(trustregion, x, rtol):
"""Check whether a point is sufficiently close to the border of a trust region.
Args:
trustregion (Region): Trust region.
x (np.ndarray): Point to check.
rtol (float): Relative tolerance.
Returns:
bool: True if the point is sufficiently close to the border of the trust region.
"""
if trustregion.shape == "sphere":
candidate_on_border = _is_on_sphere_border(trustregion, x=x, rtol=rtol)
else:
candidate_on_border = _is_on_cube_border(trustregion, x=x, rtol=rtol)
return candidate_on_border


def _is_on_sphere_border(trustregion, x, rtol):
x_center_dist = np.linalg.norm(x - trustregion.center, ord=2)
return np.isclose(x_center_dist, trustregion.radius, rtol=rtol)


def _is_on_cube_border(trustregion, x, rtol):
cube_bounds = trustregion.cube_bounds
is_on_lower_border = np.isclose(x, cube_bounds.lower, rtol=rtol).any()
is_on_upper_border = np.isclose(x, cube_bounds.upper, rtol=rtol).any()
return is_on_lower_border or is_on_upper_border
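For intuition: with rtol=1e-1, a point whose distance from the center is within roughly 10% of the radius counts as on a spherical border (illustrative values):

import numpy as np

center, radius = np.zeros(2), 1.0
np.isclose(np.linalg.norm(np.array([0.0, 0.95]) - center, ord=2), radius, rtol=1e-1)  # True
np.isclose(np.linalg.norm(np.array([0.0, 0.50]) - center, ord=2), radius, rtol=1e-1)  # False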


def _generate_alpha_grid(batch_size):
n_points = min(batch_size, 4) - 1
return 2 ** np.arange(1, n_points + 1, dtype=float)
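The grid consists of powers of two and is capped at three points, so the line search only probes beyond the candidate (all alphas exceed 1) and a larger batch does not lengthen it:

_generate_alpha_grid(batch_size=2)   # array([2.])
_generate_alpha_grid(batch_size=4)   # array([2., 4., 8.])
_generate_alpha_grid(batch_size=16)  # array([2., 4., 8.]), capped at 3 points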
33 changes: 19 additions & 14 deletions src/tranquilo/options.py
@@ -4,6 +4,25 @@
import numpy as np


def get_default_stagnation_options(noisy, batch_size):
if noisy:
out = StagnationOptions(
min_relative_step_keep=0.0,
drop=False,
)
elif batch_size > 1:
out = StagnationOptions(
min_relative_step_keep=0.0,
drop=True,
)
else:
out = StagnationOptions(
min_relative_step_keep=0.125,
drop=True,
)
return out
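A quick check of the three regimes, read directly from the branches above:

get_default_stagnation_options(noisy=True, batch_size=1).drop  # False
get_default_stagnation_options(noisy=False, batch_size=8).min_relative_step_keep  # 0.0
get_default_stagnation_options(noisy=False, batch_size=1).min_relative_step_keep  # 0.125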


def get_default_radius_options(x):
return RadiusOptions(initial_radius=0.1 * np.max(np.abs(x)))

@@ -79,20 +98,6 @@ def get_default_n_evals_per_point(noisy, noise_adaptation_options):
return noise_adaptation_options.min_n_evals if noisy else 1


def get_default_stagnation_options(noisy):
if noisy:
out = StagnationOptions(
min_relative_step_keep=0.0,
drop=False,
)
else:
out = StagnationOptions(
min_relative_step_keep=0.125,
drop=True,
)
return out


class StopOptions(NamedTuple):
"""Criteria for stopping without successful convergence."""

13 changes: 8 additions & 5 deletions src/tranquilo/process_arguments.py
@@ -13,7 +13,6 @@
from tranquilo.history import History
from tranquilo.options import (
ConvOptions,
get_default_stagnation_options,
StopOptions,
get_default_acceptance_decider,
get_default_aggregator,
@@ -26,6 +25,7 @@
get_default_radius_options,
get_default_sample_size,
get_default_search_radius_factor,
get_default_stagnation_options,
update_option_bundle,
NoiseAdaptationOptions,
)
@@ -113,10 +113,6 @@ def process_arguments(
x = _process_x(x)
noisy = _process_noisy(noisy)
n_cores = _process_n_cores(n_cores)
stagnation_options = update_option_bundle(
get_default_stagnation_options(noisy),
stagnation_options,
)
sampling_rng = _process_seed(seed)
simulation_rng = _process_seed(seed + 1000)

@@ -142,6 +138,9 @@
acceptance_decider = _process_acceptance_decider(acceptance_decider, noisy)

# process options that depend on arguments with dependent defaults
stagnation_options = update_option_bundle(
get_default_stagnation_options(noisy, batch_size=batch_size), stagnation_options
)
target_sample_size = _process_sample_size(
sample_size=sample_size,
model_type=model_type,
@@ -332,6 +331,10 @@ def _process_n_evals_at_start(n_evals, noisy):
return out


def next_multiple(n, base):
return int(np.ceil(n / base) * base)
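This helper presumably rounds an evaluation count up to a whole number of batches; its call site is not shown in this diff. For example:

next_multiple(5, base=4)  # 8
next_multiple(8, base=4)  # 8
next_multiple(0, base=4)  # 0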


def _process_n_evals_per_point(n_evals, noisy, noise_adaptation_options):
if n_evals is None:
out = get_default_n_evals_per_point(noisy, noise_adaptation_options)
(Diff for the remaining 5 changed files not shown.)
