Improved configuration #9

Open · wants to merge 22 commits into base: master
98 changes: 66 additions & 32 deletions README.md

Large diffs are not rendered by default.

23 changes: 0 additions & 23 deletions mloop_config.ini

This file was deleted.

199 changes: 109 additions & 90 deletions mloop_config.py
@@ -1,110 +1,129 @@
import os
import logging
from collections import namedtuple

logger = logging.getLogger('analysislib_mloop')

try:
    # tomllib is in the standard library from Python 3.11
    import tomllib
except ImportError:
    logger.debug('tomllib not found. Falling back to tomli')
    import tomli as tomllib


MloopParam = namedtuple("MloopParam", ["name", "min", "max", "start"])
RunmanagerGlobal = namedtuple("RunmanagerGlobal", ["name", "expr", "args"])

def is_global_enabled(config, group, name, category):
    """Check whether the given entry is active: its group must be listed in
    ANALYSIS.groups and the entry must not be disabled via `enable = false`."""
    if group in config["ANALYSIS"]["groups"]:
        if config[category][group][name].get("enable", True):
            return True
    return False
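
# Example (editor's sketch, using the mloop_config.toml shipped below, where
# ANALYSIS.groups = ["MOT"]): only entries in an active group pass the filter.
#
#   is_global_enabled(config, "MOT", "x", "MLOOP_PARAMS")            # True
#   is_global_enabled(config, "TEST_FUNCTION", "y", "MLOOP_PARAMS")  # False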

def prepare_globals(global_list, params_val_dict):
    """Compute the runmanager globals to set from the current parameter values."""
    globals_to_set = {}
    for g in global_list:
        target = g.name
        args = [params_val_dict[arg] for arg in g.args]

        assert args, f"Global {target} has no arguments"

        if g.expr:
            # Evaluate the user-supplied expression with the parameter values
            val = eval(g.expr)(*args)
        else:
            # Direct one-to-one mapping
            val = args[0]

        globals_to_set[target] = val

    return globals_to_set
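
# Example (editor's sketch): a plain mapping passes the value straight
# through, while an expr string is evaluated and applied to the listed args,
# as for the test_tuple global defined in mloop_config.toml below.
#
#   globals_list = [
#       RunmanagerGlobal(name="x", expr=None, args=["x"]),
#       RunmanagerGlobal(name="test_tuple", expr="lambda x, y: (x, y)",
#                        args=["y", "z"]),
#   ]
#   prepare_globals(globals_list, {"x": 1.5, "y": 2.0, "z": 3.0})
#   # -> {'x': 1.5, 'test_tuple': (2.0, 3.0)}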


def get(config_path=None):
    """
    Set up the M-LOOP interface using the file specified by config_path.
    """

    # Default to the local directory and the default file name
    if not config_path:
        folder = os.path.dirname(__file__)
        config_path = os.path.join(folder, "mloop_config.toml")

    # TODO: Check if the file exists and copy a default into the specified
    # location if it does not. Also throw an exception, since the default is
    # unlikely to work for the user.
    # if os.path.isfile(config_path): ...

    with open(config_path, "rb") as f:
        config = tomllib.load(f)

    # Flatten the top-level tables into a single parameter dictionary
    to_flatten = ["COMPILATION", "ANALYSIS", "MLOOP"]
    params = {}
    for sect in to_flatten:
        for key, val in config[sect].items():
            params[key] = val

    # Convert cost_key to a tuple so it can index the lyse DataFrame
    params["cost_key"] = tuple(params["cost_key"])

    param_dict = {}
    global_list = []

    # Entries under MLOOP_PARAMS become optimisation parameters; specifying
    # global_name also creates a direct one-to-one mapping to a runmanager
    # global
    for group in config.get("MLOOP_PARAMS", {}):
        for name, param in config["MLOOP_PARAMS"][group].items():
            if is_global_enabled(config, group, name, "MLOOP_PARAMS"):
                param_dict[name] = MloopParam(
                    name=name,
                    min=param["min"],
                    max=param["max"],
                    start=param["start"],
                )

                if "global_name" in param:
                    global_list.append(
                        RunmanagerGlobal(
                            name=param["global_name"],
                            expr=None,
                            args=[name],
                        )
                    )

    # Entries under RUNMANAGER_GLOBALS map one or more optimisation
    # parameters onto a global, optionally through an expression
    for group in config.get("RUNMANAGER_GLOBALS", {}):
        for name, param in config["RUNMANAGER_GLOBALS"][group].items():
            if is_global_enabled(config, group, name, "RUNMANAGER_GLOBALS"):
                global_list.append(
                    RunmanagerGlobal(
                        name=name,
                        expr=param.get('expr', None),
                        args=param['args'],
                    )
                )

    # Check that every M-LOOP parameter is mapped to at least one global
    for ml_name in param_dict:
        if not any(ml_name in g.args for g in global_list):
            raise KeyError(
                f"Parameter {ml_name} in MLOOP_PARAMS doesn't have a "
                "runmanager global mapped to it."
            )

    # Check that every argument of every global is a defined M-LOOP parameter
    for g in global_list:
        for a in g.args:
            if a not in param_dict:
                raise KeyError(f"Argument {a} of global {g.name} doesn't exist.")

    params['mloop_params'] = param_dict
    params['runmanager_globals'] = global_list

    # Store the parameter count and search-space boundaries for the
    # controller interface
    params['num_params'] = len(params['mloop_params'])
    params['min_boundary'] = [p.min for p in params['mloop_params'].values()]
    params['max_boundary'] = [p.max for p in params['mloop_params'].values()]
    params['first_params'] = [p.start for p in params['mloop_params'].values()]

    return params
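
Taken together, a minimal usage sketch (assuming the mloop_config.toml shipped below, where only the MOT group is listed in `ANALYSIS.groups`, so only `x` survives the group filter):

```python
from mloop_config import get

params = get("mloop_config.toml")
print(params["num_params"])    # 1: y and z belong to the inactive TEST_FUNCTION group
print(params["min_boundary"])  # [-5.0]
print(params["max_boundary"])  # [5.0]
print(params["first_params"])  # [-2]
```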


48 changes: 48 additions & 0 deletions mloop_config.toml
@@ -0,0 +1,48 @@
[COMPILATION]
mock = false

[ANALYSIS]
cost_key = ["fake_result", "y"]
maximize = true
ignore_bad = false # true
analysislib_console_log_level = 20
analysislib_file_log_level = 10
groups = ["MOT"]

[MLOOP]
num_training_runs = 5
max_num_runs_without_better_params = 10
max_num_runs = 30
trust_region = 0.05
cost_has_noise = true
no_delay = false
visualisations = false
controller_type = "gaussian_process"
console_log_level = 0

# Automatically creates the mapping to a runmanager global
[MLOOP_PARAMS.MOT.x] # this is part of the MOT group
global_name = "x" # << Specify the runmanager global it maps to
min = -5.0
max = 5.0
start = -2

# The mapping automatically created above by defining global_name is equivalent to
# [RUNMANAGER_GLOBALS.MOT.x] # this is part of the MOT group
# expr = "lambda m: m"
# args = ["x"]

# Create a MLOOP parameter but define its mapping to runmanager globals later
[MLOOP_PARAMS.TEST_FUNCTION.y] # this is part of the TEST_FUNCTION group
min = -5.0
max = 5.0
start = -2

[MLOOP_PARAMS.TEST_FUNCTION.z] # this is part of the TEST_FUNCTION group
min = -5.0
max = 5.0
start = -2

[RUNMANAGER_GLOBALS.TEST_FUNCTION.test_tuple] # this is part of the TEST_FUNCTION group
expr = "lambda x, y: (x, y)"
args = ["y", "z"]