From 7c929c62890a426c31837b417e1707cdf8e5f4a6 Mon Sep 17 00:00:00 2001 From: Yanda Geng Date: Mon, 13 Feb 2023 20:14:00 -0500 Subject: [PATCH 01/22] Added dynamical mapping between MLOOP params and globals and supports for TOML configuration file. --- mloop_config.py | 168 ++++++++++++++++++++++++++++++++++++--------- mloop_config.toml | 48 +++++++++++++ mloop_interface.py | 13 +++- mloop_multishot.py | 17 +++-- 4 files changed, 203 insertions(+), 43 deletions(-) create mode 100644 mloop_config.toml diff --git a/mloop_config.py b/mloop_config.py index 5d82db2..2113d71 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -1,29 +1,75 @@ import os import json import configparser +import tomli import logging +from collections import namedtuple -def get(config_path=None): +MloopParam = namedtuple("MloopParam", ["name", "min", "max", "start"]) +RunmanagerGlobal = namedtuple("RunmanagerGlobal", ["name", "expr", "args"]) + + +def prepare_globals(global_list, params_val_dict): + globals_to_set = {} + for g in global_list: + target = g.name + args = [ params_val_dict[arg] for arg in g.args ] + + assert args + + if g.expr: + val = eval(g.expr)(*args) + else: + val = args[0] + + globals_to_set[target] = val + + return globals_to_set + + +def get(config_paths=None): """Creates config file from specified file, or creates one locally with default values. """ # Default to local directory and default name - if not config_path: + if not config_paths: + config_paths = [] folder = os.path.dirname(__file__) - config_path = os.path.join(folder, "mloop_config.ini") - - # Instantiate RawConfigParser with case sensitive option names - config = configparser.RawConfigParser() - config.optionxform = str + config_paths.append(os.path.join(folder, "mloop_config.toml")) + config_paths.append(os.path.join(folder, "mloop_config.ini")) + + config_path = "" + for path in config_paths: + if os.path.isfile(path): + print(path) + config_path = path + break + + config = None + config_type = None + if config_path: + if config_path.lower().endswith(".ini"): + config_type = "ini" + config.read(config_path) + + # Instantiate RawConfigParser with case sensitive option names + config = configparser.RawConfigParser() + config.optionxform = str + + # Retrieve configuration parameters + config.read(config_path) + elif config_path.lower().endswith(".toml"): + config_type = "toml" + with open(config_path, "rb") as f: + config = tomli.load(f) + else: + raise TypeError("Unknown configuration file type. 
Supports only .ini or .toml.") - # Check if file exists and initialise with defaults if it does not - if os.path.isfile(config_path): - # Retrieve configuration parameters - config.read(config_path) else: print("--- Configuration file not found: generating with default values ---") + config_type = "ini" # Shot compilation parameters config["COMPILATION"] = {} @@ -75,36 +121,90 @@ def get(config_path=None): with open(os.path.join(folder, "mloop_config.ini"), "w+") as f: config.write(f) + to_flatten = ["COMPILATION", "ANALYSIS", "MLOOP"] # iterate over configuration object and store pairs in parameter dictionary params = {} - for sect in config.sections(): - for (key, val) in config.items(sect): - try: - params[key] = json.loads(val) - except json.JSONDecodeError: + for sect in to_flatten: + for (key, val) in config[sect].items(): + # only parse json in ini file, not in toml file + if config_type == "ini": + try: + params[key] = json.loads(val) + except json.JSONDecodeError: + params[key] = val + else: params[key] = val # Convert cost_key to tuple params["cost_key"] = tuple(params["cost_key"]) - # store number of parameters for passing to controller interface - params["num_params"] = len(params["mloop_params"]) - - # get the names of the parameters, if not explicitly specified by user - if "param_names" not in params: - params["param_names"] = list(params["mloop_params"].keys()) - - # get min boundaries for specified variables - params["min_boundary"] = [param["min"] for param in params["mloop_params"].values()] - - # get max boundaries for specified variables - params["max_boundary"] = [param["max"] for param in params["mloop_params"].values()] - - # starting point for search space, default to half point if not defined - params["first_params"] = [ - param["start"] for param in params["mloop_params"].values() - ] - + param_dict = {} + global_list = [] + + if config_type == "ini": + for name, param in config["MLOOP"]["mloop_params"].items(): + param_dict[name] = \ + MloopParam( + name=name, + min=param["min"], + max=param["max"], + start=param["start"] + ) + global_list.append(RunmanagerGlobal( + name=param["global_name"], + expr=None, + args=[name] + ) + ) + + elif config_type == "toml": + for name, param in config["MLOOP_PARAMS"].items(): + param_dict[name] = \ + MloopParam( + name=name, + min=param["min"], + max=param["max"], + start=param["start"] + ) + + if "global_name" in param: + global_list.append(RunmanagerGlobal( + name=param["global_name"], + expr=None, + args=[name] + ) + ) + + if "RUNMANAGER_GLOBALS" in config: + for name, param in config["RUNMANAGER_GLOBALS"].items(): + global_list.append(RunmanagerGlobal( + name=name, + expr=param.get('expr', None), + args=param['args'] + ) + ) + + # check if all mloop params can be mapped to at least one global + for ml_name in param_dict.keys(): + if not any([ (ml_name in g.args) for g in global_list ]): + raise KeyError(f"Parameter {ml_name} in MLOOP_PARAMS doesn't have a Runmanager global mapped to it.") + + # check if all args of any global has been defined in mloop params + for g in global_list: + for a in g.args: + if a not in param_dict: + raise KeyError(f"Argument {a} of global {g.name} doesn't exist.") + + params['mloop_params'] = param_dict + params['runmanager_globals'] = global_list + + params['num_params'] = len(params['mloop_params'].values()) + params['min_boundary'] = [p.min for p in params['mloop_params'].values()] + params['max_boundary'] = [p.max for p in params['mloop_params'].values()] + params['first_params'] = [p.start 
for p in params['mloop_params'].values()] + + + return params diff --git a/mloop_config.toml b/mloop_config.toml new file mode 100644 index 0000000..badc8d1 --- /dev/null +++ b/mloop_config.toml @@ -0,0 +1,48 @@ +[COMPILATION] +mock = false + +[ANALYSIS] +cost_key = ["fake_result_multishot", "y"] +maximize = true +ignore_bad = false +ignore_bad = true +analysislib_console_log_level = 20 +analysislib_file_log_level = 10 + +[MLOOP] +num_training_runs = 5 +max_num_runs_without_better_params = 10 +max_num_runs = 30 +trust_region = 0.05 +cost_has_noise = true +no_delay = false +visualisations = false +controller_type = "gaussian_process" +console_log_level = 0 + +# Automatically creates the mapping to a Runmanager global +[MLOOP_PARAMS.x] +global_name = "x" # << Specify the Runmanager global it maps to +min = -5.0 +max = 5.0 +start = -2 + +# The mapping automatically created above by defining global_name is equivalent to +# [RUNMANAGER_GLOBALS.x] +# expr = "lambda m: m" +# args = ["x"] + +# Create a MLOOP parameter but define its mapping to Runmanager globals later +[MLOOP_PARAMS.y] +min = -5.0 +max = 5.0 +start = -2 + +[MLOOP_PARAMS.z] +min = -5.0 +max = 5.0 +start = -2 + +[RUNMANAGER_GLOBALS.test_tuple] +expr = "lambda m, n: (m, n)" +args = ["y", "z"] diff --git a/mloop_interface.py b/mloop_interface.py index 90871c6..17a6b98 100644 --- a/mloop_interface.py +++ b/mloop_interface.py @@ -43,11 +43,15 @@ def get_next_cost_dict(self, params_dict): # Store current parameters to later verify reported cost corresponds to these # or so mloop_multishot.py can fake a cost if mock = True logger.debug('Storing requested parameters in lyse.routine_storage.') - lyse.routine_storage.params = params_dict['params'] + globals_dict = mloop_config.prepare_globals( + self.config['runmanager_globals'], + dict(zip(self.config['mloop_params'].keys(), params_dict['params'])) + ) + + lyse.routine_storage.params = globals_dict if not self.config['mock']: logger.info('Requesting next shot from experiment interface...') - globals_dict = dict(zip(self.config['mloop_params'], params_dict['params'])) logger.debug('Setting optimization parameter values.') set_globals(globals_dict) logger.debug('Setting mloop_iteration...') @@ -82,7 +86,10 @@ def main(): # Set the optimisation globals to their best results logger.info('Setting best parameters in runmanager.') - globals_dict = dict(zip(interface.config['mloop_params'], controller.best_params)) + globals_dict = mloop_config.prepare_globals( + interface.config['runmanager_globals'], + dict(zip(interface.config['mloop_params'].keys(), controller.best_params)) + ) set_globals(globals_dict) # Return the results in a dictionary diff --git a/mloop_multishot.py b/mloop_multishot.py index a64fae7..1335214 100644 --- a/mloop_multishot.py +++ b/mloop_multishot.py @@ -53,7 +53,7 @@ def check_runmanager(config): logger.debug('Getting globals.') rm_globals = rm.get_globals() - if not all([x in rm_globals for x in config['mloop_params']]): + if not all([x.name in rm_globals for x in config['runmanager_globals']]): msgs.append('Not all optimisation parameters present in runmanager.') logger.debug('Getting run shots state.') @@ -89,17 +89,22 @@ def verify_globals(config): # Get the current runmanager globals logger.debug('Getting values of globals from runmanager.') rm_globals = rm.get_globals() - current_values = [rm_globals[name] for name in config['mloop_params']] + current_values = [rm_globals[g.name] for g in config['runmanager_globals']] # Retrieve the parameter values requested by 
M-LOOP on this iteration
     logger.debug('Getting requested globals values from lyse.routine_storage.')
-    requested_values = lyse.routine_storage.params
-    requested_dict = dict(zip(config['mloop_params'], requested_values))
+    requested_dict = lyse.routine_storage.params
+    print('requested_dict', requested_dict)
+
+    requested_values = [requested_dict[g.name] for g in config['runmanager_globals']]
+
+    print('requested_values', requested_values)
+    print('current_values', current_values)
 
     # Get the parameter values for the shot we just computed the cost for
     logger.debug('Getting lyse dataframe.')
     df = lyse.data()
-    shot_values = [df[name].iloc[-1] for name in config['mloop_params']]
+    shot_values = [df[g.name].iloc[-1] for g in config['runmanager_globals']]
 
     # Verify integrity by cross-checking against what was requested
     if not np.array_equal(current_values, requested_values):
@@ -188,7 +193,7 @@ def cost_analysis(cost_key=(None,), maximize=True, x=None):
     cost_dict = cost_analysis(
         cost_key=config['cost_key'] if not config['mock'] else [],
         maximize=config['maximize'],
-        x=lyse.routine_storage.params[0] if config['mock'] else None,
+        x=list(lyse.routine_storage.params.values())[0] if config['mock'] else None,
     )
 
     if not cost_dict['bad'] or not config['ignore_bad']:

From e3438744a58fd2b9f2ae0e718d3721b0e270c5f1 Mon Sep 17 00:00:00 2001
From: Yanda Geng
Date: Mon, 13 Feb 2023 20:33:51 -0500
Subject: [PATCH 02/22] Updated README.md. Added usage for the dynamic global
 mapping

---
 README.md | 36 +++++++++++++++++++++++++++++++-----
 1 file changed, 31 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index ed9c371..88dd164 100644
--- a/README.md
+++ b/README.md
@@ -41,20 +41,46 @@ runmanager = localhost
 runmanager = 42523
 ```
 
-2. **Configure optimisation settings in `mloop_config.ini`.** There is a tracked version of this file in the repository. At a bare minimum, you should modify the following:
+2. **Configure optimisation settings in `mloop_config.toml`.** TOML is a high-level configuration file format and its description can be found [here](https://toml.io/). There is a tracked version of this file in the repository. At a bare minimum, you should modify the following:
 
-```ini
+```toml
 [ANALYSIS]
 cost_key = ["fake_result", "y"]
 maximize = true
 
-[MLOOP]
-mloop_params = {"x": {"min": -5.0, "max": 5.0, "start": -2.0} }
+[MLOOP_PARAMS.x]
+global_name = "x"
+min = -5.0
+max = 5.0
+start = -2
 ```
 
  * `cost_key`: Column of the lyse dataframe to derive the cost from, specified as a `[routine_name, result_name]` pair. The present cost comes from the most recent value in this column, i.e. `cost = df[cost_key].iloc[-1]`.
  * `maximize`: Whether or not to negate the above value, since M-LOOP will minimize the cost.
- * `mloop_params`: Dictionary of optimisation parameters, specified as (`global_name`, `dict`) pairs, where `dict` is used to create `min_boundary`, `max_boundary`, and `first_params` lists to meet [M-LOOP specifications](https://m-loop.readthedocs.io/en/latest/tutorials.html#parameter-settings).
+ * `MLOOP_PARAMS`: Dictionary of optimisation parameters controlled by MLOOP
+   * `global_name` defines the global it maps to in runmanager.
+   * `min`, `max`, `start` correspond to `min_boundary`, `max_boundary`, and `first_params` lists to meet [M-LOOP specifications](https://m-loop.readthedocs.io/en/latest/tutorials.html#parameter-settings).
+
+You may also specify a more complicated mapping between the paramaters controller by MLOOP and globals in runmanager. 
+ +```toml +[MLOOP_PARAMS.y] +min = -5.0 +max = 5.0 +start = -2 + +[MLOOP_PARAMS.z] +min = -5.0 +max = 5.0 +start = -2 + +[RUNMANAGER_GLOBALS.test_tuple] +expr = "lambda m, n: (m, n)" +args = ["y", "z"] +``` + + * `y` and `z` are two MLOOP parameters that don't have a `global_name` defined. + * Instead, an dictionary entry in `RUNMANAGER_GLOBALS`, targeting global `test_tuple` in runmanager, is explicitly defined here with a customized mapping `lambda m, n: (m, n)`, which takes `y` and `z` as parameters. Every time, the tuple `(y, z)` will be passed to `test_tuple` in runmanager. 3. **Load the analysis routine that computes the quantity you want to optimise into [lyse](https://github.com/labscript-suite/lyse).** This routine should update `cost_key` of the lyse dataframe by calling the `save_result` (or its variants) of a `lyse.Run`. For the above parameters, this would be `fake_result.py` containing: From b31dcabeceb5cd00fa6f74c174be65668bd26c6f Mon Sep 17 00:00:00 2001 From: spielman Date: Thu, 18 Jan 2024 11:11:57 -0500 Subject: [PATCH 03/22] Implemented a lyse speedup by using n_sequences=1 when getting the dataframe from lyse. This is a repeat of a commit from zakv that Russ has never approved. --- mloop_multishot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mloop_multishot.py b/mloop_multishot.py index 1335214..5784446 100644 --- a/mloop_multishot.py +++ b/mloop_multishot.py @@ -103,7 +103,7 @@ def verify_globals(config): # Get the parameter values for the shot we just computed the cost for logger.debug('Getting lyse dataframe.') - df = lyse.data() + df = lyse.data(n_sequences=1) shot_values = [df[g.name].iloc[-1] for g in config['runmanager_globals']] # Verify integrity by cross-checking against what was requested @@ -139,7 +139,7 @@ def cost_analysis(cost_key=(None,), maximize=True, x=None): # Retrieve current lyse DataFrame logger.debug('Getting lyse dataframe.') - df = lyse.data() + df = lyse.data(n_sequences=1) # Use the most recent shot ix = -1 From 0eeef63159395ee9fd480ac8ff58451c83fa46ff Mon Sep 17 00:00:00 2001 From: spielman Date: Thu, 18 Jan 2024 13:29:11 -0500 Subject: [PATCH 04/22] Corrected error in trial TOML file. --- mloop_config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mloop_config.toml b/mloop_config.toml index badc8d1..1c03199 100644 --- a/mloop_config.toml +++ b/mloop_config.toml @@ -5,7 +5,7 @@ mock = false cost_key = ["fake_result_multishot", "y"] maximize = true ignore_bad = false -ignore_bad = true +# ignore_bad = true analysislib_console_log_level = 20 analysislib_file_log_level = 10 From e5f0d91f0d9e414f933c1a611e135760074bfc0a Mon Sep 17 00:00:00 2001 From: spielman Date: Thu, 18 Jan 2024 13:37:59 -0500 Subject: [PATCH 05/22] Added "enable" as an option to the TOML parser so we can easily enable and disable variables. I think what I really want to do is define a "group" label to make it easy to optimize some collection of variables. 
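
A minimal sketch of the intended usage, reusing the placeholder parameters from mloop_config.toml: a parameter whose `enable` flag is false keeps its entry in the file but is skipped by the parser.

```toml
[MLOOP_PARAMS.x]
enable = true    # optimized on this run
global_name = "x"
min = -5.0
max = 5.0
start = -2

[MLOOP_PARAMS.y]
enable = false   # entry is kept, but excluded from this optimization
min = -5.0
max = 5.0
start = -2
```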
--- mloop_config.py | 44 +++++++++++++++++++++++--------------------- mloop_config.toml | 4 ++++ 2 files changed, 27 insertions(+), 21 deletions(-) diff --git a/mloop_config.py b/mloop_config.py index 2113d71..8b8f6ee 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -159,30 +159,32 @@ def get(config_paths=None): elif config_type == "toml": for name, param in config["MLOOP_PARAMS"].items(): - param_dict[name] = \ - MloopParam( - name=name, - min=param["min"], - max=param["max"], - start=param["start"] - ) - - if "global_name" in param: - global_list.append(RunmanagerGlobal( - name=param["global_name"], - expr=None, - args=[name] + if ("enable" in param) and param["enable"]: + param_dict[name] = \ + MloopParam( + name=name, + min=param["min"], + max=param["max"], + start=param["start"] ) - ) + + if "global_name" in param: + global_list.append(RunmanagerGlobal( + name=param["global_name"], + expr=None, + args=[name] + ) + ) if "RUNMANAGER_GLOBALS" in config: - for name, param in config["RUNMANAGER_GLOBALS"].items(): - global_list.append(RunmanagerGlobal( - name=name, - expr=param.get('expr', None), - args=param['args'] - ) - ) + if ("enable" in param) and param["enable"]: + for name, param in config["RUNMANAGER_GLOBALS"].items(): + global_list.append(RunmanagerGlobal( + name=name, + expr=param.get('expr', None), + args=param['args'] + ) + ) # check if all mloop params can be mapped to at least one global for ml_name in param_dict.keys(): diff --git a/mloop_config.toml b/mloop_config.toml index 1c03199..fda95fa 100644 --- a/mloop_config.toml +++ b/mloop_config.toml @@ -22,6 +22,7 @@ console_log_level = 0 # Automatically creates the mapping to a Runmanager global [MLOOP_PARAMS.x] +enable = true global_name = "x" # << Specify the Runmanager global it maps to min = -5.0 max = 5.0 @@ -34,15 +35,18 @@ start = -2 # Create a MLOOP parameter but define its mapping to Runmanager globals later [MLOOP_PARAMS.y] +enable = false min = -5.0 max = 5.0 start = -2 [MLOOP_PARAMS.z] +enable = false min = -5.0 max = 5.0 start = -2 [RUNMANAGER_GLOBALS.test_tuple] +enable = false expr = "lambda m, n: (m, n)" args = ["y", "z"] From 92aa7457bd59d01e2743f0696ae37063fe94c96d Mon Sep 17 00:00:00 2001 From: spielman Date: Thu, 18 Jan 2024 15:58:00 -0500 Subject: [PATCH 06/22] Added groups option to easily enable and disable parameters. 
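
A minimal sketch of the intended syntax, again using the placeholder parameters from mloop_config.toml: each parameter is tagged with a `group`, and only parameters whose group appears in the `groups` list under `[MLOOP]` are optimized.

```toml
[MLOOP]
groups = ["MOT"]          # groups that are active for this optimization

[MLOOP_PARAMS.x]
group = "MOT"             # active: "MOT" is listed in groups
global_name = "x"
min = -5.0
max = 5.0
start = -2

[MLOOP_PARAMS.y]
group = "TEST_FUNCTION"   # inactive: its group is not listed
min = -5.0
max = 5.0
start = -2
```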
--- mloop_config.py | 6 ++++-- mloop_config.toml | 10 ++++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/mloop_config.py b/mloop_config.py index 8b8f6ee..bb93b01 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -115,6 +115,8 @@ def get(config_paths=None): config["MLOOP"]["controller_type"] = '"gaussian_process"' # Mute output from MLOOP optimiser config["MLOOP"]["console_log_level"] = '"NOTSET"' + # Which groups to actually optimize + config["MLOOP"]["groups"] = [] # Write to file folder = os.path.dirname(__file__) @@ -159,7 +161,7 @@ def get(config_paths=None): elif config_type == "toml": for name, param in config["MLOOP_PARAMS"].items(): - if ("enable" in param) and param["enable"]: + if ("group" in param) and (param["group"] in config["MLOOP"]["groups"]): param_dict[name] = \ MloopParam( name=name, @@ -177,7 +179,7 @@ def get(config_paths=None): ) if "RUNMANAGER_GLOBALS" in config: - if ("enable" in param) and param["enable"]: + if ("group" in param) and (param["group"] in config["MLOOP"]["groups"]): for name, param in config["RUNMANAGER_GLOBALS"].items(): global_list.append(RunmanagerGlobal( name=name, diff --git a/mloop_config.toml b/mloop_config.toml index fda95fa..ecff6a4 100644 --- a/mloop_config.toml +++ b/mloop_config.toml @@ -19,10 +19,11 @@ no_delay = false visualisations = false controller_type = "gaussian_process" console_log_level = 0 +groups = ["MOT"] # Automatically creates the mapping to a Runmanager global [MLOOP_PARAMS.x] -enable = true +group = "MOT" global_name = "x" # << Specify the Runmanager global it maps to min = -5.0 max = 5.0 @@ -30,23 +31,24 @@ start = -2 # The mapping automatically created above by defining global_name is equivalent to # [RUNMANAGER_GLOBALS.x] +# group = "MOT" # expr = "lambda m: m" # args = ["x"] # Create a MLOOP parameter but define its mapping to Runmanager globals later [MLOOP_PARAMS.y] -enable = false +group = "TEST_FUNCTION" min = -5.0 max = 5.0 start = -2 [MLOOP_PARAMS.z] -enable = false +group = "TEST_FUNCTION" min = -5.0 max = 5.0 start = -2 [RUNMANAGER_GLOBALS.test_tuple] -enable = false +group = "TEST_FUNCTION" expr = "lambda m, n: (m, n)" args = ["y", "z"] From 21f5f99e1d3e8a61a3b57334623a2b8725c755d0 Mon Sep 17 00:00:00 2001 From: spielman Date: Thu, 18 Jan 2024 16:23:47 -0500 Subject: [PATCH 07/22] Simplified group syntax --- mloop_config.py | 50 ++++++++++++++++++++++++----------------------- mloop_config.toml | 15 +++++--------- 2 files changed, 31 insertions(+), 34 deletions(-) diff --git a/mloop_config.py b/mloop_config.py index bb93b01..d44869d 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -160,33 +160,35 @@ def get(config_paths=None): ) elif config_type == "toml": - for name, param in config["MLOOP_PARAMS"].items(): - if ("group" in param) and (param["group"] in config["MLOOP"]["groups"]): - param_dict[name] = \ - MloopParam( - name=name, - min=param["min"], - max=param["max"], - start=param["start"] - ) - - if "global_name" in param: - global_list.append(RunmanagerGlobal( - name=param["global_name"], - expr=None, - args=[name] + for group in config["MLOOP_PARAMS"]: + if group in config["griups"] + for name, param in config["MLOOP_PARAMS"][group]: + param_dict[name] = \ + MloopParam( + name=name, + min=param["min"], + max=param["max"], + start=param["start"] ) - ) + + if "global_name" in param: + global_list.append(RunmanagerGlobal( + name=param["global_name"], + expr=None, + args=[name] + ) + ) if "RUNMANAGER_GLOBALS" in config: - if ("group" in param) and (param["group"] in 
config["MLOOP"]["groups"]): - for name, param in config["RUNMANAGER_GLOBALS"].items(): - global_list.append(RunmanagerGlobal( - name=name, - expr=param.get('expr', None), - args=param['args'] - ) - ) + for group in config["MLOOP_PARAMS"]: + if group in config["groups"]: + for name, param in config["RUNMANAGER_GLOBALS"][group]: + global_list.append(RunmanagerGlobal( + name=name, + expr=param.get('expr', None), + args=param['args'] + ) + ) # check if all mloop params can be mapped to at least one global for ml_name in param_dict.keys(): diff --git a/mloop_config.toml b/mloop_config.toml index ecff6a4..2113bc3 100644 --- a/mloop_config.toml +++ b/mloop_config.toml @@ -22,33 +22,28 @@ console_log_level = 0 groups = ["MOT"] # Automatically creates the mapping to a Runmanager global -[MLOOP_PARAMS.x] -group = "MOT" +[MLOOP_PARAMS.MOT.x] global_name = "x" # << Specify the Runmanager global it maps to min = -5.0 max = 5.0 start = -2 # The mapping automatically created above by defining global_name is equivalent to -# [RUNMANAGER_GLOBALS.x] -# group = "MOT" +# [RUNMANAGER_GLOBALS.MOT.x] # expr = "lambda m: m" # args = ["x"] # Create a MLOOP parameter but define its mapping to Runmanager globals later -[MLOOP_PARAMS.y] -group = "TEST_FUNCTION" +[MLOOP_PARAMS.TEST_FUNCTION.y] min = -5.0 max = 5.0 start = -2 -[MLOOP_PARAMS.z] -group = "TEST_FUNCTION" +[MLOOP_PARAMS.TEST_FUNCTION.z] min = -5.0 max = 5.0 start = -2 -[RUNMANAGER_GLOBALS.test_tuple] -group = "TEST_FUNCTION" +[RUNMANAGER_GLOBALS.TEST_FUNCTION.test_tuple] expr = "lambda m, n: (m, n)" args = ["y", "z"] From 2e4c650cd5fa5448955a26a9ef4b3d6ee5b08cae Mon Sep 17 00:00:00 2001 From: spielman Date: Thu, 18 Jan 2024 16:26:55 -0500 Subject: [PATCH 08/22] typeo corrected --- mloop_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mloop_config.py b/mloop_config.py index d44869d..760ee5d 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -161,7 +161,7 @@ def get(config_paths=None): elif config_type == "toml": for group in config["MLOOP_PARAMS"]: - if group in config["griups"] + if group in config["griups"]: for name, param in config["MLOOP_PARAMS"][group]: param_dict[name] = \ MloopParam( From ca94104cb87b9f3d767a50e5de10e667ff98ca5c Mon Sep 17 00:00:00 2001 From: spielman Date: Thu, 18 Jan 2024 16:34:45 -0500 Subject: [PATCH 09/22] another typo --- mloop_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mloop_config.py b/mloop_config.py index 760ee5d..7161256 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -161,7 +161,7 @@ def get(config_paths=None): elif config_type == "toml": for group in config["MLOOP_PARAMS"]: - if group in config["griups"]: + if group in config["groups"]: for name, param in config["MLOOP_PARAMS"][group]: param_dict[name] = \ MloopParam( From b5d7efda8ea904f7f3603392407cbedb28680132 Mon Sep 17 00:00:00 2001 From: Spielman Lab Date: Thu, 18 Jan 2024 16:48:09 -0500 Subject: [PATCH 10/22] Fixed easy bugs --- mloop_config.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mloop_config.py b/mloop_config.py index 7161256..385a081 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -161,8 +161,8 @@ def get(config_paths=None): elif config_type == "toml": for group in config["MLOOP_PARAMS"]: - if group in config["groups"]: - for name, param in config["MLOOP_PARAMS"][group]: + if group in config["MLOOP"]["groups"]: + for name, param in config["MLOOP_PARAMS"][group].items(): param_dict[name] = \ MloopParam( name=name, @@ -180,9 +180,9 @@ def 
get(config_paths=None): ) if "RUNMANAGER_GLOBALS" in config: - for group in config["MLOOP_PARAMS"]: - if group in config["groups"]: - for name, param in config["RUNMANAGER_GLOBALS"][group]: + for group in config["RUNMANAGER_GLOBALS"]: + if group in config["MLOOP"]["groups"]: + for name, param in config["RUNMANAGER_GLOBALS"][group].items(): global_list.append(RunmanagerGlobal( name=name, expr=param.get('expr', None), From 74091032c76dfef7504717a69fbe29168a4ad8d3 Mon Sep 17 00:00:00 2001 From: Spielman Lab Date: Fri, 19 Jan 2024 09:58:22 -0500 Subject: [PATCH 11/22] Remove .ini capability and added enable option. --- mloop_config.py | 163 ++++++++++------------------------------ mloop_config.toml | 11 ++- mloop_config_demo.toml | 49 ++++++++++++ mloop_config_local.toml | 48 ++++++++++++ 4 files changed, 146 insertions(+), 125 deletions(-) create mode 100644 mloop_config_demo.toml create mode 100644 mloop_config_local.toml diff --git a/mloop_config.py b/mloop_config.py index 385a081..5915bdf 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -9,6 +9,16 @@ MloopParam = namedtuple("MloopParam", ["name", "min", "max", "start"]) RunmanagerGlobal = namedtuple("RunmanagerGlobal", ["name", "expr", "args"]) +def is_global_active(config, group, category) + """ + We check to see if the requeted global has been activated or not + """ + + if group in config["MLOOP"]["groups"]: + if ("enable" not in config[category][group]) or config[category][group]: + return True + + return False def prepare_globals(global_list, params_val_dict): globals_to_set = {} @@ -48,94 +58,19 @@ def get(config_paths=None): break config = None - config_type = None if config_path: - if config_path.lower().endswith(".ini"): - config_type = "ini" - config.read(config_path) - - # Instantiate RawConfigParser with case sensitive option names - config = configparser.RawConfigParser() - config.optionxform = str - - # Retrieve configuration parameters - config.read(config_path) - elif config_path.lower().endswith(".toml"): - config_type = "toml" - with open(config_path, "rb") as f: - config = tomli.load(f) - else: - raise TypeError("Unknown configuration file type. Supports only .ini or .toml.") - + with open(config_path, "rb") as f: + config = tomli.load(f) else: - print("--- Configuration file not found: generating with default values ---") - config_type = "ini" - - # Shot compilation parameters - config["COMPILATION"] = {} - config["COMPILATION"]["mock"] = 'false' - - # Analayis parameters - config["ANALYSIS"] = {} - # lyse DataFrame key to optimise - config["ANALYSIS"]["cost_key"] = '["fake_result", "y"]' - # Maximize cost_key (negate when reporting cost) - config["ANALYSIS"]["maximize"] = 'true' - # Don't report to M-LOOP if a shot is deemed bad - config["ANALYSIS"]["ignore_bad"] = 'true' - # Control log level for logging to console from analysislib-mloop. Not to be - # confused with MLOOP's console_log_level option for its logger. - config["ANALYSIS"]["analysislib_console_log_level"] = '"INFO"' - # Control log level for logging to file from analysislib-mloop. Not to be - # confused with MLOOP's file_log_level option for its logger. 
- config["ANALYSIS"]["analysislib_file_log_level"] = '"DEBUG"' - - # M-LOOP parameters - config["MLOOP"] = {} - # Parameters mloop varies during optimisation - config["MLOOP"][ - "mloop_params" - ] = '{"x": {"min": -5.0, "max": 5.0, "start": -2.0} }' - # Number of training runs - config["MLOOP"]["num_training_runs"] = '5' - # Maximum number of iterations - config["MLOOP"]["max_num_runs_without_better_params"] = '10' - # Maximum number of iterations - config["MLOOP"]["max_num_runs"] = '20' - # Maximum % move distance from best params - config["MLOOP"]["trust_region"] = '0.5' - # Maximum number of iterations - config["MLOOP"]["cost_has_noise"] = 'true' - # Force mloop to return a parameter prediction before it is ready - config["MLOOP"]["no_delay"] = 'false' - # Display visualisations - config["MLOOP"]["visualisations"] = 'false' - # Type of learner to use in optimisation: - # [gaussian_process, random, nelder_mead] - config["MLOOP"]["controller_type"] = '"gaussian_process"' - # Mute output from MLOOP optimiser - config["MLOOP"]["console_log_level"] = '"NOTSET"' - # Which groups to actually optimize - config["MLOOP"]["groups"] = [] - - # Write to file - folder = os.path.dirname(__file__) - with open(os.path.join(folder, "mloop_config.ini"), "w+") as f: - config.write(f) + raise RuntimeError("Unknown configuration file type. Supports only .toml.") + to_flatten = ["COMPILATION", "ANALYSIS", "MLOOP"] # iterate over configuration object and store pairs in parameter dictionary params = {} for sect in to_flatten: for (key, val) in config[sect].items(): - # only parse json in ini file, not in toml file - if config_type == "ini": - try: - params[key] = json.loads(val) - except json.JSONDecodeError: - params[key] = val - else: - params[key] = val + params[key] = val # Convert cost_key to tuple params["cost_key"] = tuple(params["cost_key"]) @@ -143,52 +78,36 @@ def get(config_paths=None): param_dict = {} global_list = [] - if config_type == "ini": - for name, param in config["MLOOP"]["mloop_params"].items(): - param_dict[name] = \ - MloopParam( - name=name, - min=param["min"], - max=param["max"], - start=param["start"] - ) - global_list.append(RunmanagerGlobal( + for group in config.get("MLOOP_PARAMS", {}): + for name, param in config["MLOOP_PARAMS"][group].items(): + if is_global_active(config, group, "MLOOP_PARAMS"): + param_dict[name] = MloopParam( + name=name, + min=param["min"], + max=param["max"], + start=param["start"] + ) + + if "global_name" in param: + global_list.append( + RunmanagerGlobal( name=param["global_name"], expr=None, args=[name] - ) - ) - - elif config_type == "toml": - for group in config["MLOOP_PARAMS"]: - if group in config["MLOOP"]["groups"]: - for name, param in config["MLOOP_PARAMS"][group].items(): - param_dict[name] = \ - MloopParam( - name=name, - min=param["min"], - max=param["max"], - start=param["start"] - ) - - if "global_name" in param: - global_list.append(RunmanagerGlobal( - name=param["global_name"], - expr=None, - args=[name] - ) - ) - - if "RUNMANAGER_GLOBALS" in config: - for group in config["RUNMANAGER_GLOBALS"]: - if group in config["MLOOP"]["groups"]: - for name, param in config["RUNMANAGER_GLOBALS"][group].items(): - global_list.append(RunmanagerGlobal( - name=name, - expr=param.get('expr', None), - args=param['args'] - ) ) + ) + + for group in config.get("RUNMANAGER_GLOBALS", {}): + for name, param in config["RUNMANAGER_GLOBALS"][group].items(): + if is_global_active(config, group, "RUNMANAGER_GLOBALS"): + + global_list.append( + RunmanagerGlobal( + 
name=name, + expr=param.get('expr', None), + args=param['args'] + ) + ) # check if all mloop params can be mapped to at least one global for ml_name in param_dict.keys(): diff --git a/mloop_config.toml b/mloop_config.toml index 2113bc3..0ee9ed7 100644 --- a/mloop_config.toml +++ b/mloop_config.toml @@ -2,7 +2,7 @@ mock = false [ANALYSIS] -cost_key = ["fake_result_multishot", "y"] +cost_key = ["zTOF_singleShot", "max_OD"] maximize = true ignore_bad = false # ignore_bad = true @@ -22,28 +22,33 @@ console_log_level = 0 groups = ["MOT"] # Automatically creates the mapping to a Runmanager global -[MLOOP_PARAMS.MOT.x] -global_name = "x" # << Specify the Runmanager global it maps to +[MLOOP_PARAMS.MOT.test_mloop] +global_name = "test_mloop" # << Specify the Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true min = -5.0 max = 5.0 start = -2 # The mapping automatically created above by defining global_name is equivalent to # [RUNMANAGER_GLOBALS.MOT.x] +# enable = false # expr = "lambda m: m" # args = ["x"] # Create a MLOOP parameter but define its mapping to Runmanager globals later [MLOOP_PARAMS.TEST_FUNCTION.y] +enable = true min = -5.0 max = 5.0 start = -2 [MLOOP_PARAMS.TEST_FUNCTION.z] +enable = true min = -5.0 max = 5.0 start = -2 [RUNMANAGER_GLOBALS.TEST_FUNCTION.test_tuple] +enable = true expr = "lambda m, n: (m, n)" args = ["y", "z"] diff --git a/mloop_config_demo.toml b/mloop_config_demo.toml new file mode 100644 index 0000000..2113bc3 --- /dev/null +++ b/mloop_config_demo.toml @@ -0,0 +1,49 @@ +[COMPILATION] +mock = false + +[ANALYSIS] +cost_key = ["fake_result_multishot", "y"] +maximize = true +ignore_bad = false +# ignore_bad = true +analysislib_console_log_level = 20 +analysislib_file_log_level = 10 + +[MLOOP] +num_training_runs = 5 +max_num_runs_without_better_params = 10 +max_num_runs = 30 +trust_region = 0.05 +cost_has_noise = true +no_delay = false +visualisations = false +controller_type = "gaussian_process" +console_log_level = 0 +groups = ["MOT"] + +# Automatically creates the mapping to a Runmanager global +[MLOOP_PARAMS.MOT.x] +global_name = "x" # << Specify the Runmanager global it maps to +min = -5.0 +max = 5.0 +start = -2 + +# The mapping automatically created above by defining global_name is equivalent to +# [RUNMANAGER_GLOBALS.MOT.x] +# expr = "lambda m: m" +# args = ["x"] + +# Create a MLOOP parameter but define its mapping to Runmanager globals later +[MLOOP_PARAMS.TEST_FUNCTION.y] +min = -5.0 +max = 5.0 +start = -2 + +[MLOOP_PARAMS.TEST_FUNCTION.z] +min = -5.0 +max = 5.0 +start = -2 + +[RUNMANAGER_GLOBALS.TEST_FUNCTION.test_tuple] +expr = "lambda m, n: (m, n)" +args = ["y", "z"] diff --git a/mloop_config_local.toml b/mloop_config_local.toml new file mode 100644 index 0000000..0b657aa --- /dev/null +++ b/mloop_config_local.toml @@ -0,0 +1,48 @@ +[COMPILATION] +mock = false + +[ANALYSIS] +cost_key = ["zTOF_singleShot", "max_OD"] +maximize = true +ignore_bad = false +# ignore_bad = true +analysislib_console_log_level = 20 +analysislib_file_log_level = 10 + +[MLOOP] +num_training_runs = 5 +max_num_runs_without_better_params = 10 +max_num_runs = 30 +trust_region = 0.05 +cost_has_noise = true +no_delay = false +visualisations = false +controller_type = "gaussian_process" +console_log_level = 0 + +# Automatically creates the mapping to a Runmanager global +[MLOOP_PARAMS.test_mloop] +global_name = "test_mloop" # << Specify the Runmanager global it maps to +min = -5.0 +max = 5.0 +start = -2 + +# The mapping automatically created 
above by defining global_name is equivalent to +# [RUNMANAGER_GLOBALS.x] +# expr = "lambda m: m" +# args = ["x"] + +# Create a MLOOP parameter but define its mapping to Runmanager globals later +# [MLOOP_PARAMS.y] +# min = -5.0 +# max = 5.0 +# start = -2 + +# [MLOOP_PARAMS.z] +# min = -5.0 +# max = 5.0 +# start = -2 + +# [RUNMANAGER_GLOBALS.test_tuple] +# expr = "lambda m, n: (m, n)" +# args = ["y", "z"] From dfe243f95a010a401511e478649f2e7396f2fc58 Mon Sep 17 00:00:00 2001 From: Spielman Lab Date: Sat, 20 Jan 2024 09:58:11 -0500 Subject: [PATCH 12/22] Current working version --- mloop_config.ini | 23 ------- mloop_config.py | 40 ++++++----- mloop_config.toml | 171 +++++++++++++++++++++++++++++++++++++--------- 3 files changed, 158 insertions(+), 76 deletions(-) delete mode 100644 mloop_config.ini diff --git a/mloop_config.ini b/mloop_config.ini deleted file mode 100644 index b1145e4..0000000 --- a/mloop_config.ini +++ /dev/null @@ -1,23 +0,0 @@ -[COMPILATION] -mock = false - -[ANALYSIS] -cost_key = ["fake_result", "y"] -; cost_key = ["fake_result_multishot", "y"] -maximize = true -ignore_bad = false -; ignore_bad = true -analysislib_console_log_level = "INFO" -analysislib_file_log_level = "DEBUG" - -[MLOOP] -mloop_params = {"x": {"min": -5.0, "max": 5.0, "start": -2.0} } -num_training_runs = 5 -max_num_runs_without_better_params = 10 -max_num_runs = 20 -trust_region = 0.5 -cost_has_noise = true -no_delay = false -visualisations = false -controller_type = "gaussian_process" -console_log_level = "NOTSET" \ No newline at end of file diff --git a/mloop_config.py b/mloop_config.py index 5915bdf..beb6e03 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -9,13 +9,13 @@ MloopParam = namedtuple("MloopParam", ["name", "min", "max", "start"]) RunmanagerGlobal = namedtuple("RunmanagerGlobal", ["name", "expr", "args"]) -def is_global_active(config, group, category) +def is_global_active(config, group, name, category): """ We check to see if the requeted global has been activated or not """ if group in config["MLOOP"]["groups"]: - if ("enable" not in config[category][group]) or config[category][group]: + if config[category][group][name].get("enable", True): return True return False @@ -80,7 +80,7 @@ def get(config_paths=None): for group in config.get("MLOOP_PARAMS", {}): for name, param in config["MLOOP_PARAMS"][group].items(): - if is_global_active(config, group, "MLOOP_PARAMS"): + if is_global_active(config, group, name, "MLOOP_PARAMS"): param_dict[name] = MloopParam( name=name, min=param["min"], @@ -99,7 +99,7 @@ def get(config_paths=None): for group in config.get("RUNMANAGER_GLOBALS", {}): for name, param in config["RUNMANAGER_GLOBALS"][group].items(): - if is_global_active(config, group, "RUNMANAGER_GLOBALS"): + if is_global_active(config, group, name, "RUNMANAGER_GLOBALS"): global_list.append( RunmanagerGlobal( @@ -109,26 +109,24 @@ def get(config_paths=None): ) ) - # check if all mloop params can be mapped to at least one global - for ml_name in param_dict.keys(): - if not any([ (ml_name in g.args) for g in global_list ]): - raise KeyError(f"Parameter {ml_name} in MLOOP_PARAMS doesn't have a Runmanager global mapped to it.") + # check if all mloop params can be mapped to at least one global + for ml_name in param_dict.keys(): + if not any([ (ml_name in g.args) for g in global_list ]): + raise KeyError(f"Parameter {ml_name} in MLOOP_PARAMS doesn't have a Runmanager global mapped to it.") - # check if all args of any global has been defined in mloop params - for g in global_list: - for a in 
g.args: - if a not in param_dict: - raise KeyError(f"Argument {a} of global {g.name} doesn't exist.") - - params['mloop_params'] = param_dict - params['runmanager_globals'] = global_list + # check if all args of any global has been defined in mloop params + for g in global_list: + for a in g.args: + if a not in param_dict: + raise KeyError(f"Argument {a} of global {g.name} doesn't exist.") - params['num_params'] = len(params['mloop_params'].values()) - params['min_boundary'] = [p.min for p in params['mloop_params'].values()] - params['max_boundary'] = [p.max for p in params['mloop_params'].values()] - params['first_params'] = [p.start for p in params['mloop_params'].values()] - + params['mloop_params'] = param_dict + params['runmanager_globals'] = global_list + params['num_params'] = len(params['mloop_params'].values()) + params['min_boundary'] = [p.min for p in params['mloop_params'].values()] + params['max_boundary'] = [p.max for p in params['mloop_params'].values()] + params['first_params'] = [p.start for p in params['mloop_params'].values()] return params diff --git a/mloop_config.toml b/mloop_config.toml index 0ee9ed7..fdc84bb 100644 --- a/mloop_config.toml +++ b/mloop_config.toml @@ -2,53 +2,160 @@ mock = false [ANALYSIS] -cost_key = ["zTOF_singleShot", "max_OD"] +cost_key = ["zTOF_singleShot", "Nb"] maximize = true ignore_bad = false -# ignore_bad = true analysislib_console_log_level = 20 analysislib_file_log_level = 10 [MLOOP] num_training_runs = 5 -max_num_runs_without_better_params = 10 -max_num_runs = 30 +max_num_runs_without_better_params = 80 +max_num_runs = 400 trust_region = 0.05 cost_has_noise = true -no_delay = false +no_delay = true # false visualisations = false controller_type = "gaussian_process" console_log_level = 0 -groups = ["MOT"] +groups = ["CMOT", "MOL", "MTRAP"] -# Automatically creates the mapping to a Runmanager global -[MLOOP_PARAMS.MOT.test_mloop] -global_name = "test_mloop" # << Specify the Runmanager global it maps to +# CMOT Parameters + +[MLOOP_PARAMS.CMOT.CMOTCaptureWidth] +global_name = "CMOTCaptureWidth" # Runmanager global it maps to enable = true # This is an optinal parameter that defaults to true -min = -5.0 -max = 5.0 -start = -2 +min = 0.01 +max = 0.5 +start = 0.05 -# The mapping automatically created above by defining global_name is equivalent to -# [RUNMANAGER_GLOBALS.MOT.x] -# enable = false -# expr = "lambda m: m" -# args = ["x"] - -# Create a MLOOP parameter but define its mapping to Runmanager globals later -[MLOOP_PARAMS.TEST_FUNCTION.y] -enable = true -min = -5.0 -max = 5.0 -start = -2 +[MLOOP_PARAMS.CMOT.CMOTCurrent] +global_name = "CMOTCurrent" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = 5 +max = 40 +start = 20 + +[MLOOP_PARAMS.CMOT.CMOTFreq] +global_name = "CMOTFreq" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = -1 +max = 1.5 +start = 0.5 + +[MLOOP_PARAMS.CMOT.RepumpCMOT] +global_name = "RepumpCMOT" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = 0.0 +max = 1.2 +start = 0.055 + +# Molassas Parameters -[MLOOP_PARAMS.TEST_FUNCTION.z] -enable = true -min = -5.0 -max = 5.0 +[MLOOP_PARAMS.MOL.EndFreqMol] +global_name = "EndFreqMol" # Runmanager global it maps to +enable = false # This is an optinal parameter that defaults to true +min = 0.0 +max = 4 +start = 3.1 + +[MLOOP_PARAMS.MOL.MolXBias] +global_name = "MolXBias" # Runmanager global it maps to +enable = 
false # This is an optinal parameter that defaults to true +min = -1.0 +max = 1.5 +start = 0.44 + +[MLOOP_PARAMS.MOL.MolYBias] +global_name = "MolYBias" # Runmanager global it maps to +enable = false # This is an optinal parameter that defaults to true +min = -1.0 +max = 1.5 +start = 0.42 + +[MLOOP_PARAMS.MOL.MolZBias] +global_name = "MolZBias" # Runmanager global it maps to +enable = false # This is an optinal parameter that defaults to true +min = -1.0 +max = 1.0 +start = -0.04 + +[MLOOP_PARAMS.MOL.RepumpMol] +global_name = "RepumpMol" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = 0.0 +max = 0.5 +start = 0.04 + +[MLOOP_PARAMS.MOL.StartFreqMol] +global_name = "StartFreqMol" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = 0.0 +max = 4 +start = 0.45 + +[MLOOP_PARAMS.MOL.TauMol] +global_name = "TauMol" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = 0.001 +max = 0.1 +start = 0.022 + +[MLOOP_PARAMS.MOL.TimeMol] +global_name = "TimeMol" # Runmanager global it maps to +enable = false # This is an optinal parameter that defaults to true +min = 0.005 +max = 0.04 +start = 0.011 + +[MLOOP_PARAMS.MTRAP.CapxShim] +global_name = "CapxShim" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = -2 +max = 2 +start = 0.067 + +[MLOOP_PARAMS.MTRAP.CapyShim] +global_name = "CapyShim" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = -4 +max = 4 start = -2 -[RUNMANAGER_GLOBALS.TEST_FUNCTION.test_tuple] -enable = true -expr = "lambda m, n: (m, n)" -args = ["y", "z"] +[MLOOP_PARAMS.MTRAP.CapzShim] +global_name = "CapzShim" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = -3 +max = 3 +start = 0.41 + +[MLOOP_PARAMS.MTRAP.IM] +global_name = "IM" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = 60 +max = 100 +start = 90 + +[MLOOP_PARAMS.MTRAP.MOTCaptureCurrent] +global_name = "MOTCaptureCurrent" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = 20 +max = 100 +start = 34 + +[MLOOP_PARAMS.MTRAP.MTrapCaptureWidth] +global_name = "MTrapCaptureWidth" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = 0.001 +max = 0.1 +start = 0.01 + +[MLOOP_PARAMS.MTRAP.TrapTime] +global_name = "TrapTime" # Runmanager global it maps to +enable = true # This is an optinal parameter that defaults to true +min = 0.001 +max = 0.2 +start = 0.03 + + + From 46f172820c9d5ca2256b57d7afedacaae4c9421b Mon Sep 17 00:00:00 2001 From: spielman Date: Tue, 23 Jan 2024 10:43:20 -0500 Subject: [PATCH 13/22] Updates based on pull request feedback. 
--- README.md | 76 ++++++++-------- mloop_config.py | 48 +++++----- mloop_config.toml | 167 ++++++----------------------------- mloop_config_demo.toml | 49 ----------- mloop_config_example.toml | 178 ++++++++++++++++++++++++++++++++++++++ mloop_config_local.toml | 48 ---------- 6 files changed, 269 insertions(+), 297 deletions(-) delete mode 100644 mloop_config_demo.toml create mode 100644 mloop_config_example.toml delete mode 100644 mloop_config_local.toml diff --git a/README.md b/README.md index 88dd164..0487926 100644 --- a/README.md +++ b/README.md @@ -2,13 +2,13 @@ # the _labscript suite_ Β» analysislib-mloop -### Machine-learning online optimisation of 𝘭𝘒𝘣𝘴𝘀𝘳π˜ͺ𝘱𝘡 𝘴𝘢π˜ͺ𝘡𝘦 controlled experiments +### Machine-learning online optimization of 𝘭𝘒𝘣𝘴𝘀𝘳π˜ͺ𝘱𝘡 𝘴𝘢π˜ͺ𝘡𝘦 controlled experiments [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black) [![GitHub](https://img.shields.io/github/license/rpanderson/analysislib-mloop)](https://github.com/rpanderson/analysislib-mloop/raw/master/LICENSE) [![python: 3.6 | 3.7 | 3.8](https://img.shields.io/badge/python-3.6%20%7C%203.7%20%7C%203.8-blue)](https://python.org) -**analysislib-mloop** implements machine-learning online optimisation of [_labscript suite_](http://labscriptsuite.org) controlled experiments using [M-LOOP](https://m-loop.readthedocs.io). +**analysislib-mloop** implements machine-learning online optimization of [_labscript suite_](http://labscriptsuite.org) controlled experiments using [M-LOOP](https://m-loop.readthedocs.io). ## Requirements @@ -22,7 +22,7 @@ ## Installation -The following assumes you have a working installation of the [_labscript suite_](https://docs.labscriptsuite.org/en/latest/installation) and [M-LOOP](https://m-loop.readthedocs.io/en/latest/install.html). Please see the installation documentation of these projects if you don't. +The following assumes you have a working installation of the [_labscript suite_](https://docs.labscriptsuite.org/en/latest/installation) and [M-LOOP](https://m-loop.readthedocs.io/en/latest/install.html). Please see the installation documentation of these projects if you don't. For python versions older than 3.11 `tomllib` is not included and [_tomli_](https://pypi.org/project/tomli/) must be installed instead. Clone this repository in your _labscript suite_ analysislib directory. By default, this is `~/labscript-suite/userlib/analysislib` (`~` is `%USERPROFILE%` on Windows). @@ -41,15 +41,17 @@ runmanager = localhost runmanager = 42523 ``` -2. **Configure optimisation settings in `mloop_config.toml`.** TOML is a high-level configuration file format and its description can be found [here](https://toml.io/). There is a tracked version of this file in the repository. At a bare minimum, you should modify the following: +2. **Configure optimization settings in `mloop_config.toml`.** TOML is a high-level configuration file format and its description can be found [here](https://toml.io/). The repository contains a fully functional demo `mloop_config_example.toml` that optimizes the MOT and cMOT laser cooling stages at the _RbChip_ experiment at NIST Gaithersburg. 
At a bare minimum, you should modify the following:
 
 ```toml
 [ANALYSIS]
 cost_key = ["fake_result", "y"]
 maximize = true
+groups = ["MOT"]
 
-[MLOOP_PARAMS.x]
+[MLOOP_PARAMS.MOT.x]
 global_name = "x"
+enable = true
 min = -5.0
 max = 5.0
 start = -2
 ```
 
  * `cost_key`: Column of the lyse dataframe to derive the cost from, specified as a `[routine_name, result_name]` pair. The present cost comes from the most recent value in this column, i.e. `cost = df[cost_key].iloc[-1]`.
  * `maximize`: Whether or not to negate the above value, since M-LOOP will minimize the cost.
+ * `groups`: Which group(s) of parameters are active
- * `MLOOP_PARAMS`: Dictionary of optimisation parameters controlled by MLOOP
+ * `MLOOP_PARAMS`: Dictionary of optimization parameters controlled by MLOOP, specified as a list of groups such as `["MOT", "CMOT"]`. This is to simplify the optimization of different groups of parameters.
    * `global_name` defines the global it maps to in runmanager.
+   * `enable` allows parameters to be enabled or disabled on a case-by-case basis. This may be omitted and defaults to `true`.
    * `min`, `max`, `start` correspond to `min_boundary`, `max_boundary`, and `first_params` lists to meet [M-LOOP specifications](https://m-loop.readthedocs.io/en/latest/tutorials.html#parameter-settings).
 
-You may also specify a more complicated mapping between the paramaters controller by MLOOP and globals in runmanager. 
+You may also specify a more complicated mapping between the parameters controlled by MLOOP and globals in runmanager. 
 
 ```toml
-[MLOOP_PARAMS.y]
+[MLOOP_PARAMS.CMOT.y]
 min = -5.0
 max = 5.0
 start = -2
 
-[MLOOP_PARAMS.z]
+[MLOOP_PARAMS.CMOT.z]
 min = -5.0
 max = 5.0
 start = -2
 
-[RUNMANAGER_GLOBALS.test_tuple]
-expr = "lambda m, n: (m, n)"
+[RUNMANAGER_GLOBALS.CMOT.test_tuple]
+expr = "lambda x, y: (x, y)"
 args = ["y", "z"]
 ```
-
+ * Parameters may be shared between different groups, but both the group and the parameter must be enabled.
  * `y` and `z` are two MLOOP parameters that don't have a `global_name` defined.
- * Instead, an dictionary entry in `RUNMANAGER_GLOBALS`, targeting global `test_tuple` in runmanager, is explicitly defined here with a customized mapping `lambda m, n: (m, n)`, which takes `y` and `z` as parameters. Every time, the tuple `(y, z)` will be passed to `test_tuple` in runmanager.
+ * Instead, a dictionary entry in `RUNMANAGER_GLOBALS`, targeting global `test_tuple` in runmanager, is explicitly defined here with a customized mapping `lambda x, y: (x, y)`, which takes `y` and `z` as arguments. On each iteration, the tuple `(y, z)` will be passed to `test_tuple` in runmanager.
+
+This might be useful if you have organized your runmanager variables into more complicated data structures such as tuples or dictionaries.
 
-3. **Load the analysis routine that computes the quantity you want to optimise into [lyse](https://github.com/labscript-suite/lyse).** This routine should update `cost_key` of the lyse dataframe by calling the `save_result` (or its variants) of a `lyse.Run`. For the above parameters, this would be `fake_result.py` containing:
+3. **Load the analysis routine that computes the quantity you want to optimize into [lyse](https://github.com/labscript-suite/lyse).** This routine should update `cost_key` of the lyse dataframe by calling the `save_result` (or its variants) of a `lyse.Run`. For the above parameters, this would be `fake_result.py` containing:
 
 ```python
 import lyse
@@ -96,16 +102,16 @@ run.save_result('y', your_result)
 
 4. 
**Load `mloop_multishot.py` as an analysis routine in lyse.** Ensure that it runs after the analysis routine that updates `cost_key`, e.g. `fake_result.py` in the above configuration, using the (move routine) up/down buttons. -5. **Begin automated optimisation** by doing one of the following: +5. **Begin automated optimization** by doing one of the following: * Press the 'Run multishot analysis' button in lyse. + This requires the globals specified in `mloop_params` are active in runmanager; unless you - + Set `mock = true` in `mloop_config.ini`, which bypasses shot compilation and submission, and generates a fake cost based on the current value of the first optimisation parameter. Each press of 'Run multishot analysis' will elicit another M-LOOP iteration. This is useful for testing your M-LOOP installation and the threading/multiprocessing used in this codebase, as it only requires that lyse be running (and permits you to skip creating the template file and performing steps (1) and (3) above). + + Set `mock = true` in `mloop_config.ini`, which bypasses shot compilation and submission, and generates a fake cost based on the current value of the first optimization parameter. Each press of 'Run multishot analysis' will elicit another M-LOOP iteration. This is useful for testing your M-LOOP installation and the threading/multiprocessing used in this codebase, as it only requires that lyse be running (and permits you to skip creating the template file and performing steps (1) and (3) above). * Press the 'Engage' button in runmanager. - Either of these will begin an M-LOOP optimisation, with a new sequence of shots being compiled and submitted to [blacs](https://github.com/labscript-suite/blacs) each time a cost value is computed. + Either of these will begin an M-LOOP optimization, with a new sequence of shots being compiled and submitted to [blacs](https://github.com/labscript-suite/blacs) each time a cost value is computed. -6. **Pause optimisation** by pausing the lyse analysis queue or by unchecking (deactivating) the `mloop_multishot.py` in lyse. +6. **Pause optimization** by pausing the lyse analysis queue or by unchecking (deactivating) the `mloop_multishot.py` in lyse. -7. **Cancel or restart optimisation** by removing `mloop_multishot.py` or by right-clicking on it and selecting 'restart worker process for selected routines'. +7. **Cancel or restart optimization** by removing `mloop_multishot.py` or by right-clicking on it and selecting 'restart worker process for selected routines'. ### Uncertainties @@ -146,12 +152,12 @@ run = lyse.Run(h5_path=df.filepath.iloc[-1]) run.save_result(name='y', value=your_result if your_condition else np.nan) ``` -... and set `ignore_bad = true` in the analysis section of `mloop_config.ini`. Shots with `your_condition = False` will be not elicit the cost to be updated, thus postponing the next iteration of optimisation. An example of such a multi-shot routine can be found in fake_result_multishot.py. +... and set `ignore_bad = true` in the analysis section of `mloop_config.ini`. Shots with `your_condition = False` will be not elicit the cost to be updated, thus postponing the next iteration of optimization. An example of such a multi-shot routine can be found in fake_result_multishot.py. -### Analysing optimistion results +### Analyzing optimization results -Since cost evaluation can be based on one or more shots from one or more sequences, additional information is required to analyse a single M-LOOP optimisation session in lyse. 
Per-shot cost evaluation (e.g. of a single-shot analysis result) results in a single-shot sequence per M-LOOP iteration. For multi-shot cost evaluation, a single M-LOOP iteration might correspond to a single multi-shot sequence, repeated execution of the same shot (same `sequence_index` and `run number`, different `run repeat`), or something else. To keep track of this, we intend to add details of the optimisation session to the sequence attributes (written to each shot file). For the time being, you can keep track of the `mloop_session` and `mloop_iteration` by creating globals with these names in any active group in runmanager. They will be updated during each optimisation, and reset to `None` following the completion of an M-LOOP session. This then permits you to analyse shots from a particular optimisation session as follows: +Since cost evaluation can be based on one or more shots from one or more sequences, additional information is required to analyze a single M-LOOP optimization session in lyse. Per-shot cost evaluation (e.g. of a single-shot analysis result) results in a single-shot sequence per M-LOOP iteration. For multi-shot cost evaluation, a single M-LOOP iteration might correspond to a single multi-shot sequence, repeated execution of the same shot (same `sequence_index` and `run number`, different `run repeat`), or something else. To keep track of this, we intend to add details of the optimization session to the sequence attributes (written to each shot file). For the time being, you can keep track of the `mloop_session` and `mloop_iteration` by creating globals with these names in any active group in runmanager. They will be updated during each optimization, and reset to `None` following the completion of an M-LOOP session. This then permits you to analyze shots from a particular optimization session as follows: ```python import lyse @@ -174,13 +180,13 @@ The `mloop_multishot.py` script can be loaded as a single-shot analysis routine ### Is this implementation limited to M-LOOP? -Despite the name, `mloop_multishot.py` can be used for other automated optimisation and feed-forward. You can run any function the optimisation thread (see below), so long as it conforms to the following specification: +Despite the name, `mloop_multishot.py` can be used for other automated optimization and feed-forward. You can run any function the optimization thread (see below), so long as it conforms to the following specification: * Calls `lyse.routine_storage.queue.get()` iteratively. * Uses the `cost_dict` returned to modify global variables (which ones and how is up to you) using `runmanager.remote.set_globals()`. * Calls `runmanager.remote.engage()` when a new shot or sequence of shots is required to get the next cost (optional). -Feed-forward stabilisation (e.g. of some drifting quantity) could be readily achieved using a single-iteration optimisation session, replacing `main` of mloop_interface.py with, for example: +Feed-forward stabilization (e.g. 
 
 ```python
 import lyse
@@ -190,13 +196,13 @@ def main():
     # cost_dict['cost'] is likely some error signal you are trying to zero
     cost_dict = lyse.routine_storage.queue.get()
 
-    # Your code goes here that determines the next value of a stabilisation parameter
+    # Your code goes here that determines the next value of a stabilization parameter
 
     set_globals({'some_global': new_value})
 
     return
 ```
 
-If an alternative optimisation library requires something other than `cost_dict` (with keys `cost`, `uncer`, `bad`), you can modify `cost_analysis` accordingly.
+If an alternative optimization library requires something other than `cost_dict` (with keys `cost`, `uncer`, `bad`), you can modify `cost_analysis` accordingly.
 
 ## Implementation
 
 We use `lyse.routine_storage` to store:
 
  * a long-lived thread (`threading.Thread`) to run the main method of `mloop_interface.py` within `mloop_multishot.py`,
  * a queue (`Queue.Queue`) for `mloop_multishot.py`/`mloop_interface.py` to put/get the latest M-LOOP cost dictionary, and
- * (when `mock = true`) a variable `x` for `mloop_interface.py`/`mloop_multishot.py` to set/get, for spoofing an `cost_key` that changes with the current value of the (first) M-LOOP optimisation parameter.
+ * (when `mock = true`) a variable `x` for `mloop_interface.py`/`mloop_multishot.py` to set/get, for spoofing a `cost_key` that changes with the current value of the (first) M-LOOP optimization parameter.
 
-Each time the `mloop_multishot.py` routine runs in lyse, we first check to see if there is an active optimisation by polling the optimisation thread. If it doesn't exist or is not alive, we start a new thread. If there's an optimisation underway, we retrieve the latest cost value from the lyse dataframe (see the `cost_analysis` function) and put it in the `lyse.routine_storage.queue`.
+Each time the `mloop_multishot.py` routine runs in lyse, we first check to see if there is an active optimization by polling the optimization thread. If it doesn't exist or is not alive, we start a new thread. If there's an optimization underway, we retrieve the latest cost value from the lyse dataframe (see the `cost_analysis` function) and put it in the `lyse.routine_storage.queue`.
 
 The `LoopInterface` subclass (of `mloop.interface.Interface`) has a method `get_next_cost_dict`, which:
 
@@ -216,20 +222,20 @@ The main method of `mloop_interface.py` follows the trend of the [M-LOOP Β» Python controlled experiment tutorial](https://m-loop.readthedocs.io/en/latest/tutorials.html#python-controlled-experiment):
 
- * Instantiate `LoopInterface`, an M-LOOP optmiser interface.
+ * Instantiate `LoopInterface`, an M-LOOP optimizer interface.
 * Get the current configuration.
- * Create an `mloop.controllers.Controller` instance for the optimiser interface, using the above configuration.
+ * Create an `mloop.controllers.Controller` instance for the optimizer interface, using the above configuration.
 * Run the `optimize` method of this controller.
 * Return a dictionary of `best_params`, `best_cost`, `best_uncer`, `best_index`.
 
-Shots are compiled by programmatically interacting with the runmanager GUI. The current value of the optimisation parameters used by M-LOOP are reflected in runmanager, and when a given optimisation is complete, the best parameters are entered into runmanager programmatically.
+Shots are compiled by programmatically interacting with the runmanager GUI. The current values of the optimization parameters used by M-LOOP are reflected in runmanager, and when a given optimization is complete, the best parameters are entered into runmanager programmatically.
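
For orientation, here is a minimal sketch of that remote interaction (a standalone illustration, not code from these patches): pushing new values into runmanager and triggering compilation from Python. The global name `some_global` and its value are illustrative only, and it assumes runmanager is running with its remote server reachable at the default address.

```python
# Hedged sketch of the programmatic runmanager control described above.
# Assumes runmanager is running and its remote API is reachable;
# 'some_global' is an illustrative global name.
import runmanager.remote

runmanager.remote.set_globals({'some_global': 0.5})  # reflect new parameter values in the GUI
runmanager.remote.engage()  # compile and submit a new sequence of shots
```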
 
 ## Roadmap
 
 ### Provenance
 
-The original design and implementation occurred during the summer of 2017/2018 by Josh Morris, Ethan Payne, Lincoln Turner, and I, with assistance from Chris Billington and Phil Starkey. In this incarnation, the M-LOOP interface and experiment interface were run as standalone processes in a shell, with communication between these two actors and the analysis interface being done over a ZMQ socket. Experiment scripts were compiled against an otherwise empty 'template' shot file of globals, which was modified in place at each M-LOOP iteration. This required careful execution of the scripts in the right order, and for the M-LOOP interface to be restarted after each optimistion, and was a bit clunky/flaky.
+The original design and implementation were undertaken during the summer of 2017/2018 by Josh Morris, Ethan Payne, Lincoln Turner, and me, with assistance from Chris Billington and Phil Starkey. In this incarnation, the M-LOOP interface and experiment interface were run as standalone processes in a shell, with communication between these two actors and the analysis interface being done over a ZMQ socket. Experiment scripts were compiled against an otherwise empty 'template' shot file of globals, which was modified in place at each M-LOOP iteration. This required careful execution of the scripts in the right order, and for the M-LOOP interface to be restarted after each optimization, and was a bit clunky/flaky.
 
 In 2019 we improved this original implementation using a single lyse analysis routine (the skeleton of which was written by Phil Starkey), and [remote control of the runmanager GUI](https://github.com/labscript-suite/runmanager/issues/68). This required the following enhancements and bugfixes to the labscript suite, which Chris Billington (mostly) and I undertook:
 
@@ -246,12 +252,12 @@ M-LOOP was written by Michael Hush and is maintained by [M-LOOP contributors](ht
 
 ### Future improvements
 
  * Validation and error checks (#1).
- * Sequence attributes that record the optimisation details.
- * Generalise this implementation to other algorithmic optimisaiton libraries.
+ * Sequence attributes that record the optimization details.
+ * Generalize this implementation to other algorithmic optimization libraries.
 
 ### Contributing
 
 If you are an existing _labscript suite_ user, please test this out on your experiment! Report bugs, request new functionality, and submit pull requests using the [issue tracker](https://github.com/rpanderson/analysislib-mloop/issues) for this project.
 
-If you'd like to implement machine-learning online optimisation on your shot-based, hardware-timed experiment, please consider deploying the _labscript suite_ and M-LOOP (or another machine learning library, by adapting this extension).
+If you'd like to implement machine-learning online optimization on your shot-based, hardware-timed experiment, please consider deploying the _labscript suite_ and M-LOOP (or another machine learning library, by adapting this extension).
diff --git a/mloop_config.py b/mloop_config.py
index beb6e03..9c576c2 100644
--- a/mloop_config.py
+++ b/mloop_config.py
@@ -1,17 +1,24 @@
 import os
 import json
 import configparser
-import tomli
-import logging
 from collections import namedtuple
+import logging
+logger = logging.getLogger('analysislib_mloop')
+
+try:
+    import tomllib
+except ImportError:
+    logger.debug('tomllib not found. Falling back to tomli')
+    import tomli as tomllib
+
 
 MloopParam = namedtuple("MloopParam", ["name", "min", "max", "start"])
 RunmanagerGlobal = namedtuple("RunmanagerGlobal", ["name", "expr", "args"])
 
-def is_global_active(config, group, name, category):
+def is_global_enabled(config, group, name, category):
     """
-    We check to see if the requeted global has been activated or not
+    We check to see if the requested global has been enabled or not
     """
 
     if group in config["MLOOP"]["groups"]:
@@ -38,32 +45,23 @@ def prepare_globals(global_list, params_val_dict):
     return globals_to_set
 
 
-def get(config_paths=None):
-    """Creates config file from specified file, or
-    creates one locally with default values.
+def get(config_path=None):
+    """
+    Set up the mloop interface using the file specified by config_path
     """
 
     # Default to local directory and default name
-    if not config_paths:
-        config_paths = []
+    if not config_path:
         folder = os.path.dirname(__file__)
-        config_paths.append(os.path.join(folder, "mloop_config.toml"))
-        config_paths.append(os.path.join(folder, "mloop_config.ini"))
+        config_path = os.path.join(folder, "mloop_config.ini")
 
-    config_path = ""
-    for path in config_paths:
-        if os.path.isfile(path):
-            print(path)
-            config_path = path
-            break
+    # TODO: Check if file exists and copy a default into the specified location if it does not
+    # Also throw an exception since the default is unlikely to work for the user.
 
-    config = None
-    if config_path:
-        with open(config_path, "rb") as f:
-            config = tomli.load(f)
-    else:
-        raise RuntimeError("Unknown configuration file type. Supports only .toml.")
+    # if os.path.isfile(config_path): ...
+ with open(config_path, "rb") as f: + config = tomllib.load(f) to_flatten = ["COMPILATION", "ANALYSIS", "MLOOP"] # iterate over configuration object and store pairs in parameter dictionary @@ -80,7 +78,7 @@ def get(config_paths=None): for group in config.get("MLOOP_PARAMS", {}): for name, param in config["MLOOP_PARAMS"][group].items(): - if is_global_active(config, group, name, "MLOOP_PARAMS"): + if is_global_enabled(config, group, name, "MLOOP_PARAMS"): param_dict[name] = MloopParam( name=name, min=param["min"], @@ -99,7 +97,7 @@ def get(config_paths=None): for group in config.get("RUNMANAGER_GLOBALS", {}): for name, param in config["RUNMANAGER_GLOBALS"][group].items(): - if is_global_active(config, group, name, "RUNMANAGER_GLOBALS"): + if is_global_enabled(config, group, name, "RUNMANAGER_GLOBALS"): global_list.append( RunmanagerGlobal( diff --git a/mloop_config.toml b/mloop_config.toml index fdc84bb..d00de29 100644 --- a/mloop_config.toml +++ b/mloop_config.toml @@ -2,160 +2,47 @@ mock = false [ANALYSIS] -cost_key = ["zTOF_singleShot", "Nb"] +cost_key = ["fake_result", "y"] maximize = true -ignore_bad = false +ignore_bad = false # true analysislib_console_log_level = 20 analysislib_file_log_level = 10 +groups = ["MOT"] [MLOOP] num_training_runs = 5 -max_num_runs_without_better_params = 80 -max_num_runs = 400 +max_num_runs_without_better_params = 10 +max_num_runs = 30 trust_region = 0.05 cost_has_noise = true -no_delay = true # false +no_delay = false visualisations = false controller_type = "gaussian_process" console_log_level = 0 -groups = ["CMOT", "MOL", "MTRAP"] -# CMOT Parameters - -[MLOOP_PARAMS.CMOT.CMOTCaptureWidth] -global_name = "CMOTCaptureWidth" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = 0.01 -max = 0.5 -start = 0.05 - -[MLOOP_PARAMS.CMOT.CMOTCurrent] -global_name = "CMOTCurrent" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = 5 -max = 40 -start = 20 - -[MLOOP_PARAMS.CMOT.CMOTFreq] -global_name = "CMOTFreq" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = -1 -max = 1.5 -start = 0.5 - -[MLOOP_PARAMS.CMOT.RepumpCMOT] -global_name = "RepumpCMOT" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = 0.0 -max = 1.2 -start = 0.055 - -# Molassas Parameters - -[MLOOP_PARAMS.MOL.EndFreqMol] -global_name = "EndFreqMol" # Runmanager global it maps to -enable = false # This is an optinal parameter that defaults to true -min = 0.0 -max = 4 -start = 3.1 - -[MLOOP_PARAMS.MOL.MolXBias] -global_name = "MolXBias" # Runmanager global it maps to -enable = false # This is an optinal parameter that defaults to true -min = -1.0 -max = 1.5 -start = 0.44 - -[MLOOP_PARAMS.MOL.MolYBias] -global_name = "MolYBias" # Runmanager global it maps to -enable = false # This is an optinal parameter that defaults to true -min = -1.0 -max = 1.5 -start = 0.42 - -[MLOOP_PARAMS.MOL.MolZBias] -global_name = "MolZBias" # Runmanager global it maps to -enable = false # This is an optinal parameter that defaults to true -min = -1.0 -max = 1.0 -start = -0.04 - -[MLOOP_PARAMS.MOL.RepumpMol] -global_name = "RepumpMol" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = 0.0 -max = 0.5 -start = 0.04 - -[MLOOP_PARAMS.MOL.StartFreqMol] -global_name = "StartFreqMol" # Runmanager global it maps to -enable = true # This is an optinal parameter that 
defaults to true -min = 0.0 -max = 4 -start = 0.45 - -[MLOOP_PARAMS.MOL.TauMol] -global_name = "TauMol" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = 0.001 -max = 0.1 -start = 0.022 - -[MLOOP_PARAMS.MOL.TimeMol] -global_name = "TimeMol" # Runmanager global it maps to -enable = false # This is an optinal parameter that defaults to true -min = 0.005 -max = 0.04 -start = 0.011 - -[MLOOP_PARAMS.MTRAP.CapxShim] -global_name = "CapxShim" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = -2 -max = 2 -start = 0.067 - -[MLOOP_PARAMS.MTRAP.CapyShim] -global_name = "CapyShim" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = -4 -max = 4 +# Automatically creates the mapping to a runmanager global +[MLOOP_PARAMS.MOT.x] # this is part of the MOT group +global_name = "x" # << Specify the runmanager global it maps to +min = -5.0 +max = 5.0 start = -2 -[MLOOP_PARAMS.MTRAP.CapzShim] -global_name = "CapzShim" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = -3 -max = 3 -start = 0.41 - -[MLOOP_PARAMS.MTRAP.IM] -global_name = "IM" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = 60 -max = 100 -start = 90 - -[MLOOP_PARAMS.MTRAP.MOTCaptureCurrent] -global_name = "MOTCaptureCurrent" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = 20 -max = 100 -start = 34 - -[MLOOP_PARAMS.MTRAP.MTrapCaptureWidth] -global_name = "MTrapCaptureWidth" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = 0.001 -max = 0.1 -start = 0.01 - -[MLOOP_PARAMS.MTRAP.TrapTime] -global_name = "TrapTime" # Runmanager global it maps to -enable = true # This is an optinal parameter that defaults to true -min = 0.001 -max = 0.2 -start = 0.03 +# The mapping automatically created above by defining global_name is equivalent to +# [RUNMANAGER_GLOBALS.MOT.x] # this is part of the MOT group +# expr = "lambda m: m" +# args = ["x"] +# Create a MLOOP parameter but define its mapping to runmanager globals later +[MLOOP_PARAMS.TEST_FUNCTION.y] # this is part of the TEST_FUNCTION group +min = -5.0 +max = 5.0 +start = -2 +[MLOOP_PARAMS.TEST_FUNCTION.z] # this is part of the TEST_FUNCTION group +min = -5.0 +max = 5.0 +start = -2 +[RUNMANAGER_GLOBALS.TEST_FUNCTION.test_tuple] # this is part of the TEST_FUNCTION group +expr = "lambda x, y: (x, y)" +args = ["y", "z"] diff --git a/mloop_config_demo.toml b/mloop_config_demo.toml deleted file mode 100644 index 2113bc3..0000000 --- a/mloop_config_demo.toml +++ /dev/null @@ -1,49 +0,0 @@ -[COMPILATION] -mock = false - -[ANALYSIS] -cost_key = ["fake_result_multishot", "y"] -maximize = true -ignore_bad = false -# ignore_bad = true -analysislib_console_log_level = 20 -analysislib_file_log_level = 10 - -[MLOOP] -num_training_runs = 5 -max_num_runs_without_better_params = 10 -max_num_runs = 30 -trust_region = 0.05 -cost_has_noise = true -no_delay = false -visualisations = false -controller_type = "gaussian_process" -console_log_level = 0 -groups = ["MOT"] - -# Automatically creates the mapping to a Runmanager global -[MLOOP_PARAMS.MOT.x] -global_name = "x" # << Specify the Runmanager global it maps to -min = -5.0 -max = 5.0 -start = -2 - -# The mapping automatically created above by defining global_name is equivalent to -# 
[RUNMANAGER_GLOBALS.MOT.x] -# expr = "lambda m: m" -# args = ["x"] - -# Create a MLOOP parameter but define its mapping to Runmanager globals later -[MLOOP_PARAMS.TEST_FUNCTION.y] -min = -5.0 -max = 5.0 -start = -2 - -[MLOOP_PARAMS.TEST_FUNCTION.z] -min = -5.0 -max = 5.0 -start = -2 - -[RUNMANAGER_GLOBALS.TEST_FUNCTION.test_tuple] -expr = "lambda m, n: (m, n)" -args = ["y", "z"] diff --git a/mloop_config_example.toml b/mloop_config_example.toml new file mode 100644 index 0000000..9a1a22f --- /dev/null +++ b/mloop_config_example.toml @@ -0,0 +1,178 @@ +[COMPILATION] +mock = false + +[ANALYSIS] +cost_key = ["zTOF_singleShot", "Nb"] +maximize = true +ignore_bad = false +analysislib_console_log_level = 20 +analysislib_file_log_level = 10 + +[MLOOP] +num_training_runs = 5 +max_num_runs_without_better_params = 80 +max_num_runs = 400 +trust_region = 0.05 +cost_has_noise = true +no_delay = true # false +visualisations = false +controller_type = "gaussian_process" +console_log_level = 0 +groups = ["CMOT", "MOL", "MTRAP"] + + ###### ## ## ####### ######## +## ## ### ### ## ## ## +## #### #### ## ## ## +## ## ### ## ## ## ## +## ## ## ## ## ## +## ## ## ## ## ## ## + ###### ## ## ####### ## + +[MLOOP_PARAMS.CMOT.CMOTCaptureWidth] +global_name = "CMOTCaptureWidth" # runmanager global it maps to +enable = true # This is an optional parameter that defaults to true +min = 0.01 +max = 0.5 +start = 0.05 + +[MLOOP_PARAMS.CMOT.CMOTCurrent] +global_name = "CMOTCurrent" +enable = true +min = 5 +max = 40 +start = 20 + +[MLOOP_PARAMS.CMOT.CMOTFreq] +global_name = "CMOTFreq" +enable = true +min = -1 +max = 1.5 +start = 0.5 + +[MLOOP_PARAMS.CMOT.RepumpCMOT] +global_name = "RepumpCMOT" +enable = true +min = 0.0 +max = 1.2 +start = 0.055 + +## ## ####### ## ### ###### ###### ######## ###### +### ### ## ## ## ## ## ## ## ## ## ## ## ## +#### #### ## ## ## ## ## ## ## ## ## +## ### ## ## ## ## ## ## ###### ###### ###### ###### +## ## ## ## ## ######### ## ## ## ## +## ## ## ## ## ## ## ## ## ## ## ## ## ## +## ## ####### ######## ## ## ###### ###### ######## ###### + +[MLOOP_PARAMS.MOL.EndFreqMol] +global_name = "EndFreqMol" +enable = false +min = 0.0 +max = 4 +start = 3.1 + +[MLOOP_PARAMS.MOL.MolXBias] +global_name = "MolXBias" +enable = false +min = -1.0 +max = 1.5 +start = 0.44 + +[MLOOP_PARAMS.MOL.MolYBias] +global_name = "MolYBias" +enable = false +min = -1.0 +max = 1.5 +start = 0.42 + +[MLOOP_PARAMS.MOL.MolZBias] +global_name = "MolZBias" +enable = false +min = -1.0 +max = 1.0 +start = -0.04 + +[MLOOP_PARAMS.MOL.RepumpMol] +global_name = "RepumpMol" +enable = true +min = 0.0 +max = 0.5 +start = 0.04 + +[MLOOP_PARAMS.MOL.StartFreqMol] +global_name = "StartFreqMol" +enable = true +min = 0.0 +max = 4 +start = 0.45 + +[MLOOP_PARAMS.MOL.TauMol] +global_name = "TauMol" +enable = true +min = 0.001 +max = 0.1 +start = 0.022 + +[MLOOP_PARAMS.MOL.TimeMol] +global_name = "TimeMol" +enable = false +min = 0.005 +max = 0.04 +start = 0.011 + +## ## ######## ######## ### ######## +### ### ## ## ## ## ## ## ## +#### #### ## ## ## ## ## ## ## +## ### ## ## ######## ## ## ######## +## ## ## ## ## ######### ## +## ## ## ## ## ## ## ## +## ## ## ## ## ## ## ## + +[MLOOP_PARAMS.MTRAP.CapxShim] +global_name = "CapxShim" +enable = true +min = -2 +max = 2 +start = 0.067 + +[MLOOP_PARAMS.MTRAP.CapyShim] +global_name = "CapyShim" +enable = true +min = -4 +max = 4 +start = -2 + +[MLOOP_PARAMS.MTRAP.CapzShim] +global_name = "CapzShim" +enable = true +min = -3 +max = 3 +start = 0.41 + +[MLOOP_PARAMS.MTRAP.IM] +global_name = "IM" +enable = 
true +min = 60 +max = 100 +start = 90 + +[MLOOP_PARAMS.MTRAP.MOTCaptureCurrent] +global_name = "MOTCaptureCurrent" +enable = true +min = 20 +max = 100 +start = 34 + +[MLOOP_PARAMS.MTRAP.MTrapCaptureWidth] +global_name = "MTrapCaptureWidth" +enable = true +min = 0.001 +max = 0.1 +start = 0.01 + +[MLOOP_PARAMS.MTRAP.TrapTime] +global_name = "TrapTime" +enable = true +min = 0.001 +max = 0.2 +start = 0.03 \ No newline at end of file diff --git a/mloop_config_local.toml b/mloop_config_local.toml deleted file mode 100644 index 0b657aa..0000000 --- a/mloop_config_local.toml +++ /dev/null @@ -1,48 +0,0 @@ -[COMPILATION] -mock = false - -[ANALYSIS] -cost_key = ["zTOF_singleShot", "max_OD"] -maximize = true -ignore_bad = false -# ignore_bad = true -analysislib_console_log_level = 20 -analysislib_file_log_level = 10 - -[MLOOP] -num_training_runs = 5 -max_num_runs_without_better_params = 10 -max_num_runs = 30 -trust_region = 0.05 -cost_has_noise = true -no_delay = false -visualisations = false -controller_type = "gaussian_process" -console_log_level = 0 - -# Automatically creates the mapping to a Runmanager global -[MLOOP_PARAMS.test_mloop] -global_name = "test_mloop" # << Specify the Runmanager global it maps to -min = -5.0 -max = 5.0 -start = -2 - -# The mapping automatically created above by defining global_name is equivalent to -# [RUNMANAGER_GLOBALS.x] -# expr = "lambda m: m" -# args = ["x"] - -# Create a MLOOP parameter but define its mapping to Runmanager globals later -# [MLOOP_PARAMS.y] -# min = -5.0 -# max = 5.0 -# start = -2 - -# [MLOOP_PARAMS.z] -# min = -5.0 -# max = 5.0 -# start = -2 - -# [RUNMANAGER_GLOBALS.test_tuple] -# expr = "lambda m, n: (m, n)" -# args = ["y", "z"] From e165090ac8550952d913bb01581acdbdc4a7f31c Mon Sep 17 00:00:00 2001 From: spielman Date: Tue, 23 Jan 2024 10:53:29 -0500 Subject: [PATCH 14/22] Readme updated --- README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 0487926..79abbb1 100644 --- a/README.md +++ b/README.md @@ -2,11 +2,11 @@ # the _labscript suite_ Β» analysislib-mloop -### Machine-learning online optimization of 𝘭𝘒𝘣𝘴𝘀𝘳π˜ͺ𝘱𝘡 𝘴𝘢π˜ͺ𝘡𝘦 controlled experiments +### Machine-learning online optimization of [_labscript suite_](https://docs.labscriptsuite.org/en/latest/installation) controlled experiments [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black) [![GitHub](https://img.shields.io/github/license/rpanderson/analysislib-mloop)](https://github.com/rpanderson/analysislib-mloop/raw/master/LICENSE) -[![python: 3.6 | 3.7 | 3.8](https://img.shields.io/badge/python-3.6%20%7C%203.7%20%7C%203.8-blue)](https://python.org) +[![python: 3.6+ ](https://img.shields.io/badge/python-3.6+-blue)](https://python.org) **analysislib-mloop** implements machine-learning online optimization of [_labscript suite_](http://labscriptsuite.org) controlled experiments using [M-LOOP](https://m-loop.readthedocs.io). @@ -18,11 +18,13 @@ * [labscript_utils](https://github.com/labscript-suite/labscript_utils) 2.12.4 * [zprocess](https://pypi.org/project/zprocess) 2.13.2 * [M-LOOP](https://m-loop.readthedocs.io/en/latest/install.html) 2.2.0+ +* For python versions older than 3.11 `tomllib` is not included and `tomli` must be installed instead. 
+ * [tomli](https://pypi.org/project/tomli/) 2.0.1 ## Installation -The following assumes you have a working installation of the [_labscript suite_](https://docs.labscriptsuite.org/en/latest/installation) and [M-LOOP](https://m-loop.readthedocs.io/en/latest/install.html). Please see the installation documentation of these projects if you don't. For python versions older than 3.11 `tomllib` is not included and [_tomli_](https://pypi.org/project/tomli/) must be installed instead. +The following assumes you have a working installation of the [_labscript suite_](https://docs.labscriptsuite.org/en/latest/installation) and [M-LOOP](https://m-loop.readthedocs.io/en/latest/install.html). Please see the installation documentation of these projects if you don't. Clone this repository in your _labscript suite_ analysislib directory. By default, this is `~/labscript-suite/userlib/analysislib` (`~` is `%USERPROFILE%` on Windows). From b26942c656cbb04d20796c29aab7b2b9ccb4717c Mon Sep 17 00:00:00 2001 From: Spielman Lab Date: Tue, 23 Jan 2024 11:02:50 -0500 Subject: [PATCH 15/22] Found a .ini left behind. --- mloop_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mloop_config.py b/mloop_config.py index 9c576c2..5ad85cb 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -53,7 +53,7 @@ def get(config_path=None): # Default to local directory and default name if not config_path: folder = os.path.dirname(__file__) - config_path = os.path.join(folder, "mloop_config.ini") + config_path = os.path.join(folder, "mloop_config.toml") # TODO: Check if file exists and copy a default into the specified location if it does not # Also throw an exception since the default is unlikely to work for the user. From 78c6bab0b3fdd27c16a2c13a147eaa9e02d3a52b Mon Sep 17 00:00:00 2001 From: Spielman Lab Date: Tue, 23 Jan 2024 11:38:27 -0500 Subject: [PATCH 16/22] Found group error from getting pull request ready. --- mloop_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mloop_config.py b/mloop_config.py index 5ad85cb..71f77de 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -21,7 +21,7 @@ def is_global_enabled(config, group, name, category): We check to see if the required global has been activated or not """ - if group in config["MLOOP"]["groups"]: + if group in config["ANALYSIS"]["groups"]: if config[category][group][name].get("enable", True): return True From 146e05b87f8f2e0dd8d53081fb24ddcec9f6d3fe Mon Sep 17 00:00:00 2001 From: spielman Date: Tue, 23 Jan 2024 15:25:29 -0500 Subject: [PATCH 17/22] Updated readme. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 79abbb1..27aa2aa 100644 --- a/README.md +++ b/README.md @@ -61,8 +61,8 @@ start = -2 * `cost_key`: Column of the lyse dataframe to derive the cost from, specified as a `[routine_name, result_name]` pair. The present cost comes from the most recent value in this column, i.e. `cost = df[cost_key].iloc[-1]`. * `maximize`: Whether or not to negate the above value, since M-LOOP will minimize the cost. - * `groups`: Which group(s) of parameters are active - * `MLOOP_PARAMS`: Dictionary of optimization parameters controlled by MLOOP, specified as a list of groups such as `["MOT", "CMOT"]`. This is to simplify the optimization of different groups of parameters. + * `groups`: Which group(s) of parameters are active, specified as a list of groups such as `["MOT", "CMOT"]`. This is to simplify the optimization of different groups of parameters. 
+  * `MLOOP_PARAMS`: Dictionary of optimization parameters controlled by MLOOP.
     * `global_name` defines the global it maps to in runmanager.
     * `enable` allows parameters to be enabled or disabled on a case-by-case basis. This may be omitted and defaults to `true`.
     * `min`, `max`, `start` correspond to the `min_boundary`, `max_boundary`, and `first_params` lists required by the [M-LOOP specifications](https://m-loop.readthedocs.io/en/latest/tutorials.html#parameter-settings).

From be2503a976f71a0ee751f75153d5aae8f80ae869 Mon Sep 17 00:00:00 2001
From: spielman
Date: Tue, 23 Jan 2024 15:29:30 -0500
Subject: [PATCH 18/22] More readme editing

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 27aa2aa..7bd99fc 100644
--- a/README.md
+++ b/README.md
@@ -81,12 +81,12 @@ max = 5.0
 start = -2
 
 [RUNMANAGER_GLOBALS.CMOT.test_tuple]
-expr = "lambda x, y: (x, y)"
+expr = "lambda y, z: (y, z)"
 args = ["y", "z"]
 ```
 * Parameters may be shared between different groups, but both the group and the parameter must be enabled.
   * `y` and `z` are two MLOOP parameters that don't have a `global_name` defined.
-  * Instead, a dictionary entry in `RUNMANAGER_GLOBALS`, targeting global `test_tuple` in runmanager, is explicitly defined here with a customized mapping `lambda m, n: (m, n)`, which takes `y` and `z` as parameters. Every time, the tuple `(y, z)` will be passed to `test_tuple` in runmanager.
+  * Instead, a dictionary entry in `RUNMANAGER_GLOBALS`, targeting the global `test_tuple` in runmanager, is explicitly defined here with a customized mapping `lambda y, z: (y, z)`, which takes `y` and `z` as parameters. On each iteration, the tuple `(y, z)` is passed to `test_tuple` in runmanager.
 
 This might be useful if you have organized your runmanager variables into more complicated data structures such as tuples or dictionaries.
 
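The `expr`/`args` mapping above is just a string that is evaluated into a Python callable and applied to the current M-LOOP parameter values. As a rough illustration, here is a standalone sketch (not code from these patches; the helper name `apply_mapping` is hypothetical, and it assumes `expr` holds a trusted lambda string from the TOML file):

```python
# Standalone sketch of the expr/args mapping described above. Assumes `expr`
# is a trusted lambda string; `apply_mapping` is a hypothetical helper,
# not part of this repository.

def apply_mapping(expr, args, mloop_values):
    """Compute the value to hand to the target runmanager global."""
    values = [mloop_values[name] for name in args]  # look up each M-LOOP parameter
    func = eval(expr)  # turn the lambda string into a callable
    return func(*values)

# The test_tuple mapping from the configuration above:
print(apply_mapping("lambda y, z: (y, z)", ["y", "z"], {"y": 1.2, "z": -0.4}))
# -> (1.2, -0.4)
```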
From 360b40c959b9c059e7bf0c152cdb6cf92fc7a53d Mon Sep 17 00:00:00 2001
From: spielman
Date: Sun, 28 Jan 2024 16:23:03 -0500
Subject: [PATCH 19/22] Converted .ini to .toml in readme.

(cherry picked from commit 2c6e1e61bb6047e62f4473e936001b0e7271b7ac)
(cherry picked from commit 6cffbfabc4d10f11bdbc291722d578951c13b16b)
---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 7bd99fc..0cd310f 100644
--- a/README.md
+++ b/README.md
@@ -107,7 +107,7 @@ run.save_result('y', your_result)
 5. **Begin automated optimization** by doing one of the following:
   * Press the 'Run multishot analysis' button in lyse.
     + This requires that the globals specified in `mloop_params` be active in runmanager; unless you
-    + Set `mock = true` in `mloop_config.ini`, which bypasses shot compilation and submission, and generates a fake cost based on the current value of the first optimization parameter. Each press of 'Run multishot analysis' will elicit another M-LOOP iteration. This is useful for testing your M-LOOP installation and the threading/multiprocessing used in this codebase, as it only requires that lyse be running (and permits you to skip creating the template file and performing steps (1) and (3) above).
+    + Set `mock = true` in `mloop_config.toml`, which bypasses shot compilation and submission, and generates a fake cost based on the current value of the first optimization parameter. Each press of 'Run multishot analysis' will elicit another M-LOOP iteration. This is useful for testing your M-LOOP installation and the threading/multiprocessing used in this codebase, as it only requires that lyse be running (and permits you to skip creating the template file and performing steps (1) and (3) above).
   * Press the 'Engage' button in runmanager.
 
   Either of these will begin an M-LOOP optimization, with a new sequence of shots being compiled and submitted to [blacs](https://github.com/labscript-suite/blacs) each time a cost value is computed.
@@ -154,7 +154,7 @@ run = lyse.Run(h5_path=df.filepath.iloc[-1])
 run.save_result(name='y', value=your_result if your_condition else np.nan)
 ```
 
-... and set `ignore_bad = true` in the analysis section of `mloop_config.ini`. Shots with `your_condition = False` will not cause the cost to be updated, thus postponing the next iteration of optimization. An example of such a multi-shot routine can be found in fake_result_multishot.py.
+... and set `ignore_bad = true` in the analysis section of `mloop_config.toml`. Shots with `your_condition = False` will not cause the cost to be updated, thus postponing the next iteration of optimization. An example of such a multi-shot routine can be found in fake_result_multishot.py.
 
 ### Analyzing optimization results
 
From fc5e34f150ecb0ca66a24b6bfc17f5b633cdb61a Mon Sep 17 00:00:00 2001
From: spielman
Date: Sun, 28 Jan 2024 16:27:40 -0500
Subject: [PATCH 20/22] Fixed bug in example code.

(cherry picked from commit 894e5c0adc0e53fd76eeeff7077dc141ccf6e934)
---
 mloop_config_example.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mloop_config_example.toml b/mloop_config_example.toml
index 9a1a22f..51c1763 100644
--- a/mloop_config_example.toml
+++ b/mloop_config_example.toml
@@ -7,6 +7,7 @@ maximize = true
 ignore_bad = false
 analysislib_console_log_level = 20
 analysislib_file_log_level = 10
+groups = ["CMOT", "MOL", "MTRAP"]
 
 [MLOOP]
 num_training_runs = 5
@@ -18,7 +19,6 @@ no_delay = true # false
 visualisations = false
 controller_type = "gaussian_process"
 console_log_level = 0
-groups = ["CMOT", "MOL", "MTRAP"]
 
  ###### ## ## ####### ######## 
## ## ### ### ## ## ## 
From 1ee8c09b2380c56c9f480dd038e20a621f825052 Mon Sep 17 00:00:00 2001
From: spielman
Date: Sun, 28 Jan 2024 16:28:17 -0500
Subject: [PATCH 21/22] Removed prints that were being used as debug statements.
(cherry picked from commit 38831f0cbcb14c6677e8b613d9d2bf8d114298c8) --- mloop_multishot.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mloop_multishot.py b/mloop_multishot.py index 5784446..ad20fd0 100644 --- a/mloop_multishot.py +++ b/mloop_multishot.py @@ -94,13 +94,9 @@ def verify_globals(config): # Retrieve the parameter values requested by M-LOOP on this iteration logger.debug('Getting requested globals values from lyse.routine_storage.') requested_dict = lyse.routine_storage.params - print('requested_dict', requested_dict) requested_values = [requested_dict[g.name] for g in config['runmanager_globals']] - print('requested_values', requested_values) - print('current_values', current_values) - # Get the parameter values for the shot we just computed the cost for logger.debug('Getting lyse dataframe.') df = lyse.data(n_sequences=1) From fcaaf7b5b5a44c6f353d9d7f11c34a56aa35641f Mon Sep 17 00:00:00 2001 From: spielman Date: Sun, 28 Jan 2024 16:35:07 -0500 Subject: [PATCH 22/22] Removed unused imports in mloop_config.py --- mloop_config.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/mloop_config.py b/mloop_config.py index 71f77de..387c94c 100644 --- a/mloop_config.py +++ b/mloop_config.py @@ -1,6 +1,4 @@ import os -import json -import configparser from collections import namedtuple import logging
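
To close the loop on the configuration format introduced in this series, here is a small standalone sketch (mine, not part of the patches) that loads an `mloop_config.toml`-style file and lists the enabled parameters, mirroring the `ANALYSIS` `groups` / per-parameter `enable` logic above; the helper name `enabled_params` is hypothetical:

```python
# Standalone sketch (hypothetical helper): list enabled M-LOOP parameters from
# a TOML config, mirroring the groups/enable logic used in this patch series.
try:
    import tomllib  # Python 3.11+
except ImportError:
    import tomli as tomllib  # older Pythons need the tomli backport

def enabled_params(path="mloop_config.toml"):
    with open(path, "rb") as f:
        config = tomllib.load(f)
    active_groups = config["ANALYSIS"].get("groups", [])
    for group, params in config.get("MLOOP_PARAMS", {}).items():
        for name, param in params.items():
            # a parameter is used only if its group is active and it is enabled
            if group in active_groups and param.get("enable", True):
                yield group, name

for group, name in enabled_params():
    print(f"{group}.{name} is enabled")
```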