From b6a9ae7693bc11f3e87a5357785638b6c8997482 Mon Sep 17 00:00:00 2001
From: Thijs Snelleman
Date: Fri, 27 Sep 2024 09:46:09 +0200
Subject: [PATCH] Move code report generation to documentation deployment

---
 .github/workflows/badgegenerator.yml |    4 +-
 .github/workflows/documentation.yml  |    3 +
 Documentation/source/index.md        |   20 +-
 [86 committed report files deleted under Documentation/source/_static/coverage/
  and Documentation/source/_static/junit/: the coverage.py HTML report (index,
  class_index, function_index, per-module pages, JS/CSS/PNG assets) and the
  pytest-html report (index.html, assets/style.css); the per-file diffstat and
  "delete mode 100644" entries are omitted here]
 89 files changed, 19 insertions(+), 26478 deletions(-)

diff --git a/.github/workflows/badgegenerator.yml b/.github/workflows/badgegenerator.yml
index faf630cfc..40a718ae4 100644
--- a/.github/workflows/badgegenerator.yml
+++ b/.github/workflows/badgegenerator.yml
@@ -30,11 +30,11 @@ jobs:
           conda env update --file dev-env.yml --name base
           pip install genbadge[tests,coverage]
       - name: Run Code Coverage report
-        run: pytest --all --cov sparkle/ --cov-report=xml:.reports/coverage/coverage.xml --cov-report=html:Documentation/source/_static/coverage
+        run: pytest --all --cov sparkle/ --cov-report=xml:.reports/coverage/coverage.xml
       - name: Run badge generator coverage
         run: genbadge coverage -i .reports/coverage/coverage.xml -o .reports/coverage/coverage-badge.svg
       - name: Run tests report
-        run: pytest --all --junitxml=.reports/junit/junit.xml --html=.reports/junit/junit.html
+        run: pytest --all --junitxml=.reports/junit/junit.xml
      - name: Create tests badge
         run: genbadge tests -i .reports/junit/junit.xml -o .reports/junit/junit-badge.svg
      - name: Commit to branch
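[Note: the badge job still runs the full test suite twice. pytest's built-in
--junitxml option and pytest-cov's --cov-report can coexist in one invocation,
so both XML reports could come from a single run; a minimal sketch, assuming
the same dev-env.yml environment the workflow installs:

    pytest --all \
        --cov sparkle/ --cov-report=xml:.reports/coverage/coverage.xml \
        --junitxml=.reports/junit/junit.xml
]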
diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml
index 915352c52..bf9a8d4dc 100644
--- a/.github/workflows/documentation.yml
+++ b/.github/workflows/documentation.yml
@@ -40,6 +40,10 @@ jobs:
           conda install -y python=3.9  # https://github.com/conda/conda/issues/13560#issuecomment-1992720842
           conda env update --file dev-env.yml --name base
           pip install sphinx sphinx_rtd_theme myst_parser sphinxcontrib-autoprogram
+      - name: Compile code reports
+        run: |
+          pytest --all --cov sparkle/ --cov-report=html:Documentation/source/_static/coverage
+          pytest --all --html=Documentation/source/_static/junit/index.html
       - name: Build Documentation
         run: |
           cd Documentation
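[Note: the reports are written under Documentation/source/_static/ because
Sphinx copies everything in html_static_path (typically ['_static']) verbatim
into the built site, so they deploy together with the docs. As written, the new
step also runs the suite twice; pytest-cov and pytest-html accept their report
options in a single invocation, which would roughly halve the step's cost. A
hedged sketch, assuming both plugins from dev-env.yml (--self-contained-html is
pytest-html's flag for a single-file report that needs no assets/ directory):

    pytest --all \
        --cov sparkle/ --cov-report=html:Documentation/source/_static/coverage \
        --html=Documentation/source/_static/junit/index.html --self-contained-html
]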
diff --git a/Documentation/source/_static/coverage/class_index.html b/Documentation/source/_static/coverage/class_index.html
deleted file mode 100644
index 4a316461e..000000000
--- a/Documentation/source/_static/coverage/class_index.html
+++ /dev/null
@@ -1,1123 +0,0 @@
[1,123 deleted lines omitted: coverage.py v7.6.1 HTML class index, created
2024-09-27 09:26 +0200. Overall coverage 30% (5290 statements, 3679 missing,
0 excluded); the per-class table listed every sparkle module, ranging from 0%
on most CLI entry points (add_solver.py, configure_solver.py, run_solvers.py,
...) to 100% on package __init__ files.]
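[Note: none of these pages need to live in the repository; coverage.py
regenerates the whole tree (index, class_index, function_index, assets) on
demand. A sketch of the local equivalent of the new workflow step:

    pytest --all --cov sparkle/                               # writes the .coverage data file
    coverage html -d Documentation/source/_static/coverage    # renders the HTML tree from it
]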
diff --git a/Documentation/source/_static/coverage/coverage_html_cb_6fb7b396.js b/Documentation/source/_static/coverage/coverage_html_cb_6fb7b396.js
deleted file mode 100644
index 1face13de..000000000
--- a/Documentation/source/_static/coverage/coverage_html_cb_6fb7b396.js
+++ /dev/null
@@ -1,733 +0,0 @@
[733 deleted lines omitted: coverage.py's stock HTML-report browser script
(Apache-2.0, see https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt),
implementing table sorting and filtering, keyboard shortcuts, chunk navigation,
and scroll-marker rendering.]
diff --git a/Documentation/source/_static/coverage/favicon_32_cb_58284776.png b/Documentation/source/_static/coverage/favicon_32_cb_58284776.png
deleted file mode 100644
index 8649f0475d8d20793b2ec431fe25a186a414cf10..0000000000000000000000000000000000000000
GIT binary patch
[binary favicon deleted, 1732 -> 0 bytes; base85 payload omitted]
diff --git a/Documentation/source/_static/coverage/function_index.html b/Documentation/source/_static/coverage/function_index.html
deleted file mode 100644
--- a/Documentation/source/_static/coverage/function_index.html
+++ /dev/null
@@ -1,3739 +0,0 @@
[3,739 deleted lines omitted: coverage.py v7.6.1 HTML function index for the
same run (overall coverage 30%, created 2024-09-27 09:26 +0200). It repeats the
class-index data at function granularity, e.g. sparkle/CLI/about.py
parser_function 0%, sparkle/CLI/cli.py main 76%, ReportingScenario
read_scenario_ini 19%. The remainder of the patch deletes the per-module
coverage pages, the remaining report assets, and the pytest-html report, and
changes 20 lines of Documentation/source/index.md.]
sparkle/CLI/help/reporting_scenario.pyReportingScenario.set_parallel_portfolio_instance_path300100%
sparkle/CLI/help/reporting_scenario.pyReportingScenario.get_parallel_portfolio_instance_set3300%
sparkle/CLI/help/reporting_scenario.pyReportingScenario.set_config_solver51080%
sparkle/CLI/help/reporting_scenario.pyReportingScenario.get_config_solver4400%
sparkle/CLI/help/reporting_scenario.pyReportingScenario.set_config_instance_set_train300100%
sparkle/CLI/help/reporting_scenario.pyReportingScenario.get_config_instance_set_train4400%
sparkle/CLI/help/reporting_scenario.pyReportingScenario.set_config_instance_set_test300100%
sparkle/CLI/help/reporting_scenario.pyReportingScenario.get_config_instance_set_test4400%
sparkle/CLI/help/reporting_scenario.py(no function)4600100%
sparkle/CLI/help/snapshot_help.pysave_current_sparkle_platform9900%
sparkle/CLI/help/snapshot_help.pyremove_current_platform2200%
sparkle/CLI/help/snapshot_help.pycreate_working_dirs2200%
sparkle/CLI/help/snapshot_help.pyextract_snapshot6600%
sparkle/CLI/help/snapshot_help.pyload_snapshot121200%
sparkle/CLI/help/snapshot_help.py(no function)1300100%
sparkle/CLI/help/system_status.pyprint_sparkle_list4400%
sparkle/CLI/help/system_status.pyprint_feature_computation_jobs9900%
sparkle/CLI/help/system_status.pyprint_performance_computation_jobs141400%
sparkle/CLI/help/system_status.py(no function)5500%
sparkle/CLI/initialise.pyparser_function3300%
sparkle/CLI/initialise.pydetect_sparkle_platform_exists6600%
sparkle/CLI/initialise.pycheck_for_initialise131300%
sparkle/CLI/initialise.pyinitialise_sparkle353500%
sparkle/CLI/initialise.py(no function)204080%
sparkle/CLI/load_snapshot.pyparser_function3300%
sparkle/CLI/load_snapshot.py(no function)121200%
sparkle/CLI/remove_feature_extractor.pyparser_function3300%
sparkle/CLI/remove_feature_extractor.py(no function)343400%
sparkle/CLI/remove_instances.pyparser_function3300%
sparkle/CLI/remove_instances.py(no function)393900%
sparkle/CLI/remove_solver.pyparser_function3300%
sparkle/CLI/remove_solver.py(no function)404000%
sparkle/CLI/run_ablation.pyparser_function131300%
sparkle/CLI/run_ablation.py(no function)636300%
sparkle/CLI/run_configured_solver.pyparser_function6600%
sparkle/CLI/run_configured_solver.py(no function)535300%
sparkle/CLI/run_parallel_portfolio.pyrun_parallel_portfolio11611600%
sparkle/CLI/run_parallel_portfolio.pyparser_function101000%
sparkle/CLI/run_parallel_portfolio.py(no function)818100%
sparkle/CLI/run_portfolio_selector.pyparser_function6600%
sparkle/CLI/run_portfolio_selector.py(no function)737300%
sparkle/CLI/run_solvers.pyparser_function8800%
sparkle/CLI/run_solvers.pyrunning_solvers_performance_data212100%
sparkle/CLI/run_solvers.pyrun_solvers_on_instances191900%
sparkle/CLI/run_solvers.py(no function)353500%
sparkle/CLI/save_snapshot.pyparser_function1100%
sparkle/CLI/save_snapshot.py(no function)8800%
sparkle/CLI/status.pyparser_function3300%
sparkle/CLI/status.py(no function)222200%
sparkle/CLI/validate_configured_vs_default.pyparser_function101000%
sparkle/CLI/validate_configured_vs_default.py(no function)606000%
sparkle/CLI/wait.pyparser_function3300%
sparkle/CLI/wait.pyget_runs_from_file9900%
sparkle/CLI/wait.pywait_for_jobs313100%
sparkle/CLI/wait.pywait_for_jobs.signal_handler2200%
sparkle/CLI/wait.py(no function)222200%
sparkle/__init__.py(no function)100100%
sparkle/about.py(no function)700100%
sparkle/configurator/__init__.py(no function)000100%
sparkle/configurator/configurator.pyConfigurator.__init__111091%
sparkle/configurator/configurator.pyConfigurator.scenario_class1100%
sparkle/configurator/configurator.pyConfigurator.configure1100%
sparkle/configurator/configurator.pyConfigurator.get_optimal_configuration1100%
sparkle/configurator/configurator.pyConfigurator.organise_output1100%
sparkle/configurator/configurator.pyConfigurator.set_scenario_dirs1100%
sparkle/configurator/configurator.pyConfigurator.get_status_from_logs1100%
sparkle/configurator/configurator.pyConfigurationScenario.__init__400100%
sparkle/configurator/configurator.pyConfigurationScenario.create_scenario1100%
sparkle/configurator/configurator.pyConfigurationScenario.create_scenario_file1100%
sparkle/configurator/configurator.pyConfigurationScenario.from_file1100%
sparkle/configurator/configurator.py(no function)2700100%
sparkle/configurator/configurator_cli.py(no function)151500%
sparkle/configurator/implementations/__init__.pyresolve_configurator4400%
sparkle/configurator/implementations/__init__.py(no function)300100%
sparkle/configurator/implementations/smac2.pySMAC2.__init__300100%
sparkle/configurator/implementations/smac2.pySMAC2.scenario_class100100%
sparkle/configurator/implementations/smac2.pySMAC2.configure207065%
sparkle/configurator/implementations/smac2.pySMAC2.get_optimal_configuration232091%
sparkle/configurator/implementations/smac2.pySMAC2.organise_output114064%
sparkle/configurator/implementations/smac2.pySMAC2.set_scenario_dirs200100%
sparkle/configurator/implementations/smac2.pySMAC2.get_smac_run_obj31067%
sparkle/configurator/implementations/smac2.pySMAC2.get_status_from_logs141400%
sparkle/configurator/implementations/smac2.pySMAC2Scenario.__init__2000100%
sparkle/configurator/implementations/smac2.pySMAC2Scenario.create_scenario71086%
sparkle/configurator/implementations/smac2.pySMAC2Scenario._set_paths700100%
sparkle/configurator/implementations/smac2.pySMAC2Scenario._prepare_scenario_directory400100%
sparkle/configurator/implementations/smac2.pySMAC2Scenario._prepare_result_directory200100%
sparkle/configurator/implementations/smac2.pySMAC2Scenario._create_scenario_file122083%
sparkle/configurator/implementations/smac2.pySMAC2Scenario._prepare_instances400100%
sparkle/configurator/implementations/smac2.pySMAC2Scenario._get_performance_measure31067%
sparkle/configurator/implementations/smac2.pySMAC2Scenario._create_feature_file2200%
sparkle/configurator/implementations/smac2.pySMAC2Scenario._clean_up_scenario_dirs6600%
sparkle/configurator/implementations/smac2.pySMAC2Scenario.from_file151500%
sparkle/configurator/implementations/smac2.py(no function)4400100%
sparkle/instance/__init__.pyinstance_set52060%
sparkle/instance/__init__.py(no function)300100%
sparkle/instance/instances.pyInstanceSet.__init__300100%
sparkle/instance/instances.pyInstanceSet.size1100%
sparkle/instance/instances.pyInstanceSet.all_paths1100%
sparkle/instance/instances.pyInstanceSet.instance_paths1100%
sparkle/instance/instances.pyInstanceSet.instance_names1100%
sparkle/instance/instances.pyInstanceSet.name100100%
sparkle/instance/instances.pyInstanceSet.get_path_by_name4400%
sparkle/instance/instances.pyFileInstanceSet.__init__93067%
sparkle/instance/instances.pyFileInstanceSet.name100100%
sparkle/instance/instances.pyMultiFileInstanceSet.__init__121200%
sparkle/instance/instances.pyMultiFileInstanceSet.all_paths1100%
sparkle/instance/instances.pyIterableFileInstanceSet.__init__5500%
sparkle/instance/instances.pyIterableFileInstanceSet.size1100%
sparkle/instance/instances.pyIterableFileInstanceSet.__determine_size__5500%
sparkle/instance/instances.py(no function)3300100%
sparkle/platform/__init__.py(no function)200100%
sparkle/platform/cli_types.pyVerbosityLevel.from_string1100%
sparkle/platform/cli_types.pyTEXT.format_text3300%
sparkle/platform/cli_types.py(no function)5900100%
sparkle/platform/file_help.pyadd_remove_platform_item141400%
sparkle/platform/file_help.py(no function)4400%
sparkle/platform/generate_report_for_configuration.pyget_features_bool5500%
sparkle/platform/generate_report_for_configuration.pyget_average_performance400100%
sparkle/platform/generate_report_for_configuration.pyget_dict_instance_to_performance600100%
sparkle/platform/generate_report_for_configuration.pyget_ablation_bool300100%
sparkle/platform/generate_report_for_configuration.pyget_data_for_plot1100100%
sparkle/platform/generate_report_for_configuration.pyget_figure_configure_vs_default900100%
sparkle/platform/generate_report_for_configuration.pyget_figure_configured_vs_default_on_instance_set2200%
sparkle/platform/generate_report_for_configuration.pyget_timeouts_instanceset7700%
sparkle/platform/generate_report_for_configuration.pyget_timeouts1000100%
sparkle/platform/generate_report_for_configuration.pyget_ablation_table2311052%
sparkle/platform/generate_report_for_configuration.pyconfiguration_report_variables1300100%
sparkle/platform/generate_report_for_configuration.pyget_dict_variable_to_value_common351097%
sparkle/platform/generate_report_for_configuration.pyget_dict_variable_to_value_test1900100%
sparkle/platform/generate_report_for_configuration.pygenerate_report_for_configuration300100%
sparkle/platform/generate_report_for_configuration.py(no function)2700100%
sparkle/platform/generate_report_for_parallel_portfolio.pyget_solver_list_latex101000%
sparkle/platform/generate_report_for_parallel_portfolio.pyget_portfolio_metrics242400%
sparkle/platform/generate_report_for_parallel_portfolio.pyget_figure_parallel_portfolio_sparkle_vs_sbs6600%
sparkle/platform/generate_report_for_parallel_portfolio.pyget_results_table272700%
sparkle/platform/generate_report_for_parallel_portfolio.pyparallel_report_variables363600%
sparkle/platform/generate_report_for_parallel_portfolio.pygenerate_figure131300%
sparkle/platform/generate_report_for_parallel_portfolio.pygenerate_report_parallel_portfolio3300%
sparkle/platform/generate_report_for_parallel_portfolio.py(no function)181800%
sparkle/platform/generate_report_for_selection.pyget_num_instance_sets1100%
sparkle/platform/generate_report_for_selection.pyget_instance_set_count_list4400%
sparkle/platform/generate_report_for_selection.pysolver_ranked_latex_list2200%
sparkle/platform/generate_report_for_selection.pyget_portfolio_selector_performance5500%
sparkle/platform/generate_report_for_selection.pyget_figure_portfolio_selector_vs_sbs6600%
sparkle/platform/generate_report_for_selection.pyget_figure_portfolio_selector_sparkle_vs_vbs7700%
sparkle/platform/generate_report_for_selection.pyselection_report_variables282800%
sparkle/platform/generate_report_for_selection.pygenerate_report_selection6600%
sparkle/platform/generate_report_for_selection.py(no function)151500%
sparkle/platform/latex.pycheck_tex_commands_exist2200%
sparkle/platform/latex.pyunderscore_for_latex1100%
sparkle/platform/latex.pylist_to_latex53040%
sparkle/platform/latex.pygenerate_comparison_plot262600%
sparkle/platform/latex.pyfill_template_tex7700%
sparkle/platform/latex.pycompile_pdf8800%
sparkle/platform/latex.pygenerate_report101000%
sparkle/platform/latex.py(no function)2000100%
sparkle/platform/output/__init__.py(no function)000100%
sparkle/platform/output/configuration_output.pyConfigurationOutput.__init__252500%
sparkle/platform/output/configuration_output.pyConfigurationOutput.get_configurations8800%
sparkle/platform/output/configuration_output.pyConfigurationOutput.get_validation_data161600%
sparkle/platform/output/configuration_output.pyConfigurationOutput.serialize_configuration_results1100%
sparkle/platform/output/configuration_output.pyConfigurationOutput.serialize_scenario1100%
sparkle/platform/output/configuration_output.pyConfigurationOutput.write_output4400%
sparkle/platform/output/configuration_output.py(no function)171700%
sparkle/platform/output/parallel_portfolio_output.pyParallelPortfolioOutput.__init__202000%
sparkle/platform/output/parallel_portfolio_output.pyParallelPortfolioOutput.get_solver_solutions7700%
sparkle/platform/output/parallel_portfolio_output.pyParallelPortfolioOutput.serialize_instances1100%
sparkle/platform/output/parallel_portfolio_output.pyParallelPortfolioOutput.serialize_results1100%
sparkle/platform/output/parallel_portfolio_output.pyParallelPortfolioOutput.write_output4400%
sparkle/platform/output/parallel_portfolio_output.py(no function)141400%
sparkle/platform/output/selection_output.pySelectionOutput.__init__151500%
sparkle/platform/output/selection_output.pySelectionOutput.get_solver_data3300%
sparkle/platform/output/selection_output.pySelectionOutput.serialize_solvers1100%
sparkle/platform/output/selection_output.pySelectionOutput.serialize_performance1100%
sparkle/platform/output/selection_output.pySelectionOutput.serialize_instances1100%
sparkle/platform/output/selection_output.pySelectionOutput.serialize_contribution1100%
sparkle/platform/output/selection_output.pySelectionOutput.serialize_settings1100%
sparkle/platform/output/selection_output.pySelectionOutput.write_output5500%
sparkle/platform/output/selection_output.py(no function)171700%
sparkle/platform/output/structures.pyValidationResults.__init__5500%
sparkle/platform/output/structures.pyConfigurationResults.__init__2200%
sparkle/platform/output/structures.pySelectionSolverData.__init__3300%
sparkle/platform/output/structures.pySelectionPerformance.__init__7700%
sparkle/platform/output/structures.pyParallelPortfolioResults.__init__121200%
sparkle/platform/output/structures.py(no function)181800%
sparkle/platform/settings_objects.pySettings.__init__251096%
sparkle/platform/settings_objects.pySettings.read_settings_ini13012603%
sparkle/platform/settings_objects.pySettings.write_used_settings1100%
sparkle/platform/settings_objects.pySettings.write_settings_ini141400%
sparkle/platform/settings_objects.pySettings.__init_section200100%
sparkle/platform/settings_objects.pySettings.__check_setting_state116045%
sparkle/platform/settings_objects.pySettings.set_general_sparkle_objectives171700%
sparkle/platform/settings_objects.pySettings.get_general_sparkle_objectives3300%
sparkle/platform/settings_objects.pySettings.set_general_sparkle_configurator6600%
sparkle/platform/settings_objects.pySettings.get_general_sparkle_configurator8800%
sparkle/platform/settings_objects.pySettings.set_general_sparkle_selector600100%
sparkle/platform/settings_objects.pySettings.get_general_sparkle_selector300100%
sparkle/platform/settings_objects.pySettings.set_general_solution_verifier6600%
sparkle/platform/settings_objects.pySettings.get_general_solution_verifier6600%
sparkle/platform/settings_objects.pySettings.set_general_target_cutoff_time6600%
sparkle/platform/settings_objects.pySettings.get_general_target_cutoff_time3300%
sparkle/platform/settings_objects.pySettings.set_general_extractor_cutoff_time6600%
sparkle/platform/settings_objects.pySettings.get_general_extractor_cutoff_time3300%
sparkle/platform/settings_objects.pySettings.set_number_of_jobs_in_parallel6600%
sparkle/platform/settings_objects.pySettings.get_number_of_jobs_in_parallel3300%
sparkle/platform/settings_objects.pySettings.set_general_verbosity6600%
sparkle/platform/settings_objects.pySettings.get_general_verbosity3300%
sparkle/platform/settings_objects.pySettings.set_general_check_interval6600%
sparkle/platform/settings_objects.pySettings.get_general_check_interval3300%
sparkle/platform/settings_objects.pySettings.set_config_wallclock_time6600%
sparkle/platform/settings_objects.pySettings.get_config_wallclock_time3300%
sparkle/platform/settings_objects.pySettings.set_config_cpu_time6600%
sparkle/platform/settings_objects.pySettings.get_config_cpu_time4400%
sparkle/platform/settings_objects.pySettings.set_config_solver_calls6600%
sparkle/platform/settings_objects.pySettings.get_config_solver_calls4400%
sparkle/platform/settings_objects.pySettings.set_config_number_of_runs6600%
sparkle/platform/settings_objects.pySettings.get_config_number_of_runs3300%
sparkle/platform/settings_objects.pySettings.set_configurator_target_cutoff_length6600%
sparkle/platform/settings_objects.pySettings.get_configurator_target_cutoff_length3300%
sparkle/platform/settings_objects.pySettings.set_slurm_max_parallel_runs_per_node6600%
sparkle/platform/settings_objects.pySettings.get_slurm_max_parallel_runs_per_node3300%
sparkle/platform/settings_objects.pySettings.add_slurm_extra_option6600%
sparkle/platform/settings_objects.pySettings.get_slurm_extra_options8800%
sparkle/platform/settings_objects.pySettings.set_ablation_racing_flag6600%
sparkle/platform/settings_objects.pySettings.get_ablation_racing_flag3300%
sparkle/platform/settings_objects.pySettings.set_parallel_portfolio_check_interval6600%
sparkle/platform/settings_objects.pySettings.get_parallel_portfolio_check_interval3300%
sparkle/platform/settings_objects.pySettings.set_parallel_portfolio_number_of_seeds_per_solver6600%
sparkle/platform/settings_objects.pySettings.get_parallel_portfolio_number_of_seeds_per_solver3300%
sparkle/platform/settings_objects.pySettings.set_run_on6600%
sparkle/platform/settings_objects.pySettings.get_run_on1100%
sparkle/platform/settings_objects.pySettings.check_settings_changes313100%
sparkle/platform/settings_objects.py(no function)12600100%
sparkle/solver/__init__.py(no function)500100%
sparkle/solver/ablation.pyAblationScenario.__init__192089%
sparkle/solver/ablation.pyAblationScenario.create_configuration_file292900%
sparkle/solver/ablation.pyAblationScenario.create_instance_file131300%
sparkle/solver/ablation.pyAblationScenario.check_for_ablation4400%
sparkle/solver/ablation.pyAblationScenario.read_ablation_table101000%
sparkle/solver/ablation.pyAblationScenario.submit_ablation161600%
sparkle/solver/ablation.py(no function)2000100%
sparkle/solver/extractor.pyExtractor.__init__5500%
sparkle/solver/extractor.pyExtractor.features4400%
sparkle/solver/extractor.pyExtractor.feature_groups3300%
sparkle/solver/extractor.pyExtractor.output_dimension1100%
sparkle/solver/extractor.pyExtractor.groupwise_computation4400%
sparkle/solver/extractor.pyExtractor.build_cmd121200%
sparkle/solver/extractor.pyExtractor.run111100%
sparkle/solver/extractor.pyExtractor.get_feature_vector4400%
sparkle/solver/extractor.py(no function)2100100%
sparkle/solver/selector.pySelector.__init__600100%
sparkle/solver/selector.pySelector.build_construction_cmd7700%
sparkle/solver/selector.pySelector.construct121200%
sparkle/solver/selector.pySelector.build_cmd3300%
sparkle/solver/selector.pySelector.run9900%
sparkle/solver/selector.pySelector.process_predict_schedule_output111100%
sparkle/solver/selector.py(no function)1600100%
sparkle/solver/solver.pySolver.__init__165069%
sparkle/solver/solver.pySolver._get_pcs_file400100%
sparkle/solver/solver.pySolver.get_pcs_file300100%
sparkle/solver/solver.pySolver.read_pcs_file8800%
sparkle/solver/solver.pySolver.get_pcs5500%
sparkle/solver/solver.pySolver.build_cmd181800%
sparkle/solver/solver.pySolver.run292900%
sparkle/solver/solver.pySolver.config_str_to_dict95044%
sparkle/solver/solver.pySolver.parse_solver_output232300%
sparkle/solver/solver.py(no function)3100100%
sparkle/solver/validator.pyValidator.__init__200100%
sparkle/solver/validator.pyValidator.validate242400%
sparkle/solver/validator.pyValidator.retrieve_raw_results282800%
sparkle/solver/validator.pyValidator.get_validation_results151500%
sparkle/solver/validator.pyValidator.append_entry_to_csv232300%
sparkle/solver/validator.py(no function)1700100%
sparkle/solver/verifier.pySolutionVerifier.__init__1100%
sparkle/solver/verifier.pySolutionVerifier.verifiy1100%
sparkle/solver/verifier.pySATVerifier.__init__1100%
sparkle/solver/verifier.pySATVerifier.__str__1100%
sparkle/solver/verifier.pySATVerifier.verify1100%
sparkle/solver/verifier.pySATVerifier.sat_get_verify_string121200%
sparkle/solver/verifier.pySATVerifier.sat_judge_correctness_raw_result2200%
sparkle/solver/verifier.py(no function)1600100%
sparkle/structures/__init__.py(no function)200100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.__init__139031%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.add_extractor5500%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.add_instances3300%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.remove_extractor1100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.remove_instances1100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.get_feature_groups6600%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.get_value1100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.set_value1100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.has_missing_vectors6600%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.remaining_jobs8800%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.get_instance100100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.impute_missing_values1100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.has_missing_value1100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.reset_dataframe1100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.sort1100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.instances1100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.extractors1100%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.save_csv2200%
sparkle/structures/feature_dataframe.pyFeatureDataFrame.to_autofolio8800%
sparkle/structures/feature_dataframe.py(no function)2800100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.__init__295083%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.__repr__1100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.num_objectives100100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.num_instances100100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.num_runs1100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.num_solvers1100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.multi_objective100100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.solvers100100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.objective_names31067%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.instances100100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.has_missing_values1100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.verify_objective72071%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.verify_run_id62067%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.verify_indexing300100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.add_solver42050%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.add_instance121200%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.set_value200100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.remove_solver100100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.remove_instance1100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.reset_value1100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.get_value200100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.get_values101000%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.mean101000%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.get_job_list500100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.remaining_jobs1000100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.best_instance_performance152087%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.best_performance500100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.schedule_performance195074%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.marginal_contribution141093%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.get_solver_ranking900100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.save_csv2200%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.clean_csv2200%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.copy500100%
sparkle/structures/performance_dataframe.pyPerformanceDataFrame.to_autofolio131300%
sparkle/structures/performance_dataframe.py(no function)5400100%
sparkle/tools/__init__.py(no function)500100%
sparkle/tools/general.pyget_time_pid_random_string2200%
sparkle/tools/general.py(no function)400100%
sparkle/tools/pcsparser.pyPCSObject.__init__1100%
sparkle/tools/pcsparser.pyPCSObject.add_param9900%
sparkle/tools/pcsparser.pyPCSObject.add_constraint1100%
sparkle/tools/pcsparser.pyPCSObject.add_forbidden1100%
sparkle/tools/pcsparser.pyPCSObject.add_comment1100%
sparkle/tools/pcsparser.pyPCSObject.clear1100%
sparkle/tools/pcsparser.pyPCSObject.get4400%
sparkle/tools/pcsparser.pyPCSParser.__init__3300%
sparkle/tools/pcsparser.pyPCSParser._format_string_to_enum4400%
sparkle/tools/pcsparser.pyPCSParser.check_validity1100%
sparkle/tools/pcsparser.pyPCSParser.load9900%
sparkle/tools/pcsparser.pyPCSParser.export5500%
sparkle/tools/pcsparser.pySMACParser.parse484800%
sparkle/tools/pcsparser.pySMACParser._parse_conditions292900%
sparkle/tools/pcsparser.pySMACParser._parse_condition141400%
sparkle/tools/pcsparser.pySMACParser.compile1100%
sparkle/tools/pcsparser.pyParamILSParser.parse1100%
sparkle/tools/pcsparser.pyParamILSParser.compile595900%
sparkle/tools/pcsparser.pyParamILSParser._compile_conditions161600%
sparkle/tools/pcsparser.py(no function)3600100%
sparkle/tools/runsolver_parsing.pyget_measurements131300%
sparkle/tools/runsolver_parsing.pyget_status181800%
sparkle/tools/runsolver_parsing.pyget_solver_args5500%
sparkle/tools/runsolver_parsing.pyget_solver_output484800%
sparkle/tools/runsolver_parsing.py(no function)900100%
sparkle/tools/slurm_parsing.pySlurmBatch.__init__141400%
sparkle/tools/slurm_parsing.py(no function)1000100%
sparkle/tools/solver_wrapper_parsing.pyparse_commandline_dict3300%
sparkle/tools/solver_wrapper_parsing.pyparse_solver_wrapper_args141400%
sparkle/tools/solver_wrapper_parsing.pyget_solver_call_params6600%
sparkle/tools/solver_wrapper_parsing.py(no function)700100%
sparkle/types/__init__.py_check_class100100%
sparkle/types/__init__.pyresolve_objective265081%
sparkle/types/__init__.py(no function)1300100%
sparkle/types/features.pyFeatureType.with_subgroup1100%
sparkle/types/features.py(no function)6900100%
sparkle/types/objective.pyUseTime._missing_1100%
sparkle/types/objective.pySparkleObjective.__init__900100%
sparkle/types/objective.pySparkleObjective.__str__100100%
sparkle/types/objective.pySparkleObjective.time100100%
sparkle/types/objective.pyPAR.__init__500100%
sparkle/types/objective.pyPAR.__init__.penalise3300%
sparkle/types/objective.py(no function)2400100%
sparkle/types/sparkle_callable.pySparkleCallable.__init__900100%
sparkle/types/sparkle_callable.pySparkleCallable.build_cmd1100%
sparkle/types/sparkle_callable.pySparkleCallable.run1100%
sparkle/types/sparkle_callable.py(no function)600100%
sparkle/types/status.py(no function)1200100%
Total 52903679030%
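These pages are exactly what coverage.py emits, so deleting them from version control loses nothing: the documentation deployment can regenerate them. A minimal sketch of such a regeneration step using coverage.py's Python API follows; the output directory and the way the test suite is invoked are assumptions for illustration, not taken from the workflow changes in this patch.

```python
import coverage
import pytest

# Sketch: collect coverage over the test suite, then rebuild the HTML
# bundle this patch removes from the repository.
cov = coverage.Coverage(source=["sparkle"])
cov.start()
pytest.main(["tests"])  # assumed test-suite entry point
cov.stop()
cov.save()
cov.html_report(directory="Documentation/source/_static/coverage")
```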
diff --git a/Documentation/source/_static/coverage/index.html b/Documentation/source/_static/coverage/index.html
deleted file mode 100644
index af25ed31b..000000000
--- a/Documentation/source/_static/coverage/index.html
+++ /dev/null
@@ -1,643 +0,0 @@
[Page header: "Coverage report: 30%"; filter box and Files/Functions/Classes view toggles omitted; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200]
Name                                                         Stmts   Miss  Excl  Cover
sparkle/CLI/__init__.py                                          0      0     0   100%
sparkle/CLI/about.py                                             7      2     0    71%
sparkle/CLI/add_feature_extractor.py                            52     52     0     0%
sparkle/CLI/add_instances.py                                    66     66     0     0%
sparkle/CLI/add_solver.py                                       82     82     0     0%
sparkle/CLI/cleanup.py                                          29     29     0     0%
sparkle/CLI/cli.py                                              27      5     0    81%
sparkle/CLI/compute_features.py                                 77     77     0     0%
sparkle/CLI/compute_marginal_contribution.py                    94     62     0    34%
sparkle/CLI/configure_solver.py                                131    131     0     0%
sparkle/CLI/construct_portfolio_selector.py                     99     99     0     0%
sparkle/CLI/generate_report.py                                 149    149     0     0%
sparkle/CLI/help/__init__.py                                     0      0     0   100%
sparkle/CLI/help/argparse_custom.py                             97     16     0    84%
sparkle/CLI/help/global_variables.py                            27      6     0    78%
sparkle/CLI/help/logging.py                                     44      2     0    95%
sparkle/CLI/help/nicknames.py                                   19     19     0     0%
sparkle/CLI/help/reporting_scenario.py                         184     87     0    53%
sparkle/CLI/help/snapshot_help.py                               44     31     0    30%
sparkle/CLI/help/system_status.py                               32     32     0     0%
sparkle/CLI/initialise.py                                       77     61     0    21%
sparkle/CLI/load_snapshot.py                                    15     15     0     0%
sparkle/CLI/remove_feature_extractor.py                         37     37     0     0%
sparkle/CLI/remove_instances.py                                 42     42     0     0%
sparkle/CLI/remove_solver.py                                    43     43     0     0%
sparkle/CLI/run_ablation.py                                     76     76     0     0%
sparkle/CLI/run_configured_solver.py                            59     59     0     0%
sparkle/CLI/run_parallel_portfolio.py                          207    207     0     0%
sparkle/CLI/run_portfolio_selector.py                           79     79     0     0%
sparkle/CLI/run_solvers.py                                      83     83     0     0%
sparkle/CLI/save_snapshot.py                                     9      9     0     0%
sparkle/CLI/status.py                                           25     25     0     0%
sparkle/CLI/validate_configured_vs_default.py                   70     70     0     0%
sparkle/CLI/wait.py                                             67     67     0     0%
sparkle/__init__.py                                              1      0     0   100%
sparkle/about.py                                                 7      0     0   100%
sparkle/configurator/__init__.py                                 0      0     0   100%
sparkle/configurator/configurator.py                            51     10     0    80%
sparkle/configurator/configurator_cli.py                        15     15     0     0%
sparkle/configurator/implementations/__init__.py                 7      4     0    43%
sparkle/configurator/implementations/smac2.py                  203     55     0    73%
sparkle/instance/__init__.py                                     8      2     0    75%
sparkle/instance/instances.py                                   79     35     0    56%
sparkle/platform/__init__.py                                     2      0     0   100%
sparkle/platform/cli_types.py                                   63      4     0    94%
sparkle/platform/file_help.py                                   18     18     0     0%
sparkle/platform/generate_report_for_configuration.py          177     26     0    85%
sparkle/platform/generate_report_for_parallel_portfolio.py     137    137     0     0%
sparkle/platform/generate_report_for_selection.py               74     74     0     0%
sparkle/platform/latex.py                                       79     57     0    28%
sparkle/platform/output/__init__.py                              0      0     0   100%
sparkle/platform/output/configuration_output.py                 72     72     0     0%
sparkle/platform/output/parallel_portfolio_output.py            47     47     0     0%
sparkle/platform/output/selection_output.py                     45     45     0     0%
sparkle/platform/output/structures.py                           47     47     0     0%
sparkle/platform/settings_objects.py                           544    374     0    31%
sparkle/solver/__init__.py                                       5      0     0   100%
sparkle/solver/ablation.py                                     111     74     0    33%
sparkle/solver/extractor.py                                     65     44     0    32%
sparkle/solver/selector.py                                      64     42     0    34%
sparkle/solver/solver.py                                       146     93     0    36%
sparkle/solver/validator.py                                    109     90     0    17%
sparkle/solver/verifier.py                                      35     19     0    46%
sparkle/structures/__init__.py                                   2      0     0   100%
sparkle/structures/feature_dataframe.py                         90     57     0    37%
sparkle/structures/performance_dataframe.py                    253     75     0    70%
sparkle/tools/__init__.py                                        5      0     0   100%
sparkle/tools/general.py                                         6      2     0    67%
sparkle/tools/pcsparser.py                                     244    208     0    15%
sparkle/tools/runsolver_parsing.py                              93     84     0    10%
sparkle/tools/slurm_parsing.py                                  24     14     0    42%
sparkle/tools/solver_wrapper_parsing.py                         30     23     0    23%
sparkle/types/__init__.py                                       40      5     0    88%
sparkle/types/features.py                                       70      1     0    99%
sparkle/types/objective.py                                      44      4     0    91%
sparkle/types/sparkle_callable.py                               17      2     0    88%
sparkle/types/status.py                                         12      0     0   100%
TOTAL                                                         5290   3679     0    30%
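The Cover column follows from the other columns. Assuming the usual coverage.py formula, covered = statements minus missing, the TOTAL row checks out:

```python
statements, missing = 5290, 3679
covered = statements - missing        # 1611 statements executed
print(f"{covered / statements:.0%}")  # -> 30%
```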
diff --git a/Documentation/source/_static/coverage/keybd_closed_cb_ce680311.png b/Documentation/source/_static/coverage/keybd_closed_cb_ce680311.png
deleted file mode 100644
index ba119c47df81ed2bbd27a06988abf700139c4f99..0000000000000000000000000000000000000000
GIT binary patch
[9004 bytes of binary PNG data omitted]
diff --git a/Documentation/source/_static/coverage/style_cb_8e611ae1.css b/Documentation/source/_static/coverage/style_cb_8e611ae1.css
deleted file mode 100644
index 3cdaf05a3..000000000
--- a/Documentation/source/_static/coverage/style_cb_8e611ae1.css
+++ /dev/null
@@ -1,337 +0,0 @@
[337 lines of generated coverage.py report CSS omitted; the file is Apache-licensed and headed "Don't edit this .css file. Edit the .scss file instead!"]
diff --git a/Documentation/source/_static/coverage/z_1b9c668f29c73fc8___init___py.html b/Documentation/source/_static/coverage/z_1b9c668f29c73fc8___init___py.html
deleted file mode 100644
index 1ae6ad325..000000000
--- a/Documentation/source/_static/coverage/z_1b9c668f29c73fc8___init___py.html
+++ /dev/null
@@ -1,108 +0,0 @@
[Page header: "Coverage for sparkle/configurator/implementations/__init__.py: 43%"; 7 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200]
1"""This package provides specific Configurator class implementations for Sparkle.""" 

-

2from sparkle.configurator.configurator import Configurator 

-

3from sparkle.configurator.implementations.smac2 import SMAC2, SMAC2Scenario 

-

4 

-

5 

-

6def resolve_configurator(configurator_name: str) -> Configurator: 

-

7 """Returns the Configurator subclass by name.""" 

-

8 subclass_names = [SMAC2.__name__] 

-

9 if configurator_name in subclass_names: 

-

10 return eval(configurator_name) 

-

11 return None 

-
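The lookup above resolves a configurator class by `eval`-ing its name against an allow-list. An equivalent sketch without `eval` (hypothetical, not part of this patch or the Sparkle API) keeps the resolvable names as explicit data:

```python
from sparkle.configurator.configurator import Configurator
from sparkle.configurator.implementations.smac2 import SMAC2

# Hypothetical registry: same observable behaviour as the eval() version,
# but the set of resolvable names is an explicit mapping.
_CONFIGURATORS: dict[str, type[Configurator]] = {SMAC2.__name__: SMAC2}


def resolve_configurator(configurator_name: str) -> type[Configurator] | None:
    """Return the Configurator subclass registered under this name, or None."""
    return _CONFIGURATORS.get(configurator_name)
```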
diff --git a/Documentation/source/_static/coverage/z_1b9c668f29c73fc8_smac2_py.html b/Documentation/source/_static/coverage/z_1b9c668f29c73fc8_smac2_py.html
deleted file mode 100644
index f9ce5dd47..000000000
--- a/Documentation/source/_static/coverage/z_1b9c668f29c73fc8_smac2_py.html
+++ /dev/null
@@ -1,529 +0,0 @@
[Page header: "Coverage for sparkle/configurator/implementations/smac2.py: 73%"; 203 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200]
1#!/usr/bin/env python3 

-

2# -*- coding: UTF-8 -*- 

-

3"""Configurator class to use different configurators like SMAC.""" 

-

4 

-

5from __future__ import annotations 

-

6from typing import Callable 

-

7from pathlib import Path 

-

8import ast 

-

9from statistics import mean 

-

10import operator 

-

11import fcntl 

-

12import glob 

-

13import shutil 

-

14 

-

15import pandas as pd 

-

16 

-

17import runrunner as rrr 

-

18from runrunner import Runner, Run 

-

19 

-

20from sparkle.configurator.configurator import Configurator, ConfigurationScenario 

-

21from sparkle.solver import Solver 

-

22from sparkle.solver.validator import Validator 

-

23from sparkle.instance import InstanceSet 

-

24from sparkle.types import SparkleObjective 

-

25 

-

26 

-

27class SMAC2(Configurator): 

-

28 """Class for SMAC2 (Java) configurator.""" 

-

29 configurator_path = Path(__file__).parent.parent.parent.resolve() /\ 

-

30 "Components/smac-v2.10.03-master-778" 

-

31 target_algorithm = "smac_target_algorithm.py" 

-

32 

-

33 def __init__(self: SMAC2, 

-

34 objectives: list[SparkleObjective], 

-

35 base_dir: Path, 

-

36 output_path: Path) -> None: 

-

37 """Returns the SMAC configurator, Java SMAC V2.10.03. 

-

38 

-

39 Args: 

-

40 objectives: The objectives to optimize. Only supports one objective. 

-

41 base_dir: The path where the configurator will be executed in. 

-

42 output_path: The path where the output will be placed. 

-

43 """ 

-

44 output_path = output_path / SMAC2.__name__ 

-

45 output_path.mkdir(parents=True, exist_ok=True) 

-

46 return super().__init__( 

-

47 validator=Validator(out_dir=output_path), 

-

48 output_path=output_path, 

-

49 executable_path=SMAC2.configurator_path / "smac", 

-

50 configurator_target=SMAC2.configurator_path / SMAC2.target_algorithm, 

-

51 objectives=objectives, 

-

52 base_dir=base_dir, 

-

53 tmp_path=output_path / "tmp", 

-

54 multi_objective_support=False) 

-

55 

-

56 @property 

-

57 def scenario_class(self: Configurator) -> ConfigurationScenario: 

-

58 """Returns the SMAC2 scenario class.""" 

-

59 return SMAC2Scenario 

-

60 

-

61 def configure(self: Configurator, 

-

62 scenario: ConfigurationScenario, 

-

63 validate_after: bool = True, 

-

64 sbatch_options: list[str] = [], 

-

65 num_parallel_jobs: int = None, 

-

66 base_dir: Path = None, 

-

67 run_on: Runner = Runner.SLURM) -> list[Run]: 

-

68 """Start configuration job. 

-

69 

-

70 Args: 

-

71 scenario: ConfigurationScenario object 

-

72 validate_after: Whether the Validator will be called after the configuration 

-

73 sbatch_options: List of slurm batch options to use 

-

74 num_parallel_jobs: The maximum number of jobs to run in parallel. 

-

75 base_dir: The path where the sbatch scripts will be created for Slurm. 

-

76 run_on: On which platform to run the jobs. Default: Slurm. 

-

77 

-

78 Returns: 

-

79 A list of RunRunner Run objects. 

-

80 """ 

-

81 self.scenario = scenario 

-

82 self.scenario.create_scenario(parent_directory=self.output_path) 

-

83 output_csv = self.scenario.validation / "configurations.csv" 

-

84 output_csv.parent.mkdir(exist_ok=True, parents=True) 

-

85 output = [f"{(self.scenario.result_directory).absolute()}/" 

-

86 f"{self.scenario.name}_seed_{seed}_smac.txt" 

-

87 for seed in range(self.scenario.number_of_runs)] 

-

88 cmds = [f"python3 {Configurator.configurator_cli_path.absolute()} " 

-

89 f"{SMAC2.__name__} {output[seed]} {output_csv.absolute()} " 

-

90 f"{self.executable_path.absolute()} " 

-

91 f"--scenario-file {(self.scenario.scenario_file_path).absolute()} " 

-

92 f"--seed {seed} " 

-

93 f"--execdir {self.scenario.tmp.absolute()}" 

-

94 for seed in range(self.scenario.number_of_runs)] 

-

95 parallel_jobs = self.scenario.number_of_runs 

-

96 if num_parallel_jobs is not None: 

-

97 parallel_jobs = max(num_parallel_jobs, 

-

98 self.scenario.number_of_runs) 

-

99 configuration_run = rrr.add_to_queue( 

-

100 runner=run_on, 

-

101 cmd=cmds, 

-

102 name="configure_solver", 

-

103 base_dir=base_dir, 

-

104 output_path=output, 

-

105 parallel_jobs=parallel_jobs, 

-

106 sbatch_options=sbatch_options, 

-

107 srun_options=["-N1", "-n1"]) 

-

108 runs = [configuration_run] 

-

109 

-

110 if validate_after: 

-

111 self.validator.out_dir = output_csv.parent 

-

112 self.validator.tmp_out_dir = base_dir 

-

113 validate_run = self.validator.validate( 

-

114 [scenario.solver] * self.scenario.number_of_runs, 

-

115 output_csv.absolute(), 

-

116 [scenario.instance_set], 

-

117 [self.scenario.sparkle_objective], 

-

118 scenario.cutoff_time, 

-

119 subdir=Path(), 

-

120 dependency=configuration_run, 

-

121 sbatch_options=sbatch_options, 

-

122 run_on=run_on) 

-

123 runs.append(validate_run) 

-

124 

-

125 if run_on == Runner.LOCAL: 

-

126 for run in runs: 

-

127 run.wait() 

-

128 return runs 

-

129 

-

130 def get_optimal_configuration( 

-

131 self: Configurator, 

-

132 solver: Solver, 

-

133 instance_set: InstanceSet, 

-

134 objective: SparkleObjective = None, 

-

135 aggregate_config: Callable = mean) -> tuple[float, str]: 

-

136 """Returns optimal value and configuration string of solver on instance set.""" 

-

137 if self.scenario is None: 

-

138 self.set_scenario_dirs(solver, instance_set) 

-

139 results = self.validator.get_validation_results( 

-

140 solver, 

-

141 instance_set, 

-

142 source_dir=self.scenario.validation, 

-

143 subdir=self.scenario.validation.relative_to(self.validator.out_dir)) 

-

144 # Group the results per configuration 

-

145 if objective is None: 

-

146 objective = self.objectives[0] 

-

147 value_column = results[0].index(objective.name) 

-

148 config_column = results[0].index("Configuration") 

-

149 configurations = list(set(row[config_column] for row in results[1:])) 

-

150 config_scores = [] 

-

151 for config in configurations: 

-

152 values = [float(row[value_column]) 

-

153 for row in results[1:] if row[1] == config] 

-

154 config_scores.append(aggregate_config(values)) 

-

155 

-

156 comparison = operator.lt if objective.minimise else operator.gt 

-

157 

-

158 # Return optimal value 

-

159 min_index = 0 

-

160 current_optimal = config_scores[min_index] 

-

161 for i, score in enumerate(config_scores): 

-

162 if comparison(score, current_optimal): 

-

163 min_index, current_optimal = i, score 

-

164 

-

165 # Return the optimal configuration dictionary as commandline args 

-

166 config_str = configurations[min_index].strip(" ") 

-

167 if config_str.startswith("{"): 

-

168 config = ast.literal_eval(config_str) 

-

169 config_str = " ".join([f"-{key} '{config[key]}'" for key in config]) 

-

170 return current_optimal, config_str 

-

171 

-

172 @staticmethod 

-

173 def organise_output(output_source: Path, output_target: Path = None) -> None | str: 

-

174 """Retrieves configurations from SMAC files and places them in output.""" 

-

175 call_key = SMAC2.target_algorithm 

-

176 # Last line describing a call is the best found configuration 

-

177 for line in reversed(output_source.open("r").readlines()): 

-

178 if call_key in line: 

-

179 call_str = line.split(call_key, maxsplit=1)[1].strip() 

-

180 # The Configuration appears after the first 6 arguments 

-

181 configuration = call_str.split(" ", 7)[-1] 

-

182 if output_target is None: 

-

183 return configuration 

-

184 with output_target.open("a") as fout: 

-

185 fcntl.flock(fout.fileno(), fcntl.LOCK_EX) 

-

186 fout.write(configuration + "\n") 

-

187 break 

-

188 

-

189 def set_scenario_dirs(self: Configurator, 

-

190 solver: Solver, instance_set: InstanceSet) -> None: 

-

191 """Patching method to allow the rebuilding of the configuration scenario.""" 

-

192 self.scenario = self.scenario_class(solver, instance_set) 

-

193 self.scenario._set_paths(self.output_path) 

-

194 

-

195 @staticmethod 

-

196 def get_smac_run_obj(objective: SparkleObjective) -> str: 

-

197 """Return the SMAC run objective based on the Performance Measure. 

-

198 

-

199 Returns: 

-

200 A string that represents the run objective set in the settings. 

-

201 """ 

-

202 if objective.time: 

-

203 return "RUNTIME" 

-

204 return "QUALITY" 

-

205 

-

206 def get_status_from_logs(self: SMAC2) -> None: 

-

207 """Method to scan the log files of the configurator for warnings.""" 

-

208 base_dir = self.output_path / "scenarios" 

-

209 if not base_dir.exists(): 

-

210 return 

-

211 print(f"Checking the log files of configurator {type(self).__name__} for " 

-

212 "warnings...") 

-

213 scenarios = [f for f in base_dir.iterdir() if f.is_dir()] 

-

214 for scenario in scenarios: 

-

215 log_dir = scenario / "outdir_train_configuration" \ 

-

216 / (scenario.name + "_scenario") 

-

217 warn_files = glob.glob(str(log_dir) + "/log-warn*") 

-

218 non_empty = [log_file for log_file in warn_files 

-

219 if Path(log_file).stat().st_size > 0] 

-

220 if len(non_empty) > 0: 

-

221 print(f"Scenario {scenario.name} has {len(non_empty)} warning(s), see " 

-

222 "the following log file(s) for more information:") 

-

223 for log_file in non_empty: 

-

224 print(f"\t-{log_file}") 

-

225 else: 

-

226 print(f"Scenario {scenario.name} has no warnings.") 

-

227 

-

228 

-

229class SMAC2Scenario(ConfigurationScenario): 

-

230 """Class to handle SMAC2 configuration scenarios.""" 

-

231 def __init__(self: ConfigurationScenario, solver: Solver, 

-

232 instance_set: InstanceSet, number_of_runs: int = None, 

-

233 solver_calls: int = None, cpu_time: int = None, 

-

234 wallclock_time: int = None, cutoff_time: int = None, 

-

235 cutoff_length: int = None, 

-

236 sparkle_objectives: list[SparkleObjective] = None, 

-

237 use_features: bool = None, configurator_target: Path = None, 

-

238 feature_data_df: pd.DataFrame = None)\ 

-

239 -> None: 

-

240 """Initialize scenario paths and names. 

-

241 

-

242 Args: 

-

243 solver: Solver that should be configured. 

-

244 instance_set: Instances object for the scenario. 

-

245 number_of_runs: The number of configurator runs to perform 

-

246 for configuring the solver. 

-

247 solver_calls: The number of times the solver is called for each 

-

248 configuration run 

-

249 cpu_time: The time budget allocated for each configuration run. (cpu) 

-

250 wallclock_time: The time budget allocated for each configuration run. 

-

251 (wallclock) 

-

252 cutoff_time: The maximum time allowed for each individual run during 

-

253 configuration. 

-

254 cutoff_length: The maximum number of iterations allowed for each 

-

255 individual run during configuration. 

-

256 sparkle_objectives: SparkleObjectives used for each run of the configuration. 

-

257 Will be simplified to the first objective. 

-

258 use_features: Boolean indicating if features should be used. 

-

259 configurator_target: The target Python script to be called. 

-

260 This script standardises Configurator I/O for solver wrappers. 

-

261 feature_data_df: If features are used, this contains the feature data. 

-

262 Defaults to None. 

-

263 """ 

-

264 super().__init__(solver, instance_set, sparkle_objectives) 

-

265 self.solver = solver 

-

266 self.instance_set = instance_set 

-

267 self.name = f"{self.solver.name}_{self.instance_set.name}" 

-

268 self.sparkle_objective = sparkle_objectives[0] if sparkle_objectives else None 

-

269 

-

270 self.number_of_runs = number_of_runs 

-

271 self.solver_calls = solver_calls 

-

272 self.cpu_time = cpu_time 

-

273 self.wallclock_time = wallclock_time 

-

274 self.cutoff_time = cutoff_time 

-

275 self.cutoff_length = cutoff_length 

-

276 self.use_features = use_features 

-

277 self.configurator_target = configurator_target 

-

278 self.feature_data = feature_data_df 

-

279 

-

280 self.parent_directory = Path() 

-

281 self.directory = Path() 

-

282 self.result_directory = Path() 

-

283 self.scenario_file_path = Path() 

-

284 self.feature_file_path = Path() 

-

285 self.instance_file_path = Path() 

-

286 

-

287 def create_scenario(self: ConfigurationScenario, parent_directory: Path) -> None: 

-

288 """Create scenario with solver and instances in the parent directory. 

-

289 

-

290 This prepares all the necessary subdirectories related to configuration. 

-

291 

-

292 Args: 

-

293 parent_directory: Directory in which the scenario should be created. 

-

294 """ 

-

295 self._set_paths(parent_directory) 

-

296 self._prepare_scenario_directory() 

-

297 self._prepare_result_directory() 

-

298 self._prepare_instances() 

-

299 

-

300 if self.use_features: 

-

301 self._create_feature_file() 

-

302 

-

303 self._create_scenario_file() 

-

304 

-

305 def _set_paths(self: ConfigurationScenario, parent_directory: Path) -> None: 

-

306 """Set the paths for the scenario based on the specified parent directory.""" 

-

307 self.parent_directory = parent_directory 

-

308 self.directory = self.parent_directory / "scenarios" / self.name 

-

309 self.result_directory = self.directory / "results" 

-

310 self.instance_file_path = self.directory / f"{self.instance_set.name}.txt" 

-

311 self.outdir_train = self.directory / "outdir_train_configuration" 

-

312 self.tmp = self.directory / "tmp" 

-

313 self.validation = self.directory / "validation" 

-

314 

-

315 def _prepare_scenario_directory(self: ConfigurationScenario) -> None: 

-

316 """Delete old scenario dir, recreate it, create empty dirs inside.""" 

-

317 shutil.rmtree(self.directory, ignore_errors=True) 

-

318 self.directory.mkdir(parents=True) 

-

319 

-

320 # Create empty directories as needed 

-

321 self.outdir_train.mkdir() 

-

322 self.tmp.mkdir() 

-

323 

-

324 def _prepare_result_directory(self: ConfigurationScenario) -> None: 

-

325 """Delete possible files in result directory.""" 

-

326 shutil.rmtree(self.result_directory, ignore_errors=True) 

-

327 self.result_directory.mkdir(parents=True) 

-

328 

-

329 def _create_scenario_file(self: ConfigurationScenario) -> None: 

-

330 """Create a file with the configuration scenario. 

-

331 

-

332 Writes supplementary information to the target algorithm (algo =) as: 

-

333 algo = {configurator_target} {solver_directory} {sparkle_objective} 

-

334 """ 

-

335 self.scenario_file_path = self.directory / f"{self.name}_scenario.txt" 

-

336 with self.scenario_file_path.open("w") as file: 

-

337 file.write(f"algo = {self.configurator_target.absolute()} " 

-

338 f"{self.solver.directory.absolute()} {self.sparkle_objective} \n" 

-

339 f"execdir = {self.tmp.absolute()}/\n" 

-

340 f"deterministic = {1 if self.solver.deterministic else 0}\n" 

-

341 f"run_obj = {self._get_performance_measure()}\n" 

-

342 f"cutoffTime = {self.cutoff_time}\n" 

-

343 f"cutoff_length = {self.cutoff_length}\n" 

-

344 f"paramfile = {self.solver.get_pcs_file()}\n" 

-

345 f"outdir = {self.outdir_train.absolute()}\n" 

-

346 f"instance_file = {self.instance_file_path.absolute()}\n" 

-

347 f"test_instance_file = {self.instance_file_path.absolute()}\n") 

-

348 if self.use_features: 

-

349 file.write(f"feature_file = {self.feature_file_path}\n") 

-

350 if self.wallclock_time is not None: 

-

351 file.write(f"wallclock-limit = {self.wallclock_time}\n") 

-

352 if self.cpu_time is not None: 

-

353 file.write(f"cputime-limit = {self.cpu_time}\n") 

-

354 if self.solver_calls is not None: 

-

355 file.write(f"runcount-limit = {self.solver_calls}\n") 

-

356 # We don't let SMAC do the validation 

-

357 file.write("validation = false" + "\n") 

-

358 

-

359 def _prepare_instances(self: ConfigurationScenario) -> None: 

-

360 """Create instance list file without instance specifics.""" 

-

361 self.instance_file_path.parent.mkdir(exist_ok=True, parents=True) 

-

362 with self.instance_file_path.open("w+") as file: 

-

363 for instance_path in self.instance_set._instance_paths: 

-

364 file.write(f"{instance_path.absolute()}\n") 

-

365 

-

366 def _get_performance_measure(self: ConfigurationScenario) -> str: 

-

367 """Retrieve the performance measure of the SparkleObjective. 

-

368 

-

369 Returns: 

-

370 Performance measure of the sparkle objective 

-

371 """ 

-

372 if self.sparkle_objective.time: 

-

373 return "RUNTIME" 

-

374 return "QUALITY" 

-

375 

-

376 def _create_feature_file(self: ConfigurationScenario) -> None: 

-

377 """Create CSV file from feature data.""" 

-

378 self.feature_file_path = Path(self.directory 

-

379 / f"{self.instance_set.name}_features.csv") 

-

380 self.feature_data.to_csv(self.directory 

-

381 / self.feature_file_path, index_label="INSTANCE_NAME") 

-

382 

-

383 def _clean_up_scenario_dirs(self: ConfigurationScenario, 

-

384 configurator_path: Path,) -> list[Path]: 

-

385 """Return directories to clean up after the configuration scenario is done. 

-

386 

-

387 Returns: 

-

388 list[Path]: Full paths to directories that can be removed 

-

389 """ 

-

390 result = [] 

-

391 configurator_solver_path = configurator_path / "scenarios"\ 

-

392 / f"{self.solver.name}_{self.instance_set.name}" 

-

393 

-

394 for index in range(self.number_of_runs): 

-

395 dir = configurator_solver_path / str(index) 

-

396 result.append(dir) 

-

397 return result 

-

398 

-

399 @staticmethod 

-

400 def from_file(scenario_file: Path, solver: Solver, instance_set: InstanceSet, 

-

401 ) -> ConfigurationScenario: 

-

402 """Reads scenario file and initialises ConfigurationScenario.""" 

-

403 config = {} 

-

404 with scenario_file.open() as file: 

-

405 for line in file: 

-

406 key, value = line.strip().split(" = ") 

-

407 config[key] = value 

-

408 

-

409 # Collect relevant settings 

-

410 cpu_time = int(config["cpu_time"]) if "cpu_time" in config else None 

-

411 wallclock_limit = int(config["wallclock-limit"]) if "wallclock-limit" in config \ 

-

412 else None 

-

413 solver_calls = int(config["runcount-limit"]) if "runcount-limit" in config \ 

-

414 else None 

-

415 use_features = bool(config["feature_file"]) if "feature_file" in config \ 

-

416 else None 

-

417 

-

418 objective_str = config["algo"].split(" ")[-1] 

-

419 objective = SparkleObjective(objective_str) 

-

420 results_folder = scenario_file.parent / "results" 

-

421 state_run_dirs = [p for p in results_folder.iterdir() if p.is_file()] 

-

422 number_of_runs = len(state_run_dirs) 

-

423 return SMAC2Scenario(solver, 

-

424 instance_set, 

-

425 number_of_runs, 

-

426 solver_calls, 

-

427 cpu_time, 

-

428 wallclock_limit, 

-

429 int(config["cutoffTime"]), 

-

430 config["cutoff_length"], 

-

431 [objective], 

-

432 use_features) 

-
- - - diff --git a/Documentation/source/_static/coverage/z_30633e1e9b8b76b1___init___py.html b/Documentation/source/_static/coverage/z_30633e1e9b8b76b1___init___py.html deleted file mode 100644 index 33a3e2deb..000000000 --- a/Documentation/source/_static/coverage/z_30633e1e9b8b76b1___init___py.html +++ /dev/null @@ -1,99 +0,0 @@ - - - - - Coverage for sparkle/__init__.py: 100% - - - - - -
-
-

- Coverage for sparkle/__init__.py: - 100% -

- -

- 1 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1"""Init file for sparkle.""" 

-

2import sparkle.about as about 

-
- - - diff --git a/Documentation/source/_static/coverage/z_30633e1e9b8b76b1_about_py.html b/Documentation/source/_static/coverage/z_30633e1e9b8b76b1_about_py.html deleted file mode 100644 index abf847bd0..000000000 --- a/Documentation/source/_static/coverage/z_30633e1e9b8b76b1_about_py.html +++ /dev/null @@ -1,113 +0,0 @@ - - - - - Coverage for sparkle/about.py: 100% - - - - - -
-
-

- Coverage for sparkle/about.py: - 100% -

- -

- 7 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1"""Helper module for information about Sparkle.""" 

-

2 

-

3name = "Sparkle" 

-

4version = "0.8.8" 

-

5description = "Platform for evaluating empirical algorithms/solvers" 

-

6licence = "MIT" 

-

7authors = ["Koen van der Blom", 

-

8 "Jeremie Gobeil", 

-

9 "Holger H. Hoos", 

-

10 "Chuan Luo", 

-

11 "Jeroen Rook", 

-

12 "Thijs Snelleman", 

-

13 ] 

-

14contact = "sparkle@aim.rwth-aachen.de" 

-

15 

-

16about_str = f"{name}-{version}" 

-
- - - diff --git a/Documentation/source/_static/coverage/z_468929ca4d63ab61___init___py.html b/Documentation/source/_static/coverage/z_468929ca4d63ab61___init___py.html deleted file mode 100644 index cb543a4ea..000000000 --- a/Documentation/source/_static/coverage/z_468929ca4d63ab61___init___py.html +++ /dev/null @@ -1,98 +0,0 @@ - - - - - Coverage for sparkle/platform/output/__init__.py: 100% - - - - - -
-
-

- Coverage for sparkle/platform/output/__init__.py: - 100% -

- -

- 0 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1"""Initialising for the output module.""" 

-
- - - diff --git a/Documentation/source/_static/coverage/z_468929ca4d63ab61_configuration_output_py.html b/Documentation/source/_static/coverage/z_468929ca4d63ab61_configuration_output_py.html deleted file mode 100644 index 8d151551e..000000000 --- a/Documentation/source/_static/coverage/z_468929ca4d63ab61_configuration_output_py.html +++ /dev/null @@ -1,274 +0,0 @@ - - - - - Coverage for sparkle/platform/output/configuration_output.py: 0% - - - - - -
-
-

- Coverage for sparkle/platform/output/configuration_output.py: - 0% -

- -

- 72 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1#!/usr/bin/env python3 

-

2"""Sparkle class to organise configuration output.""" 

-

3 

-

4from __future__ import annotations 

-

5 

-

6from sparkle.platform import \ 

-

7 generate_report_for_configuration as sgrfch 

-

8from sparkle.solver import Solver 

-

9from sparkle.instance import InstanceSet 

-

10from sparkle.configurator.configurator import Configurator, ConfigurationScenario 

-

11from sparkle.solver.validator import Validator 

-

12from sparkle.platform.output.structures import ValidationResults, ConfigurationResults 

-

13from sparkle.types import SolverStatus 

-

14 

-

15import json 

-

16from pathlib import Path 

-

17 

-

18 

-

19class ConfigurationOutput: 

-

20 """Class that collects configuration data and outputs it in JSON format.""" 

-

21 

-

22 def __init__(self: ConfigurationOutput, path: Path, solver: Solver, 

-

23 configurator: Configurator, instance_set_train: InstanceSet, 

-

24 instance_set_test: InstanceSet, output: Path) -> None: 

-

25 """Initialize Configurator Output class. 

-

26 

-

27 Args: 

-

28 path: Path to configuration output directory 

-

29 solver: Solver object 

-

30 configurator: The configurator that was used 

-

31 instance_set_train: Instance set used for training 

-

32 instance_set_test: Instance set used for testing 

-

33 output: Path to the output directory 

-

34 """ 

-

35 self.solver = solver 

-

36 self.configurator = configurator 

-

37 self.instance_set_train = instance_set_train 

-

38 self.instance_set_test = instance_set_test 

-

39 self.directory = path 

-

40 self.output = output / "configuration.json" if not output.is_file() else output 

-

41 

-

42 solver_dir_name = path.name 

-

43 scenario_file = path / f"{solver_dir_name}_scenario.txt" 

-

44 if not scenario_file.is_file(): 

-

45 raise Exception("Can't find scenario file") 

-

46 

-

47 # Sets scenario on configurator object 

-

48 self.configurator.scenario = \ 

-

49 configurator.scenario_class.from_file(scenario_file, self.solver, 

-

50 self.instance_set_train) 

-

51 self.configurator.scenario._set_paths(self.configurator.output_path) 

-

52 

-

53 # Retrieve all configurations 

-

54 config_path = path / "validation" / "configurations.csv" 

-

55 self.configurations = self.get_configurations(config_path) 

-

56 

-

57 # Retrieve best found configuration 

-

58 objective = self.configurator.scenario.sparkle_objective 

-

59 _, self.best_config = self.configurator.get_optimal_configuration( 

-

60 self.solver, self.instance_set_train, objective) 

-

61 

-

62 # Retrieves validation results for all configurations 

-

63 self.validation_results = [] 

-

64 for config in self.configurations: 

-

65 val_res = self.get_validation_data(self.instance_set_train, 

-

66 config) 

-

67 self.validation_results.append(val_res) 

-

68 

-

69 # Retrieve test validation results if they exist 

-

70 if self.instance_set_test is not None: 

-

71 self.validation_results_test = [] 

-

72 for config in self.configurations: 

-

73 val_res = self.get_validation_data(self.instance_set_test, 

-

74 config) 

-

75 self.validation_results_test.append(val_res) 

-

76 

-

77 def get_configurations(self: ConfigurationOutput, config_path: Path) -> list[dict]: 

-

78 """Read all configurations and transform them to dictionaries.""" 

-

79 configs = [] 

-

80 # Check if the path exists and is a file 

-

81 if config_path.exists() and config_path.is_file(): 

-

82 with config_path.open("r") as file: 

-

83 for line in file: 

-

84 config = Solver.config_str_to_dict(line.strip()) 

-

85 if config not in configs: 

-

86 configs.append(config) 

-

87 return configs 

-

88 

-

89 def get_validation_data(self: ConfigurationOutput, instance_set: InstanceSet, 

-

90 config: dict) -> ConfigurationResults: 

-

91 """Returns best config and ConfigurationResults for instance set.""" 

-

92 objective = self.configurator.scenario.sparkle_objective 

-

93 

-

94 # Retrieve found configuration 

-

95 _, best_config = self.configurator.get_optimal_configuration( 

-

96 self.solver, instance_set, objective) 

-

97 

-

98 # Retrieve validation results 

-

99 validator = Validator(self.directory) 

-

100 val_results = validator.get_validation_results( 

-

101 self.solver, instance_set, config=best_config, 

-

102 source_dir=self.directory, subdir="validation") 

-

103 header = val_results[0] 

-

104 results = [] 

-

105 value_column = header.index(objective.name) 

-

106 instance_column = header.index("Instance") 

-

107 status_column = header.index("Status") 

-

108 cpu_time_column = header.index("CPU Time") 

-

109 wall_time_column = header.index("Wallclock Time") 

-

110 for res in val_results[1:]: 

-

111 results.append([res[instance_column], SolverStatus(res[status_column]), 

-

112 res[value_column], res[cpu_time_column], 

-

113 res[wall_time_column]]) 

-

114 final_results = ValidationResults(self.solver, config, 

-

115 instance_set, results) 

-

116 perf_par = sgrfch.get_average_performance(val_results, 

-

117 objective) 

-

118 return ConfigurationResults(perf_par, 

-

119 final_results) 

-

120 

-

121 def serialize_configuration_results(self: ConfigurationOutput, 

-

122 cr: ConfigurationResults) -> dict: 

-

123 """Transform ConfigurationResults to dictionary format.""" 

-

124 return { 

-

125 "performance": cr.performance, 

-

126 "results": { 

-

127 "solver": cr.results.solver.name, 

-

128 "configuration": cr.results.configuration, 

-

129 "instance_set": cr.results.instance_set.name, 

-

130 "result_header": cr.results.result_header, 

-

131 "result_values": cr.results.result_vals, 

-

132 }, 

-

133 } 

-

134 

-

135 def serialize_scenario(self: ConfigurationOutput, 

-

136 scenario: ConfigurationScenario) -> dict: 

-

137 """Transform ConfigurationScenario to dictionary format.""" 

-

138 return { 

-

139 "number_of_runs": scenario.number_of_runs, 

-

140 "solver_calls": scenario.solver_calls, 

-

141 "cpu_time": scenario.cpu_time, 

-

142 "wallclock_time": scenario.wallclock_time, 

-

143 "cutoff_time": scenario.cutoff_time, 

-

144 "cutoff_length": scenario.cutoff_length, 

-

145 "sparkle_objective": scenario.sparkle_objective.name, 

-

146 "use_features": scenario.use_features, 

-

147 "configurator_target": scenario.configurator_target, 

-

148 "feature_data": scenario.feature_data, 

-

149 } 

-

150 

-

151 def write_output(self: ConfigurationOutput) -> None: 

-

152 """Write data into a JSON file.""" 

-

153 output_data = { 

-

154 "solver": self.solver.name if self.solver else None, 

-

155 "configurator": ( 

-

156 str(self.configurator.executable_path) if self.configurator else None 

-

157 ), 

-

158 "best_configuration": Solver.config_str_to_dict(self.best_config), 

-

159 "configurations": self.configurations, 

-

160 "scenario": self.serialize_scenario(self.configurator.scenario) 

-

161 if self.configurator.scenario else None, 

-

162 "training_results": [ 

-

163 self.serialize_configuration_results(validation_result) 

-

164 for validation_result in self.validation_results 

-

165 ], 

-

166 "test_set": ( 

-

167 [ 

-

168 self.serialize_configuration_results(validation_result) 

-

169 for validation_result in self.validation_results_test 

-

170 ] 

-

171 if self.instance_set_test else None 

-

172 ), 

-

173 } 

-

174 

-

175 self.output.parent.mkdir(parents=True, exist_ok=True) 

-

176 with self.output.open("w") as f: 

-

177 json.dump(output_data, f, indent=4) 

-
- - - diff --git a/Documentation/source/_static/coverage/z_468929ca4d63ab61_parallel_portfolio_output_py.html b/Documentation/source/_static/coverage/z_468929ca4d63ab61_parallel_portfolio_output_py.html deleted file mode 100644 index 9448b88f2..000000000 --- a/Documentation/source/_static/coverage/z_468929ca4d63ab61_parallel_portfolio_output_py.html +++ /dev/null @@ -1,218 +0,0 @@ - - - - - Coverage for sparkle/platform/output/parallel_portfolio_output.py: 0% - - - - - -
-
-

- Coverage for sparkle/platform/output/parallel_portfolio_output.py: - 0% -

- -

- 47 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1#!/usr/bin/env python3 

-

2"""Sparkle class to organise configuration output.""" 

-

3 

-

4from __future__ import annotations 

-

5 

-

6from sparkle.platform import generate_report_for_parallel_portfolio as sgrfpp 

-

7from sparkle.instance import InstanceSet 

-

8from sparkle.platform.output.structures import ParallelPortfolioResults 

-

9from sparkle.types import SparkleObjective 

-

10 

-

11import json 

-

12from pathlib import Path 

-

13import csv 

-

14 

-

15 

-

16class ParallelPortfolioOutput: 

-

17 """Class that collects parallel portfolio data and outputs it in JSON format.""" 

-

18 

-

19 def __init__(self: ParallelPortfolioOutput, parallel_portfolio_path: Path, 

-

20 instance_set: InstanceSet, 

-

21 objective: SparkleObjective, 

-

22 output: Path) -> None: 

-

23 """Initialize ParallelPortfolioOutput class. 

-

24 

-

25 Args: 

-

26 parallel_portfolio_path: Path to parallel portfolio output directory 

-

27 instance_set: List of instances 

-

28 objective: The objective of the portfolio 

-

29 output: Path to the output directory 

-

30 """ 

-

31 if not output.is_file(): 

-

32 self.output = output / "parallel_portfolio.json" 

-

33 else: 

-

34 self.output = output 

-

35 

-

36 self.instance_set = instance_set 

-

37 csv_data = [line for line in 

-

38 csv.reader((parallel_portfolio_path / "results.csv").open("r"))] 

-

39 header = csv_data[0] 

-

40 csv_data = csv_data[1:] 

-

41 solver_column = header.index("Solver") 

-

42 instance_column = header.index("Instance") 

-

43 status_column = header.index("status") 

-

44 objective_column = header.index(objective.name) 

-

45 self.solver_list = list(set([line[solver_column] for line in csv_data])) 

-

46 

-

47 # Collect solver performance for each instance 

-

48 instance_results = {name: [] for name in instance_set._instance_names} 

-

49 for row in csv_data: 

-

50 if row[instance_column] in instance_results.keys(): 

-

51 instance_results[row[instance_column]].append( 

-

52 [row[solver_column], row[status_column], row[objective_column]]) 

-

53 

-

54 solvers_solutions = self.get_solver_solutions(self.solver_list, csv_data) 

-

55 unsolved_instances = self.instance_set.size - sum([solvers_solutions[key] 

-

56 for key in solvers_solutions]) 

-

57 # sbs_runtime is redundant, the same information is available in instance_results 

-

58 _, sbs, runtime_all_solvers, _ =\ 

-

59 sgrfpp.get_portfolio_metrics(self.solver_list, 

-

60 instance_set, 

-

61 instance_results, 

-

62 objective) 

-

63 

-

64 self.results = ParallelPortfolioResults(unsolved_instances, 

-

65 sbs, runtime_all_solvers, 

-

66 instance_results) 

-

67 

-

68 def get_solver_solutions(self: ParallelPortfolioOutput, 

-

69 solver_list: list[str], 

-

70 csv_data: list[list[str]]) -> dict: 

-

71 """Return dictionary with solution count for each solver.""" 

-

72 # Default initialisation, increase solution counter for each successful evaluation 

-

73 solvers_solutions = {solver: 0 for solver in solver_list} 

-

74 instance_names_copy = self.instance_set._instance_names.copy() 

-

75 

-

76 for line in csv_data: 

-

77 if line[0] in instance_names_copy and line[2].lower() == "success": 

-

78 solvers_solutions[line[1]] += 1 

-

79 instance_names_copy.remove(line[0]) 

-

80 

-

81 return solvers_solutions 

-

82 

-

83 def serialize_instances(self: ParallelPortfolioOutput, 

-

84 instances: list[InstanceSet]) -> dict: 

-

85 """Transform Instances to dictionary format.""" 

-

86 # Even though parallel portfolio currently doesn't support multi sets, 

-

87 # this function is already prepared to support multiple sets 

-

88 return { 

-

89 "number_of_instance_sets": len(instances), 

-

90 "instance_sets": [ 

-

91 { 

-

92 "name": instance.name, 

-

93 "number_of_instances": instance.size 

-

94 } 

-

95 for instance in instances 

-

96 ] 

-

97 } 

-

98 

-

99 def serialize_results(self: ParallelPortfolioOutput, 

-

100 pr: ParallelPortfolioResults) -> dict: 

-

101 """Transform results to dictionary format.""" 

-

102 return { 

-

103 "sbs": pr.sbs, 

-

104 "unsolved_instances": pr.unsolved_instances, 

-

105 "runtime_solvers": pr.runtime_solvers, 

-

106 "solvers_performance": pr.solver_performance, 

-

107 "instance_results": pr.instance_results, 

-

108 } 

-

109 

-

110 def write_output(self: ParallelPortfolioOutput) -> None: 

-

111 """Write data into a JSON file.""" 

-

112 output_data = { 

-

113 "number_of_solvers": len(self.solver_list), 

-

114 "solvers": self.solver_list, 

-

115 "instances": self.serialize_instances([self.instance_set]), 

-

116 "results": self.serialize_results(self.results), 

-

117 } 

-

118 

-

119 self.output.parent.mkdir(parents=True, exist_ok=True) 

-

120 with self.output.open("w") as f: 

-

121 json.dump(output_data, f, indent=4) 

-
- - - diff --git a/Documentation/source/_static/coverage/z_468929ca4d63ab61_selection_output_py.html b/Documentation/source/_static/coverage/z_468929ca4d63ab61_selection_output_py.html deleted file mode 100644 index ac74aaebf..000000000 --- a/Documentation/source/_static/coverage/z_468929ca4d63ab61_selection_output_py.html +++ /dev/null @@ -1,256 +0,0 @@ - - - - - Coverage for sparkle/platform/output/selection_output.py: 0% - - - - - -
-
-

- Coverage for sparkle/platform/output/selection_output.py: - 0% -

- -

- 45 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1#!/usr/bin/env python3 

-

2"""Sparkle class to organise configuration output.""" 

-

3 

-

4from __future__ import annotations 

-

5 

-

6from sparkle.structures import PerformanceDataFrame, FeatureDataFrame 

-

7from sparkle.platform import generate_report_for_selection as sgfs 

-

8from sparkle.types.objective import SparkleObjective 

-

9from sparkle.instance import InstanceSet 

-

10from sparkle.platform.output.structures import SelectionPerformance, SelectionSolverData 

-

11 

-

12import json 

-

13from pathlib import Path 

-

14 

-

15 

-

16class SelectionOutput: 

-

17 """Class that collects selection data and outputs it in JSON format.""" 

-

18 

-

19 def __init__(self: SelectionOutput, selection_scenario: Path, 

-

20 train_data: PerformanceDataFrame, 

-

21 feature_data: FeatureDataFrame, 

-

22 training_instances: list[InstanceSet], 

-

23 test_instances: list[InstanceSet], 

-

24 objective: SparkleObjective, 

-

25 cutoff_time: int, 

-

26 output: Path) -> None: 

-

27 """Initialize SelectionOutput class. 

-

28 

-

29 Args: 

-

30 selection_scenario: Path to selection output directory 

-

31 train_data: The performance input data for the selector 

-

32 feature_data: Feature data created by extractor 

-

33 training_instances: The set of training instances 

-

34 test_instances: The set of test instances 

-

35 objective: The objective of the selector 

-

36 cutoff_time: The cutoff time 

-

37 penalised_time: The penalised time 

-

38 output: Path to the output directory 

-

39 """ 

-

40 if not output.is_file(): 

-

41 self.output = output / "selection.json" 

-

42 else: 

-

43 self.output = output 

-

44 if test_instances is not None and not isinstance(test_instances, list): 

-

45 test_instances = [test_instances] 

-

46 

-

47 self.training_instances = training_instances 

-

48 self.test_instances = test_instances 

-

49 self.cutoff_time = cutoff_time 

-

50 

-

51 self.objective = objective 

-

52 self.solver_data = self.get_solver_data(train_data, self.objective) 

-

53 # Collect marginal contribution data 

-

54 self.marginal_contribution_perfect = train_data.marginal_contribution(objective, 

-

55 sort=True) 

-

56 self.marginal_contribution_actual = \ 

-

57 sgfs.compute_selector_marginal_contribution(train_data, 

-

58 feature_data, 

-

59 selection_scenario, 

-

60 objective) 

-

61 

-

62 # Collect performance data 

-

63 portfolio_selector_performance_path = selection_scenario / "performance.csv" 

-

64 vbs_performance = objective.instance_aggregator( 

-

65 train_data.best_instance_performance(objective=objective.name)) 

-

66 self.performance_data = SelectionPerformance( 

-

67 portfolio_selector_performance_path, vbs_performance, self.objective) 

-

68 

-

69 def get_solver_data(self: SelectionOutput, 

-

70 train_data: PerformanceDataFrame, 

-

71 objective: SparkleObjective) -> SelectionSolverData: 

-

72 """Initialise SelectionSolverData object.""" 

-

73 solver_performance_ranking = train_data.get_solver_ranking(objective=objective) 

-

74 num_solvers = train_data.num_solvers 

-

75 return SelectionSolverData(solver_performance_ranking, 

-

76 num_solvers) 

-

77 

-

78 def serialize_solvers(self: SelectionOutput, 

-

79 sd: SelectionSolverData) -> dict: 

-

80 """Transform SelectionSolverData to dictionary format.""" 

-

81 return { 

-

82 "number_of_solvers": sd.num_solvers, 

-

83 "single_best_solver": sd.single_best_solver, 

-

84 "solver_ranking": [ 

-

85 { 

-

86 "solver_name": solver[0], 

-

87 "performance": solver[1] 

-

88 } 

-

89 for solver in sd.solver_performance_ranking 

-

90 ] 

-

91 } 

-

92 

-

93 def serialize_performance(self: SelectionOutput, 

-

94 sp: SelectionPerformance) -> dict: 

-

95 """Transform SelectionPerformance to dictionary format.""" 

-

96 return { 

-

97 "vbs_performance": sp.vbs_performance, 

-

98 "actual_performance": sp.actual_performance, 

-

99 "objective": self.objective.name, 

-

100 "metric": sp.metric 

-

101 } 

-

102 

-

103 def serialize_instances(self: SelectionOutput, 

-

104 instances: list[InstanceSet]) -> dict: 

-

105 """Transform Instances to dictionary format.""" 

-

106 return { 

-

107 "number_of_instance_sets": len(instances), 

-

108 "instance_sets": [ 

-

109 { 

-

110 "name": instance.name, 

-

111 "number_of_instances": instance.size 

-

112 } 

-

113 for instance in instances 

-

114 ] 

-

115 } 

-

116 

-

117 def serialize_contribution(self: SelectionOutput) -> dict: 

-

118 """Transform marginal contribution ranking to dictionary format.""" 

-

119 return { 

-

120 "marginal_contribution_actual": [ 

-

121 { 

-

122 "solver_name": ranking[0], 

-

123 "marginal_contribution": ranking[1], 

-

124 "best_performance": ranking[2] 

-

125 } 

-

126 for ranking in self.marginal_contribution_actual 

-

127 ], 

-

128 "marginal_contribution_perfect": [ 

-

129 { 

-

130 "solver_name": ranking[0], 

-

131 "marginal_contribution": ranking[1], 

-

132 "best_performance": ranking[2] 

-

133 } 

-

134 for ranking in self.marginal_contribution_perfect 

-

135 ] 

-

136 } 

-

137 

-

138 def serialize_settings(self: SelectionOutput) -> dict: 

-

139 """Transform settings to dictionary format.""" 

-

140 return { 

-

141 "cutoff_time": self.cutoff_time, 

-

142 } 

-

143 

-

144 def write_output(self: SelectionOutput) -> None: 

-

145 """Write data into a JSON file.""" 

-

146 test_data = self.serialize_instances(self.test_instances) if self.test_instances\ 

-

147 else None 

-

148 output_data = { 

-

149 "solvers": self.serialize_solvers(self.solver_data), 

-

150 "training_instances": self.serialize_instances(self.training_instances), 

-

151 "test_instances": test_data, 

-

152 "performance": self.serialize_performance(self.performance_data), 

-

153 "settings": self.serialize_settings(), 

-

154 "marginal_contribution": self.serialize_contribution() 

-

155 } 

-

156 

-

157 self.output.parent.mkdir(parents=True, exist_ok=True) 

-

158 with self.output.open("w") as f: 

-

159 json.dump(output_data, f, indent=4) 

-
- - - diff --git a/Documentation/source/_static/coverage/z_468929ca4d63ab61_structures_py.html b/Documentation/source/_static/coverage/z_468929ca4d63ab61_structures_py.html deleted file mode 100644 index d46f675b9..000000000 --- a/Documentation/source/_static/coverage/z_468929ca4d63ab61_structures_py.html +++ /dev/null @@ -1,218 +0,0 @@ - - - - - Coverage for sparkle/platform/output/structures.py: 0% - - - - - -
-
-

- Coverage for sparkle/platform/output/structures.py: - 0% -

- -

- 47 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1#!/usr/bin/env python3 

-

2"""Sparkle output structures.""" 

-

3from __future__ import annotations 

-

4import sys 

-

5from pathlib import Path 

-

6 

-

7from runrunner.base import Status 

-

8 

-

9from sparkle.solver import Solver 

-

10from sparkle.types import SolverStatus, SparkleObjective 

-

11from sparkle.instance import InstanceSet 

-

12from sparkle.structures import PerformanceDataFrame 

-

13 

-

14 

-

15class ValidationResults: 

-

16 """Class that stores validation information and results.""" 

-

17 def __init__(self: ValidationResults, solver: Solver, 

-

18 configuration: dict, instance_set: InstanceSet, 

-

19 results: list[list[str, Status, float, float]]) -> None: 

-

20 """Initialize ValidationResults. 

-

21 

-

22 Args: 

-

23 solver: The name of the solver 

-

24 configuration: The configuration being used 

-

25 instance_set: The set of instances 

-

26 results: Validation results in the format: 

-

27 [["instance", "status", "quality", "runtime"]] 

-

28 """ 

-

29 self.solver = solver 

-

30 self.configuration = configuration 

-

31 self.instance_set = instance_set 

-

32 self.result_header = ["instance", "status", "quality", "runtime"] 

-

33 self.result_vals = results 

-

34 

-

35 

-

36class ConfigurationResults: 

-

37 """Class that aggregates configuration results.""" 

-

38 def __init__(self: ConfigurationResults, metrics: float, 

-

39 results: ValidationResults) -> None: 

-

40 """Initialize ConfigurationResults. 

-

41 

-

42 Args: 

-

43 metrics: The performance of a configured solver 

-

44 results: The results for one configuration 

-

45 """ 

-

46 self.performance = metrics 

-

47 self.results = results 

-

48 

-

49 

-

50class SelectionSolverData: 

-

51 """Class that stores solver information.""" 

-

52 def __init__(self: SelectionSolverData, 

-

53 solver_performance_ranking: list[tuple[str, float]], 

-

54 num_solvers: int) -> None: 

-

55 """Initialize SelectionSolverData. 

-

56 

-

57 Args: 

-

58 solver_performance_ranking: list with solvers ranked by avg. performance 

-

59 num_solvers: The number of solvers 

-

60 """ 

-

61 self.solver_performance_ranking = solver_performance_ranking 

-

62 self.single_best_solver = solver_performance_ranking[0][0] 

-

63 self.num_solvers = num_solvers 

-

64 

-

65 

-

66class SelectionPerformance: 

-

67 """Class that stores selection performance results.""" 

-

68 def __init__(self: SelectionSolverData, 

-

69 performance_path: Path, 

-

70 vbs_performance: float, 

-

71 objective: SparkleObjective) -> None: 

-

72 """Initialize SelectionPerformance. 

-

73 

-

74 Args: 

-

75 performance_path: Path to portfolio selector performance 

-

76 vbs_performance: The performance of the virtual best selector 

-

77 objective: The objective (Performance type) 

-

78 """ 

-

79 if not performance_path.exists(): 

-

80 print(f"ERROR: {performance_path} does not exist.") 

-

81 sys.exit(-1) 

-

82 actual_performance_data = PerformanceDataFrame(performance_path) 

-

83 self.vbs_performance = vbs_performance 

-

84 self.actual_performance = actual_performance_data.mean( 

-

85 objective=objective.name) 

-

86 self.metric = objective.name 

-

87 

-

88 

-

89class ParallelPortfolioResults: 

-

90 """Class that stores parallel portfolio results.""" 

-

91 def __init__(self: ParallelPortfolioResults, 

-

92 unsolved_instances: int, 

-

93 sbs: str, 

-

94 runtime_solvers: dict[str, float], 

-

95 instance_results: dict[str, list]) -> None: 

-

96 """Initialize ParallelPortfolioResults. 

-

97 

-

98 Args: 

-

99 unsolved_instances: Number of unsolved instances 

-

100 sbs: Name of the single best solver 

-

101 runtime_solvers: Dictionary containing penalised average runtime per solver 

-

102 instance_results: Dictionary containing 

-

103 """ 

-

104 self.unsolved_instances = unsolved_instances 

-

105 self.sbs = sbs 

-

106 self.runtime_solvers = runtime_solvers 

-

107 self.instance_results = instance_results 

-

108 

-

109 self.solver_performance = {} 

-

110 # Iterate over each instance and aggregate the results 

-

111 for _, results in self.instance_results.items(): 

-

112 for solver_result in results: 

-

113 solver_name = solver_result[0] 

-

114 outcome = solver_result[1] 

-

115 # Initialize the solver's record in solver_performance if not present 

-

116 if solver_name not in self.solver_performance: 

-

117 self.solver_performance[solver_name] = { 

-

118 status: 0 for status in SolverStatus 

-

119 } 

-

120 # Increment the appropriate outcome count 

-

121 self.solver_performance[solver_name][outcome] += 1 

-
- - - diff --git a/Documentation/source/_static/coverage/z_89d5682e9ce6320a___init___py.html b/Documentation/source/_static/coverage/z_89d5682e9ce6320a___init___py.html deleted file mode 100644 index 7eb8def34..000000000 --- a/Documentation/source/_static/coverage/z_89d5682e9ce6320a___init___py.html +++ /dev/null @@ -1,105 +0,0 @@ - - - - - Coverage for sparkle/solver/__init__.py: 100% - - - - - -
-
-

- Coverage for sparkle/solver/__init__.py: - 100% -

- -

- 5 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1"""This package provides solver support for Sparkle.""" 

-

2from sparkle.solver.solver import Solver 

-

3from sparkle.solver.extractor import Extractor 

-

4from sparkle.solver.selector import Selector 

-

5 

-

6from sparkle.solver.validator import Validator 

-

7from sparkle.solver.verifier import SATVerifier, SolutionVerifier 

-

8# from sparkle.solver.ablation import AblationScenario # TODO: Remove cyclic dependency 

-
- - - diff --git a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_ablation_py.html b/Documentation/source/_static/coverage/z_89d5682e9ce6320a_ablation_py.html deleted file mode 100644 index c3256d34b..000000000 --- a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_ablation_py.html +++ /dev/null @@ -1,334 +0,0 @@ - - - - - Coverage for sparkle/solver/ablation.py: 33% - - - - - -
-
-

- Coverage for sparkle/solver/ablation.py: - 33% -

- -

- 111 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1#!/usr/bin/env python3 

-

2# -*- coding: UTF-8 -*- 

-

3"""Helper functions for ablation analysis.""" 

-

4from __future__ import annotations 

-

5import re 

-

6import shutil 

-

7import decimal 

-

8from pathlib import Path 

-

9 

-

10import runrunner as rrr 

-

11from runrunner.base import Runner, Run 

-

12 

-

13from sparkle.CLI.help import global_variables as gv 

-

14from sparkle.CLI.help import logging as sl 

-

15 

-

16from sparkle.configurator.implementations import SMAC2 

-

17from sparkle.platform import CommandName 

-

18from sparkle.solver import Solver 

-

19from sparkle.instance import InstanceSet 

-

20 

-

21 

-

22class AblationScenario: 

-

23 """Class for ablation analysis.""" 

-

24 def __init__(self: AblationScenario, 

-

25 solver: Solver, 

-

26 train_set: InstanceSet, 

-

27 test_set: InstanceSet, 

-

28 output_dir: Path, 

-

29 ablation_executable: Path = None, 

-

30 ablation_validation_executable: Path = None, 

-

31 override_dirs: bool = False) -> None: 

-

32 """Initialize ablation scenario. 

-

33 

-

34 Args: 

-

35 solver: Solver object 

-

36 train_set: The training instance 

-

37 test_set: The test instance 

-

38 output_dir: The output directory 

-

39 ablation_executable: (Only for execution) The ablation executable 

-

40 ablation_validation_executable: (Only for execution) The validation exec 

-

41 override_dirs: Whether to clean the scenario directory if it already exists 

-

42 """ 

-

43 self.ablation_exec = ablation_executable 

-

44 self.ablation_validation_exec = ablation_validation_executable 

-

45 self.solver = solver 

-

46 self.train_set = train_set 

-

47 self.test_set = test_set 

-

48 self.output_dir = output_dir 

-

49 self.scenario_name = f"{self.solver.name}_{self.train_set.name}" 

-

50 if self.test_set is not None: 

-

51 self.scenario_name += f"_{self.test_set.name}" 

-

52 self.scenario_dir = self.output_dir / self.scenario_name 

-

53 if override_dirs and self.scenario_dir.exists(): 

-

54 print("Warning: found existing ablation scenario. This will be removed.") 

-

55 shutil.rmtree(self.scenario_dir) 

-

56 

-

57 # Create required scenario directories 

-

58 self.tmp_dir = self.scenario_dir / "tmp" 

-

59 self.tmp_dir.mkdir(parents=True, exist_ok=True) 

-

60 

-

61 self.validation_dir = self.scenario_dir / "validation" 

-

62 self.validation_dir_tmp = self.validation_dir / "tmp" 

-

63 self.validation_dir_tmp.mkdir(parents=True, exist_ok=True) 

-

64 self.table_file = self.validation_dir / "log" / "ablation-validation-run1234.txt" 

-

65 

-

66 def create_configuration_file(self: AblationScenario) -> None: 

-

67 """Create a configuration file for ablation analysis. 

-

68 

-

69 Args: 

-

70 solver: Solver object 

-

71 instance_train_name: The training instance 

-

72 instance_test_name: The test instance 

-

73 

-

74 Returns: 

-

75 None 

-

76 """ 

-

77 ablation_scenario_dir = self.scenario_dir 

-

78 objective = gv.settings().get_general_sparkle_objectives()[0] 

-

79 configurator = gv.settings().get_general_sparkle_configurator() 

-

80 _, opt_config_str = configurator.get_optimal_configuration( 

-

81 self.solver, self.train_set, objective=objective) 

-

82 

-

83 # We need to check which params are missing and supplement with default values 

-

84 pcs = self.solver.get_pcs() 

-

85 for p in pcs: 

-

86 if p["name"] not in opt_config_str: 

-

87 opt_config_str += f" -{p['name']} {p['default']}" 

-

88 

-

89 # Ablation cannot deal with E scientific notation in floats 

-

90 ctx = decimal.Context(prec=16) 

-

91 for config in opt_config_str.split(" -"): 

-

92 _, value = config.strip().split(" ") 

-

93 if "e" in value.lower(): 

-

94 value = value.strip("'") 

-

95 float_value = float(value.lower()) 

-

96 formatted = format(ctx.create_decimal(float_value), "f") 

-

97 opt_config_str = opt_config_str.replace(value, formatted) 

-

98 

-

99 smac_run_obj = SMAC2.get_smac_run_obj(objective) 

-

100 objective_str = "MEAN10" if smac_run_obj == "RUNTIME" else "MEAN" 

        run_cutoff_time = gv.settings().get_general_target_cutoff_time()
        run_cutoff_length = gv.settings().get_configurator_target_cutoff_length()
        concurrent_clis = gv.settings().get_slurm_max_parallel_runs_per_node()
        ablation_racing = gv.settings().get_ablation_racing_flag()
        configurator = gv.settings().get_general_sparkle_configurator()
        pcs_file_path = f"{self.solver.get_pcs_file().absolute()}"  # Get Solver PCS

        # Create config file
        config_file = Path(f"{ablation_scenario_dir}/ablation_config.txt")
        config = (f'algo = "{configurator.configurator_target.absolute()} '
                  f'{self.solver.directory.absolute()} {objective}"\n'
                  f"execdir = {self.tmp_dir.absolute()}\n"
                  "experimentDir = ./\n"
                  f"deterministic = {1 if self.solver.deterministic else 0}\n"
                  f"run_obj = {smac_run_obj}\n"
                  f"overall_obj = {objective_str}\n"
                  f"cutoffTime = {run_cutoff_time}\n"
                  f"cutoff_length = {run_cutoff_length}\n"
                  f"cli-cores = {concurrent_clis}\n"
                  f"useRacing = {ablation_racing}\n"
                  "seed = 1234\n"
                  f"paramfile = {pcs_file_path}\n"
                  "instance_file = instances_train.txt\n"
                  "test_instance_file = instances_test.txt\n"
                  "sourceConfiguration=DEFAULT\n"
                  f'targetConfiguration="{opt_config_str}"')
        config_file.open("w").write(config)
        # Write config to validation directory
        conf_valid = config.replace(f"execdir = {self.tmp_dir.absolute()}\n",
                                    f"execdir = {self.validation_dir_tmp.absolute()}\n")
        (self.validation_dir / config_file.name).open("w").write(conf_valid)

    def create_instance_file(self: AblationScenario, test: bool = False) -> None:
        """Create an instance file for ablation analysis."""
        file_suffix = "_train.txt"
        instance_set = self.train_set
        if test:
            file_suffix = "_test.txt"
            instance_set = self.test_set if self.test_set is not None else self.train_set
        # We give the Ablation script the paths of the instances
        file_instance = self.scenario_dir / f"instances{file_suffix}"
        with file_instance.open("w") as fh:
            for instance in instance_set._instance_paths:
                # We need to unpack the multi instance file paths in quotes
                if isinstance(instance, list):
                    joined_instances = " ".join(
                        [str(file.absolute()) for file in instance])
                    fh.write(f"{joined_instances}\n")
                else:
                    fh.write(f"{instance.absolute()}\n")
        # Copy to validation directory
        shutil.copyfile(file_instance, self.validation_dir / file_instance.name)

    def check_for_ablation(self: AblationScenario) -> bool:
        """Checks whether ablation has terminated successfully."""
        if not self.table_file.is_file():
            return False
        # First line in the table file should be "Ablation analysis validation complete."
        table_line = self.table_file.open().readline().strip()
        return table_line == "Ablation analysis validation complete."

    def read_ablation_table(self: AblationScenario) -> list[list[str]]:
        """Read from the ablation table of a scenario."""
        if not self.check_for_ablation():
            # No ablation table exists for this solver-instance pair
            return []
        results = [["Round", "Flipped parameter", "Source value", "Target value",
                    "Validation result"]]

        for line in self.table_file.open().readlines():
            # Pre-process lines from the ablation file and add them to the results list.
            # Sometimes ablation rounds switch multiple parameters at once.
            # EXAMPLE: 2 EDR, EDRalpha 0, 0.1 1, 0.1013241633106732 486.31691
            # To split the row correctly, we remove the space before the comma
            # separated parameters and add it back afterwards.
            values = re.sub(r"\s+", " ", line.strip())
            values = re.sub(r", ", ",", values)
            values = [val.replace(",", ", ") for val in values.split(" ")]
            if len(values) == 5:
                results.append(values)
        return results

    def submit_ablation(self: AblationScenario,
                        run_on: Runner = Runner.SLURM) -> list[Run]:
        """Submit an ablation job.

        Args:
            run_on: Determines to which RunRunner queue the job is added

        Returns:
            A list of Run objects. Empty when running locally.
        """
        # 1. Submit the ablation to the RunRunner queue
        clis = gv.settings().get_slurm_max_parallel_runs_per_node()
        cmd = f"{self.ablation_exec.absolute()} --optionFile ablation_config.txt"
        srun_options = ["-N1", "-n1", f"-c{clis}"]
        sbatch_options = [f"--cpus-per-task={clis}"] +\
            gv.settings().get_slurm_extra_options(as_args=True)

        run_ablation = rrr.add_to_queue(
            runner=run_on,
            cmd=cmd,
            name=CommandName.RUN_ABLATION,
            base_dir=sl.caller_log_dir,
            path=self.scenario_dir,
            sbatch_options=sbatch_options,
            srun_options=srun_options)

        runs = []
        if run_on == Runner.LOCAL:
            run_ablation.wait()
        runs.append(run_ablation)

        # 2. Run ablation validation run if we have a test set to run on
        if self.test_set is not None:
            # Validation dir should have a copy of all needed files, except for the
            # output of the ablation run, which is stored in ablation-run[seed].txt
            cmd = f"{self.ablation_validation_exec.absolute()} "\
                  "--optionFile ablation_config.txt "\
                  "--ablationLogFile ../log/ablation-run1234.txt"

            run_ablation_validation = rrr.add_to_queue(
                runner=run_on,
                cmd=cmd,
                name=CommandName.RUN_ABLATION_VALIDATION,
                path=self.validation_dir,
                base_dir=sl.caller_log_dir,
                dependencies=run_ablation,
                sbatch_options=sbatch_options,
                srun_options=srun_options)

            if run_on == Runner.LOCAL:
                run_ablation_validation.wait()
            runs.append(run_ablation_validation)

        return runs
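
The row pre-processing in read_ablation_table is easiest to verify in isolation. Below is a standalone replay of that splitting logic on the example row quoted in the comment above (plain Python, no Sparkle imports; the expected output follows from tracing the code, not from a run of the ablation tool):

import re

# Example row from the comment in read_ablation_table: one ablation round
# that flips two parameters at once.
line = "2 EDR, EDRalpha 0, 0.1 1, 0.1013241633106732 486.31691"
values = re.sub(r"\s+", " ", line.strip())  # collapse repeated whitespace
values = re.sub(r", ", ",", values)         # glue multi-parameter groups together
values = [val.replace(",", ", ") for val in values.split(" ")]
print(len(values))  # 5, so the row is accepted
print(values)
# ['2', 'EDR, EDRalpha', '0, 0.1', '1, 0.1013241633106732', '486.31691']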

diff --git a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_extractor_py.html b/Documentation/source/_static/coverage/z_89d5682e9ce6320a_extractor_py.html
deleted file mode 100644
index 71a7b7b40..000000000
--- a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_extractor_py.html
+++ /dev/null
@@ -1,247 +0,0 @@
[Deleted coverage page for sparkle/solver/extractor.py: 32%, 65 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. The listed source was:]

"""Methods regarding feature extractors."""
from __future__ import annotations
from pathlib import Path
import ast
import subprocess
from sparkle.types import SparkleCallable, SolverStatus
from sparkle.structures import FeatureDataFrame
from sparkle.tools.runsolver_parsing import get_status


class Extractor(SparkleCallable):
    """Extractor base class for extracting features from instances."""
    wrapper = "sparkle_extractor_wrapper.py"

    def __init__(self: Extractor,
                 directory: Path,
                 runsolver_exec: Path = None,
                 raw_output_directory: Path = None,
                 ) -> None:
        """Initialize the extractor.

        Args:
            directory: Directory of the extractor.
            runsolver_exec: Path to the runsolver executable.
                By default, runsolver in directory.
            raw_output_directory: Directory where the extractor will write its raw
                output. Defaults to directory / tmp
        """
        super().__init__(directory, runsolver_exec, raw_output_directory)
        self._features = None
        self._feature_groups = None
        self._output_dimension = None
        self._groupwise_computation = None

    @property
    def features(self: Extractor) -> list[tuple[str, str]]:
        """Determines the features of the extractor."""
        if self._features is None:
            extractor_process = subprocess.run(
                [self.directory / Extractor.wrapper, "-features"], capture_output=True)
            self._features = ast.literal_eval(extractor_process.stdout.decode())
        return self._features

    @property
    def feature_groups(self: Extractor) -> list[str]:
        """Returns the various feature groups the Extractor has."""
        if self._feature_groups is None:
            self._feature_groups = list(set([group for group, _ in self.features]))
        return self._feature_groups

    @property
    def output_dimension(self: Extractor) -> int:
        """The size of the output vector of the extractor."""
        return len(self.features)

    @property
    def groupwise_computation(self: Extractor) -> bool:
        """Determines if you can call the extractor per group for parallelisation."""
        if self._groupwise_computation is None:
            extractor_help = subprocess.run([self.directory / Extractor.wrapper, "-h"],
                                            capture_output=True)
            # Not the cleanest / most precise way to determine this
            self._groupwise_computation =\
                "-feature_group" in extractor_help.stdout.decode()
        return self._groupwise_computation

    def build_cmd(self: Extractor,
                  instance: Path | list[Path],
                  feature_group: str = None,
                  output_file: Path = None,
                  runsolver_args: list[str | Path] = None,
                  ) -> list[str]:
        """Builds the command line call as a list, one item per word.

        Args:
            instance: The instance to run on
            feature_group: The optional feature group to run the extractor for.
            output_file: Optional file to write the output to.
            runsolver_args: The arguments for runsolver. If not present,
                will run the extractor without runsolver.

        Returns:
            The command, separated per item in the list.
        """
        cmd_list_extractor = []
        if not isinstance(instance, list):
            instance = [instance]
        if runsolver_args is not None:
            # Ensure stringification of runsolver configuration is done correctly
            cmd_list_extractor += [str(self.runsolver_exec.absolute())]
            cmd_list_extractor += [str(runsolver_config) for runsolver_config
                                   in runsolver_args]
        cmd_list_extractor += [f"{self.directory / Extractor.wrapper}",
                               "-extractor_dir", f"{self.directory}/",
                               "-instance_file"] + [str(file) for file in instance]
        if feature_group is not None:
            cmd_list_extractor += ["-feature_group", feature_group]
        if output_file is not None:
            cmd_list_extractor += ["-output_file", str(output_file)]
        return cmd_list_extractor

    def run(self: Extractor,
            instance: Path | list[Path],
            feature_group: str = None,
            output_file: Path = None,
            runsolver_args: list[str | Path] = None) -> list | None:
        """Runs the extractor through subprocess.

        Args:
            instance: Path to the instance to run on
            feature_group: The feature group to compute. Must be supported by the
                extractor to use.
            output_file: Target output. If None, the features are parsed from stdout.
            runsolver_args: List of runsolver args, each word a separate item.

        Returns:
            The features, or None if an output file is used or the features can
            not be found.
        """
        if feature_group is not None and not self.groupwise_computation:
            # This extractor cannot handle groups, compute all features
            feature_group = None
        cmd_extractor = self.build_cmd(
            instance, feature_group, output_file, runsolver_args)
        extractor = subprocess.run(cmd_extractor, capture_output=True)
        if output_file is None:
            try:
                features = ast.literal_eval(extractor.stdout.decode())
                return features
            except Exception:
                return None
        return None

    def get_feature_vector(self: Extractor,
                           result: Path,
                           runsolver_values: Path = None) -> list[str]:
        """Extracts a feature vector from an output file.

        Args:
            result: The raw output of the extractor
            runsolver_values: The output of runsolver.

        Returns:
            A list of features. Vector of missing values upon failure.
        """
        if result.exists() and get_status(runsolver_values,
                                          None) != SolverStatus.TIMEOUT:
            feature_values = ast.literal_eval(result.read_text())
            return [str(value) for _, _, value in feature_values]
        return [FeatureDataFrame.missing_value] * self.output_dimension
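
The wrapper contract implied above (a -features flag that prints a Python-literal list of (group, name) tuples, and a help text that advertises -feature_group when groupwise computation is supported) can be illustrated with a hypothetical minimal wrapper. This is an assumption-based sketch of that contract, not part of the deleted sources:

#!/usr/bin/env python3
# Hypothetical minimal sparkle_extractor_wrapper.py conforming to what the
# Extractor class above expects from its wrapper script.
import sys

FEATURES = [("base", "n_vars"), ("base", "n_clauses")]

if "-features" in sys.argv:
    # Extractor.features literal-evals this stdout into a list of tuples.
    print(FEATURES)
elif "-h" in sys.argv:
    # Extractor.groupwise_computation checks for "-feature_group" in this text.
    print("usage: wrapper -extractor_dir DIR -instance_file FILE"
          " [-feature_group GROUP] [-output_file FILE]")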

diff --git a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_selector_py.html b/Documentation/source/_static/coverage/z_89d5682e9ce6320a_selector_py.html
deleted file mode 100644
index 430ce9264..000000000
--- a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_selector_py.html
+++ /dev/null
@@ -1,262 +0,0 @@
[Deleted coverage page for sparkle/solver/selector.py: 34%, 64 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. The listed source was:]

"""File to handle a Selector for selecting Solvers."""
from __future__ import annotations
from pathlib import Path
import subprocess
import ast

import runrunner as rrr
from runrunner import Runner, Run

from sparkle.types import SparkleCallable, SparkleObjective
from sparkle.structures import FeatureDataFrame, PerformanceDataFrame


class Selector(SparkleCallable):
    """The Selector class for handling Algorithm Selection."""

    def __init__(self: SparkleCallable,
                 executable_path: Path,
                 raw_output_directory: Path) -> None:
        """Initialize the Selector object.

        Args:
            executable_path: Path of the Selector executable.
            raw_output_directory: Directory where the Selector will write its raw
                output.
        """
        self.selector_builder_path = executable_path
        self.directory = self.selector_builder_path.parent
        self.name = self.selector_builder_path.name
        self.raw_output_directory = raw_output_directory

        if not self.raw_output_directory.exists():
            self.raw_output_directory.mkdir(parents=True)

    def build_construction_cmd(
            self: Selector,
            target_file: Path,
            performance_data: Path,
            feature_data: Path,
            objective: SparkleObjective,
            runtime_cutoff: int | float | str = None,
            wallclock_limit: int | float | str = None) -> list[str | Path]:
        """Builds the command line call for constructing the Selector.

        Args:
            target_file: Path to the file to save the Selector to.
            performance_data: Path to the performance data csv.
            feature_data: Path to the feature data csv.
            objective: The objective to optimize for selection.
            runtime_cutoff: Cutoff for the runtime in seconds. Defaults to None
            wallclock_limit: Cutoff for total wallclock in seconds. Defaults to None

        Returns:
            The command list for constructing the Selector.
        """
        objective_function = "runtime" if objective.time else "solution_quality"
        # Python3 to avoid execution rights
        cmd = ["python3", self.selector_builder_path,
               "--performance_csv", performance_data,
               "--feature_csv", feature_data,
               "--objective", objective_function,
               "--save", target_file]
        if runtime_cutoff is not None:
            cmd.extend(["--runtime_cutoff", str(runtime_cutoff), "--tune"])
        if wallclock_limit is not None:
            cmd.extend(["--wallclock_limit", str(wallclock_limit)])
        return cmd

    def construct(self: Selector,
                  target_file: Path | str,
                  performance_data: PerformanceDataFrame,
                  feature_data: FeatureDataFrame,
                  objective: SparkleObjective,
                  runtime_cutoff: int | float | str = None,
                  wallclock_limit: int | float | str = None,
                  run_on: Runner = Runner.SLURM,
                  sbatch_options: list[str] = None,
                  base_dir: Path = Path()) -> Run:
        """Construct the Selector.

        Args:
            target_file: Path to the file to save the Selector to.
            performance_data: The performance data to construct the Selector with.
            feature_data: The feature data to construct the Selector with.
            objective: The objective to optimize for selection.
            runtime_cutoff: Cutoff for the runtime in seconds.
            wallclock_limit: Cutoff for the wallclock time in seconds.
            run_on: Which runner to use. Defaults to slurm.
            sbatch_options: Additional options to pass to sbatch.
            base_dir: The base directory to run the Selector in.

        Returns:
            The Run object of the construction job.
        """
        if isinstance(target_file, str):
            target_file = self.raw_output_directory / target_file
        # Convert the dataframes to Selector format
        performance_csv = performance_data.to_autofolio(objective=objective,
                                                        target=target_file.parent)
        feature_csv = feature_data.to_autofolio(target_file.parent)
        cmd = self.build_construction_cmd(target_file,
                                          performance_csv,
                                          feature_csv,
                                          objective,
                                          runtime_cutoff,
                                          wallclock_limit)

        cmd_str = " ".join([str(c) for c in cmd])
        construct = rrr.add_to_queue(
            runner=run_on,
            cmd=[cmd_str],
            name="construct_selector",
            base_dir=base_dir,
            stdout=Path("normal.log"),
            stderr=Path("error.log"),
            sbatch_options=sbatch_options)
        if run_on == Runner.LOCAL:
            construct.wait()
            if not target_file.is_file():
                print(f"Selector construction of {self.name} failed!")

        return construct

    def build_cmd(self: Selector,
                  selector_path: Path,
                  feature_vector: list | str) -> list[str | Path]:
        """Builds the command line call for running the Selector."""
        if isinstance(feature_vector, list):
            feature_vector = " ".join(map(str, feature_vector))

        return ["python3", self.selector_builder_path,
                "--load", selector_path,
                "--feature_vec", feature_vector]

    def run(self: Selector,
            selector_path: Path,
            feature_vector: list | str) -> list:
        """Run the Selector, returning the prediction schedule upon success."""
        cmd = self.build_cmd(selector_path, feature_vector)
        run = subprocess.run(cmd, capture_output=True)
        if run.returncode != 0:
            print(f"Selector run of {self.name} failed! Error:\n"
                  f"{run.stderr.decode()}")
            return None
        # Process the prediction schedule from the output
        schedule = Selector.process_predict_schedule_output(run.stdout.decode())
        if schedule is None:
            print(f"Error getting predict schedule! Selector {self.name} output:\n"
                  f"{run.stderr.decode()}")
        return schedule

    @staticmethod
    def process_predict_schedule_output(output: str) -> list:
        """Return the predicted algorithm schedule as a list."""
        prefix_string = "Selected Schedule [(algorithm, budget)]: "
        predict_schedule = ""
        predict_schedule_lines = output.splitlines()
        for line in predict_schedule_lines:
            if line.strip().startswith(prefix_string):
                predict_schedule = line.strip()
                break
        if predict_schedule == "":
            return None
        predict_schedule_string = predict_schedule[len(prefix_string):]
        return ast.literal_eval(predict_schedule_string)
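
Since process_predict_schedule_output depends only on the stdout text format, it can be checked with a synthetic line; the solver names below are hypothetical:

import ast

prefix_string = "Selected Schedule [(algorithm, budget)]: "
sample_output = ("some log line\n"
                 + prefix_string + "[('SolverA', 25.0), ('SolverB', 35.0)]")
for line in sample_output.splitlines():
    if line.strip().startswith(prefix_string):
        # Same slicing and literal_eval as in the static method above.
        schedule = ast.literal_eval(line.strip()[len(prefix_string):])
        print(schedule)  # [('SolverA', 25.0), ('SolverB', 35.0)]
        break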

diff --git a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_solver_py.html b/Documentation/source/_static/coverage/z_89d5682e9ce6320a_solver_py.html
deleted file mode 100644
index be677f8c2..000000000
--- a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_solver_py.html
+++ /dev/null
@@ -1,418 +0,0 @@
[Deleted coverage page for sparkle/solver/solver.py: 36%, 146 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. The listed source was:]

"""File to handle a solver and its directories."""

from __future__ import annotations
import sys
from typing import Any
import shlex
import ast
import json
from pathlib import Path

import runrunner as rrr
from runrunner.local import LocalRun
from runrunner.slurm import SlurmRun
from runrunner.base import Status, Runner

from sparkle.tools import runsolver_parsing, general as tg
from sparkle.tools import pcsparser
from sparkle.types import SparkleCallable, SolverStatus
from sparkle.solver.verifier import SolutionVerifier
from sparkle.instance import InstanceSet
from sparkle.types import resolve_objective, SparkleObjective, UseTime


class Solver(SparkleCallable):
    """Class to handle a solver and its directories."""
    meta_data = "solver_meta.txt"
    wrapper = "sparkle_solver_wrapper.py"

    def __init__(self: Solver,
                 directory: Path,
                 raw_output_directory: Path = None,
                 runsolver_exec: Path = None,
                 deterministic: bool = None,
                 verifier: SolutionVerifier = None) -> None:
        """Initialize solver.

        Args:
            directory: Directory of the solver.
            raw_output_directory: Directory where the solver will write its raw
                output. Defaults to directory / tmp
            runsolver_exec: Path to the runsolver executable.
                By default, runsolver in directory.
            deterministic: Bool indicating determinism of the algorithm.
                Defaults to False.
            verifier: The solution verifier to use. If None, no verifier is used.
        """
        super().__init__(directory, runsolver_exec, raw_output_directory)
        self.deterministic = deterministic
        self.verifier = verifier
        self.meta_data_file = self.directory / Solver.meta_data

        if self.raw_output_directory is None:
            self.raw_output_directory = self.directory / "tmp"
            self.raw_output_directory.mkdir(exist_ok=True)
        if self.runsolver_exec is None:
            self.runsolver_exec = self.directory / "runsolver"
        if not self.meta_data_file.exists():
            self.meta_data_file = None
        if self.deterministic is None:
            if self.meta_data_file is not None:
                # Read the parameter from file
                meta_dict = ast.literal_eval(self.meta_data_file.open().read())
                self.deterministic = meta_dict["deterministic"]
            else:
                self.deterministic = False

    def _get_pcs_file(self: Solver) -> Path | bool:
        """Get the path of the parameter file.

        Returns:
            Path to the parameter file or False if the parameter file does not exist.
        """
        pcs_files = [p for p in self.directory.iterdir() if p.suffix == ".pcs"]
        if len(pcs_files) != 1:
            # We only consider one PCS file per solver
            return False
        return pcs_files[0]

    def get_pcs_file(self: Solver) -> Path:
        """Get the path of the parameter file.

        Returns:
            Path to the parameter file. None if it can not be resolved.
        """
        if not (file_path := self._get_pcs_file()):
            return None
        return file_path

    def read_pcs_file(self: Solver) -> bool:
        """Checks if the pcs file can be read."""
        pcs_file = self._get_pcs_file()
        try:
            parser = pcsparser.PCSParser()
            parser.load(str(pcs_file), convention="smac")
            return True
        except SyntaxError:
            pass
        return False

    def get_pcs(self: Solver) -> dict[str, tuple[str, str, str]]:
        """Get the parameter content of the PCS file."""
        if not (pcs_file := self.get_pcs_file()):
            return None
        parser = pcsparser.PCSParser()
        parser.load(str(pcs_file), convention="smac")
        return [p for p in parser.pcs.params if p["type"] == "parameter"]

    def build_cmd(self: Solver,
                  instance: str | list[str],
                  objectives: list[SparkleObjective],
                  seed: int,
                  cutoff_time: int = None,
                  configuration: dict = None) -> list[str]:
        """Build the solver call on an instance with a configuration.

        Args:
            instance: Path to the instance.
            objectives: The objectives the solver should report on.
            seed: Seed of the solver.
            cutoff_time: Cutoff time for the solver.
            configuration: Configuration of the solver.

        Returns:
            List of commands and arguments to execute the solver.
        """
        if configuration is None:
            configuration = {}
        # Ensure configuration contains required entries for each wrapper
        configuration["solver_dir"] = str(self.directory.absolute())
        configuration["instance"] = instance
        configuration["seed"] = seed
        configuration["objectives"] = ",".join([str(obj) for obj in objectives])
        if cutoff_time is not None:  # Use RunSolver
            configuration["cutoff_time"] = cutoff_time
            # Create RunSolver Logs
            # --timestamp
            #   instructs to timestamp each line of the solver standard output and
            #   error files (which are then redirected to stdout)

            # --use-pty
            #   use a pseudo-terminal to collect the solver output. Currently only
            #   available when lines are timestamped. Some I/O libraries (including
            #   the C library) automatically flush the output after each line when
            #   the standard output is a terminal. There's no automatic flush when
            #   the standard output is a pipe or a plain file. See setlinebuf() for
            #   some details. This option instructs runsolver to use a
            #   pseudo-terminal instead of a pipe/file to collect the solver
            #   output. This fools the solver which will line-buffer its output.

            # -w filename or --watcher-data filename
            #   sends the watcher information to filename

            # -v filename or --var filename
            #   save the most relevant information (times, ...)
            #   in an easy to parse VAR=VALUE file

            # -o filename or --solver-data filename
            #   redirects the solver output (both stdout and stderr) to filename
            inst_name = Path(instance).name
            raw_result_path =\
                Path(f"{self.name}_{inst_name}_{tg.get_time_pid_random_string()}.rawres")
            runsolver_watch_data_path = raw_result_path.with_suffix(".log")
            runsolver_values_path = raw_result_path.with_suffix(".val")

            solver_cmd = [str(self.runsolver_exec.absolute()),
                          "--timestamp", "--use-pty",
                          "--cpu-limit", str(cutoff_time),
                          "-w", str(runsolver_watch_data_path),
                          "-v", str(runsolver_values_path),
                          "-o", str(raw_result_path)]
        else:
            configuration["cutoff_time"] = sys.maxsize
            solver_cmd = []

        # Ensure stringification of dictionary will go correctly for key value pairs
        configuration = {key: str(configuration[key]) for key in configuration}
        solver_cmd += [str((self.directory / Solver.wrapper).absolute()),
                       f"'{json.dumps(configuration)}'"]
        return solver_cmd

    def run(self: Solver,
            instance: str | list[str] | InstanceSet,
            objectives: list[SparkleObjective],
            seed: int,
            cutoff_time: int = None,
            configuration: dict = None,
            run_on: Runner = Runner.LOCAL,
            commandname: str = "run_solver",
            sbatch_options: list[str] = None,
            cwd: Path = None) -> SlurmRun | list[dict[str, Any]] | dict[str, Any]:
        """Run the solver on an instance with a certain configuration.

        Args:
            instance: The instance(s) to run the solver on, list in case of
                multi-file. In case of an instance set, will run on all instances
                in the set.
            objectives: The objectives the solver should report on.
            seed: Seed to run the solver with. Fill with an arbitrary int in case
                of a deterministic solver.
            cutoff_time: The cutoff time for the solver, measured through RunSolver.
                If None, will be executed without RunSolver.
            configuration: The solver configuration to use. Can be empty.
            run_on: Whether to run locally or on Slurm. Defaults to local.
            commandname: Name for the job in the runner queue.
            sbatch_options: Additional options to pass to sbatch.
            cwd: Path where to execute. Defaults to self.raw_output_directory.

        Returns:
            Solver output dict possibly with runsolver values.
        """
        if cwd is None:
            cwd = self.raw_output_directory
        cmds = []
        if isinstance(instance, InstanceSet):
            for inst in instance.instance_paths:
                solver_cmd = self.build_cmd(inst.absolute(),
                                            objectives=objectives,
                                            seed=seed,
                                            cutoff_time=cutoff_time,
                                            configuration=configuration)
                cmds.append(" ".join(solver_cmd))
        else:
            solver_cmd = self.build_cmd(instance,
                                        objectives=objectives,
                                        seed=seed,
                                        cutoff_time=cutoff_time,
                                        configuration=configuration)
            cmds.append(" ".join(solver_cmd))
        run = rrr.add_to_queue(runner=run_on,
                               cmd=cmds,
                               name=commandname,
                               base_dir=cwd,
                               path=cwd,
                               sbatch_options=sbatch_options)

        if isinstance(run, LocalRun):
            run.wait()
            # Subprocess resulted in error
            if run.status == Status.ERROR:
                print(f"WARNING: Solver {self.name} execution seems to have failed!\n")
                for i, job in enumerate(run.jobs):
                    print(f"[Job {i}] The used command was: {cmds[i]}\n"
                          "The error yielded was:\n"
                          f"\t-stdout: '{run.jobs[0]._process.stdout}'\n"
                          f"\t-stderr: '{run.jobs[0]._process.stderr}'\n")
                return {"status": SolverStatus.ERROR, }

            solver_outputs = []
            for i, job in enumerate(run.jobs):
                solver_cmd = cmds[i].split(" ")
                runsolver_configuration = None
                if solver_cmd[0] == str(self.runsolver_exec.absolute()):
                    runsolver_configuration = solver_cmd[:11]
                solver_output = Solver.parse_solver_output(run.jobs[i].stdout,
                                                           runsolver_configuration,
                                                           cwd)
                if self.verifier is not None:
                    solver_output["status"] = self.verifier.verify(
                        instance, Path(runsolver_configuration[-1]))
                solver_outputs.append(solver_output)
            return solver_outputs if len(solver_outputs) > 1 else solver_output
        return run

    @staticmethod
    def config_str_to_dict(config_str: str) -> dict[str, str]:
        """Parse a configuration string to a dictionary."""
        # First we filter the configuration of unwanted characters
        config_str = config_str.strip().replace("-", "")
        # Then we split the string by spaces, but conserve substrings
        config_list = shlex.split(config_str)
        # We return empty for empty input OR uneven input
        if config_str == "" or config_str == r"{}" or len(config_list) & 1:
            return {}
        config_dict = {}
        for index in range(0, len(config_list), 2):
            # As the value will already be a string object, no quotes are allowed in it
            value = config_list[index + 1].strip('"').strip("'")
            config_dict[config_list[index]] = value
        return config_dict

    @staticmethod
    def parse_solver_output(solver_output: str,
                            runsolver_configuration: list[str] = None,
                            cwd: Path = None) -> dict[str, Any]:
        """Parse the output of the solver.

        Args:
            solver_output: The output of the solver run which needs to be parsed
            runsolver_configuration: The runsolver configuration the solver was
                wrapped with. If runsolver was not used this should be None.
            cwd: Path where the solver was executed.

        Returns:
            Dictionary representing the parsed solver output
        """
        if runsolver_configuration is not None:
            parsed_output = runsolver_parsing.get_solver_output(runsolver_configuration,
                                                                solver_output,
                                                                cwd)
        else:
            parsed_output = ast.literal_eval(solver_output)

        # Cast status attribute from str to Enum
        parsed_output["status"] = SolverStatus(parsed_output["status"])
        # Apply objectives to parsed output, runtime based objectives added here
        for key, value in parsed_output.items():
            if key == "status":
                continue
            objective = resolve_objective(key)
            if objective is None:
                continue
            if objective.use_time == UseTime.NO:
                if objective.post_process is not None:
                    parsed_output[objective] = objective.post_process(value)
            else:
                if runsolver_configuration is None:
                    continue
                if objective.use_time == UseTime.CPU_TIME:
                    parsed_output[key] = parsed_output["cpu_time"]
                else:
                    parsed_output[key] = parsed_output["wall_time"]
                if objective.post_process is not None:
                    parsed_output[key] = objective.post_process(
                        parsed_output[key], parsed_output["cutoff_time"])
        if "cutoff_time" in parsed_output:
            del parsed_output["cutoff_time"]
        return parsed_output
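
Solver.config_str_to_dict can be replayed without the class; the parameter names below are hypothetical. Note that the initial replace("-", "") removes every hyphen, including any inside parameter names, so hyphenated names would come out concatenated:

import shlex

config_str = "-init_solution '1' -perform_first_div '0'"
filtered = config_str.strip().replace("-", "")
config_list = shlex.split(filtered)  # conserves quoted substrings
config_dict = {config_list[i]: config_list[i + 1].strip('"').strip("'")
               for i in range(0, len(config_list), 2)}
print(config_dict)  # {'init_solution': '1', 'perform_first_div': '0'}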

diff --git a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_validator_py.html b/Documentation/source/_static/coverage/z_89d5682e9ce6320a_validator_py.html
deleted file mode 100644
index a1b45c5ac..000000000
--- a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_validator_py.html
+++ /dev/null
@@ -1,327 +0,0 @@
[Deleted coverage page for sparkle/solver/validator.py: 17%, 109 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. The listed source was:]

"""File containing the Validator class."""

from __future__ import annotations

import sys
from pathlib import Path
import csv
import ast
import runrunner as rrr
from runrunner import Runner, Run

from sparkle.solver import Solver
from sparkle.instance import InstanceSet
from sparkle.types import SparkleObjective, resolve_objective
from sparkle.tools.runsolver_parsing import get_solver_args


class Validator():
    """Class to handle the validation of solvers on instance sets."""
    def __init__(self: Validator,
                 out_dir: Path = Path(),
                 tmp_out_dir: Path = Path()) -> None:
        """Construct the validator."""
        self.out_dir = out_dir
        self.tmp_out_dir = tmp_out_dir

    def validate(self: Validator,
                 solvers: list[Path] | list[Solver] | Solver | Path,
                 configurations: list[dict] | dict | Path,
                 instance_sets: list[InstanceSet],
                 objectives: list[SparkleObjective],
                 cut_off: int,
                 subdir: Path = None,
                 dependency: list[Run] | Run = None,
                 sbatch_options: list[str] = [],
                 run_on: Runner = Runner.SLURM) -> Run:
        """Validate a list of solvers (with configurations) on a set of instances.

        Args:
            solvers: list of solvers to validate
            configurations: list of configurations for each solver we validate.
                If a path is supplied, will use each line as a configuration.
            instance_sets: set of instance sets on which we want to validate each
                solver
            objectives: list of objectives to validate
            cut_off: maximum run time for the solver per instance
            subdir: The subdir where to place the output in the outputdir. If None,
                a semi-unique combination of solver_instanceset is created.
            dependency: Jobs to wait for before executing the validation.
            sbatch_options: list of slurm batch options
            run_on: whether to run on SLURM or local
        """
        if not isinstance(solvers, list) and isinstance(configurations, list):
            # If we receive one solver but multiple configurations, we cast the
            # solvers to a list of the same length
            solvers = [solvers] * len(configurations)
        elif not isinstance(configurations, list) and isinstance(solvers, list):
            # If there is only one configuration, we cast it to a list of the same
            # length as the solver list
            configurations = [configurations] * len(solvers)
        if not isinstance(solvers, list) or len(configurations) != len(solvers):
            print("Error: Number of solvers and configurations does not match!")
            sys.exit(-1)
        # Ensure we have the object representation of solvers
        solvers = [Solver(s) if isinstance(s, Path) else s for s in solvers]
        cmds = []
        out_paths = []
        for index, (solver, config) in enumerate(zip(solvers, configurations)):
            if config is None:
                config = {}
            elif isinstance(config, Path):
                # Point to the config line in file
                config = {"config_path": config}
            for instance_set in instance_sets:
                if subdir is None:
                    out_path = self.out_dir / f"{solver.name}_{instance_set.name}"
                else:
                    out_path = self.out_dir / subdir
                out_path.mkdir(exist_ok=True)
                for instance_path in instance_set._instance_paths:
                    cmds.append(" ".join(
                        solver.build_cmd(instance=instance_path.absolute(),
                                         objectives=objectives,
                                         seed=index,
                                         cutoff_time=cut_off,
                                         configuration=config)))
                out_paths.extend([out_path] * len(instance_set._instance_paths))
        return rrr.add_to_queue(
            runner=run_on,
            cmd=cmds,
            name="validation",
            base_dir=self.tmp_out_dir,
            path=out_paths,
            dependencies=dependency,
            sbatch_options=sbatch_options,
        )

    def retrieve_raw_results(self: Validator,
                             solver: Solver,
                             instance_sets: InstanceSet | list[InstanceSet],
                             subdir: Path = None,
                             log_dir: Path = None) -> None:
        """Checks the raw results of a given solver for a specific instance_set.

        Writes the raw results to a unified CSV file for the solver/instance_set
        combination.

        Args:
            solver: The solver for which to check the raw result path
            instance_sets: The set of instances for which to retrieve the results
            subdir: Subdir where the CSV is to be placed, passed to the append
                method.
            log_dir: The directory to search for log files. If None, defaults to
                the log directory of the Solver.
        """
        if isinstance(instance_sets, InstanceSet):
            instance_sets = [instance_sets]
        if log_dir is None:
            log_dir = solver.raw_output_directory
        for res in log_dir.iterdir():
            if res.suffix != ".rawres":
                continue
            solver_args = get_solver_args(res.with_suffix(".log"))
            solver_args = ast.literal_eval(solver_args)
            instance_path = Path(solver_args["instance"])
            # Remove default args
            if "config_path" in solver_args:
                # The actual solver configuration can be found elsewhere
                row_idx = int(solver_args["seed"])
                config_path = Path(solver_args["config_path"])
                if not config_path.exists():
                    config_path = log_dir / config_path
                config_str = config_path.open("r").readlines()[row_idx]
                solver_args = Solver.config_str_to_dict(config_str)
            else:
                for def_arg in ["instance", "solver_dir", "cutoff_time",
                                "seed", "objectives"]:
                    if def_arg in solver_args:
                        del solver_args[def_arg]
            solver_args = str(solver_args).replace('"', "'")

            for instance_set in instance_sets:
                if instance_path.name in instance_set._instance_names:
                    out_dict = Solver.parse_solver_output(
                        "",
                        ["-o", res.name,
                         "-v", res.with_suffix(".val").name,
                         "-w", res.with_suffix(".log").name],
                        log_dir)
                    self.append_entry_to_csv(solver.name,
                                             solver_args,
                                             instance_set,
                                             instance_path.name,
                                             solver_output=out_dict,
                                             subdir=subdir)
            res.unlink()
            res.with_suffix(".val").unlink(missing_ok=True)
            res.with_suffix(".log").unlink(missing_ok=True)

    def get_validation_results(self: Validator,
                               solver: Solver,
                               instance_set: InstanceSet,
                               source_dir: Path = None,
                               subdir: Path = None,
                               config: str = None) -> list[list[str]]:
        """Query the results of the validation of solver on instance_set.

        Args:
            solver: Solver object
            instance_set: Instance set
            source_dir: Path where to look for any unprocessed output.
                By default, look in the solver's tmp dir.
            subdir: Path where to place the .csv file subdir. By default will be
                'self.outputdir/solver.name_instanceset.name/validation.csv'
            config: Path to the configuration if the solver was configured, None
                otherwise

        Returns:
            A list of row lists with string values
        """
        if source_dir is None:
            source_dir = self.out_dir / f"{solver.name}_{instance_set.name}"
        if any(x.suffix == ".rawres" for x in source_dir.iterdir()):
            self.retrieve_raw_results(
                solver, instance_set, subdir=subdir, log_dir=source_dir)
        if subdir is None:
            subdir = Path(f"{solver.name}_{instance_set.name}")
        csv_file = self.out_dir / subdir / "validation.csv"
        csv_data = [line for line in csv.reader(csv_file.open("r"))]
        header = csv_data[0]
        if config is not None:
            # We filter on the config string by subdict
            if isinstance(config, str):
                config = Solver.config_str_to_dict(config)
            csv_data = [line for line in csv_data[1:] if
                        config.items() == ast.literal_eval(line[1]).items()]
            csv_data.insert(0, header)
        return csv_data

    def append_entry_to_csv(self: Validator,
                            solver: str,
                            config_str: str,
                            instance_set: InstanceSet,
                            instance: str,
                            solver_output: dict,
                            subdir: Path = None) -> None:
        """Append a validation result as a row to a CSV file."""
        if subdir is None:
            subdir = Path(f"{solver}_{instance_set.name}")
        out_dir = self.out_dir / subdir
        if not out_dir.exists():
            out_dir.mkdir(parents=True)
        csv_file = out_dir / "validation.csv"
        status = solver_output["status"]
        cpu_time = solver_output["cpu_time"]
        wall_time = solver_output["wall_time"]
        del solver_output["status"]
        del solver_output["cpu_time"]
        del solver_output["wall_time"]
        sorted_keys = sorted(solver_output)
        objectives = [resolve_objective(key) for key in sorted_keys]
        objectives = [o for o in objectives if o is not None]
        if not csv_file.exists():
            # Write header
            header = ["Solver", "Configuration", "InstanceSet", "Instance", "Status",
                      "CPU Time", "Wallclock Time"] + [o.name for o in objectives]
            with csv_file.open("w") as out:
                csv.writer(out).writerow(header)
        values = [solver, config_str, instance_set.name, instance, status, cpu_time,
                  wall_time] + [solver_output[o.name] for o in objectives]
        with csv_file.open("a") as out:
            writer = csv.writer(out)
            writer.writerow(values)
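
For reference, the validation.csv layout written by append_entry_to_csv, sketched with hypothetical values: the seven fixed columns come first, then one column per resolvable objective in sorted key order:

import csv
import io

# Hypothetical row; "PAR10" stands in for whatever objectives were resolved.
header = ["Solver", "Configuration", "InstanceSet", "Instance", "Status",
          "CPU Time", "Wallclock Time", "PAR10"]
row = ["MySolver", "{'init_solution': '1'}", "PTN", "instance_a.cnf",
       "SUCCESS", "3.21", "3.30", "3.21"]
buffer = io.StringIO()
csv.writer(buffer).writerows([header, row])
print(buffer.getvalue())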

diff --git a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_verifier_py.html b/Documentation/source/_static/coverage/z_89d5682e9ce6320a_verifier_py.html
deleted file mode 100644
index 3d4e4ebf0..000000000
--- a/Documentation/source/_static/coverage/z_89d5682e9ce6320a_verifier_py.html
+++ /dev/null
@@ -1,169 +0,0 @@
[Deleted coverage page for sparkle/solver/verifier.py: 46%, 35 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. The listed source was:]

"""Methods related to SAT specific runs."""
from __future__ import annotations
from pathlib import Path
import subprocess

from sparkle.types import SolverStatus


class SolutionVerifier:
    """Solution verifier base class."""

    def __init__(self: SolutionVerifier) -> None:
        """Initialize the solution verifier."""
        raise NotImplementedError

    def verify(self: SolutionVerifier) -> SolverStatus:
        """Verify the solution."""
        raise NotImplementedError


class SATVerifier(SolutionVerifier):
    """Class to handle the SAT verifier."""
    sat_verifier_path = Path("sparkle/Components/Sparkle-SAT-verifier/SAT")

    def __init__(self: SATVerifier) -> None:
        """Initialize the SAT verifier."""
        return

    def __str__(self: SATVerifier) -> str:
        """Return the name of the SAT verifier."""
        return "SATVerifier"

    def verify(self: SATVerifier, instance: Path, raw_result: Path) -> SolverStatus:
        """Run the SAT verifier and return its status."""
        return SATVerifier.sat_judge_correctness_raw_result(instance, raw_result)

    @staticmethod
    def sat_get_verify_string(sat_output: str) -> SolverStatus:
        """Return the status of the SAT verifier.

        Four statuses are possible: "SAT", "UNSAT", "WRONG", "UNKNOWN"
        """
        lines = [line.strip() for line in sat_output.splitlines()]
        for index, line in enumerate(lines):
            if line == "Solution verified.":
                if lines[index + 2] == "11":
                    return SolverStatus.SAT
            elif line == "Solver reported unsatisfiable. I guess it must be right!":
                if lines[index + 2] == "10":
                    return SolverStatus.UNSAT
            elif line == "Wrong solution.":
                if lines[index + 2] == "0":
                    return SolverStatus.WRONG
        return SolverStatus.UNKNOWN

    @staticmethod
    def sat_judge_correctness_raw_result(instance: Path,
                                         raw_result: Path) -> SolverStatus:
        """Run the SAT verifier to determine the correctness of a result.

        Args:
            instance: path to the instance
            raw_result: path to the result to verify

        Returns:
            The status of the solver on the instance
        """
        sat_verify = subprocess.run([SATVerifier.sat_verifier_path,
                                     instance,
                                     raw_result],
                                    capture_output=True)
        return SATVerifier.sat_get_verify_string(sat_verify.stdout.decode())
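
sat_get_verify_string scans for a marker line and reads the verdict code two lines below it; a standalone replay of that scan on a hypothetical verifier transcript:

# Hypothetical transcript: the marker line, then a filler line, then code "11".
sample = "c verifier banner\nSolution verified.\nc\n11\n"
lines = [line.strip() for line in sample.splitlines()]
status = "UNKNOWN"
for index, line in enumerate(lines):
    if line == "Solution verified." and lines[index + 2] == "11":
        status = "SAT"
print(status)  # SAT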

diff --git a/Documentation/source/_static/coverage/z_8a23ba30293532be___init___py.html b/Documentation/source/_static/coverage/z_8a23ba30293532be___init___py.html
deleted file mode 100644
index 1daf7b02a..000000000
--- a/Documentation/source/_static/coverage/z_8a23ba30293532be___init___py.html
+++ /dev/null
@@ -1,172 +0,0 @@
[Deleted coverage page for sparkle/types/__init__.py: 88%, 40 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. The listed source was:]

"""This package provides types for Sparkle applications."""
import importlib
import inspect
import re
from typing import Callable

from sparkle.types.sparkle_callable import SparkleCallable
from sparkle.types.features import FeatureGroup, FeatureSubgroup, FeatureType
from sparkle.types.status import SolverStatus
from sparkle.types import objective
from sparkle.types.objective import SparkleObjective, UseTime


objective_string_regex = re.compile(r"(?P<name>[\w\-_]+)(:(?P<direction>min|max))?$")
objective_variable_regex = re.compile(r"(-?\d+)$")


def _check_class(candidate: Callable) -> bool:
    """Verify whether a loaded class is a valid objective class."""
    return inspect.isclass(candidate) and issubclass(candidate, SparkleObjective)


def resolve_objective(objective_name: str) -> SparkleObjective:
    """Try to resolve the objective class by (case-sensitive) name.

    Convention: objective_name(variable-k)?(:[min|max])?

    Order of resolving:
        class_name of user defined SparkleObjectives
        class_name of sparkle defined SparkleObjectives
        default SparkleObjective with minimization unless specified as max

    Args:
        objective_name: The name of the objective class. Can include parameter
            value k.

    Returns:
        Instance of the Objective class or None if not found.
    """
    match = objective_string_regex.fullmatch(objective_name)
    if match is None or objective_name == "" or not objective_name[0].isalpha():
        return None

    name = match.group("name")
    minimise = not match.group("direction") == "max"  # .group yields None if absent

    # Search for optional variable and record split point between name and variable
    name_options = [(name, None), ]  # Options of names to check for
    if m := objective_variable_regex.search(name):
        argument = int(m.group())
        name_options = [(name[:m.start()], argument), ] + name_options  # Prepend

    # First try to resolve the user input classes
    for rname, rarg in name_options:
        try:
            user_module = importlib.import_module("Settings.objective")
            for o_name, o_class in inspect.getmembers(user_module,
                                                      predicate=_check_class):
                if o_name == rname:
                    if rarg is not None:
                        return o_class(rarg)
                    return o_class()
        except Exception:
            pass

    for rname, rarg in name_options:
        # Try to match with specially defined classes
        for o_name, o_class in inspect.getmembers(objective,
                                                  predicate=_check_class):
            if o_name == rname:
                if rarg is not None:
                    return o_class(rarg)
                return o_class()

    # No special objects found. Return objective with full name
    return SparkleObjective(name=objective_name, minimise=minimise)
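
How the two regexes above decompose an objective name, replayed standalone; for "PAR10:max" the candidate names are first "PAR" with argument 10, then "PAR10" without:

import re

objective_string_regex = re.compile(r"(?P<name>[\w\-_]+)(:(?P<direction>min|max))?$")
objective_variable_regex = re.compile(r"(-?\d+)$")

match = objective_string_regex.fullmatch("PAR10:max")
name = match.group("name")                         # 'PAR10'
minimise = not match.group("direction") == "max"   # False
m = objective_variable_regex.search(name)          # trailing integer variable
print(name[:m.start()], int(m.group()), minimise)  # PAR 10 False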

diff --git a/Documentation/source/_static/coverage/z_8a23ba30293532be_features_py.html b/Documentation/source/_static/coverage/z_8a23ba30293532be_features_py.html
deleted file mode 100644
index 6bd99a0ab..000000000
--- a/Documentation/source/_static/coverage/z_8a23ba30293532be_features_py.html
+++ /dev/null
@@ -1,179 +0,0 @@
[Deleted coverage page for sparkle/types/features.py: 99%, 70 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. The listed source was:]

"""Types definition for features to unify extractors across the Sparkle platform."""
from __future__ import annotations
from enum import Enum


class FeatureGroup(str, Enum):
    """Various feature groups."""
    BASE = "base"
    PRE = "pre"
    DIAMETER = "dia"
    LOBJOIS = "lobjois"
    UNIT = "unit"
    SP = "sp"
    LS = "ls"
    LP = "lp"
    CL = "cl"


class FeatureSubgroup(str, Enum):
    """Various feature subgroups. Only used for embedding in with feature names."""
    VCG = "Variable-Clause-Graph"
    POSNEG = "Postive-Negative-Literals"
    HORNY = "Horn-Formula"
    VG = "Variable-Graph"
    KLB = "Kevin-Leyton-Brown"
    CG = "Clause-Graph"
    BIAS = "bias"
    UNCONSTRAINT = "unconstraint"
    NUM = "num"
    SIZE = "size"


class FeatureType(str, Enum):
    """Various feature types."""
    MIN = "min"
    MAX = "max"
    MEAN = "mean"
    ENTROPY = "entropy"
    QUANTILE_10 = "q10"
    QUANTILE_25 = "q25"
    QUANTILE_50 = "q50"
    QUANTILE_75 = "q75"
    QUANTILE_90 = "q90"
    COEFFICIENT_VARIATION = "coefficient_of_variation"
    NUMBER_OF_VARS_ORIGINAL = "n_vars_original"
    NUMBER_OF_CLAUSES_ORIGINAL = "n_clauses_original"
    NUMBER_OF_VARS = "n_vars"
    NUMBER_OF_CLAUSES = "n_clauses"
    REDUCED_VARS = "reduced_vars"
    REDUCED_CLAUSES = "reduced_clauses"
    PRE_FEATURE_TIME = "pre_featuretime"
    VARS_CLAUSES_RATIO = "vars_clauses_ratio"
    CLAUSE_RATIO_MEAN = "clause_ratio_mean"
    CLAUSE_RATIO_MINIMUM = "clause_ratio_minimum"
    CLAUSE_RATIO_MAXIMUM = "clause_ratio_maximum"
    CLAUSE_RATIO_ENTROPY = "clause_ratio_entropy"
    CLAUSE_MEAN = "clause_mean"
    CLAUSE_MIN = "clause_min"
    CLAUSE_MAX = "clause_max"
    CLAUSE_FRACTION = "clauses_fraction"
    CLAUSE_ENTROPY = "clause_entropy"
    CLAUSE_COEFFICIENT_VARIATION = "clause_coefficient_of_variation"
    VAR_MEAN = "variable_mean"
    VAR_MIN = "variable_min"
    VAR_MAX = "variable_max"
    VAR_STD = "variable_standard_deviation"
    VAR_ENTROPY = "variable_entropy"
    VAR_COEFFICIENT_VARIATION = "variable_coefficient_of_variation"
    CLUSTER_COEFFICIENT_MEAN = "cluster_coefficient_mean"
    CLUSTER_COEFFICIENT_VARIATION = "cluster_coefficient_of_variation"
    CLUSTER_COEFFICIENT_MIN = "cluster_coefficient_min"
    CLUSTER_COEFFICIENT_MAX = "cluster_coefficient_max"
    CLUSTER_COEFFICIENT_ENTROPY = "cluster_coefficient_entropy"
    UNARY = "unary"
    BINARY = "binary"
    TRINARY = "trinary"
    FEATURE_TIME = "feature_time"

    @staticmethod
    def with_subgroup(subgroup: FeatureSubgroup, feature: FeatureType) -> str:
        """Return a standardised string with a subgroup embedded."""
        return f"{subgroup.value}_{feature.value}"
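
The subgroup embedding convention, replayed on the enum values defined above:

subgroup_value = "Variable-Clause-Graph"  # FeatureSubgroup.VCG.value
feature_value = "mean"                    # FeatureType.MEAN.value
print(f"{subgroup_value}_{feature_value}")  # Variable-Clause-Graph_mean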

diff --git a/Documentation/source/_static/coverage/z_8a23ba30293532be_objective_py.html b/Documentation/source/_static/coverage/z_8a23ba30293532be_objective_py.html
deleted file mode 100644
index 30c8a2da3..000000000
--- a/Documentation/source/_static/coverage/z_8a23ba30293532be_objective_py.html
+++ /dev/null
@@ -1,172 +0,0 @@
[Deleted coverage page for sparkle/types/objective.py: 91%, 44 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. The listed source was:]

"""Class for Sparkle Objective and Performance."""
from __future__ import annotations
from enum import Enum
import typing
import numpy as np


class UseTime(str, Enum):
    """Whether an objective is measured in wall time, CPU time, or is not time based."""
    WALL_TIME = "WALL_TIME"
    CPU_TIME = "CPU_TIME"
    NO = "NO"

    @classmethod
    def _missing_(cls: UseTime, value: object) -> UseTime:
        """Fall back to NO for unrecognised values."""
        return UseTime.NO


class SparkleObjective:
    """Objective for Sparkle specified by user."""

    name: str
    run_aggregator: typing.Callable
    instance_aggregator: typing.Callable
    solver_aggregator: typing.Callable
    minimise: bool
    post_process: typing.Callable
    use_time: UseTime

    def __init__(self: SparkleObjective,
                 name: str,
                 run_aggregator: typing.Callable = np.mean,
                 instance_aggregator: typing.Callable = np.mean,
                 solver_aggregator: typing.Callable = None,
                 minimise: bool = True,
                 post_process: typing.Callable = None,
                 use_time: UseTime = UseTime.NO) -> None:
        """Create a Sparkle objective."""
        self.name = name
        self.run_aggregator: typing.Callable = run_aggregator
        self.instance_aggregator: typing.Callable = instance_aggregator
        if solver_aggregator is None:
            solver_aggregator = np.min if minimise else np.max
        self.solver_aggregator: typing.Callable = solver_aggregator
        self.minimise: bool = minimise
        self.post_process: typing.Callable = post_process
        self.use_time: UseTime = use_time

    def __str__(self: SparkleObjective) -> str:
        """Return a stringified version."""
        return f"{self.name}"

    @property
    def time(self: SparkleObjective) -> bool:
        """Return whether the objective is time based."""
        return self.use_time != UseTime.NO


class PAR(SparkleObjective):
    """Penalised Average Runtime objective for Sparkle."""

    def __init__(self: PAR, k: int = 10) -> None:
        """Initialize PAR with penalty factor k."""
        self.k = k
        if k <= 0:
            raise ValueError("k must be greater than 0.")

        def penalise(value: float, cutoff: float) -> float:
            """Return the penalised value."""
            if value > cutoff:
                return cutoff * self.k
            return value

        super().__init__(f"PAR{k}", use_time=UseTime.CPU_TIME, post_process=penalise)
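
The PAR penalisation rule as a worked example: with the default k = 10 and a hypothetical cutoff of 60 seconds, a run over the cutoff counts as 600, while a finished run keeps its measured runtime:

k, cutoff = 10, 60.0

def penalise(value: float) -> float:
    # Mirrors the nested penalise above: cutoff * k beyond the cutoff.
    return cutoff * k if value > cutoff else value

print(penalise(61.0), penalise(3.2))  # 600.0 3.2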

diff --git a/Documentation/source/_static/coverage/z_8a23ba30293532be_sparkle_callable_py.html b/Documentation/source/_static/coverage/z_8a23ba30293532be_sparkle_callable_py.html
deleted file mode 100644
index 9720847e7..000000000
--- a/Documentation/source/_static/coverage/z_8a23ba30293532be_sparkle_callable_py.html
+++ /dev/null
@@ -1,135 +0,0 @@
[Deleted coverage page for sparkle/types/sparkle_callable.py: 88%, 17 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. The listed source was:]

"""Abstract class Sparkle Callable."""
from __future__ import annotations
from pathlib import Path


class SparkleCallable:
    """Sparkle Callable class."""

    def __init__(self: SparkleCallable,
                 directory: Path,
                 runsolver_exec: Path = None,
                 raw_output_directory: Path = None) -> None:
        """Initialize callable.

        Args:
            directory: Directory of the callable.
            runsolver_exec: Path to the runsolver executable.
                By default, runsolver in directory.
            raw_output_directory: Directory where the callable will write its raw
                output. Defaults to directory / tmp
        """
        self.directory = directory
        self.name = directory.name
        self.raw_output_directory = raw_output_directory
        self.runsolver_exec = runsolver_exec
        if self.raw_output_directory is None:
            self.raw_output_directory = self.directory / "tmp"
            self.raw_output_directory.mkdir(exist_ok=True)
        if self.runsolver_exec is None:
            self.runsolver_exec = self.directory / "runsolver"

    def build_cmd(self: SparkleCallable) -> list[str | Path]:
        """A method that builds the command line call string."""
        raise NotImplementedError

    def run(self: SparkleCallable) -> None:
        """A method that runs the callable."""
        raise NotImplementedError

-
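A minimal subclass sketch showing how the two abstract hooks are meant to be overridden; EchoCallable and its echo command are hypothetical examples, not part of Sparkle:

    import subprocess

    class EchoCallable(SparkleCallable):
        """Toy callable that just echoes its own name."""

        def build_cmd(self) -> list[str | Path]:
            # Concrete subclasses assemble their own command line here
            return ["echo", self.name]

        def run(self) -> None:
            # Run the command, writing output under the callable's tmp directory
            subprocess.run(self.build_cmd(), cwd=self.raw_output_directory)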
diff --git a/Documentation/source/_static/coverage/z_8a23ba30293532be_status_py.html b/Documentation/source/_static/coverage/z_8a23ba30293532be_status_py.html
deleted file mode 100644
index 8cb0a6454..000000000
--- a/Documentation/source/_static/coverage/z_8a23ba30293532be_status_py.html
+++ /dev/null
@@ -1,115 +0,0 @@
[Deleted coverage.py v7.6.1 HTML report for sparkle/types/status.py (100% of 12 statements, created at 2024-09-27 09:26 +0200); the module source embedded in the page follows.]
1"""Class for solver status.""" 

-

2from __future__ import annotations 

-

3from enum import Enum 

-

4 

-

5 

-

6class SolverStatus(str, Enum): 

-

7 """Possible return states for solver runs.""" 

-

8 SUCCESS = "SUCCESS" 

-

9 CRASHED = "CRASHED" 

-

10 TIMEOUT = "TIMEOUT" 

-

11 WRONG = "WRONG" 

-

12 UNKNOWN = "UNKNOWN" 

-

13 ERROR = "ERROR" 

-

14 KILLED = "KILLED" 

-

15 

-

16 # SAT specific status 

-

17 SAT = "SAT" 

-

18 UNSAT = "UNSAT" 

-
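Because SolverStatus mixes in str, raw status strings (for example, values read from a results CSV) can be parsed and compared directly; a small illustration:

    status = SolverStatus("TIMEOUT")        # parse a raw status string
    print(status is SolverStatus.TIMEOUT)   # True
    print(status.value)                     # "TIMEOUT"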
diff --git a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b___init___py.html b/Documentation/source/_static/coverage/z_8ad50cee1c09c10b___init___py.html
deleted file mode 100644
index a234735e9..000000000
--- a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b___init___py.html
+++ /dev/null
@@ -1,100 +0,0 @@
[Deleted coverage.py v7.6.1 HTML report for sparkle/platform/__init__.py (100% of 2 statements, created at 2024-09-27 09:26 +0200); the module source embedded in the page follows.]
1"""This package provides platform support for Sparkle.""" 

-

2from sparkle.platform.cli_types import CommandName, COMMAND_DEPENDENCIES 

-

3from sparkle.platform.settings_objects import Settings, SettingState 

-
diff --git a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_cli_types_py.html b/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_cli_types_py.html
deleted file mode 100644
index 8298243f5..000000000
--- a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_cli_types_py.html
+++ /dev/null
@@ -1,238 +0,0 @@
[Deleted coverage.py v7.6.1 HTML report for sparkle/platform/cli_types.py (94% of 63 statements, created at 2024-09-27 09:26 +0200); the module source embedded in the page follows.]
1"""Helper types for command line interface.""" 

-

2from enum import Enum 

-

3from typing import Type 

-

4 

-

5 

-

6class VerbosityLevel(Enum): 

-

7 """Enum of possible verbosity states.""" 

-

8 

-

9 QUIET = 0 

-

10 STANDARD = 1 

-

11 

-

12 @staticmethod 

-

13 def from_string(name: str) -> "VerbosityLevel": 

-

14 """Converts string to VerbosityLevel.""" 

-

15 return VerbosityLevel[name] 

-

16 

-

17 

-

18class TEXT(Enum): 

-

19 """Class for ANSI text formatting.""" 

-

20 

-

21 RESET = "\033[0m" 

-

22 BOLD = "\033[1m" 

-

23 ITALIC = "\033[3m" 

-

24 UNDERLINE = "\033[4m" 

-

25 

-

26 BLACK = "\033[30m" 

-

27 RED = "\033[31m" 

-

28 GREEN = "\033[32m" 

-

29 YELLOW = "\033[33m" 

-

30 BLUE = "\033[34m" 

-

31 MAGENTA = "\033[35m" 

-

32 CYAN = "\033[36m" 

-

33 WHITE = "\033[37m" 

-

34 

-

35 # BG = Background 

-

36 BG_BLACK = "\033[40m" 

-

37 BG_RED = "\033[41m" 

-

38 BG_GREEN = "\033[42m" 

-

39 BG_YELLOW = "\033[43m" 

-

40 BG_BLUE = "\033[44m" 

-

41 BG_MAGENTA = "\033[45m" 

-

42 BG_CYAN = "\033[46m" 

-

43 BG_WHITE = "\033[47m" 

-

44 

-

45 @classmethod 

-

46 def format_text(cls: Type["TEXT"], formats: list[str], text: str) -> str: 

-

47 """Styles the string based on the provided formats.""" 

-

48 start_format = "".join(format_.value for format_ in formats) 

-

49 end_format = cls.RESET.value 

-

50 return f"{start_format}{text}{end_format}" 

-

51 

-

52 

-

53class CommandName(str, Enum): 

-

54 """Enum of all command names.""" 

-

55 

-

56 ABOUT = "about" 

-

57 ADD_FEATURE_EXTRACTOR = "add_feature_extractor" 

-

58 ADD_INSTANCES = "add_instances" 

-

59 ADD_SOLVER = "add_solver" 

-

60 COMPUTE_FEATURES = "compute_features" 

-

61 COMPUTE_MARGINAL_CONTRIBUTION = "compute_marginal_contribution" 

-

62 CONFIGURE_SOLVER = "configure_solver" 

-

63 CONSTRUCT_PORTFOLIO_SELECTOR = "construct_portfolio_selector" 

-

64 GENERATE_REPORT = "generate_report" 

-

65 INITIALISE = "initialise" 

-

66 REMOVE_FEATURE_EXTRACTOR = "remove_feature_extractor" 

-

67 REMOVE_INSTANCES = "remove_instances" 

-

68 REMOVE_SOLVER = "remove_solver" 

-

69 RUN_ABLATION = "run_ablation" 

-

70 RUN_ABLATION_VALIDATION = "run_ablation_validation" 

-

71 ABLATION_CALLBACK = "ablation_callback" 

-

72 ABLATION_VALIDATION_CALLBACK = "ablation_validation_callback" 

-

73 RUN_SOLVER = "run_solver" 

-

74 RUN_SOLVERS = "run_solvers" 

-

75 RUN_PORTFOLIO_SELECTOR = "run_portfolio_selector" 

-

76 RUN_STATUS = "run_status" 

-

77 SPARKLE_WAIT = "sparkle_wait" 

-

78 SYSTEM_STATUS = "system_status" 

-

79 VALIDATE_CONFIGURED_VS_DEFAULT = "validate_configured_vs_default" 

-

80 RUN_CONFIGURED_SOLVER = "run_configured_solver" 

-

81 RUN_PARALLEL_PORTFOLIO = "run_parallel_portfolio" 

-

82 VALIDATION = "validation" 

-

83 

-

84 

-

85# NOTE: This dependency list contains all possible direct dependencies, including 

-

86# optional dependencies, and 'either or' dependencies 

-

87# 

-

88# Optional dpendency: GENERATE_REPORT is possible based on just CONFIGURE_SOLVER, 

-

89# but can optionally wait for VALIDATE_CONFIGURED_VS_DEFAULT as well 

-

90# 

-

91# 'Either or' dependency: GENERATE_REPORT can run after CONFIGURE_SOLVER, but 

-

92# also after CONSTRUCT_PORTFOLIO_SELECTOR, but does not need both 

-

93# 

-

94# TODO: Check if empty dependency lists are correct. These were not important 

-

95# when this was implemented, but might have 'trivial' dependencies, such as the 

-

96# INITIALISE command. 

-

97COMMAND_DEPENDENCIES = { 

-

98 CommandName.ABOUT: [], 

-

99 CommandName.ADD_FEATURE_EXTRACTOR: [CommandName.INITIALISE], 

-

100 CommandName.ADD_INSTANCES: [CommandName.INITIALISE], 

-

101 CommandName.ADD_SOLVER: [CommandName.INITIALISE], 

-

102 CommandName.COMPUTE_FEATURES: [CommandName.INITIALISE, 

-

103 CommandName.ADD_FEATURE_EXTRACTOR, 

-

104 CommandName.ADD_INSTANCES], 

-

105 CommandName.COMPUTE_MARGINAL_CONTRIBUTION: [ 

-

106 CommandName.INITIALISE, 

-

107 CommandName.CONSTRUCT_PORTFOLIO_SELECTOR], 

-

108 CommandName.CONFIGURE_SOLVER: [CommandName.INITIALISE, 

-

109 CommandName.ADD_INSTANCES, 

-

110 CommandName.ADD_SOLVER], 

-

111 CommandName.CONSTRUCT_PORTFOLIO_SELECTOR: [CommandName.INITIALISE, 

-

112 CommandName.COMPUTE_FEATURES, 

-

113 CommandName.RUN_SOLVERS], 

-

114 CommandName.GENERATE_REPORT: [CommandName.INITIALISE, 

-

115 CommandName.CONFIGURE_SOLVER, 

-

116 CommandName.VALIDATE_CONFIGURED_VS_DEFAULT, 

-

117 CommandName.RUN_ABLATION, 

-

118 CommandName.CONSTRUCT_PORTFOLIO_SELECTOR, 

-

119 CommandName.RUN_PORTFOLIO_SELECTOR], 

-

120 CommandName.INITIALISE: [], 

-

121 CommandName.REMOVE_FEATURE_EXTRACTOR: [CommandName.INITIALISE], 

-

122 CommandName.REMOVE_INSTANCES: [CommandName.INITIALISE], 

-

123 CommandName.REMOVE_SOLVER: [CommandName.INITIALISE], 

-

124 CommandName.RUN_ABLATION: [CommandName.INITIALISE, 

-

125 CommandName.CONFIGURE_SOLVER], 

-

126 CommandName.RUN_SOLVERS: [CommandName.INITIALISE, 

-

127 CommandName.ADD_INSTANCES, 

-

128 CommandName.ADD_SOLVER], 

-

129 CommandName.RUN_PORTFOLIO_SELECTOR: [ 

-

130 CommandName.INITIALISE, 

-

131 CommandName.CONSTRUCT_PORTFOLIO_SELECTOR], 

-

132 CommandName.RUN_STATUS: [], 

-

133 CommandName.SPARKLE_WAIT: [], 

-

134 CommandName.SYSTEM_STATUS: [], 

-

135 CommandName.VALIDATE_CONFIGURED_VS_DEFAULT: [CommandName.INITIALISE, 

-

136 CommandName.CONFIGURE_SOLVER], 

-

137 CommandName.RUN_CONFIGURED_SOLVER: [CommandName.INITIALISE, 

-

138 CommandName.CONFIGURE_SOLVER], 

-

139 CommandName.RUN_PARALLEL_PORTFOLIO: [ 

-

140 CommandName.INITIALISE] 

-

141} 

-
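Two small illustrations of the types above, with illustrative inputs (outputs shown in comments):

    styled = TEXT.format_text([TEXT.BOLD, TEXT.RED], "warning")
    print(styled)  # "warning" wrapped in bold + red ANSI codes, reset at the end
    print(COMMAND_DEPENDENCIES[CommandName.RUN_ABLATION])
    # [CommandName.INITIALISE, CommandName.CONFIGURE_SOLVER]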
diff --git a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_file_help_py.html b/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_file_help_py.html
deleted file mode 100644
index a39bdf9c5..000000000
--- a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_file_help_py.html
+++ /dev/null
@@ -1,140 +0,0 @@
[Deleted coverage.py v7.6.1 HTML report for sparkle/platform/file_help.py (0% of 18 statements, created at 2024-09-27 09:26 +0200); the module source embedded in the page follows.]
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Helper functions for file manipulation."""
from __future__ import annotations
from filelock import FileLock
from pathlib import Path


def add_remove_platform_item(item: any,
                             file_target: Path,
                             target: list | dict,
                             key: str = None,
                             remove: bool = False) -> None:
    """Add/remove an item from a platform list or dictionary that must be saved to disk.

    Args:
        item: The item to be added to the data structure.
        file_target: Path to the file where we want to keep the disk storage.
        target: Either a list or dictionary to add the item to.
        key: Optional string, in case we use a dictionary.
        remove: If true, remove the item from the platform.
            If the target is a dict, the key is used to remove the entry.
    """
    # ast.literal_eval can't deal with Path objects
    if isinstance(item, Path):
        item = str(item)
    if isinstance(file_target, str):
        file_target = Path(file_target)
    # Add/Remove item to/from object
    if isinstance(target, dict):
        if remove:
            del target[key]
        else:
            target[key] = item
    else:
        if remove:
            target.remove(item)
        else:
            target.append(item)
    # (Over)write the data structure to the path
    lock = FileLock(f"{file_target}.lock")
    with lock.acquire(timeout=60):
        file_target.open("w").write(str(target))
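A usage sketch of the helper above; the nickname list and the target path are illustrative, not Sparkle defaults:

    nicknames = ["minisat"]
    add_remove_platform_item("cadical", Path("Output/solver_nicknames.txt"), nicknames)
    # nicknames is now ["minisat", "cadical"] and the list is written to disk
    # under a file lock; pass remove=True to delete an entry instead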
diff --git a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_generate_report_for_configuration_py.html b/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_generate_report_for_configuration_py.html
deleted file mode 100644
index 4a85a02ca..000000000
--- a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_generate_report_for_configuration_py.html
+++ /dev/null
@@ -1,647 +0,0 @@
[Deleted coverage.py v7.6.1 HTML report for sparkle/platform/generate_report_for_configuration.py (85% of 177 statements, created at 2024-09-27 09:26 +0200); the module source embedded in the page follows.]
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Helper functions for algorithm configuration report generation."""
from __future__ import annotations

import sys
from pathlib import Path

from scipy.stats import linregress

from sparkle.platform import latex as stex
from sparkle.solver.ablation import AblationScenario
from sparkle.solver.validator import Validator
from sparkle.configurator.configurator import Configurator, ConfigurationScenario
from sparkle.solver import Solver
from sparkle.instance import InstanceSet
from sparkle.configurator.implementations import SMAC2
from sparkle.types import SparkleObjective
from sparkle import about


def get_features_bool(configurator_scenario: ConfigurationScenario,
                      solver_name: str, train_set: InstanceSet) -> str:
    """Return a bool string for LaTeX indicating whether features were used.

    True if a feature file is given in the scenario file, false otherwise.

    Args:
        configurator_scenario: The configuration scenario to inspect
        solver_name: Name of the solver
        train_set: The instance set used for training

    Returns:
        A string describing whether features are used
    """
    scenario_file = configurator_scenario.directory \
        / f"{solver_name}_{train_set.name}_scenario.txt"

    for line in scenario_file.open("r").readlines():
        if line.split(" ")[0] == "feature_file":
            return "\\featurestrue"
    return "\\featuresfalse"


def get_average_performance(results: list[list[str]],
                            objective: SparkleObjective) -> float:
    """Return the average performance over all instances in a result set.

    Args:
        results: Validation results, a header row followed by one row per run
        objective: The objective to average

    Returns:
        Average performance value
    """
    instance_per_dict = get_dict_instance_to_performance(results,
                                                         objective)
    num_instances = len(instance_per_dict.keys())
    sum_par = sum(float(instance_per_dict[instance]) for instance in instance_per_dict)
    return float(sum_par / num_instances)


def get_dict_instance_to_performance(results: list[list[str]],
                                     objective: SparkleObjective) -> dict[str, float]:
    """Return a dictionary of instance names and their performance.

    Args:
        results: Results from CSV, a header row followed by one row per run
        objective: The Sparkle objective we are converting for
    Returns:
        A dictionary containing the performance for each instance
    """
    value_column = results[0].index(objective.name)
    results_per_instance = {}
    for row in results[1:]:
        value = float(row[value_column])
        # Column 3 holds the instance path; key on the instance file name
        results_per_instance[Path(row[3]).name] = value
    return results_per_instance
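A small worked example of the conversion above. The column layout (instance path in column 3, objective value in the column named after the objective) is what the code itself indexes on; the concrete values are made up:

    results = [["Solver", "Configuration", "InstanceSet", "Instance", "PAR10"],
               ["s1", "-init 1", "train", "Instances/train/a.cnf", "3.5"],
               ["s1", "-init 1", "train", "Instances/train/b.cnf", "600.0"]]
    # get_dict_instance_to_performance(results, PAR(10))
    # -> {"a.cnf": 3.5, "b.cnf": 600.0}
    # get_average_performance(results, PAR(10)) -> 301.75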

def get_ablation_bool(scenario: AblationScenario) -> str:
    """Return the ablation bool as LaTeX string.

    Args:
        scenario: The ablation scenario to check

    Returns:
        A string describing whether ablation was run or not
    """
    if scenario.check_for_ablation():
        return "\\ablationtrue"
    return "\\ablationfalse"


def get_data_for_plot(configured_results: list[list[str]],
                      default_results: list[list[str]],
                      objective: SparkleObjective) -> list:
    """Return the required data to plot.

    Creates a nested list of performance values of algorithm runs with default and
    configured parameters on instances in a given instance set.

    Args:
        configured_results: Results for the configured solver
        default_results: Results for the default solver
        objective: The objective of the plot

    Returns:
        A list of lists containing data points
    """
    dict_instance_to_par_default = get_dict_instance_to_performance(
        default_results, objective)
    dict_instance_to_par_configured = get_dict_instance_to_performance(
        configured_results, objective)

    instances = (dict_instance_to_par_default.keys()
                 & dict_instance_to_par_configured.keys())
    if (len(dict_instance_to_par_default) != len(instances)):
        print("""ERROR: Number of instances does not match
         the number of performance values for the default configuration.""")
        sys.exit(-1)
    points = []
    for instance in instances:
        point = [dict_instance_to_par_default[instance],
                 dict_instance_to_par_configured[instance]]
        points.append(point)

    return points


def get_figure_configure_vs_default(configured_results: list[list[str]],
                                    default_results: list[list[str]],
                                    target_directory: Path,
                                    figure_filename: str,
                                    performance_measure: str,
                                    run_cutoff_time: float,
                                    objective: SparkleObjective) -> str:
    """Create a figure comparing the configured and default solver.

    Base function to create a comparison plot of a given instance set between the
    default and configured performance.

    Args:
        configured_results: Results for the configured solver
        default_results: Results for the default solver
        target_directory: Directory for the configuration reports
        figure_filename: Filename for the figure
        performance_measure: Name of the performance measure for the axis labels
        run_cutoff_time: Cutoff time
        objective: The objective of the plot

    Returns:
        A string containing the LaTeX command to include the figure
    """
    points = get_data_for_plot(configured_results, default_results,
                               objective)

    plot_params = {"xlabel": f"Default parameters [{performance_measure}]",
                   "ylabel": f"Configured parameters [{performance_measure}]",
                   "scale": "linear",
                   "limit_min": 1.5,
                   "limit_max": 1.5,
                   "replace_zeros": False,
                   "output_dir": target_directory
                   }
    # Check if the scale of the axis can be considered linear
    linearity_x = linregress([p[0] for p in points], range(len(points))).rvalue > 0.5
    linearity_y = linregress([p[1] for p in points], range(len(points))).rvalue > 0.5
    if not linearity_x or not linearity_y:
        plot_params["scale"] = "log"
        plot_params["replace_zeros"] = True

    stex.generate_comparison_plot(points,
                                  figure_filename,
                                  **plot_params)

    return f"\\includegraphics[width=0.6\\textwidth]{{{figure_filename}}}"


def get_figure_configured_vs_default_on_instance_set(solver: Solver,
                                                     instance_set_name: str,
                                                     res_default: list[list[str]],
                                                     res_conf: list[list[str]],
                                                     target_directory: Path,
                                                     smac_objective: str,
                                                     run_cutoff_time: float,
                                                     objective: SparkleObjective,
                                                     data_type: str = "train") -> str:
    """Create a figure comparing the configured and default solver on an instance set.

    Manages the creation of a comparison plot of the instances in an instance set
    for the report by gathering the proper files and choosing the plotting parameters
    based on the performance measure.

    Args:
        solver: The solver object
        instance_set_name: Name of the instance set
        res_default: Results for the default solver
        res_conf: Results for the configured solver
        target_directory: Directory for the configuration reports
        smac_objective: The SMAC objective (RUNTIME or QUALITY)
        run_cutoff_time: Cutoff time
        objective: The objective of the plot
        data_type: Whether this is the "train" or "test" set

    Returns:
        A string containing the LaTeX command to include the figure
    """
    data_plot_configured_vs_default_on_instance_set_filename = (
        f"data_{solver.name}_configured_vs_default_on_{instance_set_name}_{data_type}")
    return get_figure_configure_vs_default(
        res_conf, res_default, target_directory,
        data_plot_configured_vs_default_on_instance_set_filename,
        smac_objective,
        run_cutoff_time,
        objective)


def get_timeouts_instanceset(solver: Solver,
                             instance_set: InstanceSet,
                             configurator: Configurator,
                             validator: Validator,
                             cutoff: float) -> tuple[int, int, int]:
    """Return the number of timeouts by configured, default and both on an instance set.

    Args:
        solver: The solver object
        instance_set: Instance set
        configurator: Configurator
        validator: Validator
        cutoff: Cutoff time

    Returns:
        A tuple containing the number of timeouts for the different configurations
    """
    objective = configurator.scenario.sparkle_objective
    _, config = configurator.get_optimal_configuration(
        solver, instance_set, objective)
    res_default = validator.get_validation_results(solver,
                                                   instance_set,
                                                   config="")
    res_conf = validator.get_validation_results(solver,
                                                instance_set,
                                                config=config)
    dict_instance_to_par_configured = get_dict_instance_to_performance(
        res_conf, objective)
    dict_instance_to_par_default = get_dict_instance_to_performance(
        res_default, objective)

    return get_timeouts(dict_instance_to_par_configured,
                        dict_instance_to_par_default, cutoff)


def get_timeouts(instance_to_par_configured: dict,
                 instance_to_par_default: dict,
                 cutoff: float) -> tuple[int, int, int]:
    """Return the number of timeouts for the given performance dictionaries.

    Args:
        instance_to_par_configured: Dict mapping instance to configured performance
        instance_to_par_default: Dict mapping instance to default performance
        cutoff: Cutoff value; values above it count as timeouts

    Returns:
        A tuple containing timeout counts (configured, default, overlapping)
    """
    configured_timeouts = 0
    default_timeouts = 0
    overlapping_timeouts = 0

    for instance in instance_to_par_configured:
        configured_par = instance_to_par_configured[instance]
        default_par = instance_to_par_default[instance]
        # Count the number of values that exceed the cutoff (i.e. timed out)
        configured_timeouts += (configured_par > cutoff)
        default_timeouts += (default_par > cutoff)
        overlapping_timeouts += (configured_par > cutoff
                                 and default_par > cutoff)

    return configured_timeouts, default_timeouts, overlapping_timeouts
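A worked example of the timeout counting above, with made-up PAR values and a 60-second cutoff:

    configured = {"a.cnf": 3.5, "b.cnf": 600.0, "c.cnf": 600.0}
    default = {"a.cnf": 600.0, "b.cnf": 12.0, "c.cnf": 600.0}
    # get_timeouts(configured, default, 60) -> (2, 2, 1):
    # configured times out on b and c, default on a and c, and both only on c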

def get_ablation_table(scenario: AblationScenario) -> str:
    """Generate a LaTeX table of the ablation path.

    This is the result of the ablation analysis to determine the parameter importance.

    Args:
        scenario: The ablation scenario to tabulate

    Returns:
        A string containing the LaTeX table code of the ablation path
    """
    results = scenario.read_ablation_table()
    table_string = r"\begin{tabular}{rp{0.25\linewidth}rrr}"
    # "Round", "Flipped parameter", "Source value", "Target value", "Validation result"
    for i, line in enumerate(results):
        # If this fails something has changed in the representation of ablation tables
        if len(line) != 5:
            print("""ERROR: something has changed with the representation
                   of ablation tables""")
            sys.exit(-1)
        if i == 0:
            line = [f"\\textbf{{{word}}}" for word in line]

        # Put multiple variable changes in one round on a separate line
        if (len(line[1].split(",")) > 1
                and len(line[1].split(",")) == len(line[2].split(","))
                and len(line[1].split(",")) == len(line[3].split(","))):
            params = line[1].split(",")
            default_values = line[2].split(",")
            flipped_values = line[3].split(",")

            sublines = len(params)
            for subline in range(sublines):
                round = "" if subline != 0 else line[0]
                result = "" if subline + 1 != sublines else line[-1]
                printline = [round, params[subline], default_values[subline],
                             flipped_values[subline], result]
                table_string += " & ".join(printline) + " \\\\ "
        else:
            table_string += " & ".join(line) + " \\\\ "
        if i == 0:
            table_string += "\\hline "
    table_string += "\\end{tabular}"

    return table_string


def configuration_report_variables(target_dir: Path,
                                   solver: Solver,
                                   configurator: Configurator,
                                   validator: Validator,
                                   extractor_dir: Path,
                                   bib_path: Path,
                                   instance_set_train: InstanceSet,
                                   extractor_cuttoff: int,
                                   instance_set_test: InstanceSet = None,
                                   ablation: AblationScenario = None) -> dict:
    """Return a dict matching LaTeX variables and their values.

    Args:
        target_dir: Output directory for the report files
        solver: Object representation of the Solver
        configurator: Configurator for which the report is generated
        validator: Validator that provided the data set results
        extractor_dir: Path to the directory with feature extractors
        bib_path: Path to the bib file
        instance_set_train: Instance set used for training
        extractor_cuttoff: Cutoff time for feature extraction
        instance_set_test: Instance set used for testing. Defaults to None.
        ablation: The ablation scenario, if ablation was run. Defaults to None.

    Returns:
        A dictionary containing the variables and values
    """
    has_test = instance_set_test is not None

    full_dict = get_dict_variable_to_value_common(solver,
                                                  configurator,
                                                  validator,
                                                  ablation,
                                                  bib_path,
                                                  instance_set_train,
                                                  target_dir)

    if has_test:
        test_dict = get_dict_variable_to_value_test(target_dir,
                                                    solver,
                                                    configurator,
                                                    validator,
                                                    ablation,
                                                    instance_set_train,
                                                    instance_set_test)
        full_dict.update(test_dict)
    full_dict["testBool"] = f"\\test{str(has_test).lower()}"

    if ablation is None:
        full_dict["ablationBool"] = "\\ablationfalse"

    if full_dict["featuresBool"] == "\\featurestrue":
        full_dict["numFeatureExtractors"] =\
            len([p for p in extractor_dir.iterdir()])
        full_dict["featureExtractorList"] =\
            stex.list_to_latex([(p.name, "") for p in extractor_dir.iterdir()])
        full_dict["featureComputationCutoffTime"] = extractor_cuttoff

    return full_dict


def get_dict_variable_to_value_common(solver: Solver,
                                      configurator: Configurator,
                                      validator: Validator,
                                      ablation: AblationScenario,
                                      bibliography_path: Path,
                                      train_set: InstanceSet,
                                      target_directory: Path) -> dict:
    """Return a dict matching LaTeX variables and values used for all config. reports.

    Args:
        solver: The solver object
        configurator: Configurator for which the report is generated
        validator: Validator that provided the data set results
        ablation: The ablation scenario, if ablation was run
        bibliography_path: Path to the bib file
        train_set: Instance set used for training
        target_directory: Path to directory with configuration reports

    Returns:
        A dictionary containing the variables and values
    """
    objective = configurator.scenario.sparkle_objective
    _, opt_config = configurator.get_optimal_configuration(
        solver, train_set, objective)
    res_default = validator.get_validation_results(
        solver, train_set, config="")
    res_conf = validator.get_validation_results(
        solver, train_set, config=opt_config)
    instance_names = set([res[3] for res in res_default])
    opt_config_list = [f"{key}: {value}" for key, value in
                       Solver.config_str_to_dict(opt_config).items()]

    latex_dict = {"bibliographypath": bibliography_path.absolute()}
    latex_dict["performanceMeasure"] = objective.name
    smac_run_obj = SMAC2.get_smac_run_obj(objective)

    if smac_run_obj == "RUNTIME":
        latex_dict["runtimeBool"] = "\\runtimetrue"
    elif smac_run_obj == "QUALITY":
        latex_dict["runtimeBool"] = "\\runtimefalse"

    latex_dict["solver"] = solver.name
    latex_dict["instanceSetTrain"] = train_set.name
    latex_dict["sparkleVersion"] = about.version
    latex_dict["numInstanceInTrainingInstanceSet"] = len(instance_names)

    run_cutoff_time = configurator.scenario.cutoff_time
    latex_dict["numSmacRuns"] = configurator.scenario.number_of_runs
    latex_dict["smacObjective"] = smac_run_obj
    latex_dict["smacWholeTimeBudget"] = configurator.scenario.wallclock_time
    latex_dict["smacEachRunCutoffTime"] = run_cutoff_time
    latex_dict["optimisedConfiguration"] = stex.list_to_latex(opt_config_list)
    latex_dict["optimisedConfigurationTrainingPerformancePAR"] =\
        get_average_performance(res_conf, objective)
    latex_dict["defaultConfigurationTrainingPerformancePAR"] =\
        get_average_performance(res_default, objective)

    str_value = get_figure_configured_vs_default_on_instance_set(
        solver, train_set.name, res_default, res_conf, target_directory,
        smac_run_obj, float(run_cutoff_time), objective)
    latex_dict["figure-configured-vs-default-train"] = str_value

    # Retrieve timeout numbers for the training instances
    configured_timeouts_train, default_timeouts_train, overlapping_timeouts_train =\
        get_timeouts_instanceset(solver, train_set, configurator, validator,
                                 run_cutoff_time)

    latex_dict["timeoutsTrainDefault"] = default_timeouts_train
    latex_dict["timeoutsTrainConfigured"] = configured_timeouts_train
    latex_dict["timeoutsTrainOverlap"] = overlapping_timeouts_train
    latex_dict["ablationBool"] = get_ablation_bool(ablation)
    latex_dict["ablationPath"] = get_ablation_table(ablation)
    latex_dict["featuresBool"] = get_features_bool(
        configurator.scenario, solver.name, train_set)

    return latex_dict


def get_dict_variable_to_value_test(target_dir: Path,
                                    solver: Solver,
                                    configurator: Configurator,
                                    validator: Validator,
                                    ablation: AblationScenario,
                                    train_set: InstanceSet,
                                    test_set: InstanceSet) -> dict:
    """Return a dict matching test set specific LaTeX variables with their values.

    Args:
        target_dir: Path to where output should go
        solver: The solver object
        configurator: Configurator for which the report is generated
        validator: Validator that provided the data set results
        ablation: The ablation scenario, if ablation was run
        train_set: Instance set for training
        test_set: Instance set for testing

    Returns:
        A dictionary containing the variables and their values
    """
    _, config = configurator.get_optimal_configuration(
        solver, train_set, configurator.scenario.sparkle_objective)
    res_default = validator.get_validation_results(
        solver, test_set, config="")
    res_conf = validator.get_validation_results(
        solver, test_set, config=config)
    instance_names = set([res[3] for res in res_default])
    run_cutoff_time = configurator.scenario.cutoff_time
    objective = configurator.scenario.sparkle_objective
    test_dict = {"instanceSetTest": test_set.name}
    test_dict["numInstanceInTestingInstanceSet"] = len(instance_names)
    test_dict["optimisedConfigurationTestingPerformancePAR"] =\
        get_average_performance(res_conf, objective)
    test_dict["defaultConfigurationTestingPerformancePAR"] =\
        get_average_performance(res_default, objective)
    smac_run_obj = SMAC2.get_smac_run_obj(
        configurator.scenario.sparkle_objective)
    test_dict["figure-configured-vs-default-test"] =\
        get_figure_configured_vs_default_on_instance_set(
            solver, test_set.name, res_default, res_conf, target_dir, smac_run_obj,
            float(run_cutoff_time),
            configurator.scenario.sparkle_objective, data_type="test")

    # Retrieve timeout numbers for the testing instances
    configured_timeouts_test, default_timeouts_test, overlapping_timeouts_test =\
        get_timeouts_instanceset(solver,
                                 test_set,
                                 configurator,
                                 validator,
                                 run_cutoff_time)

    test_dict["timeoutsTestDefault"] = default_timeouts_test
    test_dict["timeoutsTestConfigured"] = configured_timeouts_test
    test_dict["timeoutsTestOverlap"] = overlapping_timeouts_test
    test_dict["ablationBool"] = get_ablation_bool(ablation)
    test_dict["ablationPath"] = get_ablation_table(ablation)
    return test_dict


def generate_report_for_configuration(solver: Solver,
                                      configurator: Configurator,
                                      validator: Validator,
                                      extractor_dir: Path,
                                      target_path: Path,
                                      latex_template_path: Path,
                                      bibliography_path: Path,
                                      train_set: InstanceSet,
                                      extractor_cuttoff: int,
                                      test_set: InstanceSet = None,
                                      ablation: AblationScenario = None) -> None:
    """Generate a report for algorithm configuration.

    Args:
        solver: Object representation of the solver
        configurator: Configurator for the report
        validator: Validator that validated the configurator
        extractor_dir: Path to the extractor used
        target_path: Where the report files will be placed
        latex_template_path: Path to the template to use for the report
        bibliography_path: The bib corresponding to the LaTeX template
        train_set: Instance set for training
        extractor_cuttoff: Cutoff for the feature extractor
        test_set: Instance set for testing. Defaults to None.
        ablation: The ablation scenario, if ablation was run. Defaults to None.
    """
    target_path.mkdir(parents=True, exist_ok=True)
    variables_dict = configuration_report_variables(
        target_path, solver, configurator, validator, extractor_dir, bibliography_path,
        train_set, extractor_cuttoff, test_set,
        ablation)
    stex.generate_report(latex_template_path,
                         "template-Sparkle-for-configuration.tex",
                         target_path,
                         "Sparkle_Report_for_Configuration",
                         variables_dict)
diff --git a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_generate_report_for_parallel_portfolio_py.html b/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_generate_report_for_parallel_portfolio_py.html
deleted file mode 100644
index 7a5a2e2da..000000000
--- a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_generate_report_for_parallel_portfolio_py.html
+++ /dev/null
@@ -1,465 +0,0 @@
[Deleted coverage.py v7.6.1 HTML report for sparkle/platform/generate_report_for_parallel_portfolio.py (0% of 137 statements, created at 2024-09-27 09:26 +0200); the module source embedded in the page follows.]
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Helper functions for parallel portfolio report generation."""
from pathlib import Path
import csv
import operator

import plotly.express as px
import pandas as pd
import plotly.io as pio

from sparkle.platform import latex as stex
from sparkle.platform import generate_report_for_selection as sgfs
from sparkle.types import SparkleObjective, SolverStatus
from sparkle.instance import InstanceSet


pio.kaleido.scope.mathjax = None  # Bug fix for kaleido


def get_solver_list_latex(solver_list: list[str]) -> str:
    """Return the list of solvers as a LaTeX string, including solver-seed combinations.

    Args:
        solver_list: The solver list to convert

    Returns:
        The solvers in the parallel portfolio as a LaTeX itemize body.
    """
    latex_itemize = ""

    for solver_str in solver_list:
        # Solver string may contain path and variation seed
        solver_split = solver_str.split(" ")
        solver_name = Path(solver_split[0]).name
        solver_seeds = int(solver_split[2]) if len(solver_split) == 3 else 0

        latex_itemize += f"\\item \\textbf{{{stex.underscore_for_latex(solver_name)}}}\n"
        # Only include if we used more than one seed
        if solver_seeds > 1:
            seeds = ",".join(str(seed) for seed in range(1, solver_seeds + 1))
            latex_itemize += f"\\item[]With seeds: {seeds}\n"

    return latex_itemize
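A sketch of the expected output, assuming the "path word count" solver-string format parsed above; the solver name is made up:

    print(get_solver_list_latex(["Solvers/MySAT seed 3"]))
    # \item \textbf{MySAT}
    # \item[]With seeds: 1,2,3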

def get_portfolio_metrics(
        solver_list: list[str],
        instance_set: InstanceSet,
        results: dict[str, list[list[str]]],
        objective: SparkleObjective) -> tuple[dict[str, float], str,
                                              dict[str, float], dict[str, float]]:
    """Return the portfolio metrics for the SBS, aggregated results per solver and VBS.

    Args:
        solver_list: List of solvers
        instance_set: The instance set the portfolio ran on
        results: Dictionary mapping each instance to its (solver, status, value) results
        objective: The objective to aggregate by

    Returns:
        A four-tuple:
            A dict containing the objective value per instance for the single best
                solver (SBS).
            A string containing the name of the single best solver.
            A second dict with aggregated objective values over all instances per
                solver.
            A third dict containing the virtual best solver (portfolio) value per
                instance.
    """
    corrected_solver_results = {solver: [] for solver in solver_list}
    instance_worst = None
    op = operator.gt if objective.minimise else operator.lt
    for instance in results:
        for (solver, status, value) in results[instance]:
            if value == "None":
                corrected_solver_results[solver].append(None)
                continue  # Solver failed to return a value
            value = float(value)
            if instance_worst is None or op(value, instance_worst):
                instance_worst = value
            if SolverStatus(status) != SolverStatus.SUCCESS:
                # If the solver wasn't successful, we also assign the worst known value
                corrected_solver_results[solver].append(None)
                continue
            corrected_solver_results[solver].append(float(value))
        for solver in corrected_solver_results:
            if corrected_solver_results[solver][-1] is None:
                corrected_solver_results[solver][-1] = instance_worst
    aggregated_results_solvers = {solver: objective.instance_aggregator(
        corrected_solver_results[solver]) for solver in corrected_solver_results}
    vbs_results_instance = {instance: objective.solver_aggregator(
        [corrected_solver_results[solver][index] for solver in corrected_solver_results])
        for index, instance in enumerate(instance_set._instance_names)}
    # Find the single best solver (SBS)
    sbs_name = min(aggregated_results_solvers, key=aggregated_results_solvers.get)
    sbs_results = corrected_solver_results[sbs_name]
    sbs_dict = {name: sbs_results[index]
                for index, name in enumerate(instance_set._instance_names)}
    return sbs_dict, sbs_name, aggregated_results_solvers, vbs_results_instance

def get_figure_parallel_portfolio_sparkle_vs_sbs(
        target_directory: Path,
        solver_list: list[str],
        instance_set: InstanceSet,
        results: dict[str, list[list[str]]],
        objective: SparkleObjective) -> tuple[
        str, dict[str, float], dict[str, float]]:
    """Generate the PaP vs SBS figure and return a string to include it in LaTeX.

    Args:
        target_directory: Path where to place the files
        solver_list: List of solvers in the portfolio
        instance_set: The instance set the portfolio ran on
        results: Dictionary mapping each instance to its (solver, status, value) results
        objective: The objective to plot by

    Returns:
        A three-tuple:
            latex_include: A string to include the PaP vs SBS figure in LaTeX
            dict_all_solvers: A dict containing the aggregated objective value per
                solver.
            vbs_instance_results: A dict with instance names and the objective value
                of the PaP (virtual best solver).
    """
    sbs_instance_results, sbs_solver, dict_all_solvers, vbs_instance_results =\
        get_portfolio_metrics(solver_list, instance_set, results, objective)
    figure_filename = "figure_parallel_portfolio_sparkle_vs_sbs"
    data = [[sbs_instance_results[instance], vbs_instance_results[instance]]
            for instance in sbs_instance_results]
    generate_figure(target_directory,
                    f"SBS ({stex.underscore_for_latex(sbs_solver)})",
                    "Parallel-Portfolio", figure_filename, objective.name, data)
    latex_include = f"\\includegraphics[width=0.6\\textwidth]{{{figure_filename}}}"
    return latex_include, dict_all_solvers, vbs_instance_results


def get_results_table(results: dict[str, list[list[str]]],
                      dict_all_solvers: dict[str, float], parallel_portfolio_path: Path,
                      dict_portfolio: dict[str, float],
                      solver_with_solutions: dict[str, int],
                      n_unsolved_instances: int, n_instances: int,
                      performance_metric: str) -> str:
    """Returns a LaTeX table with the portfolio results.

    Args:
        results: The total results with status and runtime per solver
        dict_all_solvers: A dict containing the aggregated objective value per solver.
        parallel_portfolio_path: Parallel portfolio path
        dict_portfolio: A dict with instance names and the objective value of the PaP.
        solver_with_solutions: A dict with solver name as key, and number of solved
            instances for the corresponding solver as value.
        n_unsolved_instances: Number of unsolved instances.
        n_instances: Number of instances.
        performance_metric: Name of the performance metric column.

    Returns:
        A string containing LaTeX code for a table with the portfolio results.
    """
    portfolio_par = 0.0
    for instance in dict_portfolio:
        portfolio_par += dict_portfolio[instance]
    portfolio_par = portfolio_par / n_instances
    total_killed = 0
    for instance in results:
        for (_, status, _) in results[instance]:
            total_killed += (status.lower() == "killed")
    # Table 1: Portfolio results
    table_string = (
        "\\caption{\\textbf{Portfolio results}} \\label{tab:portfolio_results} ")
    table_string += "\\begin{tabular}{rrrrr}"
    table_string += (
        "\\textbf{Portfolio nickname} & \\textbf{"
        f"{performance_metric}"
        "} & \\textbf{\\#Timeouts} & "
        "\\textbf{\\#Cancelled} & \\textbf{\\# Solved} \\\\ \\hline ")
    table_string += (
        f"{stex.underscore_for_latex(parallel_portfolio_path.name)} & "
        f"{round(portfolio_par, 2)} & {n_unsolved_instances} & {total_killed} & "
        f"{n_instances - n_unsolved_instances} \\\\ ")
    table_string += "\\end{tabular}"
    table_string += "\\bigskip"
    # Table 2: Solver results
    table_string += "\\caption{\\textbf{Solver results}} \\label{tab:solver_results} "
    table_string += "\\begin{tabular}{rrrrr}"

    for i, line in enumerate(dict_all_solvers):
        solver_name = Path(line).name

        if i == 0:
            table_string += (
                "\\textbf{Solver} & \\textbf{"
                f"{performance_metric}"
                "} & \\textbf{\\#Timeouts} & "
                "\\textbf{\\#Cancelled} & \\textbf{\\#Best solver} \\\\ \\hline ")

        if solver_name not in solver_with_solutions:
            cancelled = n_instances - n_unsolved_instances
            table_string += (
                f"{stex.underscore_for_latex(solver_name)} & "
                f"{round(dict_all_solvers[line], 2)} & {n_unsolved_instances} & "
                f"{cancelled} & 0 \\\\ ")
        else:
            cancelled = (n_instances - n_unsolved_instances
                         - solver_with_solutions[solver_name])
            table_string += (
                f"{stex.underscore_for_latex(solver_name)} & "
                f"{round(dict_all_solvers[line], 2)} & {n_unsolved_instances} & "
                f"{cancelled} & {solver_with_solutions[solver_name]} \\\\ ")
    table_string += "\\end{tabular}"
    return table_string

def parallel_report_variables(target_directory: Path,
                              parallel_portfolio_path: Path,
                              bibliograpghy_path: Path,
                              objective: SparkleObjective,
                              cutoff: int,
                              instance_set: InstanceSet) -> dict[str, str]:
    """Returns a mapping between LaTeX report variables and their values.

    Args:
        target_directory: Path to where to place the generated files.
        parallel_portfolio_path: Parallel portfolio path.
        bibliograpghy_path: Path to the bib file.
        objective: The objective of the portfolio.
        cutoff: The cutoff time for each solver.
        instance_set: The instance set the portfolio ran on.

    Returns:
        A dictionary that maps variables used in the LaTeX report to values.
    """
    variables_dict = {"bibliographypath": bibliograpghy_path.absolute(),
                      "cutoffTime": cutoff,
                      "performanceMetric": objective.name,
                      "numInstanceClasses": "1"}  # Currently no support for multi sets
    # Get the results data
    csv_data = [line for line in
                csv.reader((parallel_portfolio_path / "results.csv").open("r"))]
    header = csv_data[0]
    csv_data = csv_data[1:]
    solver_column = header.index("Solver")
    instance_column = header.index("Instance")
    status_column = header.index("status")
    objective_column = header.index(objective.name)
    solver_list = list(set([line[solver_column]
                            for line in csv_data]))  # Unique set of solvers
    results = {name: [] for name in instance_set._instance_names}
    for row in csv_data:
        if row[instance_column] in results.keys():
            results[row[instance_column]].append(
                [row[solver_column], row[status_column], row[objective_column]])

    variables_dict["numSolvers"] = len(solver_list)
    variables_dict["solverList"] = get_solver_list_latex(solver_list)
    variables_dict["instanceClassList"] =\
        sgfs.get_instance_set_count_list(instance_set._instance_paths)

    # Produce some statistics on the parallel portfolio
    solvers_solutions = {solver: 0 for solver in solver_list}
    instance_names_copy = instance_set._instance_names.copy()
    for line in csv_data:
        if (line[instance_column] in instance_names_copy
                and SolverStatus(line[status_column]) == SolverStatus.SUCCESS):
            solvers_solutions[line[solver_column]] += 1
            instance_names_copy.remove(line[instance_column])
    unsolved_instances = instance_set.size - sum([solvers_solutions[key]
                                                  for key in solvers_solutions])
    inst_succes = []
    for solver in solvers_solutions:
        inst_succes.append("\\item Solver "
                           f"\\textbf{{{stex.underscore_for_latex(solver)}}} was the "
                           "best solver on "
                           f"\\textbf{{{solvers_solutions[solver]}}} instance(s)")
    if unsolved_instances > 0:
        inst_succes.append(f"\\item \\textbf{{{unsolved_instances}}} instance(s) "
                           "remained unsolved")

    variables_dict["solversWithSolution"] = "\n".join(inst_succes)

    (figure_name, dict_all_solvers,
     dict_actual_parallel_portfolio_penalty_time_on_each_instance) =\
        get_figure_parallel_portfolio_sparkle_vs_sbs(target_directory, solver_list,
                                                     instance_set, results, objective)

    variables_dict["figure-parallel-portfolio-sparkle-vs-sbs"] = figure_name
    variables_dict["resultsTable"] = get_results_table(
        results, dict_all_solvers, parallel_portfolio_path,
        dict_actual_parallel_portfolio_penalty_time_on_each_instance,
        solvers_solutions, unsolved_instances, instance_set.size, objective.name)

    if objective.time:
        variables_dict["decisionBool"] = "\\decisiontrue"
    else:
        variables_dict["decisionBool"] = "\\decisionfalse"
    return variables_dict

def generate_figure(
        target_directory: Path,
        sbs_name: str, parallel_portfolio_name: str,
        figure_parallel_portfolio_vs_sbs_filename: str,
        performance_measure: str, data: list) -> None:
    """Generates the comparison image for the parallel portfolio report."""
    upper_bound = max([x for xs in data for x in xs]) * 1.5
    lower_bound = 0.01

    output_plot = target_directory / f"{figure_parallel_portfolio_vs_sbs_filename}.pdf"

    xlabel = f"{sbs_name}, {performance_measure}"
    ylabel = f"{parallel_portfolio_name}"
    df = pd.DataFrame(data, columns=[xlabel, ylabel])
    fig = px.scatter(data_frame=df, x=xlabel, y=ylabel,
                     range_x=[lower_bound, upper_bound],
                     range_y=[lower_bound, upper_bound],
                     log_x=True, log_y=True,
                     width=500, height=500)
    # Add in the separation line
    fig.add_shape(type="line", x0=0, y0=0, x1=upper_bound, y1=upper_bound,
                  line=dict(color="lightgrey", width=1))
    fig.update_traces(marker=dict(color="RoyalBlue", symbol="x"))
    fig.update_layout(
        plot_bgcolor="white"
    )
    fig.update_xaxes(
        type="log",
        mirror=True,
        dtick=1,
        ticks="outside",
        showline=True,
        showgrid=True,
        linecolor="black",
        gridcolor="lightgrey"
    )
    fig.update_yaxes(
        type="log",
        mirror=True,
        dtick=1,
        ticks="outside",
        showline=True,
        showgrid=True,
        linecolor="black",
        gridcolor="lightgrey"
    )
    fig.write_image(output_plot)
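An illustrative call of the plotting helper above; the output directory (assumed to exist) and the (SBS, portfolio) value pairs are example data:

    generate_figure(Path("Output/parallel_portfolio_report"),
                    "SBS (MySAT)", "Parallel-Portfolio",
                    "figure_parallel_portfolio_sparkle_vs_sbs", "PAR10",
                    [[1.0, 0.5], [10.0, 2.0], [60.0, 60.0]])
    # Writes figure_parallel_portfolio_sparkle_vs_sbs.pdf; points below the diagonal
    # are instances on which the portfolio beats the single best solver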

def generate_report_parallel_portfolio(parallel_portfolio_path: Path,
                                       target_path: Path,
                                       latex_template: Path,
                                       bibliograpghy_path: Path,
                                       objective: SparkleObjective,
                                       cutoff: int,
                                       instances: InstanceSet) -> None:
    """Generate a report for a parallel algorithm portfolio.

    Args:
        parallel_portfolio_path: Parallel portfolio path.
        target_path: Where the report data will be placed.
        latex_template: Path to the LaTeX template used.
        bibliograpghy_path: Path to the bib file.
        objective: The objective of the portfolio.
        cutoff: The cutoff time for each solver.
        instances: The instance set the portfolio ran on.
    """
    target_path.mkdir(parents=True, exist_ok=True)
    dict_variable_to_value = parallel_report_variables(
        target_path, parallel_portfolio_path, bibliograpghy_path, objective,
        cutoff, instances)

    stex.generate_report(latex_template,
                         "template-Sparkle-for-parallel-portfolio.tex",
                         target_path,
                         "Sparkle_Report_Parallel_Portfolio",
                         dict_variable_to_value)
diff --git a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_generate_report_for_selection_py.html b/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_generate_report_for_selection_py.html
deleted file mode 100644
index a71842b09..000000000
--- a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_generate_report_for_selection_py.html
+++ /dev/null
@@ -1,360 +0,0 @@
[Deleted coverage.py v7.6.1 HTML report for sparkle/platform/generate_report_for_selection.py (0% of 74 statements, created at 2024-09-27 09:26 +0200); the module source embedded in the page follows.]
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Helper functions for selection report generation."""
import sys
from pathlib import Path
from collections import Counter

from sparkle.CLI.compute_marginal_contribution\
    import compute_selector_marginal_contribution

from sparkle.platform import latex as stex
from sparkle.structures import PerformanceDataFrame, FeatureDataFrame
from sparkle.types.objective import SparkleObjective


def get_num_instance_sets(instance_list: list[str]) -> int:
    """Get the number of instance sets.

    Args:
        instance_list: List of instances to use

    Returns:
        The number of instance sets.
    """
    return len(set([Path(instance_path).parent.name
                    for instance_path in instance_list]))


def get_instance_set_count_list(instance_list: list[str] = None) -> str:
    """Get the instance sets with their instance counts for use in a LaTeX document.

    Args:
        instance_list: List of instance paths

    Returns:
        The list of instance sets as LaTeX str.
    """
    instance_list = [Path(instance_path).parent.name for instance_path in instance_list]
    count = Counter(instance_list)
    rows = [(inst_key, f", consisting of {count[inst_key]} instances")
            for inst_key in count]
    return stex.list_to_latex(rows)
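A small illustration of the two helpers above, with made-up instance paths:

    instances = ["Instances/PTN/a.cnf", "Instances/PTN/b.cnf", "Instances/CSP/c.xml"]
    print(get_num_instance_sets(instances))  # 2: the PTN and CSP sets
    # get_instance_set_count_list(instances) passes [("PTN", ", consisting of 2
    # instances"), ("CSP", ", consisting of 1 instances")] to stex.list_to_latex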

42def solver_ranked_latex_list(solver_ranking: list[tuple[str, float]], 

-

43 objective: SparkleObjective = None) -> str: 

-

44 """Convert a list of the solvers ranked by performance to LaTeX. 

-

45 

-

46 Returns: 

-

47 The list of solvers ranked as LaTeX str. 

-

48 """ 

-

49 objective_str = f"{objective}: " if objective is not None else "" 

-

50 return stex.list_to_latex([(row[0], f", {objective_str} {row[1]}") 

-

51 for row in solver_ranking]) 

-

52 

-

53 

-

54def get_portfolio_selector_performance(selection_scenario: Path) -> PerformanceDataFrame: 

-

55 """Creates a dictionary with the portfolio selector performance on each instance. 

-

56 

-

57 Returns: 

-

58 A dict that maps instance name str to performance. 

-

59 """ 

-

60 portfolio_selector_performance_path = selection_scenario / "performance.csv" 

-

61 if not portfolio_selector_performance_path.exists(): 

-

62 print(f"ERROR: {portfolio_selector_performance_path} does not exist.") 

-

63 sys.exit(-1) 

-

64 return PerformanceDataFrame(portfolio_selector_performance_path) 

-

65 

-

66 

-

67def get_figure_portfolio_selector_vs_sbs( 

-

68 output_dir: Path, 

-

69 objective: SparkleObjective, 

-

70 train_data: PerformanceDataFrame, 

-

71 portfolio_selector_performance: PerformanceDataFrame, 

-

72 sbs_solver: str) -> str: 

-

73 """Create a LaTeX plot comparing the selector and the SBS. 

-

74 

-

75 The plot compares the performance on each instance of the portfolio selector created 

-

76 by Sparkle and the SBS (single best solver). 

-

77 

-

78 Returns: 

-

79 LaTeX str to include the comparison plot in a LaTeX report. 

-

80 """ 

-

81 # We create a point of x,y form (SBS performance, portfolio performance) 

-

82 selector = portfolio_selector_performance.solvers[0] 

-

83 points = [[train_data.get_value(sbs_solver, instance, objective.name), 

-

84 portfolio_selector_performance.get_value(selector, 

-

85 instance, 

-

86 objective.name)] 

-

87 for instance in portfolio_selector_performance.instances] 

-

88 

-

89 figure_filename = "figure_portfolio_selector_sparkle_vs_sbs" 

-

90 sbs_solver_name = Path(sbs_solver).name 

-

91 

-

92 stex.generate_comparison_plot(points, 

-

93 figure_filename, 

-

94 xlabel=f"SBS ({sbs_solver_name}) [{objective}]", 

-

95 ylabel=f"Sparkle Selector [{objective}]", 

-

96 limit="magnitude", 

-

97 limit_min=0.25, 

-

98 limit_max=0.25, 

-

99 replace_zeros=True, 

-

100 output_dir=output_dir) 

-

101 return f"\\includegraphics[width=0.6\\textwidth]{{{figure_filename}}}" 

-

102 

-

103 

-

104def get_figure_portfolio_selector_sparkle_vs_vbs( 

-

105 output_dir: Path, 

-

106 objective: SparkleObjective, 

-

107 train_data: PerformanceDataFrame, 

-

108 actual_portfolio_selector_penalty: PerformanceDataFrame) -> str: 

-

109 """Create a LaTeX plot comparing the selector and the VBS. 

-

110 

-

111 The plot compares the performance on each instance of the portfolio selector created 

-

112 by Sparkle and the VBS (virtual best solver). 

-

113 

-

114 Returns: 

-

115 LaTeX str to include the comparison plot in a LaTeX report. 

-

116 """ 

-

117 vbs_performance = train_data.best_instance_performance(objective=objective.name) 

-

118 instances = actual_portfolio_selector_penalty.instances 

-

119 solver = actual_portfolio_selector_penalty.solvers[0] 

-

120 points = [(vbs_performance[instance], 

-

121 actual_portfolio_selector_penalty.get_value(solver, 

-

122 instance, 

-

123 objective.name)) 

-

124 for instance in instances] 

-

125 

-

126 figure_filename = "figure_portfolio_selector_sparkle_vs_vbs" 

-

127 

-

128 stex.generate_comparison_plot(points, 

-

129 figure_filename, 

-

130 xlabel=f"VBS [{objective}]", 

-

131 ylabel=f"Sparkle Selector [{objective.name}]", 

-

132 limit="magnitude", 

-

133 limit_min=0.25, 

-

134 limit_max=0.25, 

-

135 replace_zeros=True, 

-

136 output_dir=output_dir) 

-

137 return f"\\includegraphics[width=0.6\\textwidth]{{{figure_filename}}}" 

-

138 

-

139 

-
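Both figure helpers reduce to the same pattern: build a list of (x, y) performance pairs and hand it to stex.generate_comparison_plot, defined in sparkle/platform/latex.py (deleted below). A minimal standalone sketch with toy data; the import path for the module alias stex and the output directory are assumptions, and any two equal-length performance columns work:

    from pathlib import Path

    import sparkle.platform.latex as stex  # assumed import path for the helper module

    # Toy (SBS, selector) performance pairs for three instances
    points = [[10.0, 2.5], [7.5, 7.5], [0.9, 1.2]]
    stex.generate_comparison_plot(points,
                                  "toy_sbs_vs_selector",
                                  xlabel="SBS [PAR10]",
                                  ylabel="Selector [PAR10]",
                                  limit="magnitude",
                                  limit_min=0.25,
                                  limit_max=0.25,
                                  output_dir=Path("/tmp"))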

def selection_report_variables(
        target_dir: Path,
        bibliography_path: Path,
        extractor_path: Path,
        selection_scenario: Path,
        performance_data: PerformanceDataFrame,
        feature_data: FeatureDataFrame,
        objective: SparkleObjective,
        extractor_cutoff: int,
        cutoff: int,
        test_case_data: PerformanceDataFrame = None) -> dict[str, str]:
    """Return a dict matching variables in the LaTeX template with their values.

    Args:
        target_dir: Output path
        bibliography_path: Path to the bib file
        test_case_data: Performance data for the test set. Defaults to None.

    Returns:
        A dict matching str variables in the LaTeX template with their value str.
    """
    actual_performance_data = get_portfolio_selector_performance(selection_scenario)
    solver_performance_ranking = performance_data.get_solver_ranking(
        objective=objective)
    single_best_solver = solver_performance_ranking[0][0]
    latex_dict = {"bibliographypath": bibliography_path.absolute(),
                  "numSolvers": performance_data.num_solvers,
                  "solverList": stex.list_to_latex([(s, "")
                                                    for s in performance_data.solvers])}
    latex_dict["numFeatureExtractors"] = len(
        [p for p in extractor_path.iterdir() if p.is_dir()])
    latex_dict["featureExtractorList"] = stex.list_to_latex(
        [(f, "") for f in extractor_path.iterdir()])
    latex_dict["numInstanceClasses"] = get_num_instance_sets(performance_data.instances)
    latex_dict["instanceClassList"] =\
        get_instance_set_count_list(performance_data.instances)
    latex_dict["featureComputationCutoffTime"] = extractor_cutoff
    latex_dict["performanceComputationCutoffTime"] = cutoff
    rank_list_perfect = performance_data.marginal_contribution(objective, sort=True)
    rank_list_actual = compute_selector_marginal_contribution(performance_data,
                                                              feature_data,
                                                              selection_scenario,
                                                              objective)
    latex_dict["solverPerfectRankingList"] = solver_ranked_latex_list(rank_list_perfect)
    latex_dict["solverActualRankingList"] = solver_ranked_latex_list(rank_list_actual)
    latex_dict["PARRankingList"] = solver_ranked_latex_list(solver_performance_ranking,
                                                            objective)
    latex_dict["VBSPAR"] = objective.instance_aggregator(
        performance_data.best_instance_performance(objective=objective.name))
    latex_dict["actualPAR"] = actual_performance_data.mean(objective=objective.name)
    latex_dict["metric"] = objective.name
    latex_dict["figure-portfolio-selector-sparkle-vs-sbs"] =\
        get_figure_portfolio_selector_vs_sbs(
            target_dir, objective, performance_data,
            actual_performance_data, single_best_solver)
    latex_dict["figure-portfolio-selector-sparkle-vs-vbs"] =\
        get_figure_portfolio_selector_sparkle_vs_vbs(target_dir,
                                                     objective,
                                                     performance_data,
                                                     actual_performance_data)
    latex_dict["testBool"] = r"\testfalse"

    # Train and test
    if test_case_data is not None:
        # Escaped braces, so the rendered value is \textbf{<name>}
        latex_dict["testInstanceClass"] =\
            f"\\textbf{{{test_case_data.csv_filepath.parent.name}}}"
        latex_dict["numInstanceInTestInstanceClass"] =\
            test_case_data.num_instances
        latex_dict["testActualPAR"] = test_case_data.mean(objective=objective.name)
        latex_dict["testBool"] = r"\testtrue"

    return latex_dict


def generate_report_selection(target_path: Path,
                              latex_dir: Path,
                              latex_template: Path,
                              bibliography_path: Path,
                              extractor_path: Path,
                              selection_scenario: Path,
                              feature_data: FeatureDataFrame,
                              train_data: PerformanceDataFrame,
                              objective: SparkleObjective,
                              extractor_cutoff: int,
                              cutoff: int,
                              test_case_data: PerformanceDataFrame = None) -> None:
    """Generate a report for algorithm selection.

    Args:
        target_path: Path where the output files will be placed.
        latex_dir: The LaTeX directory
        latex_template: The template for the report
        bibliography_path: Path to the bib file.
        extractor_path: Path to the extractor used
        selection_scenario: Path to the selector scenario
        feature_data: Feature data created by the extractor
        train_data: The performance input data for the selector
        objective: The objective for the selector
        extractor_cutoff: The cutoff per feature extractor run
        cutoff: The cutoff per solver run
        test_case_data: Performance data for the test set. Defaults to None.
    """
    # Include results on the test set if test case data is given
    latex_report_filename = Path("Sparkle_Report")
    if test_case_data is not None:
        latex_report_filename = Path("Sparkle_Report_for_Test")

    target_path.mkdir(parents=True, exist_ok=True)
    dict_variable_to_value = selection_report_variables(target_path,
                                                        bibliography_path,
                                                        extractor_path,
                                                        selection_scenario,
                                                        train_data,
                                                        feature_data,
                                                        objective,
                                                        extractor_cutoff,
                                                        cutoff,
                                                        test_case_data)
    stex.generate_report(latex_dir,
                         latex_template,
                         target_path,
                         latex_report_filename,
                         dict_variable_to_value)
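For orientation, a sketch of how the entry point above is wired up. All paths and the template name are hypothetical stand-ins; in the platform they come from the settings objects (see settings_objects.py below) and from the feature/performance CSVs produced by earlier Sparkle commands:

    from pathlib import Path

    generate_report_selection(
        target_path=Path("Output/Selection/Analysis"),
        latex_dir=Path("Components/Sparkle-latex-source"),
        latex_template="template-Sparkle.tex",  # hypothetical template name
        bibliography_path=Path("Components/Sparkle-latex-source/SparkleReport.bib"),
        extractor_path=Path("Extractors"),
        selection_scenario=Path("Output/Selection/Raw_Data/scenario"),
        feature_data=feature_data,        # a FeatureDataFrame, loaded elsewhere
        train_data=performance_data,      # a PerformanceDataFrame, loaded elsewhere
        objective=objective,              # a SparkleObjective, e.g. PAR10
        extractor_cutoff=60,
        cutoff=60)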
diff --git a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_latex_py.html b/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_latex_py.html
deleted file mode 100644
index 593b820ff..000000000
--- a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_latex_py.html
+++ /dev/null
@@ -1,346 +0,0 @@

[Deleted coverage page for sparkle/platform/latex.py: 28% of 79 statements covered; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. Navigation chrome omitted; the rendered source of the deleted page follows.]

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Helper classes/methods for LaTeX and BibTeX."""
from shutil import which
from pathlib import Path
import subprocess
from enum import Enum

import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio

pio.kaleido.scope.mathjax = None  # Bug fix for kaleido


class ReportType(str, Enum):
    """Enum for separating different types of reports."""
    ALGORITHM_SELECTION = "algorithm_selection"
    ALGORITHM_CONFIGURATION = "algorithm_configuration"
    PARALLEL_PORTFOLIO = "parallel_portfolio"


def check_tex_commands_exist(latex_directory_path: Path) -> None:
    """Raise an exception if one of the LaTeX commands is not present."""
    if which("bibtex") is None or which("pdflatex") is None:
        raise Exception("Error: It seems like LaTeX is not available on your system.\n"
                        "You can install LaTeX and run the command again, "
                        f"or copy the source files in {latex_directory_path} on your "
                        "local machine to generate the report.")


def underscore_for_latex(string: str) -> str:
    """Return the input str with the underscores escaped for use in LaTeX.

    Args:
        string: A given str with underscores.

    Returns:
        The corresponding str with underscores escaped.
    """
    return string.replace("_", "\\_")


def list_to_latex(content: list | list[tuple]) -> str:
    """Convert a list to LaTeX.

    Args:
        content: The list to convert. If items are tuples, the first element of
            each tuple will be boldface.

    Returns:
        The list as LaTeX str.
    """
    if len(content) == 0:
        return "\\item"
    if isinstance(content[0], tuple):
        return "".join(f"\\item \\textbf{{{item[0]}}}{item[1]}" for item in content)
    return "".join(f"\\item {item}\n" for item in content)

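The tuple form of list_to_latex is what the report code above relies on; a small illustration with made-up solver names and values:

    print(list_to_latex([("CSCCSat", ", PAR10: 42.1"), ("MiniSAT", ", PAR10: 97.3")]))
    # \item \textbf{CSCCSat}, PAR10: 42.1\item \textbf{MiniSAT}, PAR10: 97.3
    print(list_to_latex(["train", "test"]))
    # \item train
    # \item test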

def generate_comparison_plot(points: list,
                             figure_filename: str,
                             xlabel: str = "default",
                             ylabel: str = "optimised",
                             title: str = "",
                             scale: str = "log",
                             limit: str = "magnitude",
                             limit_min: float = 0.2,
                             limit_max: float = 0.2,
                             replace_zeros: bool = True,
                             magnitude_lines: int = 2147483647,
                             output_dir: Path = None) -> None:
    """Create comparison plots between two different solvers/portfolios.

    Args:
        points: List of points representing the performance results of
            (solverA, solverB).
        figure_filename: Filename without filetype (e.g., .jpg) to save the figure to.
        xlabel: Name of solverA (default: "default")
        ylabel: Name of solverB (default: "optimised")
        title: Display title in the image (default: "")
        scale: Axis scale, one of [linear, log] (default: "log")
        limit: The method to compute the axis limits in the figure,
            one of [absolute, relative, magnitude] (default: "magnitude").
            absolute: Uses the limit_min/limit_max values as absolute values.
            relative: Decreases/increases relative to the min/max values found in
                the points, i.e., min / limit_min and max * limit_max.
            magnitude: Widens the order of magnitude (base 10) of the min/max values
                in the points, i.e., 10 ** (floor(log10(min)) - limit_min)
                and 10 ** (ceil(log10(max)) + limit_max).
        limit_min: Value used to compute the minimum limit.
        limit_max: Value used to compute the maximum limit.
        replace_zeros: Replace zero-valued performances with a very small value to
            make plotting on a log scale possible.
        magnitude_lines: Draw magnitude lines (only supported for log scale).
        output_dir: Directory path to place the figure and its intermediate files in
            (default: current working directory).
    """
    output_dir = Path() if output_dir is None else Path(output_dir)

    df = pd.DataFrame(points, columns=[xlabel, ylabel])
    if replace_zeros and (df <= 0).any(axis=None):
        # Log scale cannot deal with negative and zero values;
        # replace them with the smallest positive float
        df[df <= 0] = np.nextafter(0, 1)

    # Process range values
    min_point_value = df.min(numeric_only=True).min()
    max_point_value = df.max(numeric_only=True).max()

    if limit == "absolute":
        min_value = limit_min
        max_value = limit_max
    elif limit == "relative":
        min_value = (min_point_value * (1 / limit_min) if min_point_value > 0
                     else min_point_value * limit_min)
        max_value = (max_point_value * limit_max if max_point_value > 0
                     else max_point_value * (1 / limit_max))
    elif limit == "magnitude":
        min_value = 10 ** (np.floor(np.log10(min_point_value)) - limit_min)
        max_value = 10 ** (np.ceil(np.log10(max_point_value)) + limit_max)

    if scale == "log" and np.min(points) <= 0:
        raise Exception("Cannot plot negative and zero values on a log scale")

    output_plot = output_dir / f"{figure_filename}.pdf"
    log_scale = scale == "log"
    fig = px.scatter(data_frame=df, x=xlabel, y=ylabel,
                     range_x=[min_value, max_value], range_y=[min_value, max_value],
                     title=title, log_x=log_scale, log_y=log_scale,
                     width=500, height=500)
    # Add in the separation line
    fig.add_shape(type="line", x0=0, y0=0, x1=max_value, y1=max_value,
                  line=dict(color="lightgrey", width=1))
    fig.update_traces(marker=dict(color="RoyalBlue", symbol="x"))
    fig.update_layout(
        plot_bgcolor="white"
    )
    fig.update_xaxes(
        type="linear" if not log_scale else "log",
        mirror=True,
        tickmode="linear",
        ticks="outside",
        tick0=0,
        dtick=100 if not log_scale else 1,
        showline=True,
        linecolor="black",
        gridcolor="lightgrey"
    )
    fig.update_yaxes(
        type="linear" if not log_scale else "log",
        mirror=True,
        tickmode="linear",
        ticks="outside",
        tick0=0,
        dtick=100 if not log_scale else 1,
        showline=True,
        linecolor="black",
        gridcolor="lightgrey"
    )
    fig.write_image(output_plot)

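To make the "magnitude" limit concrete: with the 0.25 margins used by the selector plots, a minimum point of 3.0 and a maximum of 950.0 give roughly [0.56, 1778] as axis range. A small check, assuming numpy as imported above:

    import numpy as np

    limit_min = limit_max = 0.25
    min_value = 10 ** (np.floor(np.log10(3.0)) - limit_min)    # 10**(0 - 0.25) ≈ 0.562
    max_value = 10 ** (np.ceil(np.log10(950.0)) + limit_max)   # 10**(3 + 0.25) ≈ 1778.3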

def fill_template_tex(template_tex: str, variables: dict) -> str:
    """Given a LaTeX template, replace all the @@ variables using the dict.

    Args:
        template_tex: The template to be populated
        variables: Variable names (key) with their target (value)

    Returns:
        The populated LaTeX string.
    """
    for variable_key, target_value in variables.items():
        variable = f"@@{variable_key}@@"
        target_value = str(target_value)
        # We don't modify variable names in the LaTeX file
        if "\\includegraphics" not in target_value and "\\label" not in target_value:
            # Rectify underscores in target_value
            target_value = target_value.replace("_", r"\textunderscore ")
        template_tex = template_tex.replace(variable, target_value)
    return template_tex

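A self-contained illustration of the @@ substitution and the underscore handling; the template text and values are made up:

    template = "Included solvers: @@solverList@@ (metric: @@metric@@)"
    print(fill_template_tex(template, {"solverList": 2, "metric": "PAR_10"}))
    # Included solvers: 2 (metric: PAR\textunderscore 10)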

def compile_pdf(latex_files_path: Path, latex_report_filename: Path) -> Path:
    """Compile the given LaTeX files to a PDF.

    Args:
        latex_files_path: Path to the directory with source files
            where the report will be generated.
        latex_report_filename: Name of the output files.

    Returns:
        Path to the newly generated report in PDF format.
    """
    pdf_process = subprocess.run(["pdflatex", "-interaction=nonstopmode",
                                  f"{latex_report_filename}.tex"],
                                 cwd=latex_files_path, capture_output=True)

    if pdf_process.returncode != 0:
        print(f"[{pdf_process.returncode}] ERROR generating with PDFLaTeX command:\n"
              f"{pdf_process.stdout.decode()}\n {pdf_process.stderr.decode()}\n")

    bibtex_process = subprocess.run(["bibtex", f"{latex_report_filename}.aux"],
                                    cwd=latex_files_path, capture_output=True)

    if bibtex_process.returncode != 0:
        print("ERROR whilst generating with BibTeX command:"
              f"{bibtex_process.stdout} {bibtex_process.stderr}")

    # TODO: Fix compilation for references
    # (~\ref{} yields [?] in the PDF, re-running the command fixes it)
    # We have to re-run the same pdflatex command to take in the updated .bbl file
    # from bibtex, but bibtex cannot function without the .aux file produced by
    # pdflatex. Hence, run twice.
    pdf_process = subprocess.run(["pdflatex", "-interaction=nonstopmode",
                                  f"{latex_report_filename}.tex"],
                                 cwd=latex_files_path, capture_output=True)

    return Path(latex_files_path / latex_report_filename).with_suffix(".pdf")


def generate_report(latex_source_path: Path,
                    latex_template_name: str,
                    target_path: Path,
                    report_name: str,
                    variable_dict: dict) -> None:
    """General steps to generate a report.

    Args:
        latex_source_path: The path to the template
        latex_template_name: The template name
        target_path: The directory where the result should be placed
        report_name: The name of the PDF (without suffix)
        variable_dict: Dictionary mapping template variables to their values
    """
    latex_template_filepath = latex_source_path / latex_template_name

    report_content = latex_template_filepath.open("r").read()
    report_content = fill_template_tex(report_content, variable_dict)

    target_path.mkdir(parents=True, exist_ok=True)
    latex_report_filepath = target_path / report_name
    latex_report_filepath = latex_report_filepath.with_suffix(".tex")
    Path(latex_report_filepath).open("w+").write(report_content)

    check_tex_commands_exist(target_path)
    report_path = compile_pdf(target_path, report_name)

    print(f"Report is placed at: {report_path}")
diff --git a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_settings_objects_py.html b/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_settings_objects_py.html
deleted file mode 100644
index 7f2b093db..000000000
--- a/Documentation/source/_static/coverage/z_8ad50cee1c09c10b_settings_objects_py.html
+++ /dev/null
@@ -1,1000 +0,0 @@

[Deleted coverage page for sparkle/platform/settings_objects.py: 31% of 544 statements covered; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200. Navigation chrome omitted; the rendered source of the deleted page follows.]

1"""Classes and Enums to control settings.""" 

-

2from __future__ import annotations 

-

3import configparser 

-

4from enum import Enum 

-

5from pathlib import Path 

-

6from pathlib import PurePath 

-

7 

-

8from sparkle.types import SparkleObjective, resolve_objective 

-

9from sparkle.types.objective import PAR 

-

10from sparkle.solver import Selector 

-

11from sparkle.configurator.configurator import Configurator 

-

12from sparkle.solver.verifier import SATVerifier 

-

13from sparkle.configurator import implementations as cim 

-

14 

-

15from runrunner import Runner 

-

16from sparkle.platform.cli_types import VerbosityLevel 

-

17 

-

18 

-

19class SettingState(Enum): 

-

20 """Enum of possible setting states.""" 

-

21 

-

22 NOT_SET = 0 

-

23 DEFAULT = 1 

-

24 FILE = 2 

-

25 CMD_LINE = 3 

-

26 

-

27 

-

28class Settings: 

-

29 """Class to read, write, set, and get settings.""" 

-

30 # CWD Prefix 

-

31 cwd_prefix = Path() # Empty for now 

-

32 

-

33 # Library prefix 

-

34 lib_prefix = Path(__file__).parent.parent.resolve() 

-

35 

-

36 # Default directory names 

-

37 rawdata_dir = Path("Raw_Data") 

-

38 analysis_dir = Path("Analysis") 

-

39 __settings_dir = Path("Settings") 

-

40 __settings_file = Path("sparkle_settings.ini") 

-

41 

-

42 # Default settings path 

-

43 DEFAULT_settings_path = PurePath(cwd_prefix / __settings_dir / __settings_file) 

-

44 

-

45 # Default library pathing 

-

46 DEFAULT_components = lib_prefix / "Components" 

-

47 

-

48 # Example settings path 

-

49 DEFAULT_example_settings_path = PurePath(DEFAULT_components / "sparkle_settings.ini") 

-

50 

-

51 # Runsolver component 

-

52 DEFAULT_runsolver_dir = DEFAULT_components / "runsolver" / "src" 

-

53 DEFAULT_runsolver_exec = DEFAULT_runsolver_dir / "runsolver" 

-

54 

-

55 # Ablation component 

-

56 DEFAULT_ablation_dir = DEFAULT_components / "ablationAnalysis-0.9.4" 

-

57 DEFAULT_ablation_exec = DEFAULT_ablation_dir / "ablationAnalysis" 

-

58 DEFAULT_ablation_validation_exec = DEFAULT_ablation_dir / "ablationValidation" 

-

59 

-

60 # Autofolio component 

-

61 DEFAULT_general_sparkle_selector = DEFAULT_components / "AutoFolio/scripts/autofolio" 

-

62 

-

63 # Report component 

-

64 DEFAULT_latex_source = DEFAULT_components / "Sparkle-latex-source" 

-

65 DEFAULT_latex_bib = DEFAULT_latex_source / "SparkleReport.bib" 

-

66 

-

67 # Default input directory pathing 

-

68 DEFAULT_solver_dir = cwd_prefix / "Solvers" 

-

69 DEFAULT_instance_dir = cwd_prefix / "Instances" 

-

70 DEFAULT_extractor_dir = cwd_prefix / "Extractors" 

-

71 DEFAULT_snapshot_dir = cwd_prefix / "Snapshots" 

-

72 

-

73 # Default output directory pathing 

-

74 DEFAULT_tmp_output = cwd_prefix / "Tmp" 

-

75 DEFAULT_output = cwd_prefix / "Output" 

-

76 DEFAULT_configuration_output = DEFAULT_output / "Configuration" 

-

77 DEFAULT_selection_output = DEFAULT_output / "Selection" 

-

78 DEFAULT_validation_output = DEFAULT_output / "Validation" 

-

79 DEFAULT_parallel_portfolio_output = DEFAULT_output / "Parallel_Portfolio" 

-

80 DEFAULT_ablation_output = DEFAULT_output / "Ablation" 

-

81 DEFAULT_log_output = DEFAULT_output / "Log" 

-

82 

-

83 # Default output subdirs 

-

84 DEFAULT_configuration_output_raw = DEFAULT_configuration_output / rawdata_dir 

-

85 DEFAULT_configuration_output_analysis = DEFAULT_configuration_output / analysis_dir 

-

86 DEFAULT_selection_output_raw = DEFAULT_selection_output / rawdata_dir 

-

87 DEFAULT_selection_output_analysis = DEFAULT_selection_output / analysis_dir 

-

88 DEFAULT_parallel_portfolio_output_raw =\ 

-

89 DEFAULT_parallel_portfolio_output / rawdata_dir 

-

90 DEFAULT_parallel_portfolio_output_analysis =\ 

-

91 DEFAULT_parallel_portfolio_output / analysis_dir 

-

92 

-

93 # Old default output dirs which should be part of something else 

-

94 DEFAULT_feature_data = DEFAULT_output / "Feature_Data" 

-

95 DEFAULT_performance_data = DEFAULT_output / "Performance_Data" 

-

96 

-

97 # Collection of all working dirs for platform 

-

98 DEFAULT_working_dirs = [ 

-

99 DEFAULT_output, DEFAULT_configuration_output, 

-

100 DEFAULT_selection_output, DEFAULT_validation_output, 

-

101 DEFAULT_tmp_output, 

-

102 DEFAULT_log_output, 

-

103 DEFAULT_solver_dir, DEFAULT_instance_dir, 

-

104 DEFAULT_feature_data, DEFAULT_performance_data, 

-

105 DEFAULT_extractor_dir, 

-

106 ] 

-

107 

-

108 # Old default file paths from GV which should be turned into variables 

-

109 DEFAULT_feature_data_path =\ 

-

110 DEFAULT_feature_data / "feature_data.csv" 

-

111 DEFAULT_performance_data_path =\ 

-

112 DEFAULT_performance_data / "performance_data.csv" 

-

113 

-

114 # Constant default values 

-

115 DEFAULT_general_sparkle_objective = PAR(10) 

-

116 DEFAULT_general_sparkle_configurator = cim.SMAC2.__name__ 

-

117 DEFAULT_general_solution_verifier = str(None) 

-

118 DEFAULT_general_target_cutoff_time = 60 

-

119 DEFAULT_general_extractor_cutoff_time = 60 

-

120 DEFAULT_number_of_jobs_in_parallel = 25 

-

121 DEFAULT_general_verbosity = VerbosityLevel.STANDARD 

-

122 DEFAULT_general_check_interval = 10 

-

123 

-

124 DEFAULT_config_wallclock_time = 600 

-

125 DEFAULT_config_cpu_time = None 

-

126 DEFAULT_config_solver_calls = None 

-

127 DEFAULT_config_number_of_runs = 25 

-

128 DEFAULT_configurator_target_cutoff_length = "max" 

-

129 

-

130 DEFAULT_portfolio_construction_timeout = None 

-

131 

-

132 DEFAULT_slurm_max_parallel_runs_per_node = 8 

-

133 

-

134 DEFAULT_ablation_racing = False 

-

135 

-

136 DEFAULT_parallel_portfolio_check_interval = 4 

-

137 DEFAULT_parallel_portfolio_num_seeds_per_solver = 1 

-

138 

-

139 def __init__(self: Settings, file_path: PurePath = None) -> None: 

-

140 """Initialise a settings object.""" 

-

141 # Settings 'dictionary' in configparser format 

-

142 self.__settings = configparser.ConfigParser() 

-

143 

-

144 # Setting flags 

-

145 self.__general_sparkle_objective_set = SettingState.NOT_SET 

-

146 self.__general_sparkle_configurator_set = SettingState.NOT_SET 

-

147 self.__general_sparkle_selector_set = SettingState.NOT_SET 

-

148 self.__general_solution_verifier_set = SettingState.NOT_SET 

-

149 self.__general_target_cutoff_time_set = SettingState.NOT_SET 

-

150 self.__general_extractor_cutoff_time_set = SettingState.NOT_SET 

-

151 self.__general_verbosity_set = SettingState.NOT_SET 

-

152 self.__general_check_interval_set = SettingState.NOT_SET 

-

153 

-

154 self.__config_wallclock_time_set = SettingState.NOT_SET 

-

155 self.__config_cpu_time_set = SettingState.NOT_SET 

-

156 self.__config_solver_calls_set = SettingState.NOT_SET 

-

157 self.__config_number_of_runs_set = SettingState.NOT_SET 

-

158 

-

159 self.__run_on_set = SettingState.NOT_SET 

-

160 self.__number_of_jobs_in_parallel_set = SettingState.NOT_SET 

-

161 self.__slurm_max_parallel_runs_per_node_set = SettingState.NOT_SET 

-

162 self.__configurator_target_cutoff_length_set = SettingState.NOT_SET 

-

163 self.__slurm_extra_options_set = dict() 

-

164 self.__ablation_racing_flag_set = SettingState.NOT_SET 

-

165 

-

166 self.__parallel_portfolio_check_interval_set = SettingState.NOT_SET 

-

167 self.__parallel_portfolio_num_seeds_per_solver_set = SettingState.NOT_SET 

-

168 

-

169 self.__general_sparkle_configurator = None 

-

170 

-

171 if file_path is None: 

-

172 # Initialise settings from default file path 

-

173 self.read_settings_ini() 

-

174 else: 

-

175 # Initialise settings from a given file path 

-

176 self.read_settings_ini(file_path) 

-

177 

-

178 def read_settings_ini(self: Settings, file_path: PurePath = DEFAULT_settings_path, 

-

179 state: SettingState = SettingState.FILE) -> None: 

-

180 """Read the settings from an INI file.""" 

-

181 # Read file 

-

182 file_settings = configparser.ConfigParser() 

-

183 file_settings.read(file_path) 

-

184 

-

185 # Set internal settings based on data read from FILE if they were read 

-

186 # successfully 

-

187 if file_settings.sections() != []: 

-

188 section = "general" 

-

189 option_names = ("objective", ) 

-

190 for option in option_names: 

-

191 if file_settings.has_option(section, option): 

-

192 value = [resolve_objective(obj) for obj in 

-

193 file_settings.get(section, option).split(",")] 

-

194 self.set_general_sparkle_objectives(value, state) 

-

195 file_settings.remove_option(section, option) 

-

196 

-

197 # Comma so python understands it's a tuple... 

-

198 option_names = ("configurator", ) 

-

199 for option in option_names: 

-

200 if file_settings.has_option(section, option): 

-

201 value = file_settings.get(section, option) 

-

202 self.set_general_sparkle_configurator(value, state) 

-

203 file_settings.remove_option(section, option) 

-

204 

-

205 option_names = ("selector", ) 

-

206 for option in option_names: 

-

207 if file_settings.has_option(section, option): 

-

208 value = file_settings.get(section, option) 

-

209 self.set_general_sparkle_selector(value, state) 

-

210 file_settings.remove_option(section, option) 

-

211 

-

212 option_names = ("solution_verifier", ) 

-

213 for option in option_names: 

-

214 if file_settings.has_option(section, option): 

-

215 value = file_settings.get(section, option).lower() 

-

216 self.set_general_solution_verifier(value, state) 

-

217 file_settings.remove_option(section, option) 

-

218 

-

219 option_names = ("target_cutoff_time", 

-

220 "cutoff_time_each_solver_call") 

-

221 for option in option_names: 

-

222 if file_settings.has_option(section, option): 

-

223 value = file_settings.getint(section, option) 

-

224 self.set_general_target_cutoff_time(value, state) 

-

225 file_settings.remove_option(section, option) 

-

226 

-

227 option_names = ("extractor_cutoff_time", 

-

228 "cutoff_time_each_feature_computation") 

-

229 for option in option_names: 

-

230 if file_settings.has_option(section, option): 

-

231 value = file_settings.getint(section, option) 

-

232 self.set_general_extractor_cutoff_time(value, state) 

-

233 file_settings.remove_option(section, option) 

-

234 

-

235 option_names = ("run_on", ) 

-

236 for option in option_names: 

-

237 if file_settings.has_option(section, option): 

-

238 value = file_settings.get(section, option) 

-

239 self.set_run_on(value, state) 

-

240 file_settings.remove_option(section, option) 

-

241 

-

242 option_names = ("verbosity", ) 

-

243 for option in option_names: 

-

244 if file_settings.has_option(section, option): 

-

245 value = VerbosityLevel.from_string( 

-

246 file_settings.get(section, option)) 

-

247 self.set_general_verbosity(value, state) 

-

248 file_settings.remove_option(section, option) 

-

249 

-

250 option_names = ("check_interval", ) 

-

251 for option in option_names: 

-

252 if file_settings.has_option(section, option): 

-

253 value = int(file_settings.get(section, option)) 

-

254 self.set_general_check_interval(value, state) 

-

255 file_settings.remove_option(section, option) 

-

256 

-

257 section = "configuration" 

-

258 option_names = ("wallclock_time", ) 

-

259 for option in option_names: 

-

260 if file_settings.has_option(section, option): 

-

261 value = file_settings.getint(section, option) 

-

262 self.set_config_wallclock_time(value, state) 

-

263 file_settings.remove_option(section, option) 

-

264 

-

265 option_names = ("cpu_time", ) 

-

266 for option in option_names: 

-

267 if file_settings.has_option(section, option): 

-

268 value = file_settings.getint(section, option) 

-

269 self.set_config_cpu_time(value, state) 

-

270 file_settings.remove_option(section, option) 

-

271 

-

272 option_names = ("solver_calls", ) 

-

273 for option in option_names: 

-

274 if file_settings.has_option(section, option): 

-

275 value = file_settings.getint(section, option) 

-

276 self.set_config_solver_calls(value, state) 

-

277 file_settings.remove_option(section, option) 

-

278 

-

279 option_names = ("number_of_runs", ) 

-

280 for option in option_names: 

-

281 if file_settings.has_option(section, option): 

-

282 value = file_settings.getint(section, option) 

-

283 self.set_config_number_of_runs(value, state) 

-

284 file_settings.remove_option(section, option) 

-

285 

-

286 option_names = ("target_cutoff_length", "smac_each_run_cutoff_length") 

-

287 for option in option_names: 

-

288 if file_settings.has_option(section, option): 

-

289 value = file_settings.get(section, option) 

-

290 self.set_configurator_target_cutoff_length(value, state) 

-

291 file_settings.remove_option(section, option) 

-

292 

-

293 section = "slurm" 

-

294 option_names = ("number_of_jobs_in_parallel", "num_job_in_parallel") 

-

295 for option in option_names: 

-

296 if file_settings.has_option(section, option): 

-

297 value = file_settings.getint(section, option) 

-

298 self.set_number_of_jobs_in_parallel(value, state) 

-

299 file_settings.remove_option(section, option) 

-

300 

-

301 option_names = ("max_parallel_runs_per_node", "clis_per_node") 

-

302 for option in option_names: 

-

303 if file_settings.has_option(section, option): 

-

304 value = file_settings.getint(section, option) 

-

305 self.set_slurm_max_parallel_runs_per_node(value, state) 

-

306 file_settings.remove_option(section, option) 

-

307 

-

308 section = "ablation" 

-

309 option_names = ("racing", "ablation_racing") 

-

310 for option in option_names: 

-

311 if file_settings.has_option(section, option): 

-

312 value = file_settings.getboolean(section, option) 

-

313 self.set_ablation_racing_flag(value, state) 

-

314 file_settings.remove_option(section, option) 

-

315 

-

316 section = "parallel_portfolio" 

-

317 option_names = ("check_interval", ) 

-

318 for option in option_names: 

-

319 if file_settings.has_option(section, option): 

-

320 value = int(file_settings.get(section, option)) 

-

321 self.set_parallel_portfolio_check_interval(value, state) 

-

322 file_settings.remove_option(section, option) 

-

323 

-

324 option_names = ("num_seeds_per_solver", ) 

-

325 for option in option_names: 

-

326 if file_settings.has_option(section, option): 

-

327 value = int(file_settings.get(section, option)) 

-

328 self.set_parallel_portfolio_number_of_seeds_per_solver(value, state) 

-

329 file_settings.remove_option(section, option) 

-

330 

-

331 # TODO: Report on any unknown settings that were read 

-

332 sections = file_settings.sections() 

-

333 

-

334 for section in sections: 

-

335 for option in file_settings[section]: 

-

336 # TODO: Should check the options are valid Slurm options 

-

337 if section == "slurm": 

-

338 value = file_settings.get(section, option) 

-

339 self.add_slurm_extra_option(option, value, state) 

-

340 else: 

-

341 print(f'Unrecognised section - option combination: "{section} ' 

-

342 f'{option}" in file {file_path} ignored') 

-

343 

-

344 # Print error if unable to read the settings 

-

345 else: 

-

346 print(f"ERROR: Failed to read settings from {file_path} The file may have " 

-

347 "been empty, located in a different path, or be in another format than" 

-

348 " INI. Default Settings values be used.") 

-

349 

-

350 def write_used_settings(self: Settings) -> None: 

-

351 """Write the used settings to the default locations.""" 

-

352 # Write to latest settings file 

-

353 self.write_settings_ini(self.__settings_dir / "latest.ini") 

-

354 

-

355 def write_settings_ini(self: Settings, file_path: Path) -> None: 

-

356 """Write the settings to an INI file.""" 

-

357 # Create needed directories if they don't exist 

-

358 file_path.parent.mkdir(parents=True, exist_ok=True) 

-

359 slurm_extra_section_options = None 

-

360 if self.__settings.has_section("slurm_extra"): 

-

361 # Slurm extra options are not written as a seperate section 

-

362 slurm_extra_section_options = {} 

-

363 for key in self.__settings["slurm_extra"]: 

-

364 self.__settings["slurm"][key] = self.__settings["slurm_extra"][key] 

-

365 slurm_extra_section_options[key] = self.__settings["slurm_extra"][key] 

-

366 self.__settings.remove_section("slurm_extra") 

-

367 # Write the settings to file 

-

368 with file_path.open("w") as settings_file: 

-

369 self.__settings.write(settings_file) 

-

370 # Rebuild slurm extra if needed 

-

371 if slurm_extra_section_options is not None: 

-

372 self.__settings.add_section("slurm_extra") 

-

373 for key in slurm_extra_section_options: 

-

374 self.__settings["slurm_extra"][key] = slurm_extra_section_options[key] 

-

375 

-

376 def __init_section(self: Settings, section: str) -> None: 

-

377 if section not in self.__settings: 

-

378 self.__settings[section] = {} 

-

379 

-

380 @staticmethod 

-

381 def __check_setting_state(current_state: SettingState, 

-

382 new_state: SettingState, name: str) -> bool: 

-

383 change_setting_ok = True 

-

384 

-

385 if current_state == SettingState.FILE and new_state == SettingState.DEFAULT: 

-

386 change_setting_ok = False 

-

387 print(f"Warning: Attempting to overwrite setting for {name} with default " 

-

388 "value; keeping the value read from file!") 

-

389 elif (current_state == SettingState.CMD_LINE 

-

390 and new_state == SettingState.DEFAULT): 

-

391 change_setting_ok = False 

-

392 print(f"Warning: Attempting to overwrite setting for {name} with default " 

-

393 "value; keeping the value read from command line!") 

-

394 elif current_state == SettingState.CMD_LINE and new_state == SettingState.FILE: 

-

395 change_setting_ok = False 

-

396 print(f"Warning: Attempting to overwrite setting for {name} with value from " 

-

397 "file; keeping the value read from command line!") 

-

398 

-

399 return change_setting_ok 

-

400 

-
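The state check above gives command-line values priority over file values, and both priority over defaults. A short sketch of the resulting behaviour, using the setter and getter defined below (the warning text comes from __check_setting_state):

    settings = Settings()  # reads Settings/sparkle_settings.ini if present
    settings.set_general_target_cutoff_time(120, SettingState.CMD_LINE)
    # A later attempt to set the same option from a file is refused with a warning:
    settings.set_general_target_cutoff_time(90, SettingState.FILE)
    # "Warning: Attempting to overwrite setting for target_cutoff_time with value
    #  from file; keeping the value read from command line!"
    assert settings.get_general_target_cutoff_time() == 120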

    # General settings ###
    def set_general_sparkle_objectives(
            self: Settings,
            value: list[SparkleObjective] = [DEFAULT_general_sparkle_objective, ],
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the Sparkle objectives."""
        section = "general"
        name = "objective"
        if value is not None and self.__check_setting_state(
                self.__general_sparkle_objective_set, origin, name):
            if isinstance(value, list):
                value = ",".join([str(obj) for obj in value])
            else:
                value = str(value)
            # Append the standard Sparkle objectives
            if "status" not in value:
                value += ",status"
            if "cpu_time" not in value:
                value += ",cpu_time"
            if "wall_time" not in value:
                value += ",wall_time"
            if "memory" not in value:
                value += ",memory"
            self.__init_section(section)
            self.__general_sparkle_objective_set = origin
            self.__settings[section][name] = value

    def get_general_sparkle_objectives(self: Settings) -> list[SparkleObjective]:
        """Return the Sparkle objectives."""
        if self.__general_sparkle_objective_set == SettingState.NOT_SET:
            self.set_general_sparkle_objectives()

        return [resolve_objective(obj)
                for obj in self.__settings["general"]["objective"].split(",")]

    def set_general_sparkle_configurator(
            self: Settings,
            value: str = DEFAULT_general_sparkle_configurator,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the Sparkle configurator."""
        section = "general"
        name = "configurator"
        if value is not None and self.__check_setting_state(
                self.__general_sparkle_configurator_set, origin, name):
            self.__init_section(section)
            self.__general_sparkle_configurator_set = origin
            self.__settings[section][name] = value

    def get_general_sparkle_configurator(self: Settings) -> Configurator:
        """Return the configurator instance."""
        if self.__general_sparkle_configurator_set == SettingState.NOT_SET:
            self.set_general_sparkle_configurator()
        if self.__general_sparkle_configurator is None:
            configurator_subclass =\
                cim.resolve_configurator(self.__settings["general"]["configurator"])
            if configurator_subclass is not None:
                self.__general_sparkle_configurator = configurator_subclass(
                    objectives=self.get_general_sparkle_objectives(),
                    base_dir=Settings.DEFAULT_tmp_output,
                    output_path=Settings.DEFAULT_configuration_output_raw)
            else:
                print("WARNING: Configurator class name not recognised: "
                      f'{self.__settings["general"]["configurator"]}. '
                      "Configurator not set.")
        return self.__general_sparkle_configurator

    def set_general_sparkle_selector(
            self: Settings,
            value: Path = DEFAULT_general_sparkle_selector,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the Sparkle selector."""
        section = "general"
        name = "selector"
        if value is not None and self.__check_setting_state(
                self.__general_sparkle_selector_set, origin, name):
            self.__init_section(section)
            self.__general_sparkle_selector_set = origin
            self.__settings[section][name] = str(value)

    def get_general_sparkle_selector(self: Settings) -> Selector:
        """Return the Sparkle selector."""
        if self.__general_sparkle_selector_set == SettingState.NOT_SET:
            self.set_general_sparkle_selector()
        return Selector(Path(self.__settings["general"]["selector"]),
                        self.DEFAULT_selection_output_raw)

    def set_general_solution_verifier(
            self: Settings, value: str = DEFAULT_general_solution_verifier,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the solution verifier to use."""
        section = "general"
        name = "solution_verifier"

        if value is not None and self.__check_setting_state(
                self.__general_solution_verifier_set, origin, name):
            self.__init_section(section)
            self.__general_solution_verifier_set = origin
            self.__settings[section][name] = value

    def get_general_solution_verifier(self: Settings) -> object:
        """Return the solution verifier to use."""
        if self.__general_solution_verifier_set == SettingState.NOT_SET:
            self.set_general_solution_verifier()
        name = self.__settings["general"]["solution_verifier"].lower()
        if name == str(SATVerifier()).lower():
            return SATVerifier()
        return None

    def set_general_target_cutoff_time(
            self: Settings, value: int = DEFAULT_general_target_cutoff_time,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the cutoff time in seconds for target algorithms."""
        section = "general"
        name = "target_cutoff_time"

        if value is not None and self.__check_setting_state(
                self.__general_target_cutoff_time_set, origin, name):
            self.__init_section(section)
            self.__general_target_cutoff_time_set = origin
            self.__settings[section][name] = str(value)

    def get_general_target_cutoff_time(self: Settings) -> int:
        """Return the cutoff time in seconds for target algorithms."""
        if self.__general_target_cutoff_time_set == SettingState.NOT_SET:
            self.set_general_target_cutoff_time()
        return int(self.__settings["general"]["target_cutoff_time"])

    def set_general_extractor_cutoff_time(
            self: Settings, value: int = DEFAULT_general_extractor_cutoff_time,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the cutoff time in seconds for feature extraction."""
        section = "general"
        name = "extractor_cutoff_time"

        if value is not None and self.__check_setting_state(
                self.__general_extractor_cutoff_time_set, origin, name):
            self.__init_section(section)
            self.__general_extractor_cutoff_time_set = origin
            self.__settings[section][name] = str(value)

    def get_general_extractor_cutoff_time(self: Settings) -> int:
        """Return the cutoff time in seconds for feature extraction."""
        if self.__general_extractor_cutoff_time_set == SettingState.NOT_SET:
            self.set_general_extractor_cutoff_time()
        return int(self.__settings["general"]["extractor_cutoff_time"])

    def set_number_of_jobs_in_parallel(
            self: Settings, value: int = DEFAULT_number_of_jobs_in_parallel,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the number of runs Sparkle can do in parallel."""
        section = "slurm"
        name = "number_of_jobs_in_parallel"

        if value is not None and self.__check_setting_state(
                self.__number_of_jobs_in_parallel_set, origin, name):
            self.__init_section(section)
            self.__number_of_jobs_in_parallel_set = origin
            self.__settings[section][name] = str(value)

    def get_number_of_jobs_in_parallel(self: Settings) -> int:
        """Return the number of runs Sparkle can do in parallel."""
        if self.__number_of_jobs_in_parallel_set == SettingState.NOT_SET:
            self.set_number_of_jobs_in_parallel()

        return int(self.__settings["slurm"]["number_of_jobs_in_parallel"])

    def set_general_verbosity(
            self: Settings, value: VerbosityLevel = DEFAULT_general_verbosity,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the general verbosity to use."""
        section = "general"
        name = "verbosity"

        if value is not None and self.__check_setting_state(
                self.__general_verbosity_set, origin, name):
            self.__init_section(section)
            self.__general_verbosity_set = origin
            self.__settings[section][name] = value.name

    def get_general_verbosity(self: Settings) -> VerbosityLevel:
        """Return the general verbosity."""
        if self.__general_verbosity_set == SettingState.NOT_SET:
            self.set_general_verbosity()

        return VerbosityLevel.from_string(
            self.__settings["general"]["verbosity"])

    def set_general_check_interval(
            self: Settings,
            value: int = DEFAULT_general_check_interval,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the general check interval."""
        section = "general"
        name = "check_interval"

        if value is not None and self.__check_setting_state(
                self.__general_check_interval_set, origin, name):
            self.__init_section(section)
            self.__general_check_interval_set = origin
            self.__settings[section][name] = str(value)

    def get_general_check_interval(self: Settings) -> int:
        """Return the general check interval."""
        if self.__general_check_interval_set == SettingState.NOT_SET:
            self.set_general_check_interval()

        return int(
            self.__settings["general"]["check_interval"])

    # Configuration settings ###

    def set_config_wallclock_time(
            self: Settings, value: int = DEFAULT_config_wallclock_time,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the budget per configuration run in seconds (wallclock)."""
        section = "configuration"
        name = "wallclock_time"

        if value is not None and self.__check_setting_state(
                self.__config_wallclock_time_set, origin, name):
            self.__init_section(section)
            self.__config_wallclock_time_set = origin
            self.__settings[section][name] = str(value)

    def get_config_wallclock_time(self: Settings) -> int:
        """Return the budget per configuration run in seconds (wallclock)."""
        if self.__config_wallclock_time_set == SettingState.NOT_SET:
            self.set_config_wallclock_time()
        return int(self.__settings["configuration"]["wallclock_time"])

    def set_config_cpu_time(
            self: Settings, value: int = DEFAULT_config_cpu_time,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the budget per configuration run in seconds (CPU)."""
        section = "configuration"
        name = "cpu_time"

        if value is not None and self.__check_setting_state(
                self.__config_cpu_time_set, origin, name):
            self.__init_section(section)
            self.__config_cpu_time_set = origin
            self.__settings[section][name] = str(value)

    def get_config_cpu_time(self: Settings) -> int | None:
        """Return the budget per configuration run in seconds (CPU)."""
        if self.__config_cpu_time_set == SettingState.NOT_SET:
            self.set_config_cpu_time()
            return None

        return int(self.__settings["configuration"]["cpu_time"])

    def set_config_solver_calls(
            self: Settings, value: int = DEFAULT_config_solver_calls,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the number of solver calls."""
        section = "configuration"
        name = "solver_calls"

        if value is not None and self.__check_setting_state(
                self.__config_solver_calls_set, origin, name):
            self.__init_section(section)
            self.__config_solver_calls_set = origin
            self.__settings[section][name] = str(value)

    def get_config_solver_calls(self: Settings) -> int | None:
        """Return the number of solver calls."""
        if self.__config_solver_calls_set == SettingState.NOT_SET:
            self.set_config_solver_calls()
            return None

        return int(self.__settings["configuration"]["solver_calls"])

    def set_config_number_of_runs(
            self: Settings, value: int = DEFAULT_config_number_of_runs,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the number of configuration runs."""
        section = "configuration"
        name = "number_of_runs"

        if value is not None and self.__check_setting_state(
                self.__config_number_of_runs_set, origin, name):
            self.__init_section(section)
            self.__config_number_of_runs_set = origin
            self.__settings[section][name] = str(value)

    def get_config_number_of_runs(self: Settings) -> int:
        """Return the number of configuration runs."""
        if self.__config_number_of_runs_set == SettingState.NOT_SET:
            self.set_config_number_of_runs()

        return int(self.__settings["configuration"]["number_of_runs"])

    # Configuration: SMAC-specific settings ###

    def set_configurator_target_cutoff_length(
            self: Settings, value: str = DEFAULT_configurator_target_cutoff_length,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the target algorithm cutoff length."""
        section = "configuration"
        name = "target_cutoff_length"

        if value is not None and self.__check_setting_state(
                self.__configurator_target_cutoff_length_set, origin, name):
            self.__init_section(section)
            self.__configurator_target_cutoff_length_set = origin
            self.__settings[section][name] = str(value)

    def get_configurator_target_cutoff_length(self: Settings) -> str:
        """Return the target algorithm cutoff length."""
        if self.__configurator_target_cutoff_length_set == SettingState.NOT_SET:
            self.set_configurator_target_cutoff_length()
        return self.__settings["configuration"]["target_cutoff_length"]

    # Slurm settings ###

    def set_slurm_max_parallel_runs_per_node(
            self: Settings,
            value: int = DEFAULT_slurm_max_parallel_runs_per_node,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the number of algorithms Slurm can run in parallel per node."""
        section = "slurm"
        name = "max_parallel_runs_per_node"

        if value is not None and self.__check_setting_state(
                self.__slurm_max_parallel_runs_per_node_set, origin, name):
            self.__init_section(section)
            self.__slurm_max_parallel_runs_per_node_set = origin
            self.__settings[section][name] = str(value)

    def get_slurm_max_parallel_runs_per_node(self: Settings) -> int:
        """Return the number of algorithms Slurm can run in parallel per node."""
        if self.__slurm_max_parallel_runs_per_node_set == SettingState.NOT_SET:
            self.set_slurm_max_parallel_runs_per_node()

        return int(self.__settings["slurm"]["max_parallel_runs_per_node"])

    # Slurm extra options

    def add_slurm_extra_option(self: Settings, name: str, value: str,
                               origin: SettingState = SettingState.DEFAULT) -> None:
        """Add additional Slurm options."""
        section = "slurm_extra"

        current_state = (self.__slurm_extra_options_set[name]
                         if name in self.__slurm_extra_options_set
                         else SettingState.NOT_SET)

        if value is not None and self.__check_setting_state(current_state, origin, name):
            self.__init_section(section)
            self.__slurm_extra_options_set[name] = origin
            self.__settings[section][name] = str(value)

    def get_slurm_extra_options(self: Settings,
                                as_args: bool = False) -> dict | list:
        """Return a dict with additional Slurm options."""
        section = "slurm_extra"
        options = dict()

        if "slurm_extra" in self.__settings.sections():
            for option in self.__settings["slurm_extra"]:
                options[option] = self.__settings.get(section, option)
        if as_args:
            return [f"--{key}={options[key]}" for key in options.keys()]
        return options

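For example, unknown options found in the [slurm] section of the INI file end up here and can be rendered as sbatch-style arguments; the option names below are illustrative:

    settings.add_slurm_extra_option("partition", "cpu-short", SettingState.FILE)
    settings.add_slurm_extra_option("exclude", "node[01-04]", SettingState.FILE)
    print(settings.get_slurm_extra_options(as_args=True))
    # ['--partition=cpu-short', '--exclude=node[01-04]']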

766 # Ablation settings ### 

-

767 

-

768 def set_ablation_racing_flag(self: Settings, value: bool = DEFAULT_ablation_racing, 

-

769 origin: SettingState = SettingState.DEFAULT) -> None: 

-

770 """Set a flag indicating whether racing should be used for ablation.""" 

-

771 section = "ablation" 

-

772 name = "racing" 

-

773 

-

774 if value is not None and self.__check_setting_state( 

-

775 self.__ablation_racing_flag_set, origin, name): 

-

776 self.__init_section(section) 

-

777 self.__ablation_racing_flag_set = origin 

-

778 self.__settings[section][name] = str(value) 

-

779 

-

780 def get_ablation_racing_flag(self: Settings) -> bool: 

-

781 """Return a bool indicating whether the racing flag is set for ablation.""" 

-

        if self.__ablation_racing_flag_set == SettingState.NOT_SET:
            self.set_ablation_racing_flag()

        return bool(self.__settings["ablation"]["racing"])

    # Parallel Portfolio settings

    def set_parallel_portfolio_check_interval(
            self: Settings,
            value: int = DEFAULT_parallel_portfolio_check_interval,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the parallel portfolio check interval."""
        section = "parallel_portfolio"
        name = "check_interval"

        if value is not None and self.__check_setting_state(
                self.__parallel_portfolio_check_interval_set, origin, name):
            self.__init_section(section)
            self.__parallel_portfolio_check_interval_set = origin
            self.__settings[section][name] = str(value)

    def get_parallel_portfolio_check_interval(self: Settings) -> int:
        """Return the parallel portfolio check interval."""
        if self.__parallel_portfolio_check_interval_set == SettingState.NOT_SET:
            self.set_parallel_portfolio_check_interval()

        return int(
            self.__settings["parallel_portfolio"]["check_interval"])

    def set_parallel_portfolio_number_of_seeds_per_solver(
            self: Settings,
            value: int = DEFAULT_parallel_portfolio_num_seeds_per_solver,
            origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the number of seeds per solver to start for the parallel portfolio."""
        section = "parallel_portfolio"
        name = "num_seeds_per_solver"

        if value is not None and self.__check_setting_state(
                self.__parallel_portfolio_num_seeds_per_solver_set, origin, name):
            self.__init_section(section)
            self.__parallel_portfolio_num_seeds_per_solver_set = origin
            self.__settings[section][name] = str(value)

    def get_parallel_portfolio_number_of_seeds_per_solver(self: Settings) -> int:
        """Return the number of seeds per solver to start for the parallel portfolio."""
        if self.__parallel_portfolio_num_seeds_per_solver_set == SettingState.NOT_SET:
            self.set_parallel_portfolio_number_of_seeds_per_solver()

        return int(
            self.__settings["parallel_portfolio"]["num_seeds_per_solver"])

    def set_run_on(self: Settings, value: Runner = str,
                   origin: SettingState = SettingState.DEFAULT) -> None:
        """Set the compute environment on which to run."""
        section = "general"
        name = "run_on"

        if value is not None and self.__check_setting_state(
                self.__run_on_set, origin, name):
            self.__init_section(section)
            self.__run_on_set = origin
            self.__settings[section][name] = value

    def get_run_on(self: Settings) -> Runner:
        """Return the compute environment on which to run."""
        return Runner(self.__settings["general"]["run_on"])

    @staticmethod
    def check_settings_changes(cur_settings: Settings, prev_settings: Settings) -> bool:
        """Check if there are changes between the previous and the current settings.

        Prints any changed sections and options; a value of None means the
        setting was not present in one of the two settings objects.

        Args:
            cur_settings: The current settings
            prev_settings: The previous settings

        Returns:
            True iff there are no changes.
        """
        cur_dict = cur_settings.__settings._sections
        prev_dict = prev_settings.__settings._sections

        cur_sections_set = set(cur_dict.keys())
        prev_sections_set = set(prev_dict.keys())

        sections_removed = prev_sections_set - cur_sections_set
        if sections_removed:
            print("Warning: the following sections have been removed:")
            for section in sections_removed:
                print(f"  - Section '{section}'")

        sections_added = cur_sections_set - prev_sections_set
        if sections_added:
            print("Warning: the following sections have been added:")
            for section in sections_added:
                print(f"  - Section '{section}'")

        sections_remained = cur_sections_set & prev_sections_set
        option_changed = False
        for section in sections_remained:
            printed_section = False
            names = set(cur_dict[section].keys()) | set(prev_dict[section].keys())
            for name in names:
                # If a name is missing from one of the two dicts, use None as placeholder
                cur_val = cur_dict[section].get(name, None)
                prev_val = prev_dict[section].get(name, None)
                if cur_val != prev_val:
                    # Print the initial warning only once
                    if not option_changed:
                        print("Warning: The following attributes/options have changed:")
                        option_changed = True

                    # Print each section header only once
                    if not printed_section:
                        print(f"  - In the section '{section}':")
                        printed_section = True

                    # Print the actual change
                    print(f"    · '{name}' changed from '{prev_val}' to '{cur_val}'")

        return not (sections_removed or sections_added or option_changed)
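A minimal sketch of how the comparison helper above can be driven; the Settings(<ini path>) constructor call and both paths are assumptions for illustration and are not shown in this patch:

from pathlib import Path
from sparkle.platform.settings_objects import Settings

cur_settings = Settings(Path("Settings/sparkle_settings.ini"))   # hypothetical path
prev_settings = Settings(Path("Output/previous_settings.ini"))   # hypothetical path
if not Settings.check_settings_changes(cur_settings, prev_settings):
    print("Settings changed since the previous run.")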
diff --git a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7___init___py.html b/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7___init___py.html
deleted file mode 100644
index 240eab410..000000000
--- a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7___init___py.html
+++ /dev/null
@@ -1,98 +0,0 @@
[Deleted: generated coverage.py v7.6.1 HTML report for sparkle/CLI/help/__init__.py (100% coverage, 0 statements), created at 2024-09-27 09:26 +0200. The module source rendered in the report, stripped of HTML markup and line numbers:]

"""Init for CLI help package."""
diff --git a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_argparse_custom_py.html b/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_argparse_custom_py.html
deleted file mode 100644
index c6dd62679..000000000
--- a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_argparse_custom_py.html
+++ /dev/null
@@ -1,567 +0,0 @@
[Deleted: generated coverage.py v7.6.1 HTML report for sparkle/CLI/help/argparse_custom.py (84% coverage, 97 statements). The rendered module source follows:]

"""Custom helper class and functions to process CLI arguments with argparse."""

from __future__ import annotations
import argparse
import enum
from pathlib import Path
from typing import Any

from runrunner.base import Runner

from sparkle.platform.settings_objects import SettingState, Settings


class SetByUser(argparse.Action):
    """Possible action to execute for CLI argument."""

    def __call__(self: SetByUser, parser: argparse.ArgumentParser,
                 namespace: argparse.Namespace, values: str,
                 option_string: str = None) -> None:
        """Set attributes when called."""
        setattr(namespace, self.dest, values)
        setattr(namespace, self.dest + "_nondefault", True)


# Taken from https://stackoverflow.com/a/60750535
class EnumAction(argparse.Action):
    """Argparse action for handling Enums."""
    def __init__(self: EnumAction, **kwargs: str) -> None:
        """Initialise the EnumAction."""
        # Pop off the type value
        enum_type = kwargs.pop("type", None)

        # Ensure an Enum subclass is provided
        if enum_type is None:
            raise ValueError("type must be assigned an Enum when using EnumAction")
        if not issubclass(enum_type, enum.Enum):
            raise TypeError("type must be an Enum when using EnumAction")

        # Generate choices from the Enum
        kwargs.setdefault("choices", tuple(e.value for e in enum_type))

        super(EnumAction, self).__init__(**kwargs)

        self._enum = enum_type

    def __call__(self: EnumAction, parser: argparse.ArgumentParser,
                 namespace: argparse.Namespace, values: str,
                 option_string: str = None) -> None:
        """Convert the value back to an Enum."""
        value = self._enum(values)
        setattr(namespace, self.dest, value)


def user_set_state(args: argparse.Namespace, arg_name: str) -> SettingState:
    """Return the SettingState of an argument."""
    if hasattr(args, arg_name + "_nondefault"):
        return SettingState.CMD_LINE
    else:
        return SettingState.DEFAULT


def set_by_user(args: argparse.Namespace, arg_name: str) -> bool:
    """Return whether an argument was set through the CLI by the user or not."""
    return hasattr(args, arg_name + "_nondefault")

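# A minimal usage sketch of the two actions above (an illustration only, not
# part of argparse_custom.py; it assumes runrunner's Runner.LOCAL carries the
# string value "local").
sketch_parser = argparse.ArgumentParser()
sketch_parser.add_argument("--run-on", type=Runner, action=EnumAction)
sketch_parser.add_argument("--number-of-runs", type=int, default=25,
                           action=SetByUser)
sketch_args = sketch_parser.parse_args(["--run-on", "local",
                                        "--number-of-runs", "10"])
assert sketch_args.run_on is Runner.LOCAL          # EnumAction converted the string
assert set_by_user(sketch_args, "number_of_runs")  # SetByUser left its marker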

class ArgumentContainer():
    """Helper class for more convenient argument packaging and access."""
    def __init__(self: ArgumentContainer, names: list[str],
                 kwargs: dict[str, Any]) -> None:
        """Create an ArgumentContainer.

        Args:
            names: List of names for the contained argument. For positional
                arguments, this will contain a single string. For optional
                arguments this will typically contain two, the first one
                starting with '-' and the second one starting with '--'.
            kwargs: Keyword arguments needed by the method parser.add_argument
                which adds the contained argument to a parser.
        """
        self.names = names
        self.kwargs = kwargs


AblationArgument = ArgumentContainer(
    names=["--ablation"],
    kwargs={"required": False, "action": "store_true",
            "help": "run ablation after configuration"})

SelectorAblationArgument = ArgumentContainer(
    names=["--solver-ablation"],
    kwargs={"required": False, "action": "store_true",
            "help": "construct a selector for each solver ablation combination"})

ActualMarginalContributionArgument = ArgumentContainer(
    names=["--actual"],
    kwargs={"action": "store_true",
            "help": "compute the marginal contribution for the actual selector"})

AlsoConstructSelectorAndReportArgument = ArgumentContainer(
    names=["--also-construct-selector-and-report"],
    kwargs={"action": "store_true",
            "help": "after running the solvers also construct the selector "
                    "and generate the report"})

CleanupArgumentAll = ArgumentContainer(
    names=["--all"],
    kwargs={"action": "store_true", "help": "clean all output files"})

CleanupArgumentRemove = ArgumentContainer(
    names=["--remove"],
    kwargs={"action": "store_true",
            "help": "remove all files in the platform, including user data "
                    "such as InstanceSets and Solvers"})

ConfiguratorArgument = ArgumentContainer(
    names=["--configurator"],
    kwargs={"type": Path, "help": "path to configurator"})

CPUTimeArgument = ArgumentContainer(
    names=["--cpu-time"],
    kwargs={"type": int,
            "help": "configuration budget per configurator run in seconds (cpu)"})

CutOffTimeArgument = ArgumentContainer(
    names=["--cutoff-time"],
    kwargs={"type": int,
            "help": "The duration the portfolio will run before the solvers "
                    "within the portfolio will be stopped (default: "
                    f"{Settings.DEFAULT_general_target_cutoff_time})"})

DeterministicArgument = ArgumentContainer(
    names=["--deterministic"],
    kwargs={"action": "store_true",
            "help": "Flag indicating the solver is deterministic"})

DownloadExamplesArgument = ArgumentContainer(
    names=["--download-examples"],
    kwargs={"action": argparse.BooleanOptionalAction, "default": False,
            "help": "Download the Examples into the directory."})

ExtractorPathArgument = ArgumentContainer(
    names=["extractor_path"],
    kwargs={"metavar": "extractor-path", "type": str,
            "help": "path or nickname of the feature extractor"})

GenerateJSONArgument = ArgumentContainer(
    names=["--only-json"],
    kwargs={"required": False, "default": False, "type": bool,
            "help": "if set to True, only generate machine readable output"})

InstancePathPositional = ArgumentContainer(
    names=["instance_path"],
    kwargs={"type": Path, "help": "Path to an instance (set)"})

InstancePath = ArgumentContainer(
    names=["--instance-path"],
    kwargs={"type": Path, "help": "Path to an instance (set)"})

InstanceSetTestArgument = ArgumentContainer(
    names=["--instance-set-test"],
    kwargs={"required": False, "type": Path,
            "help": "path to test instance set (only for validating)"})

InstanceSetTrainArgument = ArgumentContainer(
    names=["--instance-set-train"],
    kwargs={"required": True, "type": Path,
            "help": "path to training instance set"})

InstanceSetTestAblationArgument = ArgumentContainer(
    names=["--instance-set-test"],
    kwargs={"required": False, "type": str,
            "help": "path to test instance set"})

InstanceSetTrainAblationArgument = ArgumentContainer(
    names=["--instance-set-train"],
    kwargs={"required": False, "type": str,
            "help": "path to training instance set"})

InstanceSetTestReportArgument = ArgumentContainer(
    names=["--instance-set-test"],
    kwargs={"required": False, "type": str,
            "help": "path to testing instance set included in Sparkle for "
                    "an algorithm configuration report"})

InstanceSetTrainReportArgument = ArgumentContainer(
    names=["--instance-set-train"],
    kwargs={"required": False, "type": str,
            "help": "path to training instance set included in Sparkle for "
                    "an algorithm configuration report"})

InstancesPathArgument = ArgumentContainer(
    names=["instances_path"],
    kwargs={"metavar": "instances-path", "type": str,
            "help": "path to the instance set"})

InstancesPathRemoveArgument = ArgumentContainer(
    names=["instances_path"],
    kwargs={"metavar": "instances-path", "type": str,
            "help": "path to or nickname of the instance set"})

JobIDsArgument = ArgumentContainer(
    names=["--job-ids"],
    kwargs={"required": False, "nargs": "+", "type": str, "default": None,
            "help": "job ID to wait for"})

NicknameFeatureExtractorArgument = ArgumentContainer(
    names=["--nickname"],
    kwargs={"type": str, "help": "set a nickname for the feature extractor"})

NicknameInstanceSetArgument = ArgumentContainer(
    names=["--nickname"],
    kwargs={"type": str, "help": "set a nickname for the instance set"})

NicknamePortfolioArgument = ArgumentContainer(
    names=["--portfolio-name"],
    kwargs={"type": Path,
            "help": "Specify a name of the portfolio. "
                    "If none is given, one will be generated."})

NicknameSolverArgument = ArgumentContainer(
    names=["--nickname"],
    kwargs={"type": str, "help": "set a nickname for the solver"})

NoAblationReportArgument = ArgumentContainer(
    names=["--no-ablation"],
    kwargs={"required": False, "dest": "flag_ablation", "default": True,
            "const": False, "nargs": "?",
            "help": "turn off reporting on ablation for an algorithm "
                    "configuration report"})

NumberOfRunsConfigurationArgument = ArgumentContainer(
    names=["--number-of-runs"],
    kwargs={"type": int, "help": "number of configuration runs to execute"})

NumberOfRunsAblationArgument = ArgumentContainer(
    names=["--number-of-runs"],
    kwargs={"type": int, "default": Settings.DEFAULT_config_number_of_runs,
            "action": SetByUser,
            "help": "Number of configuration runs to execute"})

PerfectSelectorMarginalContributionArgument = ArgumentContainer(
    names=["--perfect"],
    kwargs={"action": "store_true",
            "help": "compute the marginal contribution for the perfect selector"})

RacingArgument = ArgumentContainer(
    names=["--racing"],
    kwargs={"type": bool, "default": Settings.DEFAULT_ablation_racing,
            "action": SetByUser,
            "help": "Performs ablation analysis with racing"})

RecomputeFeaturesArgument = ArgumentContainer(
    names=["--recompute"],
    kwargs={"action": "store_true",
            "help": "Re-run feature extractor for instances with previously "
                    "computed features"})

RecomputeMarginalContributionArgument = ArgumentContainer(
    names=["--recompute"],
    kwargs={"action": "store_true",
            "help": "force marginal contribution to be recomputed even when "
                    "it already exists in file for the current selector"})

RecomputeMarginalContributionForSelectorArgument = ArgumentContainer(
    names=["--recompute-marginal-contribution"],
    kwargs={"action": "store_true",
            "help": "force marginal contribution to be recomputed even when "
                    "it already exists in file for the current selector"})

RecomputePortfolioSelectorArgument = ArgumentContainer(
    names=["--recompute-portfolio-selector"],
    kwargs={"action": "store_true",
            "help": "force the construction of a new portfolio selector even "
                    "when it already exists for the current feature and "
                    "performance data. NOTE: This will also result in the "
                    "computation of the marginal contributions of solvers to "
                    "the new portfolio selector."})

RecomputeRunSolversArgument = ArgumentContainer(
    names=["--recompute"],
    kwargs={"action": "store_true",
            "help": "recompute the performance of all solvers on all instances"})

RunExtractorNowArgument = ArgumentContainer(
    names=["--run-extractor-now"],
    kwargs={"default": False, "action": "store_true",
            "help": "immediately run the feature extractor(s) on all the instances"})

RunOnArgument = ArgumentContainer(
    names=["--run-on"],
    kwargs={"type": Runner, "choices": [Runner.LOCAL, Runner.SLURM],
            "action": EnumAction,
            "help": "On which computer or cluster environment to execute the "
                    "calculation."})

RunSolverNowArgument = ArgumentContainer(
    names=["--run-solver-now"],
    kwargs={"default": False, "action": "store_true",
            "help": "immediately run the solver(s) on all instances"})

SelectionReportArgument = ArgumentContainer(
    names=["--selection"],
    kwargs={"action": "store_true",
            "help": "set to generate a normal selection report"})

SettingsFileArgument = ArgumentContainer(
    names=["--settings-file"],
    kwargs={"type": Path, "default": Settings.DEFAULT_settings_path,
            "action": SetByUser,
            "help": "Specify the settings file to use in case you want to "
                    "use one other than the default"})

SkipChecksArgument = ArgumentContainer(
    names=["--skip-checks"],
    kwargs={"dest": "run_checks", "default": True, "action": "store_false",
            "help": "Checks the solver's functionality by testing it on an "
                    "instance and the pcs file, when applicable."})

SnapshotArgument = ArgumentContainer(
    names=["snapshot_file_path"],
    kwargs={"metavar": "snapshot-file-path", "type": str,
            "help": "path to the snapshot file"})

SolverArgument = ArgumentContainer(
    names=["--solver"],
    kwargs={"required": True, "type": Path, "help": "path to solver"})

SolversArgument = ArgumentContainer(
    names=["--solvers"],
    kwargs={"required": False, "nargs": "+", "type": list[str],
            "help": "Specify the list of solvers to be used. If not "
                    "specified, all solvers known in Sparkle will be used."})

SolverCallsArgument = ArgumentContainer(
    names=["--solver-calls"],
    kwargs={"type": int, "help": "number of solver calls to execute"})

SolverSeedsArgument = ArgumentContainer(
    names=["--solver-seeds"],
    kwargs={"type": int, "help": "number of random seeds per solver to execute"})

SolverRemoveArgument = ArgumentContainer(
    names=["solver"],
    kwargs={"metavar": "solver", "type": str,
            "help": "name, path to or nickname of the solver"})

SolverPathArgument = ArgumentContainer(
    names=["solver_path"],
    kwargs={"metavar": "solver-path", "type": str, "help": "path to the solver"})

SolverReportArgument = ArgumentContainer(
    names=["--solver"],
    kwargs={"required": False, "type": str, "default": None,
            "help": "path to solver for an algorithm configuration report"})

TargetCutOffTimeAblationArgument = ArgumentContainer(
    names=["--target-cutoff-time"],
    kwargs={"type": int, "default": Settings.DEFAULT_general_target_cutoff_time,
            "action": SetByUser,
            "help": "cutoff time per target algorithm run in seconds"})

TargetCutOffTimeConfigurationArgument = ArgumentContainer(
    names=["--target-cutoff-time"],
    kwargs={"type": int,
            "help": "cutoff time per target algorithm run in seconds"})

TargetCutOffTimeRunSolversArgument = ArgumentContainer(
    names=["--target-cutoff-time"],
    kwargs={"type": int,
            "help": "cutoff time per target algorithm run in seconds"})

TargetCutOffTimeValidationArgument = ArgumentContainer(
    names=["--target-cutoff-time"],
    kwargs={"type": int, "default": Settings.DEFAULT_general_target_cutoff_time,
            "action": SetByUser,
            "help": "cutoff time per target algorithm run in seconds"})

TestCaseDirectoryArgument = ArgumentContainer(
    names=["--test-case-directory"],
    kwargs={"type": str, "default": None,
            "help": "Path to test case directory of an instance set for a "
                    "selection report"})

UseFeaturesArgument = ArgumentContainer(
    names=["--use-features"],
    kwargs={"required": False, "action": "store_true",
            "help": "use the training set's features for configuration"})

ValidateArgument = ArgumentContainer(
    names=["--validate"],
    kwargs={"required": False, "action": "store_true",
            "help": "validate after configuration"})

VerboseArgument = ArgumentContainer(
    names=["--verbose", "-v"],
    kwargs={"action": "store_true", "help": "output status in verbose mode"})

WallClockTimeArgument = ArgumentContainer(
    names=["--wallclock-time"],
    kwargs={"type": int,
            "help": "configuration budget per configurator run in seconds "
                    "(wallclock)"})

SelectorTimeoutArgument = ArgumentContainer(
    names=["--selector-timeout"],
    kwargs={"type": int,
            "default": Settings.DEFAULT_portfolio_construction_timeout,
            "help": "Cutoff time (in seconds) for the algorithm selector "
                    "construction"})

SparkleObjectiveArgument = ArgumentContainer(
    names=["--objectives"],
    kwargs={"type": str,
            "help": "the comma separated objective(s) to use."})
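A sketch of how these containers are presumably consumed by Sparkle's CLI commands: the names and kwargs are unpacked straight into argparse. The solver and instance paths below are purely illustrative.

cli_parser = argparse.ArgumentParser()
for container in (SolverArgument, InstanceSetTrainArgument, AblationArgument):
    cli_parser.add_argument(*container.names, **container.kwargs)
cli_args = cli_parser.parse_args(["--solver", "Solvers/MySolver",
                                  "--instance-set-train", "Instances/Train"])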
diff --git a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_global_variables_py.html b/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_global_variables_py.html
deleted file mode 100644
index cd372a389..000000000
--- a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_global_variables_py.html
+++ /dev/null
@@ -1,150 +0,0 @@
[Deleted: generated coverage.py v7.6.1 HTML report for sparkle/CLI/help/global_variables.py (78% coverage, 27 statements). The rendered module source follows:]

#!/usr/bin/env python3
"""Definitions of constants broadly used in Sparkle."""
import ast
from pathlib import Path

from sparkle.CLI.help.reporting_scenario import ReportingScenario
from sparkle.platform.settings_objects import Settings


# TODO: Handle different seed requirements; for the moment this is a dummy function
def get_seed() -> int:
    """Return a seed."""
    return 1


__latest_scenario: ReportingScenario = None


def latest_scenario() -> ReportingScenario:
    """Function to get the global latest scenario object."""
    global __latest_scenario
    if __latest_scenario is None:
        __latest_scenario = ReportingScenario()
    return __latest_scenario


__settings: Settings = None


def settings() -> Settings:
    """Function to get the global settings object."""
    global __settings
    if __settings is None:
        __settings = Settings()
    return __settings


reference_list_dir = Path("Reference_Lists")
reference_list_dir.mkdir(exist_ok=True)
extractor_nickname_list_path = reference_list_dir / "sparkle_extractor_nickname_list.txt"
solver_nickname_list_path = reference_list_dir / "sparkle_solver_nickname_list.txt"
instances_nickname_path = reference_list_dir / "sparkle_instance_nickname_list.txt"

file_storage_data_mapping = {solver_nickname_list_path: {},
                             instances_nickname_path: {},
                             extractor_nickname_list_path: {}}

for data_path in file_storage_data_mapping.keys():
    if data_path.exists():
        with data_path.open("r+") as fo:
            file_storage_data_mapping[data_path] = ast.literal_eval(fo.read())

solver_nickname_mapping = file_storage_data_mapping[solver_nickname_list_path]
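A short sketch of the lazy-singleton pattern above: both accessors memoise a single module-level instance, so every call site within one CLI invocation shares the same object.

from sparkle.CLI.help import global_variables as gv

assert gv.settings() is gv.settings()                # one shared Settings
assert gv.latest_scenario() is gv.latest_scenario()  # one shared ReportingScenario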
diff --git a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_logging_py.html b/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_logging_py.html
deleted file mode 100644
index b7a855e11..000000000
--- a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_logging_py.html
+++ /dev/null
@@ -1,218 +0,0 @@
[Deleted: generated coverage.py v7.6.1 HTML report for sparkle/CLI/help/logging.py (95% coverage, 44 statements). The rendered module source follows:]

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Helper functions to log which output was created by Sparkle where."""
from __future__ import annotations

import time
from pathlib import Path
from pathlib import PurePath

from runrunner.logger import Log as RunRunnerLog

from sparkle.CLI.help import global_variables as gv


# Keep track of which command called Sparkle
global caller
caller: str = "unknown"

# Current caller file path
global caller_log_path
caller_log_path: str | PurePath = "not set"

# Root output directory for the calling command in the form of
# Output/<timestamp>_<command_name>/
global caller_out_dir
caller_out_dir: Path = Path(".")

# Log directory for the calling command in the form of
# Output/<timestamp>_<command_name>/Log/
global caller_log_dir
caller_log_dir: Path = Path(".")


def _update_caller(argv: list[str]) -> None:
    """Update which command is currently active.

    Args:
        argv: List containing the command line arguments derived from sys.argv.
    """
    global caller
    caller = Path(argv[0]).stem


def _update_caller_file_path(timestamp: str) -> None:
    """Create a new file path for the caller with the given timestamp.

    Args:
        timestamp: String representation of the time.
    """
    caller_file = caller + "_main_log.txt"
    caller_dir = Path(timestamp + "_" + caller)
    log_dir = gv.settings().DEFAULT_log_output
    # Set caller directory for other Sparkle functions to use
    global caller_out_dir
    caller_out_dir = Path(caller_dir)
    global caller_log_path
    caller_log_path = PurePath(log_dir / caller_out_dir / caller_file)
    global caller_log_dir
    caller_log_dir = log_dir / caller_out_dir

    # Create needed directories if they don't exist
    caller_dir = Path(caller_log_path).parents[0]
    caller_dir.mkdir(parents=True, exist_ok=True)
    caller_log_dir.mkdir(parents=True, exist_ok=True)

    # If the caller output file does not exist yet, write the header
    if not Path(caller_log_path).is_file():
        output_header = "\t Timestamp\t\t\t\t\t\t\t Path\t\t\t\t\t\t\t Description\n"
        with Path(caller_log_path).open("a") as output_file:
            output_file.write(output_header)


def add_output(output_path: str, description: str) -> None:
    """Add output location and description to the log of the current command.

    Args:
        output_path: The file path of where output is written to.
        description: A short description of what kind of output is written to
            this file.
    """
    # Prepare logging information
    timestamp = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime(time.time()))
    output_str = f"{timestamp}\t{output_path}\t{description}\n"
    # Write output path and description to caller file
    with Path(caller_log_path).open("a") as output_file:
        output_file.write(output_str)


def log_command(argv: list[str]) -> None:
    """Write to file which command was executed.

    Includes information on when it was executed, with which arguments, and
    where details about its output are stored (if any).

    Args:
        argv: List containing the command line arguments derived from sys.argv.
    """
    # Determine caller
    _update_caller(argv)

    # Prepare logging information
    timestamp = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime(time.time()))
    _update_caller_file_path(timestamp)
    output_file = caller_log_path
    args = " ".join(argv[0:])
    log_str = timestamp + " " + args + " " + str(output_file) + "\n"

    # If the log file does not exist yet, write the header
    log_path = gv.settings().DEFAULT_output / "sparkle.log"
    if not log_path.is_file():
        log_header = "\t Timestamp\t\t\t\t\t\t\t Command\t\t\t\t\t\t\t Output details\n"
        log_str = log_header + log_str

    # Write to log file
    log_path.open("a").write(log_str)

    # Pipe the RunRunner log to the caller log
    RunRunnerLog.set_log_file(caller_log_path)
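A sketch of the assumed call pattern for a Sparkle CLI entry point: register the command first (which creates the log locations), then record any produced artefacts. The report path is hypothetical.

import sys

from sparkle.CLI.help import logging as sparkle_logging

sparkle_logging.log_command(sys.argv)   # creates Output/<timestamp>_<command>/...
sparkle_logging.add_output("Output/report/report.pdf", "Generated PDF report")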
diff --git a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_nicknames_py.html b/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_nicknames_py.html
deleted file mode 100644
index c16a81e2d..000000000
--- a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_nicknames_py.html
+++ /dev/null
@@ -1,138 +0,0 @@
[Deleted: generated coverage.py v7.6.1 HTML report for sparkle/CLI/help/nicknames.py (0% coverage, 19 statements). The rendered module source follows:]

"""Helper functions for CLI nicknames."""
from __future__ import annotations
from pathlib import Path
from typing import Callable


def resolve_object_name(name: str | Path,
                        nickname_dict: dict = {},
                        target_dir: Path = Path(),
                        class_name: Callable = None) -> Path | any:
    """Attempt to resolve a (nick)name.

    Args:
        name: The (nick)name to resolve
        nickname_dict: Mapping of known nicknames to their paths
        target_dir: The location where the file object should exist
        class_name: If passed, will attempt to return an object
            that is constructed from this Path.

    Returns:
        Path to the object, None if unresolvable.
    """
    path = None
    # We cannot handle None as a name
    if name is None:
        return None
    # First, check if the name already is a path
    if Path(name).exists():
        path = Path(name)
    # Second, check if it is a nickname registered in Sparkle
    elif str(name) in nickname_dict:
        path = Path(nickname_dict[str(name)])
    # Third, check if we can create a valid path with the name
    elif (target_dir / name).exists():
        path = (target_dir / name)
    # Finally, attempt to construct the object from the Path
    try:
        if class_name is not None and path is not None:
            return class_name(path)
    except Exception:
        return None
    return path
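A sketch of the resolution order above, using a hypothetical nickname mapping; since "best-solver" is neither an existing path nor present under target_dir, the second step (the nickname dictionary) resolves it.

from pathlib import Path

from sparkle.CLI.help.nicknames import resolve_object_name

nickname_dict = {"best-solver": "Solvers/MySolver"}   # illustrative mapping
resolved = resolve_object_name("best-solver",
                               nickname_dict=nickname_dict,
                               target_dir=Path("Solvers"))
# resolved == Path("Solvers/MySolver")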
diff --git a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_reporting_scenario_py.html b/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_reporting_scenario_py.html
deleted file mode 100644
index 038bb0c90..000000000
--- a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_reporting_scenario_py.html
+++ /dev/null
@@ -1,436 +0,0 @@
[Deleted: generated coverage.py v7.6.1 HTML report for sparkle/CLI/help/reporting_scenario.py (53% coverage, 184 statements). The rendered module source follows:]

"""Helper module to manage Sparkle scenarios."""
# Keep in CLI help

from __future__ import annotations
import configparser
from enum import Enum
from pathlib import Path
from pathlib import PurePath
from sparkle.solver import Solver
from sparkle.instance import instance_set, InstanceSet


class Scenario(str, Enum):
    """Enum of possible execution scenarios for Sparkle."""

    NONE = "NONE"
    SELECTION = "SELECTION"
    CONFIGURATION = "CONFIGURATION"
    PARALLEL_PORTFOLIO = "PARALLEL_PORTFOLIO"


class ReportingScenario:
    """Class to manage scenarios executed with Sparkle."""

    # ReportingScenario path names and defaults
    __reporting_scenario_file = Path("latest_scenario.ini")
    __reporting_scenario_dir = Path("Output")
    DEFAULT_reporting_scenario_path = Path(
        PurePath(__reporting_scenario_dir / __reporting_scenario_file))

    # Constant default values
    DEFAULT_latest_scenario = Scenario.NONE

    DEFAULT_selection_portfolio_path = Path("")
    DEFAULT_selection_test_case_directory = Path("")

    DEFAULT_parallel_portfolio_path = Path("")
    DEFAULT_parallel_portfolio_instance_list = []

    DEFAULT_config_solver = Path("")
    DEFAULT_config_instance_set_train = Path("")
    DEFAULT_config_instance_set_test = Path("")

    def __init__(self: ReportingScenario) -> None:
        """Initialise a ReportingScenario object."""
        # ReportingScenario 'dictionary' in configparser format
        self.__scenario = configparser.ConfigParser()

        # Initialise scenario in default file path
        self.read_scenario_ini()

        return

    def read_scenario_ini(
            self: ReportingScenario,
            file_path: Path = DEFAULT_reporting_scenario_path) -> None:
        """Read the scenario from an INI file.

        Args:
            file_path: Path of the INI file for the scenario. Defaults to
                DEFAULT_reporting_scenario_path.
        """
        # If the file does not exist, set default values
        if not Path(file_path).is_file():
            self.set_latest_scenario()
            self.set_selection_scenario_path()
            self.set_selection_test_case_directory()
            self.set_parallel_portfolio_path()
            self.set_parallel_portfolio_instance_path()
            self.set_config_solver()
            self.set_config_instance_set_train()
            self.set_config_instance_set_test()

        # Read file
        file_scenario = configparser.ConfigParser()
        file_scenario.read(str(file_path))

        # Set internal scenario based on data read from FILE if they were read
        # successfully
        if file_scenario.sections() != []:
            section = "latest"
            option_names = ("scenario",)  # Comma to make it a tuple
            for option in option_names:
                if file_scenario.has_option(section, option):
                    value = Scenario(file_scenario.get(section, option))
                    self.set_latest_scenario(value)
                    file_scenario.remove_option(section, option)

            section = "selection"
            option_names = ("scenario_path",)  # Comma to make it a tuple
            for option in option_names:
                if file_scenario.has_option(section, option):
                    value = Path(file_scenario.get(section, option))
                    self.set_selection_scenario_path(value)
                    file_scenario.remove_option(section, option)

            option_names = ("test_case_directory",)  # Comma to make it a tuple
            for option in option_names:
                if file_scenario.has_option(section, option):
                    value = Path(file_scenario.get(section, option))
                    self.set_selection_test_case_directory(value)
                    file_scenario.remove_option(section, option)

            section = "configuration"
            option_names = ("solver",)  # Comma to make it a tuple
            for option in option_names:
                if file_scenario.has_option(section, option):
                    value = Path(file_scenario.get(section, option))
                    self.set_config_solver(value)
                    file_scenario.remove_option(section, option)

            option_names = ("instance_set_train",)  # Comma to make it a tuple
            for option in option_names:
                if file_scenario.has_option(section, option):
                    value = Path(file_scenario.get(section, option))
                    self.set_config_instance_set_train(value)
                    file_scenario.remove_option(section, option)

            option_names = ("instance_set_test",)  # Comma to make it a tuple
            for option in option_names:
                if file_scenario.has_option(section, option):
                    value = Path(file_scenario.get(section, option))
                    self.set_config_instance_set_test(value)
                    file_scenario.remove_option(section, option)

            section = "parallel_portfolio"
            option_names = ("portfolio_path",)  # Comma to make it a tuple
            for option in option_names:
                if file_scenario.has_option(section, option):
                    value = Path(file_scenario.get(section, option))
                    self.set_parallel_portfolio_path(value)
                    file_scenario.remove_option(section, option)

            section = "parallel_portfolio"
            option_names = ("instance_path",)  # Comma to make it a tuple
            for option in option_names:
                if file_scenario.has_option(section, option):
                    value = file_scenario.get(section, option)
                    self.set_parallel_portfolio_instance_path(value)
                    file_scenario.remove_option(section, option)

            # Report on any unknown settings that were read
            sections = file_scenario.sections()

            for section in sections:
                for option in file_scenario[section]:
                    print(f'Unrecognised section - option combination: "{section} '
                          f'{option}" in file {file_path} ignored')

        # Print error if unable to read the scenario file
        else:
            print(f"WARNING: Failed to read latest scenario from {file_path}. The "
                  "file may have been empty, or is in another format than INI. Default "
                  "values will be used.")

    def write_scenario_ini(
            self: ReportingScenario,
            file_path: Path = DEFAULT_reporting_scenario_path) -> None:
        """Write the scenario file in INI format.

        Args:
            file_path: Path of the INI file for the scenario. Defaults to
                DEFAULT_reporting_scenario_path.
        """
        # Create needed directories if they don't exist
        file_dir = file_path.parents[0]
        file_dir.mkdir(parents=True, exist_ok=True)

        # Write the scenario to file
        with Path(str(file_path)).open("w") as scenario_file:
            self.__scenario.write(scenario_file)

    def __init_section(self: ReportingScenario, section: str) -> None:
        """Initialise a section in the scenario file.

        Args:
            section: Name of the section.
        """
        if section not in self.__scenario:
            self.__scenario[section] = {}

    # Generic setters ###

    def path_setter(self: ReportingScenario, section: str, name: str,
                    value: Path) -> None:
        """Set a generic Path for the scenario.

        Args:
            section: Name of the section.
            name: Name of the path element.
            value: Value of the path given.
        """
        if value is not None:
            self.__init_section(section)
            self.__scenario[section][name] = str(value)

    # Generic getters ###

    def none_if_empty_path(self: ReportingScenario, path: Path) -> Path:
        """Return None if a path is empty or the Path otherwise.

        Args:
            path: Path value given.

        Returns:
            None if the given path is empty, the given Path value otherwise.
        """
        if str(path) == "" or str(path) == ".":
            return None
        return path

    # Latest settings ###

    def set_latest_scenario(self: ReportingScenario,
                            value: Scenario = DEFAULT_latest_scenario) -> None:
        """Set the latest Scenario that was executed."""
        section = "latest"
        name = "scenario"

        if value is not None:
            self.__init_section(section)
            self.__scenario[section][name] = value.name

    def get_latest_scenario(self: ReportingScenario) -> Scenario:
        """Return the latest Scenario that was executed."""
        return Scenario(self.__scenario["latest"]["scenario"])

    # Selection settings ###

    def set_selection_scenario_path(
            self: ReportingScenario,
            value: Path = DEFAULT_selection_portfolio_path) -> None:
        """Set the path to the portfolio selector used for algorithm selection."""
        section = "selection"
        name = "scenario_path"
        self.path_setter(section, name, value)

    def get_selection_scenario_path(self: ReportingScenario) -> Path:
        """Return the path to the portfolio selector used for algorithm selection."""
        return Path(self.__scenario["selection"]["scenario_path"])

    def set_selection_test_case_directory(
            self: ReportingScenario,
            value: Path = DEFAULT_selection_test_case_directory) -> None:
        """Set the path to the testing set that was used for algorithm selection."""
        section = "selection"
        name = "test_case_directory"
        self.path_setter(section, name, value)

    def get_selection_test_case_directory(self: ReportingScenario) -> str:
        """Return the path to the testing set that was used for algorithm selection."""
        try:
            path = self.__scenario["selection"]["test_case_directory"]
            if Path(path) == Path("."):
                path = None
        except KeyError:
            path = None
        return path

    # Parallel portfolio settings ###

    def set_parallel_portfolio_path(
            self: ReportingScenario,
            value: Path = DEFAULT_parallel_portfolio_path) -> None:
        """Set the path to the parallel portfolio."""
        section = "parallel_portfolio"
        name = "portfolio_path"
        self.path_setter(section, name, value)

    def get_parallel_portfolio_path(self: ReportingScenario) -> Path:
        """Return the path to the parallel portfolio."""
        return Path(self.__scenario["parallel_portfolio"]["portfolio_path"])

    def set_parallel_portfolio_instance_path(
            self: ReportingScenario,
            value: Path = None) -> None:
        """Set the instance path used with the parallel portfolio."""
        section = "parallel_portfolio"
        name = "instance_path"
        self.path_setter(section, name, value)

    def get_parallel_portfolio_instance_set(self: ReportingScenario) -> InstanceSet:
        """Return the instance set used with the parallel portfolio.

        If the instance path is empty, return None.
        """
        if self.__scenario["parallel_portfolio"]["instance_path"] is None:
            return None
        return instance_set(Path(self.__scenario["parallel_portfolio"]["instance_path"]))

    # Configuration settings ###

    def set_config_solver(self: ReportingScenario,
                          value: Solver | Path = DEFAULT_config_solver) -> None:
        """Set the path to the solver that was configured."""
        section = "configuration"
        name = "solver"
        if isinstance(value, Solver):
            value = value.directory
        self.path_setter(section, name, value)

    def get_config_solver(self: ReportingScenario) -> Solver:
        """Return the path to the solver that was configured."""
        path = self.none_if_empty_path(Path(self.__scenario["configuration"]["solver"]))
        if path is not None:
            return Solver(path)
        return None

    def set_config_instance_set_train(
            self: ReportingScenario,
            value: Path = DEFAULT_config_instance_set_train) -> None:
        """Set the path to the training instance set used for configuration."""
        section = "configuration"
        name = "instance_set_train"
        self.path_setter(section, name, value)

    def get_config_instance_set_train(self: ReportingScenario) -> InstanceSet:
        """Return the path to the training instance set used for configuration."""
        path = self.none_if_empty_path(
            Path(self.__scenario["configuration"]["instance_set_train"]))
        if path is None:
            return None
        return instance_set(path)

    def set_config_instance_set_test(
            self: ReportingScenario,
            value: Path = DEFAULT_config_instance_set_test) -> None:
        """Set the path to the testing instance set used for configuration."""
        section = "configuration"
        name = "instance_set_test"
        self.path_setter(section, name, value)

    def get_config_instance_set_test(self: ReportingScenario) -> InstanceSet:
        """Return the path to the testing instance set used for configuration."""
        path = self.none_if_empty_path(
            Path(self.__scenario["configuration"]["instance_set_test"]))
        if path is None:
            return None
        return instance_set(path)
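A sketch of the assumed write path for this class: a command records what it just executed so that report generation can pick it up later. The selection scenario path is illustrative.

from pathlib import Path

from sparkle.CLI.help.reporting_scenario import ReportingScenario, Scenario

scenario = ReportingScenario()   # reads Output/latest_scenario.ini, if present
scenario.set_latest_scenario(Scenario.SELECTION)
scenario.set_selection_scenario_path(Path("Output/Selection/scenario"))
scenario.write_scenario_ini()    # persists to the default INI path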
diff --git a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_snapshot_help_py.html b/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_snapshot_help_py.html
deleted file mode 100644
index c6b5583eb..000000000
--- a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_snapshot_help_py.html
+++ /dev/null
@@ -1,170 +0,0 @@
[Deleted: generated coverage.py v7.6.1 HTML report for sparkle/CLI/help/snapshot_help.py (30% coverage, 44 statements). The rendered module source follows:]

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Helper functions to record and restore a Sparkle platform."""
import shutil
import sys
import os
import time
from pathlib import Path
import zipfile

from sparkle.CLI.help import global_variables as gv
from sparkle.tools.general import get_time_pid_random_string


def save_current_sparkle_platform() -> None:
    """Store the current Sparkle platform in a .zip file."""
    time_stamp = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime(time.time()))
    snapshot_tmp_path = gv.settings().DEFAULT_snapshot_dir /\
        f"Snapshot_{os.getlogin()}_{time_stamp}"
    snapshot_tmp_path.mkdir(parents=True)  # Create temporary directory for zip
    for working_dir in gv.settings().DEFAULT_working_dirs:
        if working_dir.exists():
            shutil.copytree(working_dir, snapshot_tmp_path / working_dir.name)
    shutil.make_archive(snapshot_tmp_path, "zip", snapshot_tmp_path)
    shutil.rmtree(snapshot_tmp_path)
    print(f"Snapshot file {snapshot_tmp_path}.zip saved successfully!")


def remove_current_platform() -> None:
    """Remove the current Sparkle platform."""
    for working_dir in gv.settings().DEFAULT_working_dirs:
        shutil.rmtree(working_dir, ignore_errors=True)


def create_working_dirs() -> None:
    """Create working directories."""
    for working_dir in gv.settings().DEFAULT_working_dirs:
        working_dir.mkdir(parents=True, exist_ok=True)


def extract_snapshot(snapshot_file: Path) -> None:
    """Restore a Sparkle platform from a snapshot.

    Args:
        snapshot_file: Path to the snapshot file to restore the platform from.
    """
    tmp_directory = Path(f"tmp_directory_{get_time_pid_random_string()}")
    gv.settings().DEFAULT_tmp_output.mkdir(exist_ok=True)
    with zipfile.ZipFile(snapshot_file, "r") as zip_ref:
        zip_ref.extractall(tmp_directory)
    shutil.copytree(tmp_directory, "./", dirs_exist_ok=True)
    shutil.rmtree(tmp_directory)


def load_snapshot(snapshot_file: Path) -> None:
    """Load a Sparkle platform from a snapshot.

    Args:
        snapshot_file: File path to the file where the Sparkle platform is stored.
    """
    if not snapshot_file.exists():
        print(f"ERROR: Snapshot file {snapshot_file} does not exist!")
        sys.exit(-1)
    if not snapshot_file.suffix == ".zip":
        print(f"ERROR: File {snapshot_file} is not a .zip file!")
        sys.exit(-1)
    print("Cleaning existing Sparkle platform ...")
    remove_current_platform()
    print("Existing Sparkle platform cleaned!")

    print(f"Loading snapshot file {snapshot_file} ...")
    extract_snapshot(snapshot_file)
    print(f"Snapshot file {snapshot_file} loaded successfully!")
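A sketch of a snapshot round trip (destructive if actually run: load_snapshot wipes the working directories first). The zip name and directory are illustrative; save_current_sparkle_platform derives the real name from the user and timestamp.

from pathlib import Path

from sparkle.CLI.help import snapshot_help

snapshot_help.save_current_sparkle_platform()   # writes Snapshot_<user>_<time>.zip
snapshot_help.load_snapshot(Path("Snapshots/Snapshot_user_2024-09-27.zip"))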
diff --git a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_system_status_py.html b/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_system_status_py.html
deleted file mode 100644
index 8b37797d8..000000000
--- a/Documentation/source/_static/coverage/z_8ea3791e4a1a98c7_system_status_py.html
+++ /dev/null
@@ -1,175 +0,0 @@
[Deleted: generated coverage.py v7.6.1 HTML report for sparkle/CLI/help/system_status.py (0% coverage, 32 statements). The rendered module source follows:]

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Helper functions to inform about Sparkle's system status."""
from pathlib import Path

from sparkle.structures import FeatureDataFrame, PerformanceDataFrame


def print_sparkle_list(objects: list[any], type: str, details: bool = False) -> None:
    """Print a list of Sparkle objects.

    Args:
        objects: The objects to print
        type: The name of the object type
        details: Indicates whether output should be detailed
    """
    print(f"\nCurrently Sparkle has {len(objects)} {type}(s)"
          + (":" if details else ""))

    if details:
        for i, object in enumerate(objects):
            print(f"[{i + 1}]: {type}: {Path(object).name}")


def print_feature_computation_jobs(feature_data_csv: Path,
                                   verbose: bool = False) -> None:
    """Print a list of remaining feature computation jobs.

    Args:
        feature_data_csv: Path to the feature data csv
        verbose: Indicates whether output should be verbose
    """
    if not feature_data_csv.exists():
        print("\nNo feature data found, cannot determine remaining jobs.")
        return

    feature_data = FeatureDataFrame(feature_data_csv)
    jobs = feature_data.remaining_jobs()

    print(f"\nCurrently Sparkle has {len(jobs)} remaining feature computation "
          "jobs that need to be performed before creating an algorithm selector"
          + (":" if verbose else ""))

    if verbose:
        for i, job in enumerate(jobs):
            print(f"[{i + 1}]: Extractor: {Path(job[1]).name}, Group: {job[2]}, "
                  f"Instance: {Path(job[0]).name}")
        print()


def print_performance_computation_jobs(performance_data_csv_path: Path,
                                       verbose: bool = False) -> None:
    """Print a list of remaining performance computation jobs.

    Args:
        performance_data_csv_path: Path to the performance data csv
        verbose: Indicates whether output should be verbose
    """
    if not performance_data_csv_path.exists():
        print("\nNo performance data found, cannot determine remaining jobs.")
        return
    performance_data_csv = PerformanceDataFrame(performance_data_csv_path)
    jobs = performance_data_csv.remaining_jobs()
    total_job_num = sum([len(jobs[instance]) for instance in jobs.keys()])

    print(f"\nCurrently Sparkle has {total_job_num} remaining performance computation"
          " jobs that need to be performed before creating an algorithm selector"
          + (":" if verbose else ""))

    if verbose:
        i = 0
        for instance in jobs.keys():
            for solver in jobs[instance]:
                print(f"[{i + 1}]: Solver: "
                      f"{Path(solver).name}, Instance: "
                      f"{Path(instance).name}")
                i += 1

    print()
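A sketch of how these helpers are presumably invoked by Sparkle's status command; both data file paths are illustrative.

from pathlib import Path

from sparkle.CLI.help import system_status

system_status.print_feature_computation_jobs(
    Path("Feature_Data/feature_data.csv"), verbose=True)
system_status.print_performance_computation_jobs(
    Path("Performance_Data/performance_data.csv"), verbose=True)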
diff --git a/Documentation/source/_static/coverage/z_a1a079e9ccaaf6c6___init___py.html b/Documentation/source/_static/coverage/z_a1a079e9ccaaf6c6___init___py.html
deleted file mode 100644
index 6cfcc1060..000000000
--- a/Documentation/source/_static/coverage/z_a1a079e9ccaaf6c6___init___py.html
+++ /dev/null
@@ -1,100 +0,0 @@
[Deleted: generated coverage.py v7.6.1 HTML report for sparkle/structures/__init__.py (100% coverage, 2 statements). The rendered module source follows:]

"""This package provides Sparkle's wrappers for Pandas DataFrames."""
from sparkle.structures.feature_dataframe import FeatureDataFrame
from sparkle.structures.performance_dataframe import PerformanceDataFrame
- - - diff --git a/Documentation/source/_static/coverage/z_a1a079e9ccaaf6c6_feature_dataframe_py.html b/Documentation/source/_static/coverage/z_a1a079e9ccaaf6c6_feature_dataframe_py.html deleted file mode 100644 index ba98e2fac..000000000 --- a/Documentation/source/_static/coverage/z_a1a079e9ccaaf6c6_feature_dataframe_py.html +++ /dev/null @@ -1,295 +0,0 @@ - - - - - Coverage for sparkle/structures/feature_dataframe.py: 37% - - - - - -
-
-

- Coverage for sparkle/structures/feature_dataframe.py: - 37% -

- -

- 90 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1#!/usr/bin/env python3 

-

2# -*- coding: UTF-8 -*- 

-

3"""Module to manage feature data files and common operations on them.""" 

-

4from __future__ import annotations 

-

5import pandas as pd 

-

6import math 

-

7from pathlib import Path 

-

8 

-

9 

-

10class FeatureDataFrame: 

-

11 """Class to manage feature data CSV files and common operations on them.""" 

-

12 missing_value = math.nan 

-

13 multi_dim_names = ["FeatureGroup", "FeatureName", "Extractor"] 

-

14 

-

15 def __init__(self: FeatureDataFrame, 

-

16 csv_filepath: Path, 

-

17 instances: list[str] = [], 

-

18 extractor_data: dict[str, list[tuple[str, str]]] = {} 

-

19 ) -> None: 

-

20 """Initialise a SparkleFeatureDataCSV object. 

-

21 

-

22 Arguments: 

-

23 csv_filepath: The Path for the CSV storage. If it does not exist, 

-

24 a new DataFrame will be initialised and stored here. 

-

25 instances: The list of instances (Columns) to be added to the DataFrame. 

-

26 extractor_data: A dictionary with extractor names as key, and a list of 

-

27 tuples ordered as [(feature_group, feature_name), ...] as value. 

-

28 """ 

-

29 self.csv_filepath = csv_filepath 

-

30 if self.csv_filepath.exists(): 

-

31 # Read from file 

-

32 self.dataframe = pd.read_csv(self.csv_filepath, 

-

33 index_col=FeatureDataFrame.multi_dim_names) 

-

34 return 

-

35 # Unfold the extractor_data into lists 

-

36 multi_index_lists = [[], [], []] 

-

37 for extractor in extractor_data: 

-

38 for group, feature_name in extractor_data[extractor]: 

-

39 multi_index_lists[0].append(group) 

-

40 multi_index_lists[1].append(feature_name) 

-

41 multi_index_lists[2].append(extractor) 

-

42 # Initialise new dataframe 

-

43 self.dataframe = pd.DataFrame(FeatureDataFrame.missing_value, 

-

44 index=multi_index_lists, 

-

45 columns=instances) 

-

46 self.dataframe.index.names = FeatureDataFrame.multi_dim_names 

-

47 self.save_csv() 

-

48 

-

    def add_extractor(self: FeatureDataFrame,
                      extractor: str,
                      extractor_features: list[tuple[str, str]],
                      values: list[list[float]] = None) -> None:
        """Add an extractor and its feature names to the dataframe.

        Arguments:
            extractor: Name of the extractor
            extractor_features: Tuples of [FeatureGroup, FeatureName]
            values: Initial values of the Extractor per instance in the dataframe.
                Defaults to FeatureDataFrame.missing_value.
        """
        if values is None:
            values = [FeatureDataFrame.missing_value
                      for _ in range(len(extractor_features))]
        # Unfold to indices to lists
        for index, pair in enumerate(extractor_features):
            feature_group, feature = pair
            self.dataframe.loc[(feature_group, feature, extractor), :] = values[index]

    def add_instances(self: FeatureDataFrame,
                      instance: str | list[str],
                      values: list[float] = None) -> None:
        """Add one or more instances to the dataframe."""
        if values is None:
            values = FeatureDataFrame.missing_value
        self.dataframe[instance] = values

    def remove_extractor(self: FeatureDataFrame,
                         extractor: str) -> None:
        """Remove an extractor from the dataframe."""
        self.dataframe.drop(extractor, axis=0, level="Extractor", inplace=True)

    def remove_instances(self: FeatureDataFrame,
                         instances: str | list[str]) -> None:
        """Remove one or more instances from the dataframe."""
        self.dataframe.drop(instances, axis=1, inplace=True)

    def get_feature_groups(self: FeatureDataFrame,
                           extractor: str | list[str] = None) -> list[str]:
        """Retrieve the feature groups in the dataframe.

        Args:
            extractor: Optional. If extractor(s) are given,
                yields only feature groups of that extractor.

        Returns:
            A list of feature groups.
        """
        indices = self.dataframe.index
        if extractor is not None:
            if isinstance(extractor, str):
                extractor = [extractor]
            indices = indices[indices.isin(extractor, level=2)]
        return indices.get_level_values(level=0).unique().to_list()

    def get_value(self: FeatureDataFrame,
                  instance: str,
                  extractor: str,
                  feature_group: str,
                  feature_name: str) -> float:
        """Return a value in the dataframe."""
        return self.dataframe.loc[(feature_group, feature_name, extractor), instance]

    def set_value(self: FeatureDataFrame,
                  instance: str,
                  extractor: str,
                  feature_group: str,
                  feature_name: str,
                  value: float) -> None:
        """Set a value in the dataframe."""
        self.dataframe.loc[(feature_group, feature_name, extractor), instance] = value

    def has_missing_vectors(self: FeatureDataFrame) -> bool:
        """Returns True if there are any Extractors still to be run on any instance."""
        for instance in self.dataframe.columns:
            for extractor in self.extractors:
                extractor_features = self.dataframe.xs(extractor, level=2,
                                                       drop_level=False)
                if extractor_features.loc[:, instance].isnull().all():
                    return True
        return False

    def remaining_jobs(self: FeatureDataFrame) -> list[tuple[str, str, str]]:
        """Determine the needed feature computations per instance/extractor/group.

        Returns:
            list: A list of tuples representing (Instance, Extractor, Feature Group)
                that still need to be computed.
        """
        remaining_jobs = []
        for extractor in self.extractors:
            for group in self.get_feature_groups(extractor):
                subset = self.dataframe.xs((group, extractor), level=(0, 2))
                for instance in self.dataframe.columns:
                    if subset.loc[:, instance].isnull().all():
                        remaining_jobs.append((instance, extractor, group))
        return remaining_jobs
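    # Sketch of how remaining_jobs is consumed elsewhere in this patch (see
    # sparkle/CLI/compute_features.py below): one job per instance/extractor/
    # feature-group combination whose vector is still entirely NaN, e.g.
    #   for instance_path, extractor_name, feature_group in fdf.remaining_jobs():
    #       ...  # schedule one feature computation job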

    def get_instance(self: FeatureDataFrame, instance: str) -> list[float]:
        """Return the feature vector of an instance."""
        return self.dataframe[instance].tolist()

    def impute_missing_values(self: FeatureDataFrame) -> None:
        """Impute all NaN values by taking the average feature value."""
        self.dataframe = self.dataframe.T.fillna(self.dataframe.mean(axis=1)).T

    def has_missing_value(self: FeatureDataFrame) -> bool:
        """Return whether there are missing values in the feature data."""
        return self.dataframe.isnull().any().any()

    def reset_dataframe(self: FeatureDataFrame) -> None:
        """Reset all values to FeatureDataFrame.missing_value."""
        self.dataframe.loc[:, :] = FeatureDataFrame.missing_value

    def sort(self: FeatureDataFrame) -> None:
        """Sort the DataFrame by Multi-Index for readability."""
        self.dataframe = self.dataframe.sort_index(
            level=FeatureDataFrame.multi_dim_names)

    @property
    def instances(self: FeatureDataFrame) -> list[str]:
        """Return the instances in the dataframe."""
        return self.dataframe.columns

    @property
    def extractors(self: FeatureDataFrame) -> list[str]:
        """Return all unique extractors in the DataFrame."""
        return self.dataframe.index.get_level_values("Extractor").unique().to_list()

    def save_csv(self: FeatureDataFrame, csv_filepath: Path = None) -> None:
        """Write a CSV to the given path.

        Args:
            csv_filepath: String path to the csv file. Defaults to self.csv_filepath.
        """
        csv_filepath = self.csv_filepath if csv_filepath is None else csv_filepath
        self.dataframe.to_csv(csv_filepath)

    def to_autofolio(self: FeatureDataFrame,
                     target: Path = None) -> Path:
        """Port the data to a format acceptable for AutoFolio."""
        autofolio_df = self.dataframe.copy()
        autofolio_df.index = autofolio_df.index.map("_".join)  # Reduce Multi-Index
        autofolio_df = autofolio_df.T  # Autofolio has feature columns and instance rows
        if target is None:
            path = self.csv_filepath.parent / f"autofolio_{self.csv_filepath.name}"
        else:
            path = target / f"autofolio_{self.csv_filepath.name}"
        autofolio_df.to_csv(path)
        return path
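A minimal usage sketch of the class above (with hypothetical file, instance and
feature names, not taken from the Sparkle sources): rows are indexed by
(FeatureGroup, FeatureName, Extractor), columns are instances.

    from pathlib import Path
    from sparkle.structures import FeatureDataFrame

    fdf = FeatureDataFrame(Path("feature_data.csv"),
                           instances=["train/inst1.cnf"],
                           extractor_data={"my_extractor": [("base", "n_vars")]})
    fdf.set_value("train/inst1.cnf", "my_extractor", "base", "n_vars", 42.0)
    assert fdf.get_value("train/inst1.cnf", "my_extractor", "base", "n_vars") == 42.0
    fdf.save_csv()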
diff --git a/Documentation/source/_static/coverage/z_a1a079e9ccaaf6c6_performance_dataframe_py.html b/Documentation/source/_static/coverage/z_a1a079e9ccaaf6c6_performance_dataframe_py.html
deleted file mode 100644
index 1790bc9d3..000000000
--- a/Documentation/source/_static/coverage/z_a1a079e9ccaaf6c6_performance_dataframe_py.html
+++ /dev/null
@@ -1,693 +0,0 @@
Coverage for sparkle/structures/performance_dataframe.py: 70% (253 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Module to manage performance data files and common operations on them."""
from __future__ import annotations
from pathlib import Path
import sys
import math
import pandas as pd

from sparkle.types import SparkleObjective, resolve_objective


class PerformanceDataFrame():
    """Class to manage performance data and common operations on them."""
    missing_value = math.nan
    missing_objective = "UNKNOWN"
    multi_dim_names = ["Objective", "Instance", "Run"]

    def __init__(self: PerformanceDataFrame,
                 csv_filepath: Path,
                 solvers: list[str] = [],
                 objectives: list[str | SparkleObjective] = None,
                 instances: list[str] = [],
                 n_runs: int = 1,
                 init_df: bool = True) -> None:
        """Initialise a PerformanceDataFrame.

        Consists of:
            - Columns representing the Solvers
            - Rows representing the result by multi-index in order of:
                * Objective (Static, given in constructor or read from file)
                * Instance
                * Runs (Static, given in constructor or read from file)

        Args:
            csv_filepath: If path exists, load from Path.
                Otherwise create new and save to this path.
            solvers: List of solver names to be added into the Dataframe
            objectives: List of SparkleObjectives or objective names. By default None,
                then the objectives will be derived from Sparkle Settings if possible.
            instances: List of instance names to be added into the Dataframe
            n_runs: The number of runs to consider per Solver/Objective/Instance comb.
            init_df: Whether the dataframe should be initialised. Set to false to
                reduce heavy IO loads.
        """
        self.csv_filepath = csv_filepath
        # Runs is a "static" dimension
        self.n_runs = n_runs
        self.run_ids = list(range(1, self.n_runs + 1))  # We count runs from 1
        if objectives is not None:
            self.objectives = [resolve_objective(o) if isinstance(o, str) else o
                               for o in objectives]
        else:
            self.objectives = [SparkleObjective(PerformanceDataFrame.missing_objective)]
        if init_df:
            if self.csv_filepath.exists():
                self.dataframe = pd.read_csv(csv_filepath)
                has_rows = len(self.dataframe.index) > 0
                if (PerformanceDataFrame.multi_dim_names[0] not in self.dataframe.columns
                        or not has_rows):
                    # No objective present, force into column
                    if objectives is None:
                        self.dataframe[PerformanceDataFrame.multi_dim_names[0]] =\
                            PerformanceDataFrame.missing_objective
                    else:  # Constructor is provided with the objectives
                        self.dataframe[PerformanceDataFrame.multi_dim_names[0]] =\
                            [o.name for o in self.objectives]
                else:
                    # Objectives are present, determine which ones
                    names = self.dataframe[PerformanceDataFrame.multi_dim_names[0]]
                    self.objectives = [resolve_objective(name) for name in
                                       names.unique()]
                if (PerformanceDataFrame.multi_dim_names[2] not in self.dataframe.columns
                        or not has_rows):
                    # No runs column present, force into column
                    self.n_runs = 1
                    self.dataframe[PerformanceDataFrame.multi_dim_names[2]] = self.n_runs
                    self.run_ids = [self.n_runs]
                else:
                    # Runs are present, determine run ids
                    run_label = PerformanceDataFrame.multi_dim_names[2]
                    self.run_ids = self.dataframe[run_label].unique().tolist()
                if PerformanceDataFrame.multi_dim_names[1] not in self.dataframe.columns:
                    # Instances are listed as rows, force into column
                    self.dataframe = self.dataframe.reset_index().rename(
                        columns={"index": PerformanceDataFrame.multi_dim_names[1]})
                # Now we can cast the columns into multi dim
                self.dataframe = self.dataframe.set_index(
                    PerformanceDataFrame.multi_dim_names)
            else:
                # Initialize empty DataFrame
                midx = pd.MultiIndex.from_product(
                    [[o.name for o in self.objectives], instances, self.run_ids],
                    names=PerformanceDataFrame.multi_dim_names)
                self.dataframe = pd.DataFrame(PerformanceDataFrame.missing_value,
                                              index=midx,
                                              columns=solvers)
                self.save_csv()
            # Sort the index to optimize lookup speed
            self.dataframe = self.dataframe.sort_index()
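    # Shape sketch (illustrative values, not from the original source): after
    # initialisation the frame is indexed (Objective, Instance, Run) with one
    # column per solver, e.g. for one objective, two instances and n_runs=2:
    #   Objective  Instance   Run    SolverA  SolverB
    #   UNKNOWN    inst1.cnf  1      NaN      NaN
    #   UNKNOWN    inst1.cnf  2      NaN      NaN
    #   UNKNOWN    inst2.cnf  1      NaN      NaN
    #   UNKNOWN    inst2.cnf  2      NaN      NaN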

    def __repr__(self: PerformanceDataFrame) -> str:
        """Return string representation of the DataFrame."""
        return self.dataframe.__repr__()

    # Properties

    @property
    def num_objectives(self: PerformanceDataFrame) -> int:
        """Retrieve the number of objectives in the DataFrame."""
        return self.dataframe.index.levels[0].size

    @property
    def num_instances(self: PerformanceDataFrame) -> int:
        """Return the number of instances."""
        return self.dataframe.index.levels[1].size

    @property
    def num_runs(self: PerformanceDataFrame) -> int:
        """Return the number of runs."""
        return self.dataframe.index.levels[2].size

    @property
    def num_solvers(self: PerformanceDataFrame) -> int:
        """Return the number of solvers."""
        return self.dataframe.columns.size

    @property
    def multi_objective(self: PerformanceDataFrame) -> bool:
        """Return whether the dataframe represents multiple objectives or not."""
        return self.num_objectives > 1

    @property
    def solvers(self: PerformanceDataFrame) -> list[str]:
        """Return the solvers present as a list of strings."""
        return self.dataframe.columns.tolist()

    @property
    def objective_names(self: PerformanceDataFrame) -> list[str]:
        """Return the objective names as a list of strings."""
        if self.num_objectives == 0:
            return [PerformanceDataFrame.missing_objective]
        return self.dataframe.index.levels[0].tolist()

    @property
    def instances(self: PerformanceDataFrame) -> list[str]:
        """Return the instances as a list of strings."""
        return self.dataframe.index.levels[1].tolist()

    @property
    def has_missing_values(self: PerformanceDataFrame) -> bool:
        """Return True if there are any missing values in the dataframe."""
        return self.dataframe.isnull().any().any()

    def verify_objective(self: PerformanceDataFrame,
                         objective: str) -> str:
        """Method to check whether the specified objective is valid.

        Users are allowed to index the dataframe without specifying all dimensions.
        However, when dealing with multiple objectives this is not allowed and this
        is verified here. If we have only one objective this is returned. Otherwise,
        if an objective is specified by the user this is returned.

        Args:
            objective: The objective given by the user
        """
        if objective is None:
            if self.multi_objective:
                raise ValueError("Error: MO Data, but objective not specified.")
            elif self.num_objectives == 1:
                return self.objective_names[0]
            else:
                return PerformanceDataFrame.missing_objective
        return objective

    def verify_run_id(self: PerformanceDataFrame,
                      run_id: int) -> int:
        """Method to check whether run id is valid.

        Similar to verify_objective, but here we check the dimensionality of runs.

        Args:
            run_id: The run as specified by the user.
        """
        if run_id is None:
            if self.n_runs > 1:
                print("Error: Multiple run performance data, but run not specified")
                sys.exit(-1)
            else:
                run_id = self.run_ids[0]
        return run_id

    def verify_indexing(self: PerformanceDataFrame,
                        objective: str,
                        run_id: int) -> tuple[str, int]:
        """Method to check whether data indexing is correct.

        Users are allowed to use the Performance Dataframe without the second and
        fourth dimension (Objective and Run respectively) in the case they only
        have one objective or only do one run. This method adjusts the indexing for
        those cases accordingly.

        Args:
            objective: The given objective name
            run_id: The given run index

        Returns:
            A tuple representing the (possibly adjusted) Objective and Run index.
        """
        objective = self.verify_objective(objective)
        run_id = self.verify_run_id(run_id)
        return objective, run_id
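    # Sketch of the convention the verify_* helpers implement: with a single
    # objective and a single run, callers may omit both dimensions, e.g.
    #   pdf.get_value("SolverA", "inst1.cnf")
    # is adjusted internally to the fully-indexed form (here with the default
    # objective name and first run id):
    #   pdf.get_value("SolverA", "inst1.cnf", objective="UNKNOWN", run=1)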

    # Getters and Setters

    def add_solver(self: PerformanceDataFrame,
                   solver_name: str,
                   initial_value: float | list[float] = None) -> None:
        """Add a new solver to the dataframe. Initializes value to None by default.

        Args:
            solver_name: The name of the solver to be added.
            initial_value: The value assigned for each index of the new solver.
                If not None, must match the index dimension (n_obj * n_inst * n_runs).
        """
        if solver_name in self.dataframe.columns:
            print(f"WARNING: Tried adding already existing solver {solver_name} to "
                  f"Performance DataFrame: {self.csv_filepath}")
            return
        self.dataframe[solver_name] = initial_value

    def add_instance(self: PerformanceDataFrame,
                     instance_name: str,
                     initial_value: float | list[float] = None) -> None:
        """Add an instance to the DataFrame."""
        if self.dataframe.index.size == 0 or self.dataframe.columns.size == 0:
            # First instance or no Solvers yet
            solvers = self.dataframe.columns.to_list()
            instances = self.dataframe.index.levels[1].to_list() + [instance_name]
            midx = pd.MultiIndex.from_product(
                [self.objective_names, instances, self.run_ids],
                names=PerformanceDataFrame.multi_dim_names)
            self.dataframe = pd.DataFrame(initial_value, index=midx, columns=solvers)
        else:
            if instance_name in self.dataframe.index.levels[1]:
                print(f"WARNING: Tried adding already existing instance {instance_name} "
                      f"to Performance DataFrame: {self.csv_filepath}")
                return
            # Create the missing indices; from_product (not from_arrays) so that
            # every Objective/Run combination exists for the new instance
            levels = [self.dataframe.index.levels[0].tolist(),
                      [instance_name],
                      self.dataframe.index.levels[2].tolist()]
            emidx = pd.MultiIndex.from_product(
                levels, names=PerformanceDataFrame.multi_dim_names)
            # Create the missing column values
            edf = pd.DataFrame(PerformanceDataFrame.missing_value,
                               index=emidx,
                               columns=self.dataframe.columns)
            # Concatenate the original and new dataframe together
            self.dataframe = pd.concat([self.dataframe, edf])

    # Can we make this handle a sequence of inputs instead of just 1?
    def set_value(self: PerformanceDataFrame,
                  value: float,
                  solver: str,
                  instance: str,
                  objective: str = None,
                  run: int = None) -> None:
        """Setter method to assign a value to the Dataframe.

        Args:
            value: Float value to be assigned.
            solver: The solver that produced the value.
            instance: The instance that the value was produced on.
            objective: The objective for which the result was produced.
                Optional in case of using single objective.
            run: The run index for which the result was produced.
                Optional in case of doing single run results.
        """
        objective, run = self.verify_indexing(objective, run)
        self.dataframe.at[(objective, instance, run), solver] = value

    def remove_solver(self: PerformanceDataFrame, solver_name: str | list[str]) -> None:
        """Drop one or more solvers from the Dataframe."""
        self.dataframe.drop(solver_name, axis=1, inplace=True)

    def remove_instance(self: PerformanceDataFrame, instance_name: str) -> None:
        """Drop an instance from the Dataframe."""
        self.dataframe.drop(instance_name, axis=0, level="Instance", inplace=True)

    def reset_value(self: PerformanceDataFrame,
                    solver: str,
                    instance: str,
                    objective: str = None,
                    run: int = None) -> None:
        """Reset a value in the dataframe."""
        self.set_value(PerformanceDataFrame.missing_value,
                       solver, instance, objective, run)

    # Can we unify get_value and get_values?
    def get_value(self: PerformanceDataFrame,
                  solver: str,
                  instance: str,
                  objective: str = None,
                  run: int = None) -> float:
        """Index a value of the DataFrame and return it."""
        objective, run = self.verify_indexing(objective, run)
        return float(self.dataframe.loc[(objective, instance, run), solver])

    def get_values(self: PerformanceDataFrame,
                   solver: str,
                   instance: str = None,
                   objective: str = None,
                   run: int = None) -> list[float]:
        """Return a list of solver values."""
        subdf = self.dataframe[solver]
        if objective is not None:
            objective = self.verify_objective(objective)
            subdf = subdf.xs(objective, level=0, drop_level=False)
        if instance is not None:
            subdf = subdf.xs(instance, level=1, drop_level=False)
        if run is not None:
            run = self.verify_run_id(run)
            subdf = subdf.xs(run, level=2, drop_level=False)
        return subdf.to_list()
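    # Round-trip sketch (hypothetical names): with a single objective and run,
    # the optional dimensions may be omitted on both sides:
    #   pdf.set_value(12.3, "SolverA", "inst1.cnf")
    #   assert pdf.get_value("SolverA", "inst1.cnf") == 12.3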

    # Modifiers

    '''def penalise(self: PerformanceDataFrame,
                 threshold: float,
                 penalty: float,
                 objective: str = None,
                 lower_bound: bool = False) -> None:
        """Penalises the DataFrame values if crossing threshold by specified penalty.

        Directly updates the DataFrame object held by this class.

        Args:
            threshold: The threshold of performances to be met
            penalty: The values assigned for out of bounds performances
            objective: The objective that should be penalised.
            lower_bound: Whether the threshold is a lower_bound. By default,
                the threshold is treated as an upper bound for performance values.
        """
        objective = self.verify_objective(objective)
        comparison_op = operator.lt if lower_bound else operator.gt
        self.dataframe[comparison_op(self.dataframe.loc[(objective), :],
                                     threshold)] = penalty'''

    # Calculables

    def mean(self: PerformanceDataFrame,
             objective: str = None,
             solver: str = None,
             instance: str = None) -> float:
        """Return the mean value of a slice of the dataframe."""
        objective = self.verify_objective(objective)
        subset = self.dataframe.xs(objective, level=0)
        if solver is not None:
            subset = subset.xs(solver, axis=1, drop_level=False)
        if instance is not None:
            subset = subset.xs(instance, axis=0, drop_level=False)
        value = subset.astype(float).mean()
        if isinstance(value, pd.Series):
            return value.mean()
        return value

    # TODO: This method should be refactored or not exist
    def get_job_list(self: PerformanceDataFrame, rerun: bool = False) \
            -> list[tuple[str, str]]:
        """Return a list of performance computation jobs there are to be done.

        Get a list of tuple[instance, solver] to run from the performance data
        csv file. If rerun is False (default), get only the tuples that don't have a
        value in the table, else (True) get all the tuples.

        Args:
            rerun: Boolean indicating if we want to rerun all jobs
        """
        df = self.dataframe.stack(future_stack=True)
        if not rerun:
            df = df[df.isnull()]
        df.index = df.index.droplevel(["Objective"])
        return df.index.unique().tolist()

    # TODO: This method should be refactored or not exist
    def remaining_jobs(self: PerformanceDataFrame) -> dict[str, set[str]]:
        """Return a dictionary for empty values per instance and solver combination."""
        remaining_jobs = {}
        null_df = self.dataframe.isnull()
        for row in self.dataframe.index:
            instance = row[1]
            for solver in self.dataframe.columns:
                if null_df.at[row, solver]:
                    if instance not in remaining_jobs:
                        remaining_jobs[instance] = set([solver])
                    else:
                        remaining_jobs[instance].add(solver)
        return remaining_jobs
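    # Sketch of the difference between the two job helpers (hypothetical data):
    #   pdf.get_job_list()   -> [("inst1.cnf", "SolverA"), ...]  flat pairs
    #   pdf.remaining_jobs() -> {"inst1.cnf": {"SolverA", ...}}  grouped per instance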

    def best_instance_performance(
            self: PerformanceDataFrame,
            objective: str | SparkleObjective = None,
            run_id: int = None,
            exclude_solvers: list[str] = None) -> pd.Series:
        """Return the best performance for each instance in the portfolio.

        Args:
            objective: The objective for which we calculate the best performance
            run_id: The run for which we calculate the best performance. If None,
                we consider all runs.
            exclude_solvers: List of solvers to exclude in the calculation.

        Returns:
            The best performance for each instance in the portfolio.
        """
        objective = self.verify_objective(objective)
        if isinstance(objective, str):
            objective = resolve_objective(objective)
        subdf = self.dataframe.xs(objective.name, level=0)
        if exclude_solvers is not None:
            subdf = subdf.drop(exclude_solvers, axis=1)
        if run_id is not None:
            run_id = self.verify_run_id(run_id)
            subdf = subdf.xs(run_id, level=1)
        else:
            # Drop the run level
            subdf = subdf.droplevel(level=1)
        if objective.minimise:
            series = subdf.min(axis=1)
        else:
            series = subdf.max(axis=1)
        # Ensure we always return the best for each run
        series = series.sort_values(ascending=objective.minimise)
        return series.groupby(series.index).first().astype(float)

    def best_performance(
            self: PerformanceDataFrame,
            exclude_solvers: list[str] = [],
            objective: str | SparkleObjective = None) -> float:
        """Return the overall best performance of the portfolio.

        Args:
            exclude_solvers: List of solvers to exclude in the calculation.
                Defaults to none.
            objective: The objective for which we calculate the best performance

        Returns:
            The aggregated best performance of the portfolio over all instances.
        """
        objective = self.verify_objective(objective)
        if isinstance(objective, str):
            objective = resolve_objective(objective)
        instance_best = self.best_instance_performance(
            objective, exclude_solvers=exclude_solvers).to_numpy(dtype=float)
        return objective.instance_aggregator(instance_best)

    def schedule_performance(
            self: PerformanceDataFrame,
            schedule: dict[str, list[tuple[str, float | None]]],
            target_solver: str = None,
            objective: str | SparkleObjective = None) -> list[float]:
        """Return the performance of a selection schedule on the portfolio.

        Args:
            schedule: Compute the best performance according to a selection schedule.
                A dictionary with instances as keys and a list of tuples consisting
                of (solver, max_runtime), or solvers if no runtime prediction should
                be used.
            target_solver: If not None, store the values in this solver column of
                the DataFrame.
            objective: The objective for which we calculate the best performance

        Returns:
            The performance of the schedule over the instances in the dictionary.
        """
        objective = self.verify_objective(objective)
        if isinstance(objective, str):
            objective = resolve_objective(objective)
        select = min if objective.minimise else max
        performances = [0.0 for _ in range(len(schedule.keys()))]
        for ix, instance in enumerate(schedule.keys()):
            for iy, (solver, max_runtime) in enumerate(schedule[instance]):
                performance = self.get_value(solver, instance, objective.name)
                if max_runtime is not None:  # We are dealing with runtime
                    performances[ix] += performance
                    if performance < max_runtime:
                        break  # Solver finished in time
                else:  # Quality, we take the best found performance
                    if iy == 0:  # First solver, set initial value
                        performances[ix] = performance
                        continue
                    performances[ix] = select(performances[ix], performance)
            if target_solver is not None:
                self.set_value(performances[ix], target_solver, instance,
                               objective.name)
        return performances

    def marginal_contribution(
            self: PerformanceDataFrame,
            objective: str | SparkleObjective = None,
            sort: bool = False) -> list[tuple[str, float, float]]:
        """Return the marginal contribution of the solvers on the instances.

        Args:
            objective: The objective for which we calculate the marginal contribution.
            sort: Whether to sort the results afterwards

        Returns:
            A list of tuples per solver: (solver, marginal contribution, best
            performance of the portfolio without the solver).
        """
        output = []
        objective = self.verify_objective(objective)
        if isinstance(objective, str):
            objective = resolve_objective(objective)
        best_performance = self.best_performance(objective=objective)
        for solver in self.solvers:
            # By calculating the best performance excluding this Solver,
            # we can determine its relative impact on the portfolio.
            missing_solver_best = self.best_performance(
                exclude_solvers=[solver],
                objective=objective)
            # Now we need to see how much the portfolio's best performance
            # decreases without this solver.
            marginal_contribution = missing_solver_best / best_performance
            if missing_solver_best == best_performance:
                # No change, no contribution
                marginal_contribution = 0.0
            output.append((solver, marginal_contribution, missing_solver_best))
        if sort:
            output.sort(key=lambda x: x[1], reverse=objective.minimise)
        return output

    def get_solver_ranking(self: PerformanceDataFrame,
                           objective: str | SparkleObjective = None
                           ) -> list[tuple[str, float]]:
        """Return a list with solvers ranked by average performance."""
        objective = self.verify_objective(objective)
        if isinstance(objective, str):
            objective = resolve_objective(objective)
        sub_df = self.dataframe.loc(axis=0)[objective.name, :, :]
        # Reduce Runs Dimension
        sub_df = sub_df.droplevel("Run")
        sub_df = sub_df.groupby(sub_df.index).agg(func=objective.run_aggregator)
        solver_ranking = [(solver, objective.instance_aggregator(
            sub_df[solver].astype(float))) for solver in self.solvers]
        # Sort the list by second value (the performance)
        solver_ranking.sort(key=lambda performance: performance[1],
                            reverse=(not objective.minimise))
        return solver_ranking

    def save_csv(self: PerformanceDataFrame, csv_filepath: Path = None) -> None:
        """Write a CSV to the given path.

        Args:
            csv_filepath: String path to the csv file. Defaults to self.csv_filepath.
        """
        csv_filepath = self.csv_filepath if csv_filepath is None else csv_filepath
        self.dataframe.to_csv(csv_filepath)

    def clean_csv(self: PerformanceDataFrame) -> None:
        """Set all values in Performance Data to None."""
        self.dataframe[:] = PerformanceDataFrame.missing_value
        self.save_csv()

    def copy(self: PerformanceDataFrame,
             csv_filepath: Path = None) -> PerformanceDataFrame:
        """Create a copy of this object.

        Args:
            csv_filepath: The new filepath to use for saving the object to.
                Warning: If the original path is used, it could lead to data loss!
        """
        csv_filepath = self.csv_filepath if csv_filepath is None else csv_filepath
        pd_copy = PerformanceDataFrame(self.csv_filepath, init_df=False)
        pd_copy.dataframe = self.dataframe.copy()
        pd_copy.csv_filepath = csv_filepath
        return pd_copy

    def to_autofolio(self: PerformanceDataFrame,
                     objective: SparkleObjective = None,
                     target: Path = None) -> Path:
        """Port the data to a format acceptable for AutoFolio."""
        if (objective is None and self.multi_objective or self.n_runs > 1):
            print(f"ERROR: Currently no porting available for {self.csv_filepath} "
                  "to Autofolio due to multi objective or number of runs.")
            return
        autofolio_df = self.dataframe.copy()
        if objective is not None:
            autofolio_df = autofolio_df.loc[objective.name]
            autofolio_df.index = autofolio_df.index.droplevel("Run")
        else:
            autofolio_df.index = autofolio_df.index.droplevel(["Objective", "Run"])
        if target is None:
            path = self.csv_filepath.parent / f"autofolio_{self.csv_filepath.name}"
        else:
            path = target / f"autofolio_{self.csv_filepath.name}"
        autofolio_df.to_csv(path)
        return path
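A short usage sketch of the portfolio calculations above (hypothetical solver,
instance and objective names; whether "runtime" resolves as an objective name
depends on sparkle.types.resolve_objective):

    from pathlib import Path
    from sparkle.structures import PerformanceDataFrame

    pdf = PerformanceDataFrame(Path("performance_data.csv"),
                               solvers=["SolverA", "SolverB"],
                               objectives=["runtime"],
                               instances=["inst1.cnf", "inst2.cnf"])
    pdf.set_value(5.0, "SolverA", "inst1.cnf")  # single objective/run: indices optional
    pdf.set_value(9.0, "SolverB", "inst1.cnf")
    ranking = pdf.get_solver_ranking()          # [(solver, aggregated value), ...]
    contrib = pdf.marginal_contribution()       # [(solver, contribution, best without), ...]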
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca___init___py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca___init___py.html
deleted file mode 100644
index 95dc5c7a3..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca___init___py.html
+++ /dev/null
@@ -1,98 +0,0 @@
Coverage for sparkle/CLI/__init__.py: 100% (0 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

1"""Init file for Sparkle commands.""" 

-
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_about_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_about_py.html
deleted file mode 100644
index 0509e3978..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_about_py.html
+++ /dev/null
@@ -1,122 +0,0 @@
Coverage for sparkle/CLI/about.py: 71% (7 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

#!/usr/bin/env python3
"""Sparkle command to show information about Sparkle."""
import argparse
import sparkle


def parser_function() -> argparse.ArgumentParser:
    """Define the command line arguments.

    Returns:
        The argument parser.
    """
    parser = argparse.ArgumentParser()
    return parser


if __name__ == "__main__":
    print("\n".join([
        f"Sparkle ({sparkle.about.description})",
        f"Version: {sparkle.about.version}",
        f"Licence: {sparkle.about.licence}",
        f'Written by {", ".join(sparkle.about.authors[:-1])}'
        f' and {sparkle.about.authors[-1]}',
        f"Contact: {sparkle.about.contact}",
        "For more details see README.md"]))
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_add_feature_extractor_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_add_feature_extractor_py.html
deleted file mode 100644
index ff134701c..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_add_feature_extractor_py.html
+++ /dev/null
@@ -1,186 +0,0 @@
Coverage for sparkle/CLI/add_feature_extractor.py: 0% (52 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

#!/usr/bin/env python3
"""Sparkle command to add a feature extractor to the Sparkle platform."""
import os
import sys
import shutil
import argparse
from pathlib import Path

from sparkle.platform import file_help as sfh
from sparkle.CLI.help import global_variables as gv
from sparkle.structures import FeatureDataFrame
from sparkle.CLI.compute_features import compute_features
from sparkle.CLI.help import logging as sl
from sparkle.platform import CommandName, COMMAND_DEPENDENCIES
from sparkle.CLI.initialise import check_for_initialise
from sparkle.CLI.help import argparse_custom as ac
from sparkle.solver import Extractor


def parser_function() -> argparse.ArgumentParser:
    """Define the command line arguments."""
    # Define command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(*ac.ExtractorPathArgument.names,
                        **ac.ExtractorPathArgument.kwargs)
    group_extractor_run = parser.add_mutually_exclusive_group()
    group_extractor_run.add_argument(*ac.RunExtractorNowArgument.names,
                                     **ac.RunExtractorNowArgument.kwargs)
    parser.add_argument(*ac.NicknameFeatureExtractorArgument.names,
                        **ac.NicknameFeatureExtractorArgument.kwargs)
    return parser


if __name__ == "__main__":
    # Log command call
    sl.log_command(sys.argv)

    parser = parser_function()

    # Process command line arguments
    args = parser.parse_args()

    check_for_initialise(COMMAND_DEPENDENCIES[CommandName.ADD_FEATURE_EXTRACTOR])

    extractor_source = Path(args.extractor_path)
    if not extractor_source.exists():
        print(f'Feature extractor path "{extractor_source}" does not exist!')
        sys.exit(-1)

    nickname_str = args.nickname

    # Start add feature extractor
    extractor_target_path = gv.settings().DEFAULT_extractor_dir / extractor_source.name

    if extractor_target_path.exists():
        print(f"Feature extractor {extractor_source.name} already exists! "
              "Can not add feature extractor.")
        sys.exit(-1)
    extractor_target_path.mkdir()
    shutil.copytree(extractor_source, extractor_target_path, dirs_exist_ok=True)

    # Check execution permissions for wrapper
    extractor_wrapper = extractor_target_path / Extractor.wrapper
    if not extractor_wrapper.is_file() or not os.access(extractor_wrapper, os.X_OK):
        print(f"The file {extractor_wrapper} does not exist or is not executable.")
        sys.exit(-1)

    # Get the extractor features groups and names from the wrapper
    extractor = Extractor(extractor_target_path)
    feature_dataframe = FeatureDataFrame(gv.settings().DEFAULT_feature_data_path)
    feature_dataframe.add_extractor(extractor.name, extractor.features)
    feature_dataframe.save_csv()

    print(f"Adding feature extractor {extractor_target_path.name} done!")

    if nickname_str is not None:
        sfh.add_remove_platform_item(
            extractor_target_path,
            gv.extractor_nickname_list_path,
            gv.file_storage_data_mapping[gv.extractor_nickname_list_path],
            key=nickname_str)

    if args.run_extractor_now:
        print("Start computing features ...")
        compute_features(gv.settings().DEFAULT_feature_data_path, False)

    # Write used settings to file
    gv.settings().write_used_settings()
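The command above is a thin CLI layer over the two library calls it makes; a
minimal programmatic equivalent (a sketch, with hypothetical paths, assuming an
extractor directory that contains a valid wrapper) is:

    from pathlib import Path
    from sparkle.solver import Extractor
    from sparkle.structures import FeatureDataFrame

    extractor = Extractor(Path("Extractors/my_extractor"))  # hypothetical path
    fdf = FeatureDataFrame(Path("feature_data.csv"))        # hypothetical path
    fdf.add_extractor(extractor.name, extractor.features)
    fdf.save_csv()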
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_add_instances_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_add_instances_py.html
deleted file mode 100644
index d07f4aeda..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_add_instances_py.html
+++ /dev/null
@@ -1,205 +0,0 @@
Coverage for sparkle/CLI/add_instances.py: 0% (66 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

#!/usr/bin/env python3
"""Sparkle command to add an instance set to the Sparkle platform."""

import sys
import argparse
from pathlib import Path
import shutil

from sparkle.CLI.help import global_variables as gv
from sparkle.platform import file_help as sfh
from sparkle.platform.settings_objects import SettingState
from sparkle.instance import instance_set
from sparkle.structures import FeatureDataFrame, PerformanceDataFrame
from sparkle.CLI.compute_features import compute_features
from sparkle.CLI.run_solvers import running_solvers_performance_data
from sparkle.CLI.help import logging as sl
from sparkle.platform import CommandName, COMMAND_DEPENDENCIES
from sparkle.CLI.initialise import check_for_initialise
from sparkle.CLI.help import argparse_custom as ac


def parser_function() -> argparse.ArgumentParser:
    """Define the command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(*ac.InstancesPathArgument.names,
                        **ac.InstancesPathArgument.kwargs)
    parser.add_argument(*ac.RunExtractorNowArgument.names,
                        **ac.RunExtractorNowArgument.kwargs)
    parser.add_argument(*ac.RunSolverNowArgument.names,
                        **ac.RunSolverNowArgument.kwargs)
    parser.add_argument(*ac.NicknameInstanceSetArgument.names,
                        **ac.NicknameInstanceSetArgument.kwargs)
    parser.add_argument(*ac.RunOnArgument.names,
                        **ac.RunOnArgument.kwargs)

    return parser


if __name__ == "__main__":
    # Log command call
    sl.log_command(sys.argv)

    # Define command line arguments
    parser = parser_function()

    # Process command line arguments
    args = parser.parse_args()
    instances_source = Path(args.instances_path)
    instances_target = gv.settings().DEFAULT_instance_dir / instances_source.name

    if args.run_on is not None:
        gv.settings().set_run_on(
            args.run_on.value, SettingState.CMD_LINE)
    run_on = gv.settings().get_run_on()

    check_for_initialise(COMMAND_DEPENDENCIES[CommandName.ADD_INSTANCES])

    if not instances_source.exists():
        print(f'Instance set path "{instances_source}" does not exist!')
        sys.exit(-1)
    if instances_target.exists():
        print(f'Instance set "{instances_source.name}" already exists in Sparkle! '
              "Exiting...")
        sys.exit(-1)
    if args.nickname is not None:
        sfh.add_remove_platform_item(instances_target,
                                     gv.instances_nickname_path, key=args.nickname)

    print(f"Start adding all instances in directory {instances_source} ...")
    new_instance_set = instance_set(instances_source)

    instances_target.mkdir(parents=True)
    print("Copying files...")
    for instance_path_source in new_instance_set.all_paths:
        print(f"Copying {instance_path_source} to {instances_target}...", end="\r")
        shutil.copy(instance_path_source, instances_target)
    print("\nCopying done!")
    # Refresh the instance set as the target instance set
    new_instance_set = instance_set(instances_target)

    # Add the instances to the Feature Data / Performance Data
    feature_data = FeatureDataFrame(gv.settings().DEFAULT_feature_data_path)
    # When adding instances, an empty performance DF has no objectives yet
    performance_data = PerformanceDataFrame(
        gv.settings().DEFAULT_performance_data_path,
        objectives=gv.settings().get_general_sparkle_objectives())
    for instance_path in new_instance_set.instance_paths:
        # Construct a name path due to multi-file instances
        feature_data.add_instances(str(instance_path))
        performance_data.add_instance(str(instance_path))
    feature_data.save_csv()
    performance_data.save_csv()

    print(f"\nAdding instance set {new_instance_set.name} done!")

    if args.run_extractor_now:
        print("Start computing features ...")
        compute_features(gv.settings().DEFAULT_feature_data_path, False)

    if args.run_solver_now:
        num_job_in_parallel = gv.settings().get_number_of_jobs_in_parallel()
        running_solvers_performance_data(gv.settings().DEFAULT_performance_data_path,
                                         num_job_in_parallel,
                                         rerun=False, run_on=run_on)
        print("Running solvers...")

    # Write used settings to file
    gv.settings().write_used_settings()
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_add_solver_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_add_solver_py.html
deleted file mode 100644
index d0b79ce92..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_add_solver_py.html
+++ /dev/null
@@ -1,249 +0,0 @@
Coverage for sparkle/CLI/add_solver.py: 0% (82 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

#!/usr/bin/env python3
"""Sparkle command to add a solver to the Sparkle platform."""
import os
import stat
import sys
import argparse
import shutil
from pathlib import Path

import runrunner as rrr

from sparkle.platform import file_help as sfh
from sparkle.CLI.help import global_variables as gv
from sparkle.structures import PerformanceDataFrame
from sparkle.CLI.run_solvers import running_solvers_performance_data
from sparkle.solver import Solver
from sparkle.CLI.help import logging as sl
from sparkle.platform import CommandName, COMMAND_DEPENDENCIES
from sparkle.CLI.initialise import check_for_initialise
from sparkle.CLI.help import argparse_custom as ac
from sparkle.platform.settings_objects import SettingState


def parser_function() -> argparse.ArgumentParser:
    """Define the command line arguments."""
    parser = argparse.ArgumentParser(
        description="Add a solver to the Sparkle platform.",
        epilog="")
    parser.add_argument(*ac.DeterministicArgument.names,
                        **ac.DeterministicArgument.kwargs)
    parser.add_argument(*ac.RunSolverNowArgument.names,
                        **ac.RunSolverNowArgument.kwargs)
    parser.add_argument(*ac.NicknameSolverArgument.names,
                        **ac.NicknameSolverArgument.kwargs)
    parser.add_argument(*ac.SolverPathArgument.names,
                        **ac.SolverPathArgument.kwargs)
    parser.add_argument(*ac.RunOnArgument.names,
                        **ac.RunOnArgument.kwargs)
    parser.add_argument(*ac.SkipChecksArgument.names,
                        **ac.SkipChecksArgument.kwargs)
    return parser


if __name__ == "__main__":
    # Log command call
    sl.log_command(sys.argv)

    # Define command line arguments
    parser = parser_function()

    # Process command line arguments
    args = parser.parse_args()
    solver_source = Path(args.solver_path)
    deterministic = args.deterministic

    check_for_initialise(COMMAND_DEPENDENCIES[CommandName.ADD_SOLVER])

    if not solver_source.exists():
        print(f'Solver path "{solver_source}" does not exist!')
        sys.exit(-1)

    nickname = args.nickname

    if args.run_on is not None:
        gv.settings().set_run_on(
            args.run_on.value, SettingState.CMD_LINE)
    run_on = gv.settings().get_run_on()

    if args.run_checks:
        print("Running checks...")
        solver = Solver(Path(solver_source))
        pcs_file = solver.get_pcs_file()
        if pcs_file is None:
            print("None or multiple .pcs files found. Solver "
                  "is not valid for configuration.")
        else:
            print(f"One pcs file detected: {pcs_file.name}. ", end="")
            if solver.read_pcs_file():
                print("Can read the pcs file.")
            else:
                print("WARNING: Can not read the provided pcs file format.")

        configurator_wrapper_path = solver_source / Solver.wrapper
        if not (configurator_wrapper_path.is_file()
                and os.access(configurator_wrapper_path, os.X_OK)):
            print(f"WARNING: Solver {solver_source.name} does not have a solver wrapper "
                  f"(Missing file {Solver.wrapper}) or is not executable. ")

    # Start add solver
    solver_directory = gv.settings().DEFAULT_solver_dir / solver_source.name
    if not solver_directory.exists():
        solver_directory.mkdir(parents=True, exist_ok=True)
    else:
        print(f"ERROR: Solver {solver_source.name} already exists! "
              "Can not add new solver.")
        sys.exit(-1)
    shutil.copytree(solver_source, solver_directory, dirs_exist_ok=True)
    # Save the deterministic bool in the solver
    with (solver_directory / Solver.meta_data).open("w+") as fout:
        fout.write(str({"deterministic": deterministic}))

    # Add RunSolver executable to the solver
    runsolver_path = gv.settings().DEFAULT_runsolver_exec
    if runsolver_path.name in [file.name for file in solver_directory.iterdir()]:
        print("Warning! RunSolver executable detected in Solver "
              f"{solver_source.name}. This will be replaced with "
              f"Sparkle's version of RunSolver. ({runsolver_path})")
    runsolver_target = solver_directory / runsolver_path.name
    shutil.copyfile(runsolver_path, runsolver_target)
    runsolver_target.chmod(os.stat(runsolver_target).st_mode | stat.S_IEXEC)

    performance_data = PerformanceDataFrame(
        gv.settings().DEFAULT_performance_data_path,
        objectives=gv.settings().get_general_sparkle_objectives())
    performance_data.add_solver(solver_directory)
    performance_data.save_csv()

    print(f"Adding solver {solver_source.name} done!")

    if nickname is not None:
        sfh.add_remove_platform_item(solver_directory,
                                     gv.solver_nickname_list_path, key=nickname)

    if args.run_solver_now:
        num_job_in_parallel = gv.settings().get_number_of_jobs_in_parallel()
        dependency_run_list = [running_solvers_performance_data(
            gv.settings().DEFAULT_performance_data_path, num_job_in_parallel,
            rerun=False, run_on=run_on
        )]

        sbatch_options = gv.settings().get_slurm_extra_options(as_args=True)
        srun_options = ["-N1", "-n1"] + sbatch_options
        run_construct_portfolio_selector = rrr.add_to_queue(
            cmd="sparkle/CLI/construct_portfolio_selector.py",
            name=CommandName.CONSTRUCT_PORTFOLIO_SELECTOR,
            dependencies=dependency_run_list,
            base_dir=sl.caller_log_dir,
            sbatch_options=sbatch_options,
            srun_options=srun_options)

        dependency_run_list.append(run_construct_portfolio_selector)

        run_generate_report = rrr.add_to_queue(
            cmd="sparkle/CLI/generate_report.py",
            name=CommandName.GENERATE_REPORT,
            dependencies=dependency_run_list,
            base_dir=sl.caller_log_dir,
            sbatch_options=sbatch_options,
            srun_options=srun_options)

    # Write used settings to file
    gv.settings().write_used_settings()
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_cleanup_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_cleanup_py.html
deleted file mode 100644
index 169c57ba3..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_cleanup_py.html
+++ /dev/null
@@ -1,144 +0,0 @@
Coverage for sparkle/CLI/cleanup.py: 0% (29 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

#!/usr/bin/env python3
"""Command to remove temporary files not affecting the platform state."""
import sys
import argparse
import shutil

from sparkle.CLI.help import logging as sl
from sparkle.CLI.help import global_variables as gv
from sparkle.CLI.help import argparse_custom as ac
from sparkle.CLI.help import snapshot_help as snh


def parser_function() -> argparse.ArgumentParser:
    """Define the command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(*ac.CleanupArgumentAll.names, **ac.CleanupArgumentAll.kwargs)
    parser.add_argument(*ac.CleanupArgumentRemove.names,
                        **ac.CleanupArgumentRemove.kwargs)
    return parser


def remove_temporary_files() -> None:
    """Remove temporary files. Only removes files not affecting the sparkle state."""
    shutil.rmtree(gv.settings().DEFAULT_log_output, ignore_errors=True)
    gv.settings().DEFAULT_log_output.mkdir()


if __name__ == "__main__":
    # Log command call
    sl.log_command(sys.argv)

    # Define command line arguments
    parser = parser_function()

    # Process command line arguments
    args = parser.parse_args()
    if args.all:
        shutil.rmtree(gv.settings().DEFAULT_output, ignore_errors=True)
        snh.create_working_dirs()
        print("Removed all output files from the platform!")
    elif args.remove:
        snh.remove_current_platform()
        snh.create_working_dirs()
        print("Cleaned platform of all files!")
    else:
        remove_temporary_files()
        print("Cleaned platform of temporary files!")
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_cli_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_cli_py.html
deleted file mode 100644
index cecadf127..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_cli_py.html
+++ /dev/null
@@ -1,138 +0,0 @@
Coverage for sparkle/CLI/cli.py: 81% (27 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

#!/usr/bin/env python3
"""Sparkle CLI entry point."""
import sys
import os
from pathlib import Path


def commands() -> list[str]:
    """Get list of available commands."""
    module_path = Path(__file__).parent.resolve()
    self_name = Path(__file__).name
    return [path.stem for path in module_path.iterdir()
            if path.is_file() and path.suffix == ".py" and path.name != self_name]


def main() -> None:
    """Pass through command to launch CLI commands."""
    module_path = Path(__file__).parent.resolve()
    max_space = max([path.name.count("_") for path in module_path.iterdir()
                     if path.is_file()])
    if len(sys.argv) < 2:
        print("Usage: sparkle <command>")
        sys.exit(1)
    # Support spaces instead of _
    possible_commands = commands()
    for i in range(1, min(max_space, len(sys.argv))):
        command = "_".join(sys.argv[1:i + 1])
        args = sys.argv[i + 1:]
        command_file = module_path / f"{command}.py"
        if command in possible_commands:
            break
    if command_file.is_file():
        if not os.access(command_file, os.X_OK):  # Pip installation changes exec rights
            command_file.chmod(0o755)
        os.system(f"{command_file} {' '.join(args)}")
    else:
        print(f"Does not understand command {command}")


if __name__ == "__main__":
    main()
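A sketch of the dispatch rule main() implements (hypothetical invocation):
successive argv words are joined with underscores until they match a module in
sparkle/CLI, so "sparkle add feature extractor <path>" resolves to the script
add_feature_extractor.py with "<path>" passed through as its argument.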
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_compute_features_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_compute_features_py.html
deleted file mode 100644
index 744d6b711..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_compute_features_py.html
+++ /dev/null
@@ -1,247 +0,0 @@
Coverage for sparkle/CLI/compute_features.py: 0% (77 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

#!/usr/bin/env python3
"""Sparkle command to compute features for instances."""
from __future__ import annotations
import sys
import argparse
from pathlib import Path

import runrunner as rrr
from runrunner.base import Runner, Status, Run

from sparkle.solver import Extractor
from sparkle.CLI.help import global_variables as gv
from sparkle.CLI.help import logging as sl
from sparkle.platform.settings_objects import SettingState
from sparkle.CLI.help import argparse_custom as ac
from sparkle.platform import COMMAND_DEPENDENCIES, CommandName
from sparkle.CLI.initialise import check_for_initialise
from sparkle.structures import FeatureDataFrame


def parser_function() -> argparse.ArgumentParser:
    """Define the command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(*ac.RecomputeFeaturesArgument.names,
                        **ac.RecomputeFeaturesArgument.kwargs)
    parser.add_argument(*ac.SettingsFileArgument.names,
                        **ac.SettingsFileArgument.kwargs)
    parser.add_argument(*ac.RunOnArgument.names,
                        **ac.RunOnArgument.kwargs)

    return parser


def compute_features(
        feature_data: Path | FeatureDataFrame,
        recompute: bool,
        run_on: Runner = Runner.SLURM) -> Run:
    """Compute features for all instance and feature extractor combinations.

    A RunRunner run is submitted for the computation of the features.
    The results are then stored in the CSV file of the given feature data frame.

    Args:
        feature_data: Feature Data Frame to use, or path to read it from.
        recompute: Specifies if features should be recomputed.
        run_on: Runner
            On which computer or cluster environment to run the solvers.
            Available: Runner.LOCAL, Runner.SLURM. Default: Runner.SLURM

    Returns:
        The Slurm job or Local job
    """
    if isinstance(feature_data, Path):
        feature_data = FeatureDataFrame(feature_data)
    if recompute:
        feature_data.reset_dataframe()
    jobs = feature_data.remaining_jobs()

    # If there are no jobs, stop
    if not jobs:
        print("No feature computation jobs to run; stopping execution! To recompute "
              "feature values use the --recompute flag.")
        return None
    cutoff = gv.settings().get_general_extractor_cutoff_time()
    cmd_list = []
    extractors = {}
    features_core = Path(__file__).parent.resolve() / "core" / "compute_features.py"
    # We create a job for each instance/extractor combination
    for instance_path, extractor_name, feature_group in jobs:
        extractor_path = gv.settings().DEFAULT_extractor_dir / extractor_name
        cmd = (f"{features_core} "
               f"--instance {instance_path} "
               f"--extractor {extractor_path} "
               f"--feature-csv {feature_data.csv_filepath} "
               f"--cutoff {cutoff}")
        if extractor_name in extractors:
            extractor = extractors[extractor_name]
        else:
            extractor = Extractor(extractor_path)
            extractors[extractor_name] = extractor
        if extractor.groupwise_computation:
            # Extractor job can be parallelised, thus creating i * e * g jobs
            cmd_list.append(cmd + f" --feature-group {feature_group}")
        else:
            cmd_list.append(cmd)

    print(f"The number of compute jobs: {len(cmd_list)}")

    parallel_jobs = min(len(cmd_list), gv.settings().get_number_of_jobs_in_parallel())
    sbatch_options = gv.settings().get_slurm_extra_options(as_args=True)
    srun_options = ["-N1", "-n1"] + sbatch_options
    run = rrr.add_to_queue(

-

93 runner=run_on, 

-

94 cmd=cmd_list, 

-

95 name=CommandName.COMPUTE_FEATURES, 

-

96 parallel_jobs=parallel_jobs, 

-

97 base_dir=sl.caller_log_dir, 

-

98 sbatch_options=sbatch_options, 

-

99 srun_options=srun_options) 

-

100 

-

101 if run_on == Runner.SLURM: 

-

102 print(f"Running the extractors through Slurm with Job IDs: {run.run_id}") 

-

103 elif run_on == Runner.LOCAL: 

-

104 print("Waiting for the local calculations to finish.") 

-

105 run.wait() 

-

106 for job in run.jobs: 

-

107 jobs_done = sum(j.status == Status.COMPLETED for j in run.jobs) 

-

108 print(f"Executing Progress: {jobs_done} out of {len(run.jobs)}") 

-

109 if jobs_done == len(run.jobs): 

-

110 break 

-

111 job.wait() 

-

112 print("Computing features done!") 

-

113 

-

114 return run 

-

115 

-

116 

-

117if __name__ == "__main__": 

-

118 # Log command call 

-

119 sl.log_command(sys.argv) 

-

120 

-

121 # Define command line arguments 

-

122 parser = parser_function() 

-

123 

-

124 # Process command line arguments 

-

125 args = parser.parse_args() 

-

126 

-

127 if args.run_on is not None: 

-

128 gv.settings().set_run_on( 

-

129 args.run_on.value, SettingState.CMD_LINE) 

-

130 run_on = gv.settings().get_run_on() 

-

131 

-

132 check_for_initialise(COMMAND_DEPENDENCIES[CommandName.COMPUTE_FEATURES]) 

-

133 

-

134 if ac.set_by_user(args, "settings_file"): 

-

135 gv.settings().read_settings_ini( 

-

136 args.settings_file, SettingState.CMD_LINE 

-

137 ) # Do first, so other command line options can override settings from the file 

-

138 

-

139 # Check if there are any feature extractors registered 

-

140 if not any([p.is_dir() for p in gv.settings().DEFAULT_extractor_dir.iterdir()]): 

-

141 print("No feature extractors present! Add feature extractors to Sparkle " 

-

142 "by using the add_feature_extractor command.") 

-

143 sys.exit() 

-

144 

-

145 # Start compute features 

-

146 print("Start computing features ...") 

-

147 compute_features(gv.settings().DEFAULT_feature_data_path, args.recompute, run_on) 

-

148 

-

149 # Write used settings to file 

-

150 gv.settings().write_used_settings() 

-
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_compute_marginal_contribution_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_compute_marginal_contribution_py.html
deleted file mode 100644
index f9d04f89a..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_compute_marginal_contribution_py.html
+++ /dev/null
@@ -1,319 +0,0 @@
[Generated HTML omitted: coverage.py v7.6.1 report for sparkle/CLI/compute_marginal_contribution.py (34% of 94 statements covered, created 2024-09-27 09:26 +0200), consisting of report navigation and a line-annotated copy of the source.]
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_configure_solver_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_configure_solver_py.html
deleted file mode 100644
index 9eab5d806..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_configure_solver_py.html
+++ /dev/null
@@ -1,364 +0,0 @@
[Generated HTML omitted: coverage.py v7.6.1 report for sparkle/CLI/configure_solver.py (0% of 131 statements covered, created 2024-09-27 09:26 +0200), consisting of report navigation and a line-annotated copy of the source.]
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_construct_portfolio_selector_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_construct_portfolio_selector_py.html
deleted file mode 100644
index 9374cd9ae..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_construct_portfolio_selector_py.html
+++ /dev/null
@@ -1,287 +0,0 @@
[Generated HTML omitted: coverage.py v7.6.1 report for sparkle/CLI/construct_portfolio_selector.py (0% of 99 statements covered, created 2024-09-27 09:26 +0200), consisting of report navigation and a line-annotated copy of the source.]
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_generate_report_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_generate_report_py.html
deleted file mode 100644
index 4059a7e2b..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_generate_report_py.html
+++ /dev/null
@@ -1,385 +0,0 @@
[Generated HTML omitted: coverage.py v7.6.1 report for sparkle/CLI/generate_report.py (0% of 149 statements covered, created 2024-09-27 09:26 +0200), consisting of report navigation and a line-annotated copy of the source.]
- - - diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_initialise_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_initialise_py.html deleted file mode 100644 index dbedb2a57..000000000 --- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_initialise_py.html +++ /dev/null @@ -1,256 +0,0 @@ - - - - - Coverage for sparkle/CLI/initialise.py: 21% - - - - - -
-
-

- Coverage for sparkle/CLI/initialise.py: - 21% -

- -

- 77 statements   - - - -

-

- « prev     - ^ index     - » next -       - coverage.py v7.6.1, - created at 2024-09-27 09:26 +0200 -

- -
-
-
-

1#!/usr/bin/env python3 

-

2"""Command to initialise a Sparkle platform.""" 

-

3import subprocess 

-

4import argparse 

-

5import shutil 

-

6import os 

-

7from pathlib import Path 

-

8 

-

9from sparkle.platform import CommandName 

-

10from sparkle.CLI.help.argparse_custom import DownloadExamplesArgument 

-

11from sparkle.CLI.help import snapshot_help as snh 

-

12from sparkle.platform.settings_objects import Settings 

-

13from sparkle.structures import PerformanceDataFrame, FeatureDataFrame 

-

14from sparkle.CLI.help import global_variables as gv 

-

15 

-

16 

-

17def parser_function() -> argparse.ArgumentParser: 

-

18 """Parse CLI arguments for the initialise command.""" 

-

19 parser = argparse.ArgumentParser( 

-

20 description=("Initialise the Sparkle platform in the current directory.")) 

-

21 parser.add_argument(*DownloadExamplesArgument.names, 

-

22 **DownloadExamplesArgument.kwargs) 

-

23 return parser 

-

24 

-

25 

-

26def detect_sparkle_platform_exists(check: callable = all) -> Path: 

-

27 """Return whether a Sparkle platform is currently active. 

-

28 

-

29 The default working directories are checked for existence, for each directory in the 

-

30 CWD. If any of the parents of the CWD yield true, this path is returned 

-

31 

-

32 Args: 

-

33 check: Method to check if the working directory exists. Defaults to all. 

-

34 

-

35 Returns: 

-

36 Path to the Sparkle platform if it exists, None otherwise. 

-

37 """ 

-

38 cwd = Path.cwd() 

-

39 while str(cwd) != cwd.root: 

-

40 if check([(cwd / wd).exists() for wd in gv.settings().DEFAULT_working_dirs]): 

-

41 return cwd 

-

42 cwd = cwd.parent 

-

43 return None 

-

44 

-

45 

-

46def check_for_initialise(requirements: list[CommandName] = None)\ 

-

47 -> None: 

-

48 """Function to check if initialize command was executed and execute it otherwise. 

-

49 

-

50 Args: 

-

51 argv: List of the arguments from the caller. 

-

52 requirements: The requirements that have to be executed before the calling 

-

53 function. 

-

54 """ 

-

55 platform_path = detect_sparkle_platform_exists() 

-

56 if platform_path is None: 

-

57 print("-----------------------------------------------") 

-

58 print("No Sparkle platform found; " 

-

59 + "The platform will now be initialized automatically") 

-

60 if requirements is not None: 

-

61 if len(requirements) == 1: 

-

62 print(f"The command {requirements[0]} has \ 

-

63 to be executed before executing this command.") 

-

64 else: 

-

65 print(f"""The commands {", ".join(requirements)} \ 

-

66 have to be executed before executing this command.""") 

-

67 print("-----------------------------------------------") 

-

68 initialise_sparkle() 

-

69 elif platform_path != Path.cwd(): 

-

70 print(f"[WARNING] Sparkle platform found in {platform_path} instead of " 

-

71 f"{Path.cwd()}. Switching to CWD to {platform_path}") 

-

72 os.chdir(platform_path) 

-

73 

-

74 

-

75def initialise_sparkle(download_examples: bool = False) -> None: 

-

76 """Initialize a new Sparkle platform. 

-

77 

-

78 Args: 

-

79 download_examples: Downloads examples from the Sparkle Github. 

-

80 WARNING: May take a some time to complete due to the large amount of data. 

-

81 """ 

-

82 print("Start initialising Sparkle platform ...") 

-

83 # Check if Settings file exists, otherwise initialise a default one 

-

84 if not Path(Settings.DEFAULT_settings_path).exists(): 

-

85 print("Settings file does not exist, initializing default settings ...") 

-

86 gv.__settings = Settings(Settings.DEFAULT_example_settings_path) 

-

87 gv.settings().write_settings_ini(Path(Settings.DEFAULT_settings_path)) 

-

88 

-

89 gv.settings().DEFAULT_snapshot_dir.mkdir(exist_ok=True) 

-

90 if detect_sparkle_platform_exists(check=any): 

-

91 snh.save_current_sparkle_platform() 

-

92 snh.remove_current_platform() 

-

93 

-

94 print("Current Sparkle platform found!") 

-

95 print("Current Sparkle platform recorded!") 

-

96 

-

97 for working_dir in gv.settings().DEFAULT_working_dirs: 

-

98 working_dir.mkdir(exist_ok=True) 

-

99 

-

100 # Initialise the FeatureDataFrame 

-

101 FeatureDataFrame(gv.settings().DEFAULT_feature_data_path) 

-

102 

-

103 # Initialise the Performance DF with the static dimensions 

-

104 # TODO: We have many sparkle settings values regarding ``number of runs'' 

-

105 # E.g. configurator, parallel portfolio, and here too. Should we unify this more, or 

-

106 # just make another setting that does this specifically for performance data? 

-

107 PerformanceDataFrame(gv.settings().DEFAULT_performance_data_path, 

-

108 objectives=gv.settings().get_general_sparkle_objectives(), 

-

109 n_runs=1) 

-

110 

-

111 # Check that Runsolver is compiled, otherwise, compile 

-

112 if not gv.settings().DEFAULT_runsolver_exec.exists(): 

-

113 print("Runsolver does not exist, trying to compile...") 

-

114 if not (gv.settings().DEFAULT_runsolver_dir / "Makefile").exists(): 

-

115 print("WARNING: Runsolver executable doesn't exist and cannot find makefile." 

-

116 " Please verify the contents of the directory: " 

-

117 f"{gv.settings().DEFAULT_runsolver_dir}") 

-

118 else: 

-

119 compile_runsolver =\ 

-

120 subprocess.run(["make"], 

-

121 cwd=gv.settings().DEFAULT_runsolver_dir, 

-

122 capture_output=True) 

-

123 if compile_runsolver.returncode != 0: 

-

124 print("WARNING: Compilation of Runsolver failed with the following msg:" 

-

125 f"[{compile_runsolver.returncode}] " 

-

126 f"{compile_runsolver.stderr.decode()}") 

-

127 else: 

-

128 print("Runsolver compiled successfully!") 

-

129 # Check that java is available 

-

130 if shutil.which("java") is None: 

-

131 # NOTE: An automatic resolution of Java at this point would be good 

-

132 # However, loading modules from Python has thus far not been successful. 

-

133 print("Could not find Java as an executable!") 

-

134 

-

135 if download_examples: 

-

136 # Download Sparkle examples from Github 

-

137 # NOTE: Needs to be thoroughly tested after Pip install is working 

-

138 print("Downloading examples ...") 

-

139 curl = subprocess.Popen( 

-

140 ["curl", "https://codeload.github.com/ADA-research/Sparkle/tar.gz/main"], 

-

141 stdout=subprocess.PIPE) 

-

142 outpath = Path("outfile.tar.gz") 

-

143 with curl.stdout, outpath.open("wb") as outfile: 

-

144 tar = subprocess.Popen(["tar", "-xz", "--strip=1", "Sparkle-main/Examples"], 

-

145 stdin=curl.stdout, stdout=outfile) 

-

146 curl.wait() # Wait for the download to complete 

-

147 tar.wait() # Wait for the extraction to complete 

-

148 outpath.unlink(missing_ok=True) 

-

149 

-

150 print("New Sparkle platform initialised!") 

-

151 

-

152 

-

153if __name__ == "__main__": 

-

154 # Define command line arguments 

-

155 parser = parser_function() 

-

156 # Process command line arguments 

-

157 args = parser.parse_args() 

-

158 download = False if args.download_examples is None else args.download_examples 

-

159 initialise_sparkle(download_examples=download) 

-
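The platform detection used by initialise.py above is a plain upward directory walk. A minimal self-contained sketch of the same pattern follows; the directory names here are hypothetical stand-ins for gv.settings().DEFAULT_working_dirs, and check=all versus check=any mirrors the distinction between a complete platform and a partial one worth snapshotting.

    from pathlib import Path

    WORKING_DIRS = ["Instances", "Solvers", "Output"]  # hypothetical stand-ins

    def find_platform_root(check=all) -> Path | None:
        """Walk from the CWD up to the filesystem root and return the first
        directory for which `check` holds over the expected working dirs."""
        cwd = Path.cwd()
        while str(cwd) != cwd.root:
            if check((cwd / wd).exists() for wd in WORKING_DIRS):
                return cwd
            cwd = cwd.parent
        return None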
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_load_snapshot_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_load_snapshot_py.html
deleted file mode 100644
index c84f7648a..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_load_snapshot_py.html
+++ /dev/null
@@ -1,125 +0,0 @@
-Coverage for sparkle/CLI/load_snapshot.py: 0% (15 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

1#!/usr/bin/env python3 

-

2"""Sparkle command to load a Sparkle platform from a .zip file.""" 

-

3import sys 

-

4import argparse 

-

5from pathlib import Path 

-

6 

-

7from sparkle.CLI.help import snapshot_help 

-

8from sparkle.CLI.help import logging as sl 

-

9from sparkle.CLI.help import argparse_custom as ac 

-

10 

-

11 

-

12def parser_function() -> argparse.ArgumentParser: 

-

13 """Define the command line arguments.""" 

-

14 parser = argparse.ArgumentParser() 

-

15 parser.add_argument(*ac.SnapshotArgument.names, 

-

16 **ac.SnapshotArgument.kwargs) 

-

17 return parser 

-

18 

-

19 

-

20if __name__ == "__main__": 

-

21 # Log command call 

-

22 sl.log_command(sys.argv) 

-

23 

-

24 # Define command line arguments 

-

25 parser = parser_function() 

-

26 # Process command line arguments 

-

27 args = parser.parse_args() 

-

28 snapshot_help.load_snapshot(Path(args.snapshot_file_path)) 

-
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_remove_feature_extractor_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_remove_feature_extractor_py.html
deleted file mode 100644
index a4232cd6c..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_remove_feature_extractor_py.html
+++ /dev/null
@@ -1,164 +0,0 @@
-Coverage for sparkle/CLI/remove_feature_extractor.py: 0% (37 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

1#!/usr/bin/env python3 

-

2"""Sparkle command to remove a feature extractor from the Sparkle platform.""" 

-

3 

-

4import sys 

-

5import argparse 

-

6import shutil 

-

7 

-

8from sparkle.platform import file_help as sfh 

-

9from sparkle.CLI.help import global_variables as gv 

-

10from sparkle.structures import FeatureDataFrame 

-

11from sparkle.CLI.help import logging as sl 

-

12from sparkle.platform import CommandName, COMMAND_DEPENDENCIES 

-

13from sparkle.CLI.initialise import check_for_initialise 

-

14from sparkle.CLI.help import argparse_custom as ac 

-

15from sparkle.CLI.help.nicknames import resolve_object_name 

-

16from sparkle.solver import Extractor 

-

17 

-

18 

-

19def parser_function() -> argparse.ArgumentParser: 

-

20 """Define the command line arguments.""" 

-

21 parser = argparse.ArgumentParser() 

-

22 parser.add_argument(*ac.ExtractorPathArgument.names, 

-

23 **ac.ExtractorPathArgument.kwargs) 

-

24 return parser 

-

25 

-

26 

-

27if __name__ == "__main__": 

-

28 # Log command call 

-

29 sl.log_command(sys.argv) 

-

30 

-

31 # Define command line arguments 

-

32 parser = parser_function() 

-

33 

-

34 # Process command line arguments 

-

35 args = parser.parse_args() 

-

36 extractor_nicknames = gv.file_storage_data_mapping[gv.extractor_nickname_list_path] 

-

37 extractor = resolve_object_name( 

-

38 args.extractor_path, 

-

39 extractor_nicknames, 

-

40 gv.settings().DEFAULT_extractor_dir, 

-

41 class_name=Extractor) 

-

42 

-

43 check_for_initialise(COMMAND_DEPENDENCIES[CommandName.REMOVE_FEATURE_EXTRACTOR]) 

-

44 

-

45 if extractor is None: 

-

46 print(f'Feature extractor path "{args.extractor_path}" does not exist!') 

-

47 sys.exit(-1) 

-

48 

-

49 print(f"Starting removing feature extractor {extractor.name} ...") 

-

50 

-

51 for key in extractor_nicknames: 

-

52 if extractor_nicknames[key] == extractor.directory: 

-

53 sfh.add_remove_platform_item( 

-

54 None, 

-

55 gv.extractor_nickname_list_path, 

-

56 extractor_nicknames, 

-

57 key=key, 

-

58 remove=True) 

-

59 break 

-

60 

-

61 if gv.settings().DEFAULT_feature_data_path.exists(): 

-

62 feature_data = FeatureDataFrame(gv.settings().DEFAULT_feature_data_path) 

-

63 feature_data.remove_extractor(extractor.name) 

-

64 feature_data.save_csv() 

-

65 shutil.rmtree(extractor.directory) 

-

66 

-

67 print(f"Removing feature extractor {extractor.name} done!") 

-
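The nickname cleanup in remove_feature_extractor.py above searches the name-to-path mapping by value and then deletes the matching key. Reduced to its core with toy data, the reverse lookup looks like this:

    nicknames = {"ext1": "Extractors/ext1", "fast": "Extractors/ext2"}  # toy data
    target = "Extractors/ext2"  # directory of the extractor being removed

    # Find the key whose value matches the directory, then drop the entry.
    key = next((k for k, v in nicknames.items() if v == target), None)
    if key is not None:
        del nicknames[key]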
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_remove_instances_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_remove_instances_py.html
deleted file mode 100644
index 16f806fb1..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_remove_instances_py.html
+++ /dev/null
@@ -1,168 +0,0 @@
-Coverage for sparkle/CLI/remove_instances.py: 0% (42 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

1#!/usr/bin/env python3 

-

2"""Sparkle command to remove an instance set from the Sparkle platform.""" 

-

3 

-

4import sys 

-

5import argparse 

-

6import shutil 

-

7 

-

8from sparkle.CLI.help import global_variables as gv 

-

9from sparkle.platform import file_help as sfh 

-

10from sparkle.structures import FeatureDataFrame, PerformanceDataFrame 

-

11from sparkle.instance import instance_set 

-

12from sparkle.CLI.help import logging as sl 

-

13from sparkle.platform import CommandName, COMMAND_DEPENDENCIES 

-

14from sparkle.CLI.initialise import check_for_initialise 

-

15from sparkle.CLI.help import argparse_custom as ac 

-

16from sparkle.CLI.help.nicknames import resolve_object_name 

-

17 

-

18 

-

19def parser_function() -> argparse.ArgumentParser: 

-

20 """Define the command line arguments.""" 

-

21 parser = argparse.ArgumentParser() 

-

22 parser.add_argument(*ac.InstancesPathRemoveArgument.names, 

-

23 **ac.InstancesPathRemoveArgument.kwargs) 

-

24 return parser 

-

25 

-

26 

-

27if __name__ == "__main__": 

-

28 # Log command call 

-

29 sl.log_command(sys.argv) 

-

30 

-

31 # Define command line arguments 

-

32 parser = parser_function() 

-

33 

-

34 # Process command line arguments 

-

35 args = parser.parse_args() 

-

36 instances_path = resolve_object_name(args.instances_path, 

-

37 target_dir=gv.settings().DEFAULT_instance_dir) 

-

38 

-

39 check_for_initialise(COMMAND_DEPENDENCIES[CommandName.REMOVE_INSTANCES]) 

-

40 

-

41 if instances_path is None or not instances_path.exists() or not\ 

-

42 instances_path.is_dir(): 

-

43 print(f'Could not resolve instances path arg "{args.instances_path}"!') 

-

44 print("Check that the path or nickname is spelled correctly.") 

-

45 sys.exit(-1) 

-

46 

-

47 print(f"Start removing all instances in directory {instances_path} ...") 

-

48 old_instance_set = instance_set(instances_path) 

-

49 # Remove from feature data and performance data 

-

50 feature_data = FeatureDataFrame(gv.settings().DEFAULT_feature_data_path) 

-

51 performance_data = PerformanceDataFrame(gv.settings().DEFAULT_performance_data_path) 

-

52 for instance in old_instance_set.instance_paths: 

-

53 feature_data.remove_instances(str(instance)) 

-

54 performance_data.remove_instance(str(instance)) 

-

55 

-

56 feature_data.save_csv() 

-

57 performance_data.save_csv() 

-

58 

-

59 # Remove nickname, if it exists 

-

60 instances_nicknames = gv.file_storage_data_mapping[gv.instances_nickname_path] 

-

61 for key in instances_nicknames: 

-

62 if instances_nicknames[key] == instances_path: 

-

63 sfh.add_remove_platform_item(instances_path, 

-

64 gv.instances_nickname_path, 

-

65 key=key, remove=True) 

-

66 break 

-

67 

-

68 # Remove the directory and all its files 

-

69 shutil.rmtree(instances_path) 

-

70 

-

71 print(f"Removing instances in directory {instances_path} done!") 

-
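remove_instances.py keeps the feature and performance tables consistent by dropping every removed instance from both and saving the result. These frames are pandas-backed (remove_solver.py below accesses performance_data.dataframe directly), so the underlying operation is a row drop; a toy sketch, with hypothetical instance paths:

    import pandas as pd

    feature_data = pd.DataFrame({"f1": [0.1, 0.2]},
                                index=["Instances/a.cnf", "Instances/b.cnf"])
    removed = ["Instances/a.cnf"]
    feature_data = feature_data.drop(index=removed)  # mirrored in performance data
    feature_data.to_csv("feature_data.csv")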
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_remove_solver_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_remove_solver_py.html
deleted file mode 100644
index aa248c8bf..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_remove_solver_py.html
+++ /dev/null
@@ -1,169 +0,0 @@
-Coverage for sparkle/CLI/remove_solver.py: 0% (43 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

1#!/usr/bin/env python3 

-

2"""Sparkle command to remove a solver from the Sparkle platform.""" 

-

3 

-

4import sys 

-

5import argparse 

-

6import shutil 

-

7 

-

8from sparkle.platform import file_help as sfh 

-

9from sparkle.CLI.help import global_variables as gv 

-

10from sparkle.structures import PerformanceDataFrame 

-

11from sparkle.CLI.help import logging as sl 

-

12from sparkle.platform import CommandName, COMMAND_DEPENDENCIES 

-

13from sparkle.CLI.initialise import check_for_initialise 

-

14from sparkle.CLI.help import argparse_custom as ac 

-

15from sparkle.CLI.help.nicknames import resolve_object_name 

-

16 

-

17 

-

18def parser_function() -> argparse.ArgumentParser: 

-

19 """Define the command line arguments.""" 

-

20 parser = argparse.ArgumentParser() 

-

21 parser.add_argument(*ac.SolverRemoveArgument.names, 

-

22 **ac.SolverRemoveArgument.kwargs) 

-

23 return parser 

-

24 

-

25 

-

26if __name__ == "__main__": 

-

27 # Log command call 

-

28 sl.log_command(sys.argv) 

-

29 

-

30 # Define command line arguments 

-

31 parser = parser_function() 

-

32 

-

33 # Process command line arguments 

-

34 args = parser.parse_args() 

-

35 solver_path = resolve_object_name(args.solver, 

-

36 gv.solver_nickname_mapping, 

-

37 gv.settings().DEFAULT_solver_dir) 

-

38 

-

39 check_for_initialise(COMMAND_DEPENDENCIES[CommandName.REMOVE_SOLVER]) 

-

40 if solver_path is None: 

-

41 print(f'Could not resolve Solver path/name "{args.solver}"!') 

-

42 sys.exit(-1) 

-

43 if not solver_path.parent == gv.settings().DEFAULT_solver_dir: 

-

44 # Only allow the user to specify solvers in the Sparkle solver dir 

-

45 print("Specified Solver is not in the platform directory! Exiting.") 

-

46 sys.exit(-1) 

-

47 

-

48 print(f"Start removing solver {solver_path.name} ...") 

-

49 

-

50 solver_nickname_mapping = gv.solver_nickname_mapping 

-

51 if len(solver_nickname_mapping): 

-

52 nickname = None 

-

53 for key in solver_nickname_mapping: 

-

54 if solver_nickname_mapping[key] == str(solver_path): 

-

55 nickname = key 

-

56 break 

-

57 sfh.add_remove_platform_item( 

-

58 nickname, 

-

59 gv.solver_nickname_list_path, 

-

60 gv.file_storage_data_mapping[gv.solver_nickname_list_path], 

-

61 remove=True) 

-

62 

-

63 if gv.settings().DEFAULT_performance_data_path.exists(): 

-

64 performance_data = PerformanceDataFrame( 

-

65 gv.settings().DEFAULT_performance_data_path) 

-

66 if solver_path.name in performance_data.dataframe.columns: 

-

67 performance_data.remove_solver(solver_path.name) 

-

68 performance_data.save_csv() 

-

69 

-

70 shutil.rmtree(solver_path) 

-

71 

-

72 print(f"Removing solver {solver_path.name} done!") 

-
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_ablation_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_ablation_py.html
deleted file mode 100644
index 1e07df545..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_ablation_py.html
+++ /dev/null
@@ -1,246 +0,0 @@
-Coverage for sparkle/CLI/run_ablation.py: 0% (76 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

1#!/usr/bin/env python3 

-

2"""Sparkle command to execute ablation analysis.""" 

-

3 

-

4import argparse 

-

5import sys 

-

6from pathlib import PurePath 

-

7 

-

8from runrunner.base import Runner 

-

9 

-

10from sparkle.solver.ablation import AblationScenario 

-

11from sparkle.CLI.help import global_variables as gv 

-

12from sparkle.CLI.help import logging as sl 

-

13from sparkle.platform.settings_objects import Settings, SettingState 

-

14from sparkle.solver import Solver 

-

15from sparkle.instance import instance_set 

-

16from sparkle.CLI.help import argparse_custom as ac 

-

17from sparkle.platform import CommandName, COMMAND_DEPENDENCIES 

-

18from sparkle.CLI.initialise import check_for_initialise 

-

19from sparkle.CLI.help.nicknames import resolve_object_name 

-

20 

-

21 

-

22def parser_function() -> argparse.ArgumentParser: 

-

23 """Define the command line arguments.""" 

-

24 parser = argparse.ArgumentParser( 

-

25 description=("Runs parameter importance between the default and configured " 

-

26 "parameters with ablation. This command requires a finished " 

-

27 "configuration for the solver instance pair."), 

-

28 epilog=("Note that if no test instance set is given, the validation is performed" 

-

29 " on the training set.")) 

-

30 parser.add_argument("--solver", required=False, type=str, help="path to solver") 

-

31 parser.add_argument(*ac.InstanceSetTrainAblationArgument.names, 

-

32 **ac.InstanceSetTrainAblationArgument.kwargs) 

-

33 parser.add_argument(*ac.InstanceSetTestAblationArgument.names, 

-

34 **ac.InstanceSetTestAblationArgument.kwargs) 

-

35 parser.add_argument(*ac.SparkleObjectiveArgument.names, 

-

36 **ac.SparkleObjectiveArgument.kwargs) 

-

37 parser.add_argument(*ac.TargetCutOffTimeAblationArgument.names, 

-

38 **ac.TargetCutOffTimeAblationArgument.kwargs) 

-

39 parser.add_argument(*ac.WallClockTimeArgument.names, 

-

40 **ac.WallClockTimeArgument.kwargs) 

-

41 parser.add_argument(*ac.NumberOfRunsAblationArgument.names, 

-

42 **ac.NumberOfRunsAblationArgument.kwargs) 

-

43 parser.add_argument(*ac.RacingArgument.names, 

-

44 **ac.RacingArgument.kwargs) 

-

45 parser.add_argument(*ac.SettingsFileArgument.names, 

-

46 **ac.SettingsFileArgument.kwargs) 

-

47 parser.add_argument(*ac.RunOnArgument.names, 

-

48 **ac.RunOnArgument.kwargs) 

-

49 parser.set_defaults(ablation_settings_help=False) 

-

50 return parser 

-

51 

-

52 

-

53if __name__ == "__main__": 

-

54 sl.log_command(sys.argv) 

-

55 

-

56 # Define command line arguments 

-

57 parser = parser_function() 

-

58 

-

59 # Process command line arguments 

-

60 args = parser.parse_args() 

-

61 

-

62 solver_path = resolve_object_name(args.solver, 

-

63 gv.solver_nickname_mapping, 

-

64 gv.settings().DEFAULT_solver_dir) 

-

65 solver = Solver(solver_path) 

-

66 instance_set_train = resolve_object_name( 

-

67 args.instance_set_train, 

-

68 gv.file_storage_data_mapping[gv.instances_nickname_path], 

-

69 gv.settings().DEFAULT_instance_dir, instance_set) 

-

70 instance_set_test = resolve_object_name( 

-

71 args.instance_set_test, 

-

72 gv.file_storage_data_mapping[gv.instances_nickname_path], 

-

73 gv.settings().DEFAULT_instance_dir, instance_set) 

-

74 

-

75 if args.run_on is not None: 

-

76 gv.settings().set_run_on( 

-

77 args.run_on.value, SettingState.CMD_LINE) 

-

78 run_on = gv.settings().get_run_on() 

-

79 

-

80 check_for_initialise(COMMAND_DEPENDENCIES[CommandName.RUN_ABLATION]) 

-

81 

-

82 if ac.set_by_user(args, "settings_file"): 

-

83 # Do first, so other command line options can override settings from the file 

-

84 gv.settings().read_settings_ini( 

-

85 args.settings_file, SettingState.CMD_LINE 

-

86 ) 

-

87 if ac.set_by_user(args, "objectives"): 

-

88 gv.settings().set_general_sparkle_objectives( 

-

89 args.objectives, SettingState.CMD_LINE 

-

90 ) 

-

91 if ac.set_by_user(args, "target_cutoff_time"): 

-

92 gv.settings().set_general_target_cutoff_time( 

-

93 args.target_cutoff_time, SettingState.CMD_LINE 

-

94 ) 

-

95 if ac.set_by_user(args, "wallclock_time"): 

-

96 gv.settings().set_config_wallclock_time( 

-

97 args.wallclock_time, SettingState.CMD_LINE 

-

98 ) 

-

99 if ac.set_by_user(args, "number_of_runs"): 

-

100 gv.settings().set_config_number_of_runs( 

-

101 args.number_of_runs, SettingState.CMD_LINE 

-

102 ) 

-

103 if ac.set_by_user(args, "racing"): 

-

104 gv.settings().set_ablation_racing_flag( 

-

105 args.racing, SettingState.CMD_LINE 

-

106 ) 

-

107 

-

108 # Compare current settings to latest.ini 

-

109 prev_settings = Settings(PurePath("Settings/latest.ini")) 

-

110 Settings.check_settings_changes(gv.settings(), prev_settings) 

-

111 

-

112 instance_set_train_name = instance_set_train.name 

-

113 configurator = gv.settings().get_general_sparkle_configurator() 

-

114 configurator.set_scenario_dirs(solver, instance_set_train) 

-

115 if instance_set_test is not None: 

-

116 instance_set_test_name = instance_set_test.name 

-

117 else: 

-

118 instance_set_test = instance_set_train 

-

119 instance_set_test_name = instance_set_train.name 

-

120 

-

121 if not configurator.scenario.result_directory.is_dir(): 

-

122 print("Error: No configuration results found for the given solver and training" 

-

123 " instance set. Ablation needs to have a target configuration.") 

-

124 print("Please run configuration first") 

-

125 sys.exit(-1) 

-

126 else: 

-

127 print("Configuration exists!") 

-

128 

-

129 ablation_scenario = AblationScenario( 

-

130 solver, instance_set_train, instance_set_test, 

-

131 gv.settings().DEFAULT_ablation_output, 

-

132 gv.settings().DEFAULT_ablation_exec, 

-

133 gv.settings().DEFAULT_ablation_validation_exec, override_dirs=True) 

-

134 

-

135 # Instances 

-

136 ablation_scenario.create_instance_file() 

-

137 ablation_scenario.create_instance_file(test=True) 

-

138 

-

139 # Configurations 

-

140 ablation_scenario.create_configuration_file() 

-

141 print("Submiting ablation run...") 

-

142 runs = ablation_scenario.submit_ablation(run_on=run_on) 

-

143 

-

144 if run_on == Runner.LOCAL: 

-

145 print("Ablation analysis finished!") 

-

146 else: 

-

147 job_id_str = ",".join([run.run_id for run in runs]) 

-

148 print(f"Ablation analysis running. Waiting for Slurm job(s) with id(s): " 

-

149 f"{job_id_str}") 

-
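run_ablation.py reads the settings file before applying the other options ("Do first, so other command line options can override settings from the file"). The precedence rule, isolated in a small sketch with hypothetical option names:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--settings-file")
    parser.add_argument("--number-of-runs", type=int)
    args = parser.parse_args(["--settings-file", "s.ini", "--number-of-runs", "5"])

    settings = {"number_of_runs": 25}        # pretend this was read from s.ini
    if args.number_of_runs is not None:      # an explicit CLI value wins
        settings["number_of_runs"] = args.number_of_runs
    assert settings["number_of_runs"] == 5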
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_configured_solver_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_configured_solver_py.html
deleted file mode 100644
index f9eea9160..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_configured_solver_py.html
+++ /dev/null
@@ -1,211 +0,0 @@
-Coverage for sparkle/CLI/run_configured_solver.py: 0% (59 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

1#!/usr/bin/env python3 

-

2"""Sparkle command to execute a configured solver.""" 

-

3 

-

4import sys 

-

5import argparse 

-

6from pathlib import PurePath 

-

7 

-

8from runrunner.base import Runner 

-

9 

-

10from sparkle.CLI.help import global_variables as gv 

-

11from sparkle.CLI.help import logging as sl 

-

12from sparkle.platform.settings_objects import Settings, SettingState 

-

13from sparkle.instance import instance_set 

-

14from sparkle.platform import CommandName, COMMAND_DEPENDENCIES 

-

15from sparkle.CLI.initialise import check_for_initialise 

-

16from sparkle.CLI.help import argparse_custom as ac 

-

17from sparkle.CLI.help.nicknames import resolve_object_name 

-

18 

-

19 

-

20def parser_function() -> argparse.ArgumentParser: 

-

21 """Define the command line arguments.""" 

-

22 parser = argparse.ArgumentParser() 

-

23 parser.add_argument(*ac.InstancePathPositional.names, 

-

24 **ac.InstancePathPositional.kwargs) 

-

25 parser.add_argument(*ac.SettingsFileArgument.names, 

-

26 **ac.SettingsFileArgument.kwargs) 

-

27 parser.add_argument(*ac.SparkleObjectiveArgument.names, 

-

28 **ac.SparkleObjectiveArgument.kwargs) 

-

29 parser.add_argument(*ac.RunOnArgument.names, 

-

30 **ac.RunOnArgument.kwargs) 

-

31 return parser 

-

32 

-

33 

-

34if __name__ == "__main__": 

-

35 # Log command call 

-

36 sl.log_command(sys.argv) 

-

37 

-

38 # Define command line arguments 

-

39 parser = parser_function() 

-

40 

-

41 # Process command line arguments 

-

42 args = parser.parse_args() 

-

43 

-

44 # Try to resolve the instance path (Dir or list instance paths) 

-

45 data_set = resolve_object_name( 

-

46 args.instance_path, 

-

47 gv.file_storage_data_mapping[gv.instances_nickname_path], 

-

48 gv.settings().DEFAULT_instance_dir, instance_set) 

-

49 if data_set is None: 

-

50 print(f"Could not resolve instance (set): {args.instance_path}! Exiting...") 

-

51 sys.exit(-1) 

-

52 

-

53 if args.run_on is not None: 

-

54 gv.settings().set_run_on( 

-

55 args.run_on.value, SettingState.CMD_LINE) 

-

56 run_on = gv.settings().get_run_on() 

-

57 

-

58 check_for_initialise(COMMAND_DEPENDENCIES[CommandName.RUN_CONFIGURED_SOLVER]) 

-

59 

-

60 if args.settings_file is not None: 

-

61 # Do first, so other command line options can override settings from the file 

-

62 gv.settings().read_settings_ini(args.settings_file, SettingState.CMD_LINE) 

-

63 if args.objectives is not None: 

-

64 gv.settings().set_general_sparkle_objectives( 

-

65 args.objectives, SettingState.CMD_LINE 

-

66 ) 

-

67 

-

68 # Compare current settings to latest.ini 

-

69 prev_settings = Settings(PurePath("Settings/latest.ini")) 

-

70 Settings.check_settings_changes(gv.settings(), prev_settings) 

-

71 

-

72 # Get the name of the configured solver and the training set 

-

73 solver = gv.latest_scenario().get_config_solver() 

-

74 train_set = gv.latest_scenario().get_config_instance_set_train() 

-

75 custom_cutoff = gv.settings().get_general_target_cutoff_time() 

-

76 if solver is None or train_set is None: 

-

77 # Print error and stop execution 

-

78 print("ERROR: No configured solver found! Stopping execution.") 

-

79 sys.exit(-1) 

-

80 # Get optimised configuration 

-

81 configurator = gv.settings().get_general_sparkle_configurator() 

-

82 objectives = gv.settings().get_general_sparkle_objectives() 

-

83 _, config_str = configurator.get_optimal_configuration( 

-

84 solver, train_set, objective=objectives[0]) 

-

85 config = solver.config_str_to_dict(config_str) 

-

86 # Call the configured solver 

-

87 sbatch_options = gv.settings().get_slurm_extra_options(as_args=True) 

-

88 if run_on == Runner.LOCAL: 

-

89 print(f"Start running solver on {data_set.size} instances...") 

-

90 run = solver.run(instance=data_set, 

-

91 objectives=objectives, 

-

92 seed=gv.get_seed(), 

-

93 cutoff_time=custom_cutoff, 

-

94 configuration=config, 

-

95 run_on=run_on, 

-

96 commandname=CommandName.RUN_CONFIGURED_SOLVER, 

-

97 sbatch_options=sbatch_options, 

-

98 cwd=sl.caller_log_dir) 

-

99 

-

100 # Print result 

-

101 if run_on == Runner.SLURM: 

-

102 print(f"Running configured solver. Waiting for Slurm " 

-

103 f"job(s) with id(s): {run.run_id}") 

-

104 else: 

-

105 if isinstance(run, dict): 

-

106 run = [run] 

-

107 for i, solver_output in enumerate(run): 

-

108 print(f"Execution of {solver.name} on instance " 

-

109 f"{data_set.instance_names[i]} completed with status " 

-

110 f"{solver_output['status']} in {solver_output['cpu_time']} seconds.") 

-

111 print("Running configured solver done!") 

-

112 

-

113 # Write used settings to file 

-

114 gv.settings().write_used_settings() 

-
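run_configured_solver.py turns the optimal configuration string into a mapping via solver.config_str_to_dict. The exact format is defined by the Solver class; assuming the common "-name value" convention, a plausible sketch of that conversion is:

    def config_str_to_dict(config_str: str) -> dict[str, str]:
        # "-init '3' -luby '1'" -> {"init": "3", "luby": "1"}
        tokens = config_str.split()
        return {tokens[i].lstrip("-"): tokens[i + 1].strip("'")
                for i in range(0, len(tokens) - 1, 2)}

    assert config_str_to_dict("-init '3' -luby '1'") == {"init": "3", "luby": "1"}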
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_parallel_portfolio_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_parallel_portfolio_py.html
deleted file mode 100644
index d0a64ac5b..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_parallel_portfolio_py.html
+++ /dev/null
@@ -1,431 +0,0 @@
-Coverage for sparkle/CLI/run_parallel_portfolio.py: 0% (207 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

1#!/usr/bin/env python3 

-

2# -*- coding: UTF-8 -*- 

-

3"""Sparkle command to execute a parallel algorithm portfolio.""" 

-

4 

-

5import sys 

-

6import argparse 

-

7import random 

-

8import time 

-

9import shutil 

-

10import csv 

-

11import itertools 

-

12from pathlib import Path, PurePath 

-

13 

-

14from tqdm import tqdm 

-

15 

-

16import runrunner as rrr 

-

17from runrunner.base import Runner 

-

18from runrunner.slurm import Status 

-

19 

-

20from sparkle.CLI.help.reporting_scenario import Scenario 

-

21from sparkle.CLI.help import logging as sl 

-

22from sparkle.CLI.help import global_variables as gv 

-

23from sparkle.platform import CommandName, COMMAND_DEPENDENCIES 

-

24from sparkle.CLI.initialise import check_for_initialise 

-

25from sparkle.CLI.help import argparse_custom as ac 

-

26from sparkle.CLI.help.nicknames import resolve_object_name 

-

27from sparkle.platform.settings_objects import Settings, SettingState 

-

28from sparkle.solver import Solver 

-

29from sparkle.instance import instance_set, InstanceSet 

-

30from sparkle.types import SolverStatus, resolve_objective, UseTime 

-

31 

-

32 

-

33def run_parallel_portfolio(instances_set: InstanceSet, 

-

34 portfolio_path: Path, 

-

35 solvers: list[Solver], 

-

36 run_on: Runner = Runner.SLURM) -> None: 

-

37 """Run the parallel algorithm portfolio. 

-

38 

-

39 Args: 

-

40 instances_set: Set of instances to run on. 

-

41 portfolio_path: Path to the parallel portfolio. 

-

42 solvers: List of solvers to run on the instances. 

-

43 run_on: Currently only supports Slurm. 

-

44 """ 

-

45 num_solvers, num_instances = len(solvers), len(instances_set._instance_paths) 

-

46 seeds_per_solver = gv.settings().get_parallel_portfolio_number_of_seeds_per_solver() 

-

47 num_jobs = num_solvers * num_instances * seeds_per_solver 

-

48 parallel_jobs = min(gv.settings().get_number_of_jobs_in_parallel(), num_jobs) 

-

49 if parallel_jobs < num_jobs: 

-

50 print("WARNING: Not all jobs will be started at the same time due to the " 

-

51 "limitation of number of Slurm jobs that can be run in parallel. Check" 

-

52 " your Sparkle Slurm Settings.") 

-

53 print(f"Sparkle parallel portfolio is running {seeds_per_solver} seed(s) per solver " 

-

54 f"on {num_solvers} solvers for {num_instances} instances ...") 

-

55 cmd_list = [] 

-

56 cutoff = gv.settings().get_general_target_cutoff_time() 

-

57 objectives = gv.settings().get_general_sparkle_objectives() 

-

58 # Create a command for each instance-solver-seed combination 

-

59 for instance, solver in itertools.product(instances_set._instance_paths, solvers): 

-

60 for _ in range(seeds_per_solver): 

-

61 seed = int(random.getrandbits(32)) 

-

62 solver_call_list = solver.build_cmd( 

-

63 instance.absolute(), 

-

64 objectives=objectives, 

-

65 seed=seed, 

-

66 cutoff_time=cutoff) 

-

67 cmd_list.append((" ".join(solver_call_list)).replace("'", '"')) 

-

68 # Jobs are added to the runrunner object in the same order they are provided 

-

69 sbatch_options = gv.settings().get_slurm_extra_options(as_args=True) 

-

70 run = rrr.add_to_queue( 

-

71 runner=run_on, 

-

72 cmd=cmd_list, 

-

73 name=CommandName.RUN_PARALLEL_PORTFOLIO, 

-

74 parallel_jobs=parallel_jobs, 

-

75 path=portfolio_path, 

-

76 base_dir=sl.caller_log_dir, 

-

77 srun_options=["-N1", "-n1"] + sbatch_options, 

-

78 sbatch_options=sbatch_options 

-

79 ) 

-

80 check_interval = gv.settings().get_parallel_portfolio_check_interval() 

-

81 instances_done = [False] * num_instances 

-

82 # We record the 'best' of all seed results per solver-instance, 

-

83 # setting start values for objectives that are always present 

-

84 default_objective_values = {} 

-

85 for o in objectives: 

-

86 default_value = float(sys.maxsize) if o.minimise else 0 

-

87 # Default values for time objectives can be linked to cutoff time 

-

88 if o.time: 

-

89 default_value = cutoff + 1.0 

-

90 if o.post_process is not None: 

-

91 default_value = o.post_process(default_value, cutoff) 

-

92 default_objective_values[o.name] = default_value 

-

93 job_output_dict = {instance_name: {solver.name: default_objective_values.copy() 

-

94 for solver in solvers} 

-

95 for instance_name in instances_set._instance_names} 

-

96 n_instance_jobs = num_solvers * seeds_per_solver 

-

97 

-

98 with tqdm(total=len(instances_done)) as pbar: 

-

99 pbar.set_description("Instances done") 

-

100 while not all(instances_done): 

-

101 prev_done = sum(instances_done) 

-

102 time.sleep(check_interval) 

-

103 job_status_list = [r.status for r in run.jobs] 

-

104 job_status_completed = [status == Status.COMPLETED 

-

105 for status in job_status_list] 

-

106 # The jobs are sorted by instance 

-

107 for i, instance in enumerate(instances_set._instance_paths): 

-

108 if instances_done[i]: 

-

109 continue 

-

110 instance_job_slice = slice(i * n_instance_jobs, 

-

111 (i + 1) * n_instance_jobs) 

-

112 if any(job_status_completed[instance_job_slice]): 

-

113 instances_done[i] = True 

-

114 # Kill all running jobs for this instance 

-

115 solver_kills = [0] * num_solvers 

-

116 for job_index in range(i * n_instance_jobs, 

-

117 (i + 1) * n_instance_jobs): 

-

118 if not job_status_completed[job_index]: 

-

119 run.jobs[job_index].kill() 

-

120 solver_index = int( 

-

121 (job_index % n_instance_jobs) / seeds_per_solver) 

-

122 solver_kills[solver_index] += 1 

-

123 for solver_index in range(num_solvers): 

-

124 # All seeds of a solver were killed on instance, set status kill 

-

125 if solver_kills[solver_index] == seeds_per_solver: 

-

126 solver_name = solvers[solver_index].name 

-

127 job_output_dict[instance.name][solver_name]["status"] =\ 

-

128 SolverStatus.KILLED 

-

129 pbar.update(sum(instances_done) - prev_done) 

-

130 

-

131 # Attempt to verify that all logs have been written (Slurm I/O latency) 

-

132 for index, cmd in enumerate(cmd_list): 

-

133 runsolver_configuration = cmd.split(" ")[:11] 

-

134 logs = [portfolio_path / p for p in runsolver_configuration 

-

135 if Path(p).suffix in [".log", ".val", ".rawres"]] 

-

136 if not all([p.exists() for p in logs]): 

-

137 time.sleep(check_interval) 

-

138 

-

139 # Now iterate over runsolver logs to get runtime, get the lowest value per seed 

-

140 for index, cmd in enumerate(cmd_list): 

-

141 runsolver_configuration = cmd.split(" ")[:11] 

-

142 solver_output = Solver.parse_solver_output(run.jobs[index].stdout, 

-

143 runsolver_configuration, 

-

144 portfolio_path) 

-

145 solver_index = int((index % n_instance_jobs) / seeds_per_solver) 

-

146 solver_name = solvers[solver_index].name 

-

147 instance_name = instances_set._instance_names[int(index / n_instance_jobs)] 

-

148 cpu_time = solver_output["cpu_time"] 

-

149 cmd_output = job_output_dict[instance_name][solver_name] 

-

150 if cpu_time > 0.0 and cpu_time < cmd_output["cpu_time"]: 

-

151 for key, value in solver_output.items(): 

-

152 if key in [o.name for o in objectives]: 

-

153 job_output_dict[instance_name][solver_name][key] = value 

-

154 if "status" not in cmd_output or cmd_output["status"] != SolverStatus.KILLED: 

-

155 cmd_output["status"] = solver_output["status"] 

-

156 

-

157 # Fix the CPU/WC time for non-existent logs to instance min time + check_interval 

-

158 for instance in job_output_dict.keys(): 

-

159 no_log_solvers = [] 

-

160 min_time = cutoff 

-

161 for solver in job_output_dict[instance].keys(): 

-

162 cpu_time = job_output_dict[instance][solver]["cpu_time"] 

-

163 if cpu_time == -1.0 or cpu_time == float(sys.maxsize): 

-

164 no_log_solvers.append(solver) 

-

165 elif cpu_time < min_time: 

-

166 min_time = cpu_time 

-

167 for solver in no_log_solvers: 

-

168 job_output_dict[instance][solver]["cpu_time"] = min_time + check_interval 

-

169 job_output_dict[instance][solver]["wall_time"] = min_time + check_interval 

-

170 # Fix runtime objectives with resolved CPU/Wall times 

-

171 for key, value in job_output_dict[instance][solver].items(): 

-

172 objective = resolve_objective(key) 

-

173 if objective is not None and objective.time: 

-

174 if objective.use_time == UseTime.CPU_TIME: 

-

175 value = job_output_dict[instance][solver]["cpu_time"] 

-

176 else: 

-

177 value = job_output_dict[instance][solver]["wall_time"] 

-

178 if objective.post_process is not None: 

-

179 value = objective.post_process(value, cutoff) 

-

180 job_output_dict[instance][solver][key] = value 

-

181 

-

182 for index, instance_name in enumerate(instances_set._instance_names): 

-

183 index_str = f"[{index + 1}/{num_instances}] " 

-

184 instance_output = job_output_dict[instance_name] 

-

185 if all([instance_output[k]["status"] == SolverStatus.TIMEOUT 

-

186 for k in instance_output.keys()]): 

-

187 print(f"\n{index_str}{instance_name} was not solved within the cutoff-time.") 

-

188 continue 

-

189 print(f"\n{index_str}{instance_name} yielded the following Solver results:") 

-

190 for sindex in range(index * num_solvers, (index + 1) * num_solvers): 

-

191 solver_name = solvers[sindex % num_solvers].name 

-

192 job_info = job_output_dict[instance_name][solver_name] 

-

193 print(f"\t- {solver_name} ended with status {job_info['status']} in " 

-

194 f"{job_info['cpu_time']}s CPU-Time ({job_info['wall_time']}s WC-Time)") 

-

195 

-

196 # Write the results to a CSV 

-

197 csv_path = portfolio_path / "results.csv" 

-

198 values_header = ["status"] + [o.name for o in objectives] 

-

199 header = ["Instance", "Solver"] + values_header 

-

200 result_rows = [header] 

-

201 for instance_name in job_output_dict.keys(): 

-

202 for solver_name in job_output_dict[instance_name].keys(): 

-

203 job_o = job_output_dict[instance_name][solver_name] 

-

204 values = [instance_name, solver_name] + [ 

-

205 job_o[key] if key in job_o else "None" 

-

206 for key in values_header] 

-

207 result_rows.append(values) 

-

208 with csv_path.open("w") as out: 

-

209 writer = csv.writer(out) 

-

210 writer.writerows(result_rows) 

-

211 

-

212 

-

213def parser_function() -> argparse.ArgumentParser: 

-

214 """Define the command line arguments. 

-

215 

-

216 Returns: 

-

217 parser: The parser with the parsed command line arguments 

-

218 """ 

-

219 parser = argparse.ArgumentParser() 

-

220 parser.add_argument(*ac.InstancePath.names, 

-

221 **ac.InstancePath.kwargs) 

-

222 parser.add_argument(*ac.NicknamePortfolioArgument.names, 

-

223 **ac.NicknamePortfolioArgument.kwargs) 

-

224 parser.add_argument(*ac.SolversArgument.names, 

-

225 **ac.SolversArgument.kwargs) 

-

226 parser.add_argument(*ac.SparkleObjectiveArgument.names, 

-

227 **ac.SparkleObjectiveArgument.kwargs) 

-

228 parser.add_argument(*ac.CutOffTimeArgument.names, 

-

229 **ac.CutOffTimeArgument.kwargs) 

-

230 parser.add_argument(*ac.SolverSeedsArgument.names, 

-

231 **ac.SolverSeedsArgument.kwargs) 

-

232 parser.add_argument(*ac.RunOnArgument.names, 

-

233 **ac.RunOnArgument.kwargs) 

-

234 parser.add_argument(*ac.SettingsFileArgument.names, 

-

235 **ac.SettingsFileArgument.kwargs) 

-

236 return parser 

-

237 

-

238 

-

239if __name__ == "__main__": 

-

240 # Log command call 

-

241 sl.log_command(sys.argv) 

-

242 

-

243 # Define command line arguments 

-

244 parser = parser_function() 

-

245 

-

246 # Process command line arguments 

-

247 args = parser.parse_args() 

-

248 if args.solvers is not None: 

-

249 solver_paths = [resolve_object_name("".join(s), 

-

250 target_dir=gv.settings().DEFAULT_solver_dir) 

-

251 for s in args.solvers] 

-

252 if None in solver_paths: 

-

253 print("Some solvers not recognised! Check solver names:") 

-

254 for i, name in enumerate(args.solvers): 

-

255 if solver_paths[i] is None: 

-

256 print(f'\t- "{"".join(name)}"') 

-

257 sys.exit(-1) 

-

258 solvers = [Solver(p) for p in solver_paths] 

-

259 else: 

-

260 solvers = [Solver(p) for p in 

-

261 gv.settings().DEFAULT_solver_dir.iterdir() if p.is_dir()] 

-

262 

-

263 check_for_initialise(COMMAND_DEPENDENCIES[CommandName.RUN_PARALLEL_PORTFOLIO]) 

-

264 

-

265 # Compare current settings to latest.ini 

-

266 prev_settings = Settings(PurePath("Settings/latest.ini")) 

-

267 Settings.check_settings_changes(gv.settings(), prev_settings) 

-

268 

-

269 # Do first, so other command line options can override settings from the file 

-

270 if args.settings_file is not None: 

-

271 gv.settings().read_settings_ini(args.settings_file, SettingState.CMD_LINE) 

-

272 

-

273 portfolio_path = args.portfolio_name 

-

274 

-

275 if args.run_on is not None: 

-

276 gv.settings().set_run_on( 

-

277 args.run_on.value, SettingState.CMD_LINE) 

-

278 run_on = gv.settings().get_run_on() 

-

279 

-

280 if args.solver_seeds is not None: 

-

281 gv.settings().set_parallel_portfolio_number_of_seeds_per_solver( 

-

282 args.solver_seeds, SettingState.CMD_LINE) 

-

283 

-

284 if run_on == Runner.LOCAL: 

-

285 print("Parallel Portfolio is not fully supported yet for Local runs. Exiting.") 

-

286 sys.exit(-1) 

-

287 

-

288 # Retrieve instance set 

-

289 data_set = resolve_object_name( 

-

290 args.instance_path, 

-

291 gv.file_storage_data_mapping[gv.instances_nickname_path], 

-

292 gv.settings().DEFAULT_instance_dir, 

-

293 instance_set) 

-

294 print(f"Running on {data_set.size} instance(s)...") 

-

295 

-

296 if args.cutoff_time is not None: 

-

297 gv.settings().set_general_target_cutoff_time(args.cutoff_time, 

-

298 SettingState.CMD_LINE) 

-

299 

-

300 if args.objectives is not None: 

-

301 gv.settings().set_general_sparkle_objectives( 

-

302 args.objectives, SettingState.CMD_LINE) 

-

303 if not gv.settings().get_general_sparkle_objectives()[0].time: 

-

304 print("ERROR: Parallel Portfolio is currently only relevant for " 

-

305 "RunTime objectives. In all other cases, use validation") 

-

306 sys.exit(-1) 

-

307 

-

308 if args.portfolio_name is not None: # Use a nickname 

-

309 portfolio_path = gv.settings().DEFAULT_parallel_portfolio_output_raw /\ 

-

310 args.portfolio_name 

-

311 else: # Generate a timestamped nickname 

-

312 timestamp = time.strftime("%Y-%m-%d-%H:%M:%S", time.gmtime(time.time())) 

-

313 randintstamp = int(random.getrandbits(32)) 

-

314 portfolio_path = gv.settings().DEFAULT_parallel_portfolio_output_raw /\ 

-

315 f"{timestamp}_{randintstamp}" 

-

316 if portfolio_path.exists(): 

-

317 print(f"[WARNING] Portfolio path {portfolio_path} already exists! " 

-

318 "Overwrite? [y/n] ", end="") 

-

319 user_input = input() 

-

320 if user_input != "y": 

-

321 sys.exit() 

-

322 shutil.rmtree(portfolio_path) 

-

323 portfolio_path.mkdir(parents=True) 

-

324 run_parallel_portfolio(data_set, portfolio_path, solvers, run_on=run_on) 

-

325 

-

326 # Update latest scenario 

-

327 gv.latest_scenario().set_parallel_portfolio_path(portfolio_path) 

-

328 gv.latest_scenario().set_latest_scenario(Scenario.PARALLEL_PORTFOLIO) 

-

329 gv.latest_scenario().set_parallel_portfolio_instance_path(args.instance_path) 

-

330 # Write used scenario to file 

-

331 gv.latest_scenario().write_scenario_ini() 

-

332 # Write used settings to file 

-

333 gv.settings().write_used_settings() 

-

334 print("Running Sparkle parallel portfolio is done!") 

-
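run_parallel_portfolio.py queues its jobs instance-major (instance x solver x seed) and later recovers the solver from a flat job index with (job_index % n_instance_jobs) // seeds_per_solver. A worked example with hypothetical sizes makes the layout concrete:

    num_solvers, seeds_per_solver = 3, 2
    n_instance_jobs = num_solvers * seeds_per_solver  # 6 jobs per instance

    job_index = 7                                     # second instance's block
    instance_index = job_index // n_instance_jobs                     # 1
    solver_index = (job_index % n_instance_jobs) // seeds_per_solver  # 0
    seed_index = job_index % seeds_per_solver                         # 1
    assert (instance_index, solver_index, seed_index) == (1, 0, 1)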
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_portfolio_selector_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_portfolio_selector_py.html
deleted file mode 100644
index af5e55eb7..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_portfolio_selector_py.html
+++ /dev/null
@@ -1,241 +0,0 @@
-Coverage for sparkle/CLI/run_portfolio_selector.py: 0% (79 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

1#!/usr/bin/env python3 

-

2"""Sparkle command to execute a portfolio selector.""" 

-

3 

-

4import sys 

-

5import argparse 

-

6from pathlib import PurePath, Path 

-

7 

-

8import runrunner as rrr 

-

9from runrunner import Runner 

-

10 

-

11from sparkle.CLI.help import global_variables as gv 

-

12from sparkle.CLI.help import logging as sl 

-

13from sparkle.platform.settings_objects import Settings, SettingState 

-

14from sparkle.CLI.help import argparse_custom as ac 

-

15from sparkle.structures import PerformanceDataFrame, FeatureDataFrame 

-

16from sparkle.platform import CommandName, COMMAND_DEPENDENCIES 

-

17from sparkle.CLI.help.reporting_scenario import Scenario 

-

18from sparkle.CLI.initialise import check_for_initialise 

-

19from sparkle.CLI.help.nicknames import resolve_object_name 

-

20from sparkle.instance import instance_set 

-

21from sparkle.CLI.compute_features import compute_features 

-

22 

-

23 

-

24def parser_function() -> argparse.ArgumentParser: 

-

25 """Define the command line arguments.""" 

-

26 parser = argparse.ArgumentParser() 

-

27 parser.add_argument(*ac.InstancePathPositional.names, 

-

28 **ac.InstancePathPositional.kwargs) 

-

29 parser.add_argument(*ac.RunOnArgument.names, 

-

30 **ac.RunOnArgument.kwargs) 

-

31 parser.add_argument(*ac.SettingsFileArgument.names, 

-

32 **ac.SettingsFileArgument.kwargs) 

-

33 parser.add_argument(*ac.SparkleObjectiveArgument.names, 

-

34 **ac.SparkleObjectiveArgument.kwargs) 

-

35 

-

36 return parser 

-

37 

-

38 

-

39if __name__ == "__main__": 

-

40 # Log command call 

-

41 sl.log_command(sys.argv) 

-

42 

-

43 # Define command line arguments 

-

44 parser = parser_function() 

-

45 

-

46 # Process command line arguments 

-

47 args = parser.parse_args() 

-

48 

-

49 if args.run_on is not None: 

-

50 gv.settings().set_run_on(args.run_on.value, SettingState.CMD_LINE) 

-

51 run_on = gv.settings().get_run_on() 

-

52 

-

53 data_set = resolve_object_name( 

-

54 args.instance_path, 

-

55 gv.file_storage_data_mapping[gv.instances_nickname_path], 

-

56 gv.settings().DEFAULT_instance_dir, instance_set) 

-

57 

-

58 check_for_initialise(COMMAND_DEPENDENCIES[CommandName.RUN_PORTFOLIO_SELECTOR]) 

-

59 

-

60 if ac.set_by_user(args, "settings_file"): 

-

61 gv.settings().read_settings_ini( 

-

62 args.settings_file, SettingState.CMD_LINE 

-

63 ) # Do first, so other command line options can override settings from the file 

-

64 if ac.set_by_user(args, "objectives"): 

-

65 gv.settings().set_general_sparkle_objectives(args.objectives, 

-

66 SettingState.CMD_LINE) 

-

67 

-

68 # Compare current settings to latest.ini 

-

69 prev_settings = Settings(PurePath("Settings/latest.ini")) 

-

70 Settings.check_settings_changes(gv.settings(), prev_settings) 

-

71 objectives = gv.settings().get_general_sparkle_objectives() 

-

72 # NOTE: Is this still relevant? 

-

73 if not objectives[0].time: 

-

74 print("ERROR: The run_portfolio_selector command is not yet implemented" 

-

75 " for the QUALITY_ABSOLUTE performance measure!") 

-

76 sys.exit(-1) 

-

77 

-

78 selector_scenario = gv.latest_scenario().get_selection_scenario_path() 

-

79 selector_path = selector_scenario / "portfolio_selector" 

-

80 if not selector_path.exists() or not selector_path.is_file(): 

-

81 print("ERROR: The portfolio selector could not be found. Please make sure to " 

-

82 "first construct a portfolio selector.") 

-

83 sys.exit(-1) 

-

84 if len([p for p in gv.settings().DEFAULT_extractor_dir.iterdir()]) == 0: 

-

85 print("ERROR: No feature extractor added to Sparkle.") 

-

86 sys.exit(-1) 

-

87 

-

88 # Compute the features of the incoming instances 

-

89 test_case_path = selector_scenario / data_set.name 

-

90 test_case_path.mkdir(exist_ok=True) 

-

91 feature_dataframe = FeatureDataFrame(gv.settings().DEFAULT_feature_data_path) 

-

92 feature_dataframe.remove_instances(feature_dataframe.instances) 

-

93 feature_dataframe.csv_filepath = test_case_path / "feature_data.csv" 

-

94 feature_dataframe.add_instances(data_set.instance_paths) 

-

95 feature_dataframe.save_csv() 

-

96 feature_run = compute_features(feature_dataframe, recompute=False, run_on=run_on) 

-

97 

-

98 if run_on == Runner.LOCAL: 

-

99 feature_run.wait() 

-

100 

-

101 # Prepare performance data 

-

102 performance_data = PerformanceDataFrame( 

-

103 test_case_path / "performance_data.csv", 

-

104 objectives=objectives) 

-

105 for instance_name in data_set.instance_names: 

-

106 if instance_name not in performance_data.instances: 

-

107 performance_data.add_instance(instance_name) 

-

108 performance_data.add_solver(selector_path.name) 

-

109 performance_data.save_csv() 

-

110 # Update latest scenario 

-

111 gv.latest_scenario().set_selection_test_case_directory(test_case_path) 

-

112 gv.latest_scenario().set_latest_scenario(Scenario.SELECTION) 

-

113 # Write used scenario to file 

-

114 gv.latest_scenario().write_scenario_ini() 

-

115 

-

116 run_core = Path(__file__).parent.parent.resolve() /\ 

-

117 "CLI" / "core" / "run_portfolio_selector_core.py" 

-

118 cmd_list = [f"python {run_core} " 

-

119 f"--selector {selector_path} " 

-

120 f"--feature-data-csv {feature_dataframe.csv_filepath} " 

-

121 f"--performance-data-csv {performance_data.csv_filepath} " 

-

122 f"--instance {instance_path} " 

-

123 f"--log-dir {sl.caller_log_dir}" 

-

124 for instance_path in data_set.instance_paths] 

-

125 

-

126 selector_run = rrr.add_to_queue( 

-

127 runner=run_on, 

-

128 cmd=cmd_list, 

-

129 name=CommandName.RUN_PORTFOLIO_SELECTOR, 

-

130 base_dir=sl.caller_log_dir, 

-

131 stdout=None, 

-

132 dependencies=feature_run if run_on == Runner.SLURM else None, 

-

133 sbatch_options=gv.settings().get_slurm_extra_options(as_args=True)) 

-

134 

-

135 if run_on == Runner.LOCAL: 

-

136 selector_run.wait() 

-

137 for job in selector_run.jobs: 

-

138 print(job.stdout) 

-

139 print("Running Sparkle portfolio selector done!") 

-

140 else: 

-

141 print("Sparkle portfolio selector is running ...") 

-

142 

-

143 # Write used settings to file 

-

144 gv.settings().write_used_settings() 

-
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_solvers_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_solvers_py.html
deleted file mode 100644
index 1f5ba8e00..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_run_solvers_py.html
+++ /dev/null
@@ -1,317 +0,0 @@
-Coverage for sparkle/CLI/run_solvers.py: 0% (83 statements; coverage.py v7.6.1, created at 2024-09-27 09:26 +0200)

1#!/usr/bin/env python3 

-

2"""Sparkle command to run solvers to get their performance data.""" 

-

3from __future__ import annotations 

-

4 

-

5import sys 

-

6import argparse 

-

7from pathlib import PurePath, Path 

-

8 

-

9import runrunner as rrr 

-

10from runrunner.base import Runner, Run 

-

11 

-

12from sparkle.CLI.help import global_variables as gv 

-

13from sparkle.structures import PerformanceDataFrame 

-

14from sparkle.CLI.help import logging as sl 

-

15from sparkle.platform.settings_objects import Settings, SettingState 

-

16from sparkle.platform import CommandName, COMMAND_DEPENDENCIES 

-

17from sparkle.CLI.initialise import check_for_initialise 

-

18from sparkle.CLI.help import argparse_custom as ac 

-

19 

-

20 

-

21def parser_function() -> argparse.ArgumentParser: 

-

22 """Define the command line arguments.""" 

-

23 parser = argparse.ArgumentParser() 

-

24 

-

25 parser.add_argument(*ac.RecomputeRunSolversArgument.names, 

-

26 **ac.RecomputeRunSolversArgument.kwargs) 

-

27 parser.add_argument(*ac.SparkleObjectiveArgument.names, 

-

28 **ac.SparkleObjectiveArgument.kwargs) 

-

29 parser.add_argument(*ac.TargetCutOffTimeRunSolversArgument.names, 

-

30 **ac.TargetCutOffTimeRunSolversArgument.kwargs) 

-

31 parser.add_argument(*ac.AlsoConstructSelectorAndReportArgument.names, 

-

32 **ac.AlsoConstructSelectorAndReportArgument.kwargs) 

-

33 parser.add_argument(*ac.RunOnArgument.names, 

-

34 **ac.RunOnArgument.kwargs) 

-

35 parser.add_argument(*ac.SettingsFileArgument.names, 

-

36 **ac.SettingsFileArgument.kwargs) 

-

37 

-

38 return parser 

-

39 

-

40 

-

41def running_solvers_performance_data( 

-

42 performance_data_csv_path: Path, 

-

43 num_job_in_parallel: int, 

-

44 rerun: bool = False, 

-

45 run_on: Runner = Runner.SLURM) -> Run: 

-

46 """Run the solvers for the performance data. 

-

47 

-

48 Parameters 

-

49 ---------- 

-

50 performance_data_csv_path: Path 

-

51 The path to the performance data file 

-

52 num_job_in_parallel: int 

-

53 The maximum number of jobs to run in parallel 

-

54 rerun: bool 

-

55 Run only solvers for which no data is available yet (False) or (re)run all 

-

56 solvers to get (new) performance data for them (True) 

-

57 run_on: Runner 

-

58 Where to execute the solvers. For available values see runrunner.base.Runner 

-

59 enum. Default: "Runner.SLURM". 

-

60 

-

61 Returns 

-

62 ------- 

-

63 run: runrunner.LocalRun or runrunner.SlurmRun 

-

64 If the run is local return a QueuedRun object with the information concerning 

-

65 the run. 

-

66 """ 

-

67 # Open the performance data csv file 

-

68 performance_dataframe = PerformanceDataFrame(performance_data_csv_path) 

-

69 # List of jobs to do 

-

70 jobs = performance_dataframe.get_job_list(rerun=rerun) 

-

71 num_jobs = len(jobs) 

-

72 

-

73 cutoff_time_str = str(gv.settings().get_general_target_cutoff_time()) 

-

74 

-

75 print(f"Cutoff time for each solver run: {cutoff_time_str} seconds") 

-

76 print(f"Total number of jobs to run: {num_jobs}") 

-

77 

-

78 # If there are no jobs, stop 

-

79 if num_jobs == 0: 

-

80 return None 

-

81 

-

82 if run_on == Runner.LOCAL: 

-

83 print("Running the solvers locally") 

-

84 elif run_on == Runner.SLURM: 

-

85 print("Running the solvers through Slurm") 

-

86 

-

87 sbatch_options = gv.settings().get_slurm_extra_options(as_args=True) 

-

88 srun_options = ["-N1", "-n1"] + sbatch_options 

-

89 objectives = gv.settings().get_general_sparkle_objectives() 

-

90 run_solvers_core = Path(__file__).parent.resolve() / "core" / "run_solvers_core.py" 

-

91 cmd_list = [f"{run_solvers_core} " 

-

92 f"--performance-data {performance_data_csv_path} " 

-

93 f"--instance {inst_p} --solver {solver_p} " 

-

94 f"--objectives {','.join([str(o) for o in objectives])} " 

-

95 f"--log-dir {sl.caller_log_dir}" for inst_p, _, solver_p in jobs] 

-

96 

-

97 run = rrr.add_to_queue( 

-

98 runner=run_on, 

-

99 cmd=cmd_list, 

-

100 parallel_jobs=num_job_in_parallel, 

-

101 name=CommandName.RUN_SOLVERS, 

-

102 base_dir=sl.caller_log_dir, 

-

103 sbatch_options=sbatch_options, 

-

104 srun_options=srun_options) 

-

105 

-

106 if run_on == Runner.LOCAL: 

-

107 # TODO: It would be nice to extract some info per job and print it 

-

108 # As the user now only sees jobs starting and completing without their results 

-

109 run.wait() 

-

110 

-

111 return run 

-

112 

-

113 

-

114def run_solvers_on_instances( 

-

115 recompute: bool = False, 

-

116 run_on: Runner = Runner.SLURM, 

-

117 also_construct_selector_and_report: bool = False) -> None: 

-

118 """Run all the solvers on all the instances that were not not previously run. 

-

119 

-

120 If recompute is True, rerun everything even if previously run. Where the solvers are 

-

121 executed can be controlled with "run_on". 

-

122 

-

123 Parameters 

-

124 ---------- 

-

125 recompute: bool 

-

126 If True, recompute all solver-instance pairs even if they were run before. 

-

127 Default: False 

-

128 run_on: Runner 

-

129 On which computer or cluster environment to run the solvers. 

-

130 Available: Runner.LOCAL, Runner.SLURM. Default: Runner.SLURM 

-

131 also_construct_selector_and_report: bool 

-

132 If True, the selector will be constructed and a report will be produced. 

-

133 """ 

-

134 if recompute: 

-

135 PerformanceDataFrame(gv.settings().DEFAULT_performance_data_path).clean_csv() 

-

136 num_job_in_parallel = gv.settings().get_number_of_jobs_in_parallel() 

-

137 

-

138 runs = [running_solvers_performance_data( 

-

139 performance_data_csv_path=gv.settings().DEFAULT_performance_data_path, 

-

140 num_job_in_parallel=num_job_in_parallel, 

-

141 rerun=recompute, 

-

142 run_on=run_on)] 

-

143 

-

144 # If there are no jobs return 

-

145 if all(run is None for run in runs): 

-

146 print("Running solvers done!") 

-

147 return 

-

148 

-

149 sbatch_user_options = gv.settings().get_slurm_extra_options(as_args=True) 

-

150 if also_construct_selector_and_report: 

-

151 runs.append(rrr.add_to_queue( 

-

152 runner=run_on, 

-

153 cmd="sparkle/CLI/construct_portfolio_selector.py", 

-

154 name=CommandName.CONSTRUCT_PORTFOLIO_SELECTOR, 

-

155 dependencies=runs[-1], 

-

156 base_dir=sl.caller_log_dir, 

-

157 sbatch_options=sbatch_user_options)) 

-

158 

-

159 runs.append(rrr.add_to_queue( 

-

160 runner=run_on, 

-

161 cmd="sparkle/CLI/generate_report.py", 

-

162 name=CommandName.GENERATE_REPORT, 

-

163 dependencies=runs[-1], 

-

164 base_dir=sl.caller_log_dir, 

-

165 sbatch_options=sbatch_user_options)) 

-

166 

-

167 if run_on == Runner.LOCAL: 

-

168 print("Waiting for the local calculations to finish.") 

-

169 for run in runs: 

-

170 if run is not None: 

-

171 run.wait() 

-

172 print("Running solvers done!") 

-

173 elif run_on == Runner.SLURM: 

-

174 print("Running solvers. Waiting for Slurm job(s) with id(s): " 

-

175 f'{",".join(r.run_id for r in runs if r is not None)}') 

-

176 

-

177 

-

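The dependencies=runs[-1] arguments above chain the selector and report jobs onto the solver runs. A toy sketch of that wiring with plain dictionaries (no scheduler involved; all names are illustrative):

# Hypothetical job records; only the dependency chaining is of interest here.
runs = [{"name": "run_solvers", "dependencies": None}]
for name in ("construct_portfolio_selector", "generate_report"):
    runs.append({"name": name, "dependencies": runs[-1]["name"]})
for job in runs:
    print(job["name"], "depends on", job["dependencies"])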
178if __name__ == "__main__": 

-

179 # Log command call 

-

180 sl.log_command(sys.argv) 

-

181 

-

182 # Define command line arguments 

-

183 parser = parser_function() 

-

184 

-

185 # Process command line arguments 

-

186 args = parser.parse_args() 

-

187 

-

188 if args.settings_file is not None: 

-

189 # Do first, so other command line options can override settings from the file 

-

190 gv.settings().read_settings_ini(args.settings_file, SettingState.CMD_LINE) 

-

191 

-

192 if args.objectives is not None: 

-

193 gv.settings().set_general_sparkle_objectives( 

-

194 args.objectives, SettingState.CMD_LINE 

-

195 ) 

-

196 

-

197 if args.target_cutoff_time is not None: 

-

198 gv.settings().set_general_target_cutoff_time( 

-

199 args.target_cutoff_time, SettingState.CMD_LINE) 

-

200 

-

201 if args.run_on is not None: 

-

202 gv.settings().set_run_on( 

-

203 args.run_on.value, SettingState.CMD_LINE) 

-

204 run_on = gv.settings().get_run_on() 

-

205 

-

206 check_for_initialise(COMMAND_DEPENDENCIES[CommandName.RUN_SOLVERS]) 

-

207 

-

208 # Compare current settings to latest.ini 

-

209 prev_settings = Settings(PurePath("Settings/latest.ini")) 

-

210 Settings.check_settings_changes(gv.settings(), prev_settings) 

-

211 

-

212 print("Start running solvers ...") 

-

213 

-

214 # Write settings to file before starting, since they are used in callback scripts 

-

215 gv.settings().write_used_settings() 

-

216 

-

217 run_solvers_on_instances( 

-

218 recompute=args.recompute, 

-

219 also_construct_selector_and_report=args.also_construct_selector_and_report, 

-

220 run_on=run_on) 

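Note the ordering above: the settings file is read first so that the individual command line flags can still override it. A toy illustration of that precedence, with a plain dictionary standing in for the Settings object:

# Hypothetical minimal settings store mirroring the override order above.
settings = {"target_cutoff_time": 60}    # built-in default
settings["target_cutoff_time"] = 120     # from --settings-file, applied first
args_target_cutoff_time = 30             # explicit command line flag
if args_target_cutoff_time is not None:
    settings["target_cutoff_time"] = args_target_cutoff_time  # flag wins
print(settings["target_cutoff_time"])  # 30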
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_save_snapshot_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_save_snapshot_py.html
deleted file mode 100644
index bb25338db..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_save_snapshot_py.html
+++ /dev/null
@@ -1,115 +0,0 @@
-Coverage for sparkle/CLI/save_snapshot.py: 0% (9 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
-#!/usr/bin/env python3
-"""Sparkle command to save the current Sparkle platform in a .zip file."""
-import sys
-
-from sparkle.CLI.help import snapshot_help
-from sparkle.CLI.help import logging as sl
-import argparse
-
-
-def parser_function() -> argparse.ArgumentParser:
-    """Parser for save_snapshot."""
-    return argparse.ArgumentParser()
-
-
-if __name__ == "__main__":
-    # Log command call
-    sl.log_command(sys.argv)
-    snapshot_help.save_current_sparkle_platform()
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_status_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_status_py.html
deleted file mode 100644
index ce5b05770..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_status_py.html
+++ /dev/null
@@ -1,150 +0,0 @@
-Coverage for sparkle/CLI/status.py: 0% (25 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
-#!/usr/bin/env python3
-"""Command to display the status of the platform."""
-
-import sys
-import argparse
-
-from sparkle.CLI.initialise import check_for_initialise
-from sparkle.CLI.help import global_variables as gv
-from sparkle.CLI.help import system_status as sssh
-from sparkle.CLI.help import logging as sl
-from sparkle.CLI.help import argparse_custom as ac
-
-
-def parser_function() -> argparse.ArgumentParser:
-    """Define the command line arguments."""
-    parser = argparse.ArgumentParser()
-    parser.add_argument(*ac.VerboseArgument.names,
-                        **ac.VerboseArgument.kwargs)
-    return parser
-
-
-if __name__ == "__main__":
-    # Log command call
-    sl.log_command(sys.argv)
-
-    # Define command line arguments
-    parser = parser_function()
-
-    # Process command line arguments
-    args = parser.parse_args()
-
-    check_for_initialise()
-
-    print("Reporting current system status of Sparkle ...")
-    sssh.print_sparkle_list([s for s in gv.settings().DEFAULT_solver_dir.iterdir()],
-                            "Solver", args.verbose)
-    sssh.print_sparkle_list([e for e in gv.settings().DEFAULT_extractor_dir.iterdir()],
-                            "Extractor", args.verbose)
-    sssh.print_sparkle_list([i for i in gv.settings().DEFAULT_instance_dir.iterdir()],
-                            "Instance Set", args.verbose)
-
-    sssh.print_feature_computation_jobs(
-        gv.settings().DEFAULT_feature_data_path, args.verbose
-    )
-    sssh.print_performance_computation_jobs(
-        gv.settings().DEFAULT_performance_data_path, args.verbose
-    )
-
-    # Scan configurator log files for warnings
-    configurator = gv.settings().get_general_sparkle_configurator()
-    configurator.get_status_from_logs()
-
-    print("\nCurrent system status of Sparkle reported!")
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_validate_configured_vs_default_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_validate_configured_vs_default_py.html
deleted file mode 100644
index 8759a2861..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_validate_configured_vs_default_py.html
+++ /dev/null
@@ -1,238 +0,0 @@
-Coverage for sparkle/CLI/validate_configured_vs_default.py: 0% (70 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
-#!/usr/bin/env python3
-"""Sparkle command to validate a configured solver against its default configuration."""
-
-import sys
-import argparse
-from pathlib import PurePath
-
-from runrunner.base import Runner
-
-from sparkle.CLI.help import global_variables as gv
-from sparkle.CLI.help import logging as sl
-from sparkle.platform.settings_objects import Settings, SettingState
-from sparkle.CLI.help import argparse_custom as ac
-from sparkle.CLI.help.reporting_scenario import Scenario
-from sparkle.configurator.configurator import Configurator
-from sparkle.solver.validator import Validator
-from sparkle.solver import Solver
-from sparkle.instance import instance_set
-from sparkle.platform import CommandName, COMMAND_DEPENDENCIES
-from sparkle.CLI.initialise import check_for_initialise
-from sparkle.CLI.help.nicknames import resolve_object_name
-
-
-def parser_function() -> argparse.ArgumentParser:
-    """Define the command line arguments."""
-    parser = argparse.ArgumentParser(
-        description=("Test the performance of the configured solver and the default "
-                     "solver by doing validation experiments on the training and test "
-                     "sets."))
-    parser.add_argument(*ac.SolverArgument.names,
-                        **ac.SolverArgument.kwargs)
-    parser.add_argument(*ac.InstanceSetTrainArgument.names,
-                        **ac.InstanceSetTrainArgument.kwargs)
-    parser.add_argument(*ac.InstanceSetTestArgument.names,
-                        **ac.InstanceSetTestArgument.kwargs)
-    parser.add_argument(*ac.ConfiguratorArgument.names,
-                        **ac.ConfiguratorArgument.kwargs)
-    parser.add_argument(*ac.SparkleObjectiveArgument.names,
-                        **ac.SparkleObjectiveArgument.kwargs)
-    parser.add_argument(*ac.TargetCutOffTimeValidationArgument.names,
-                        **ac.TargetCutOffTimeValidationArgument.kwargs)
-    parser.add_argument(*ac.SettingsFileArgument.names,
-                        **ac.SettingsFileArgument.kwargs)
-    parser.add_argument(*ac.RunOnArgument.names,
-                        **ac.RunOnArgument.kwargs)
-    return parser
-
-
-if __name__ == "__main__":
-    # Log command call
-    sl.log_command(sys.argv)
-
-    parser = parser_function()
-
-    # Process command line arguments
-    args = parser.parse_args()
-    solver = resolve_object_name(args.solver,
-                                 gv.solver_nickname_mapping,
-                                 gv.settings().DEFAULT_solver_dir,
-                                 Solver)
-    instance_set_train = resolve_object_name(
-        args.instance_set_train,
-        gv.file_storage_data_mapping[gv.instances_nickname_path],
-        gv.settings().DEFAULT_instance_dir, instance_set)
-    instance_set_test = resolve_object_name(
-        args.instance_set_test,
-        gv.file_storage_data_mapping[gv.instances_nickname_path],
-        gv.settings().DEFAULT_instance_dir, instance_set)
-
-    if args.run_on is not None:
-        gv.settings().set_run_on(
-            args.run_on.value, SettingState.CMD_LINE)
-    run_on = gv.settings().get_run_on()
-
-    check_for_initialise(
-        COMMAND_DEPENDENCIES[CommandName.VALIDATE_CONFIGURED_VS_DEFAULT]
-    )
-    if args.configurator is not None:
-        gv.settings().set_general_sparkle_configurator(
-            value=getattr(Configurator, args.configurator),
-            origin=SettingState.CMD_LINE)
-    if ac.set_by_user(args, "settings_file"):
-        gv.settings().read_settings_ini(
-            args.settings_file, SettingState.CMD_LINE
-        )  # Do first, so other command line options can override settings from the file
-
-    if args.objectives is not None:
-        gv.settings().set_general_sparkle_objectives(
-            args.objectives, SettingState.CMD_LINE
-        )
-    if ac.set_by_user(args, "target_cutoff_time"):
-        gv.settings().set_general_target_cutoff_time(
-            args.target_cutoff_time, SettingState.CMD_LINE
-        )
-    # Compare current settings to latest.ini
-    prev_settings = Settings(PurePath("Settings/latest.ini"))
-    Settings.check_settings_changes(gv.settings(), prev_settings)
-
-    # Make sure configuration results exist before trying to work with them
-    configurator = gv.settings().get_general_sparkle_configurator()
-    configurator.set_scenario_dirs(solver, instance_set_train)
-    objectives = gv.settings().get_general_sparkle_objectives()
-    # Record optimised configuration
-    _, opt_config_str = configurator.get_optimal_configuration(
-        solver, instance_set_train, objectives[0])
-    opt_config = Solver.config_str_to_dict(opt_config_str)
-
-    validator = Validator(gv.settings().DEFAULT_validation_output, sl.caller_log_dir)
-    all_validation_instances = [instance_set_train]
-    if instance_set_test is not None:
-        all_validation_instances.append(instance_set_test)
-    validation = validator.validate(
-        solvers=[solver] * 2,
-        configurations=[None, opt_config],
-        instance_sets=all_validation_instances,
-        objectives=objectives,
-        cut_off=gv.settings().get_general_target_cutoff_time(),
-        sbatch_options=gv.settings().get_slurm_extra_options(as_args=True),
-        run_on=run_on)
-
-    if run_on == Runner.LOCAL:
-        validation.wait()
-        print("Running validation done!")
-    else:
-        print(f"Running validation through Slurm with job ID: {validation.run_id}")
-
-    # Update latest scenario
-    gv.latest_scenario().set_config_solver(solver)
-    gv.latest_scenario().set_config_instance_set_train(instance_set_train.directory)
-    gv.latest_scenario().set_latest_scenario(Scenario.CONFIGURATION)
-
-    if instance_set_test is not None:
-        gv.latest_scenario().set_config_instance_set_test(instance_set_test.directory)
-    else:
-        # Set to default to overwrite possible old path
-        gv.latest_scenario().set_config_instance_set_test()
-
-    # Write used settings to file
-    gv.settings().write_used_settings()
-    # Write used scenario to file
-    gv.latest_scenario().write_scenario_ini()
diff --git a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_wait_py.html b/Documentation/source/_static/coverage/z_a36c84129c2a5cca_wait_py.html
deleted file mode 100644
index 5e51bead2..000000000
--- a/Documentation/source/_static/coverage/z_a36c84129c2a5cca_wait_py.html
+++ /dev/null
@@ -1,254 +0,0 @@
-Coverage for sparkle/CLI/wait.py: 0% (67 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
-#!/usr/bin/env python3
-"""Sparkle command to wait for one or more other commands to complete execution."""
-import sys
-import signal
-import time
-import argparse
-from pathlib import Path
-
-from runrunner.slurm import SlurmRun
-from runrunner.base import Status
-from tabulate import tabulate
-
-from sparkle.platform.cli_types import VerbosityLevel, TEXT
-from sparkle.CLI.help import logging
-from sparkle.CLI.help import argparse_custom as ac
-from sparkle.CLI.help import global_variables as gv
-
-
-def parser_function() -> argparse.ArgumentParser:
-    """Define the command line arguments.
-
-    Returns:
-        The argument parser.
-    """
-    parser = argparse.ArgumentParser()
-    parser.add_argument(*ac.JobIDsArgument.names, **ac.JobIDsArgument.kwargs)
-    return parser
-
-
-def get_runs_from_file(path: Path, print_error: bool = False) -> list[SlurmRun]:
-    """Retrieve all run objects from file storage.
-
-    Args:
-        path: Path object where to look recursively for the files.
-
-    Returns:
-        List of all found SlurmRun objects.
-    """
-    runs = []
-    for file in path.rglob("*.json"):
-        # TODO: RunRunner should be adapted to have more general methods for runs
-        # So this method can work for both local and slurm
-        try:
-            run_obj = SlurmRun.from_file(file)
-            runs.append(run_obj)
-        except Exception as ex:
-            # Not a (correct) RunRunner JSON file
-            if print_error:
-                print(f"[WARNING] Could not load file: {file}. Exception: {ex}")
-    return runs
-
-
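A usage sketch for get_runs_from_file as defined above; the log directory is an invented example, and any non-run JSON files found there are skipped with a warning:

from pathlib import Path

runs = get_runs_from_file(Path("Output/Logs"), print_error=True)
print(f"Found {len(runs)} stored runs")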
-def wait_for_jobs(path: Path,
-                  check_interval: int,
-                  verbosity: VerbosityLevel = VerbosityLevel.STANDARD,
-                  filter: list[str] = None) -> None:
-    """Wait for all active jobs to finish executing.
-
-    Args:
-        path: The Path where to look for the stored jobs.
-        check_interval: The time in seconds between updating the jobs.
-        verbosity: Amount of information shown.
-            The lower verbosity means lower computational load.
-        filter: If present, only show the given job ids.
-    """
-    # Filter jobs on relevant status
-    jobs = [run for run in get_runs_from_file(path)
-            if run.status == Status.WAITING or run.status == Status.RUNNING]
-
-    if filter is not None:
-        jobs = [job for job in jobs if job.run_id in filter]
-
-    running_jobs = jobs
-
-    def signal_handler(num: int, _: any) -> None:
-        """Create clean exit for CTRL + C."""
-        if num == signal.SIGINT:
-            sys.exit(0)
-
-    signal.signal(signal.SIGINT, signal_handler)
-    # If verbosity is quiet there is no need for further information
-    if verbosity == VerbosityLevel.QUIET:
-        prev_jobs = len(running_jobs) + 1
-        while len(running_jobs) > 0:
-            if len(running_jobs) < prev_jobs:
-                print(f"Waiting for {len(running_jobs)} jobs...", flush=True)
-            time.sleep(check_interval)
-            prev_jobs = len(running_jobs)
-            running_jobs = [run for run in running_jobs
-                            if run.status == Status.WAITING
-                            or run.status == Status.RUNNING]
-
-    # If verbosity is standard the command will print a table with relevant information
-    elif verbosity == VerbosityLevel.STANDARD:
-        # Order in which to display the jobs
-        status_order = {Status.COMPLETED: 0, Status.RUNNING: 1, Status.WAITING: 2}
-        while len(running_jobs) > 0:
-            # Information to be printed to the table
-            information = [["RunId", "Name", "Partition", "Status",
-                            "Dependencies", "Finished Jobs", "Run Time"]]
-            running_jobs = [run for run in running_jobs
-                            if run.status == Status.WAITING
-                            or run.status == Status.RUNNING]
-            sorted_jobs = sorted(
-                jobs, key=lambda job: (status_order.get(job.status, 4), job.run_id))
-            for job in sorted_jobs:
-                # Count number of jobs that have finished
-                finished_jobs_count = sum(1 for status in job.all_status
-                                          if status == Status.COMPLETED)
-                # Format job.status
-                status_text = \
-                    TEXT.format_text([TEXT.BOLD], job.status) \
-                    if job.status == Status.RUNNING else \
-                    (TEXT.format_text([TEXT.ITALIC], job.status)
-                     if job.status == Status.COMPLETED else job.status)
-                information.append(
-                    [job.run_id,
-                     job.name,
-                     job.partition,
-                     status_text,
-                     "None" if len(job.dependencies) == 0
-                     else ", ".join(job.dependencies),
-                     f"{finished_jobs_count}/{len(job.all_status)}",
-                     job.runtime])
-            # Print the table
-            table = tabulate(information, headers="firstrow", tablefmt="grid")
-            print(table)
-            time.sleep(check_interval)
-
-            # Clears the table for the new table to be printed
-            lines = table.count("\n") + 1
-            # \033 is the escape character (ESC),
-            # [{lines}A is the escape sequence that moves the cursor up.
-            print(f"\033[{lines}A", end="")
-            # [J is the escape sequence that clears the console from the cursor down
-            print("\033[J", end="")
-
-    print("All jobs done!")
-
-
-if __name__ == "__main__":
-    # Log command call
-    logging.log_command(sys.argv)
-
-    # Define command line arguments
-    parser = parser_function()
-
-    # Process command line arguments
-    args = parser.parse_args()
-
-    check_interval = gv.settings().get_general_check_interval()
-    verbosity = gv.settings().get_general_verbosity()
-
-    wait_for_jobs(path=gv.settings().DEFAULT_log_output,
-                  check_interval=check_interval,
-                  verbosity=verbosity,
-                  filter=args.job_ids)
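The ANSI escapes used above for redrawing the table can be tried in isolation; a small self-contained refresh loop:

import time

# Redraws a one-line counter in place using the same escape sequences as above.
for i in range(3):
    print(f"tick {i}")
    time.sleep(0.2)
    print("\033[1A", end="")  # move the cursor up one line
    print("\033[J", end="")   # clear from the cursor down
print("done")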
diff --git a/Documentation/source/_static/coverage/z_bca5cc0da071bfb7___init___py.html b/Documentation/source/_static/coverage/z_bca5cc0da071bfb7___init___py.html
deleted file mode 100644
index 05e496a19..000000000
--- a/Documentation/source/_static/coverage/z_bca5cc0da071bfb7___init___py.html
+++ /dev/null
@@ -1,100 +0,0 @@
-Coverage for sparkle/configurator/__init__.py: 100% (0 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
1"""This package provides configurator support for Sparkle.""" 

-

2# Imports impossible due to circularity 

-

3# from sparkle.configurator.configurator import Configurator, ConfigurationScenario 

diff --git a/Documentation/source/_static/coverage/z_bca5cc0da071bfb7_configurator_cli_py.html b/Documentation/source/_static/coverage/z_bca5cc0da071bfb7_configurator_cli_py.html
deleted file mode 100644
index eaecd1382..000000000
--- a/Documentation/source/_static/coverage/z_bca5cc0da071bfb7_configurator_cli_py.html
+++ /dev/null
@@ -1,121 +0,0 @@
-Coverage for sparkle/configurator/configurator_cli.py: 0% (15 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
1"""Configurator CLI wrapper class to standardise I/O of Configurators.""" 

-

2 

-

3import sys 

-

4import subprocess 

-

5from pathlib import Path 

-

6import implementations 

-

7 

-

8if __name__ == "__main__": 

-

9 args = sys.argv 

-

10 configurator_name = args[1] 

-

11 output_source = Path(args[2]) 

-

12 output_target = Path(args[3]) 

-

13 configurator_call = args[4:] 

-

14 # 1. Resolve for which Configurator we are standardising 

-

15 configurator = implementations.resolve_configurator(configurator_name) 

-

16 # 2. Execute the call, output is automatically piped to the caller's set output 

-

17 subprocess.run(configurator_call) 

-

18 # 3. Standardise the output for Sparkle 

-

19 # 3a. Make sure the output file exists 

-

20 output_target.open("a").close() 

-

21 # 3b. Have the configurator implementation organise the output 

-

22 configurator.organise_output(output_source=output_source, 

-

23 output_target=output_target) 

-

24 print(f"Organising done! See {output_target}") 

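The wrapper above is purely positional: argv[1] names the configurator, argv[2] and argv[3] are the output source and target, and everything after is the raw configurator call. A sketch of assembling such a command line; all names and paths are invented:

# Hypothetical invocation of configurator_cli.py; argv[4:] is forwarded verbatim.
cmd = ["python3", "sparkle/configurator/configurator_cli.py",
       "SMAC2",                     # argv[1]: configurator name
       "Output/smac_raw_output",    # argv[2]: output source
       "Output/configuration.csv",  # argv[3]: output target
       "./smac", "--scenario", "scenario.txt"]  # argv[4:]: the actual call
print(" ".join(cmd))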
diff --git a/Documentation/source/_static/coverage/z_bca5cc0da071bfb7_configurator_py.html b/Documentation/source/_static/coverage/z_bca5cc0da071bfb7_configurator_py.html
deleted file mode 100644
index d07f96924..000000000
--- a/Documentation/source/_static/coverage/z_bca5cc0da071bfb7_configurator_py.html
+++ /dev/null
@@ -1,242 +0,0 @@
-Coverage for sparkle/configurator/configurator.py: 80% (51 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
-#!/usr/bin/env python3
-# -*- coding: UTF-8 -*-
-"""Configurator class to use different algorithm configurators like SMAC."""
-
-from __future__ import annotations
-from abc import abstractmethod
-from pathlib import Path
-
-from runrunner import Runner, Run
-from sparkle.solver import Solver
-from sparkle.solver.validator import Validator
-from sparkle.instance import InstanceSet
-from sparkle.types import SparkleObjective
-
-
-class Configurator:
-    """Abstract class to use different configurators like SMAC."""
-    configurator_cli_path = Path(__file__).parent.resolve() / "configurator_cli.py"
-
-    def __init__(self: Configurator, validator: Validator, output_path: Path,
-                 executable_path: Path, configurator_target: Path,
-                 objectives: list[SparkleObjective], base_dir: Path, tmp_path: Path,
-                 multi_objective_support: bool = False) -> None:
-        """Initialize Configurator.
-
-        Args:
-            validator: Validator object to validate configuration runs
-            output_path: Output directory of the Configurator.
-            executable_path: Executable of the configurator for Sparkle to call
-            configurator_target: The wrapper algorithm to standardize configurator
-                input/output towards solver wrappers.
-            objectives: The list of Sparkle Objectives the configurator has to
-                optimize.
-            base_dir: Where to execute the configuration
-            tmp_path: Path for the temporary files of the configurator, optional
-            multi_objective_support: Whether the configurator supports
-                multi objective optimization for solvers.
-        """
-        self.validator = validator
-        self.output_path = output_path
-        self.executable_path = executable_path
-        self.configurator_target = configurator_target
-        self.objectives = objectives
-        self.base_dir = base_dir
-        self.tmp_path = tmp_path
-        self.multiobjective = multi_objective_support
-        self.scenario = None
-        if len(self.objectives) > 1 and not self.multiobjective:
-            print("Warning: Multiple objectives specified but current configurator "
-                  f"{self.configurator_path.name} only supports single objective. "
-                  f"Defaulted to first specified objective: {self.objectives[0].name}")
-
-    @property
-    def scenario_class(self: Configurator) -> ConfigurationScenario:
-        """Return the scenario class of the configurator."""
-        return ConfigurationScenario
-
-    @abstractmethod
-    def configure(self: Configurator,
-                  scenario: ConfigurationScenario,
-                  validate_after: bool = True,
-                  sbatch_options: list[str] = [],
-                  num_parallel_jobs: int = None,
-                  base_dir: Path = None,
-                  run_on: Runner = Runner.SLURM) -> Run:
-        """Start configuration job.
-
-        Args:
-            scenario: ConfigurationScenario to execute.
-            validate_after: Whether to validate the configuration on the training set
-                afterwards or not.
-            sbatch_options: List of slurm batch options to use
-            num_parallel_jobs: The maximum number of jobs to run in parallel
-            base_dir: The base_dir of RunRunner where the sbatch scripts will be placed
-            run_on: On which platform to run the jobs. Default: Slurm.
-
-        Returns:
-            A RunRunner Run object.
-        """
-        raise NotImplementedError
-
-    @abstractmethod
-    def get_optimal_configuration(self: Configurator,
-                                  solver: Solver,
-                                  instance_set: InstanceSet,
-                                  objective: SparkleObjective) -> tuple[float, str]:
-        """Returns the optimal configuration string for a solver of an instance set."""
-        raise NotImplementedError
-
-    @staticmethod
-    def organise_output(output_source: Path, output_target: Path) -> None | str:
-        """Method to restructure and clean up after a single configurator call."""
-        raise NotImplementedError
-
-    def set_scenario_dirs(self: Configurator,
-                          solver: Solver, instance_set: InstanceSet) -> None:
-        """Patching method to allow the rebuilding of configuration scenario."""
-        raise NotImplementedError
-
-    def get_status_from_logs(self: Configurator) -> None:
-        """Method to scan the log files of the configurator for warnings."""
-        raise NotImplementedError
-
-
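Concrete configurators are expected to fill in the abstract hooks above. A skeletal, purely illustrative subclass (not the real SMAC implementation) showing which methods must be provided:

from pathlib import Path

class EchoConfigurator(Configurator):
    """Toy subclass sketching the hooks a real configurator overrides."""

    def configure(self, scenario, validate_after=True, sbatch_options=[],
                  num_parallel_jobs=None, base_dir=None, run_on=None):
        print(f"Would configure scenario {scenario.name}")

    def get_optimal_configuration(self, solver, instance_set, objective):
        return 0.0, ""  # (objective value, configuration string)

    @staticmethod
    def organise_output(output_source: Path, output_target: Path) -> None:
        # Trivial stand-in for restructuring a configurator's raw output.
        output_target.write_text(output_source.read_text())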
-class ConfigurationScenario:
-    """Template class to handle configuration scenarios."""
-    def __init__(self: ConfigurationScenario, solver: Solver,
-                 instance_set: InstanceSet,
-                 sparkle_objectives: list[SparkleObjective] = None)\
-            -> None:
-        """Initialize scenario paths and names.
-
-        Args:
-            solver: Solver that should be configured.
-            instance_set: Instances object for the scenario.
-            sparkle_objectives: SparkleObjectives used for each run of the configuration.
-        """
-        self.solver = solver
-        self.instance_set = instance_set
-        self.sparkle_objectives = sparkle_objectives
-        self.name = f"{self.solver.name}_{self.instance_set.name}"
-
-    def create_scenario(self: ConfigurationScenario, parent_directory: Path) -> None:
-        """Create scenario with solver and instances in the parent directory.
-
-        This prepares all the necessary subdirectories related to configuration.
-
-        Args:
-            parent_directory: Directory in which the scenario should be created.
-        """
-        raise NotImplementedError
-
-    def create_scenario_file(self: ConfigurationScenario) -> None:
-        """Create a file with the configuration scenario.
-
-        Writes supplementary information to the target algorithm (algo =) as:
-        algo = {configurator_target} {solver_directory} {sparkle_objective}
-        """
-        raise NotImplementedError
-
-    @staticmethod
-    def from_file(scenario_file: Path, solver: Solver, instance_set: InstanceSet,
-                  ) -> ConfigurationScenario:
-        """Reads scenario file and initialises ConfigurationScenario."""
-        raise NotImplementedError
diff --git a/Documentation/source/_static/coverage/z_bdd4bbf10be95802___init___py.html b/Documentation/source/_static/coverage/z_bdd4bbf10be95802___init___py.html
deleted file mode 100644
index d7776ceb1..000000000
--- a/Documentation/source/_static/coverage/z_bdd4bbf10be95802___init___py.html
+++ /dev/null
@@ -1,114 +0,0 @@
-Coverage for sparkle/instance/__init__.py: 75% (8 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
1"""This package provides instance set support for Sparkle.""" 

-

2from sparkle.instance.instances import \ 

-

3 MultiFileInstanceSet, FileInstanceSet, IterableFileInstanceSet, InstanceSet 

-

4from pathlib import Path 

-

5 

-

6 

-

7def instance_set(target: any) -> InstanceSet: 

-

8 """The combined interface for all instance set types.""" 

-

9 if ((isinstance(target, Path) 

-

10 and (target / MultiFileInstanceSet.instance_csv).exists()) 

-

11 or isinstance(target, list)): 

-

12 return MultiFileInstanceSet(target) 

-

13 elif (isinstance(target, Path) and target.is_dir() 

-

14 and all([p.suffix in IterableFileInstanceSet.supported_filetypes 

-

15 for p in target.iterdir()])): 

-

16 return IterableFileInstanceSet(target) 

-

17 return FileInstanceSet(target) 

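A small usage sketch of the dispatch above, reusing instance_set from this module: a throwaway directory with two plain files falls through to FileInstanceSet, since there is no instances.csv and the suffixes are not iterable-file types:

import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    target = Path(tmp)
    (target / "a.cnf").write_text("p cnf 1 1\n1 0\n")
    (target / "b.cnf").write_text("p cnf 1 1\n-1 0\n")
    inst = instance_set(target)
    print(type(inst).__name__, sorted(inst.instance_names))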
diff --git a/Documentation/source/_static/coverage/z_bdd4bbf10be95802_instances_py.html b/Documentation/source/_static/coverage/z_bdd4bbf10be95802_instances_py.html
deleted file mode 100644
index d13a12b58..000000000
--- a/Documentation/source/_static/coverage/z_bdd4bbf10be95802_instances_py.html
+++ /dev/null
@@ -1,248 +0,0 @@
-Coverage for sparkle/instance/instances.py: 56% (79 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
1"""Objects and methods relating to instances for Sparkle.""" 

-

2from __future__ import annotations 

-

3from pathlib import Path 

-

4 

-

5import csv 

-

6import numpy as np 

-

7 

-

8 

-

9class InstanceSet: 

-

10 """Base object representation of a set of instances.""" 

-

11 

-

12 def __init__(self: InstanceSet, target: Path | list[str, Path]) -> None: 

-

13 """Initialise an Instances object from a directory. 

-

14 

-

15 Args: 

-

16 target: The Path, or list of paths to create the instance set from. 

-

17 """ 

-

18 self.directory: Path = target 

-

19 self._instance_names: list[str] = [] 

-

20 self._instance_paths: list[Path] = [] 

-

21 

-

22 @property 

-

23 def size(self: InstanceSet) -> int: 

-

24 """Returns the number of instances in the set.""" 

-

25 return len(self._instance_paths) 

-

26 

-

27 @property 

-

28 def all_paths(self: InstanceSet) -> list[Path]: 

-

29 """Returns all file paths in the instance set as a flat list.""" 

-

30 return self._instance_paths 

-

31 

-

32 @property 

-

33 def instance_paths(self: InstanceSet) -> list[Path]: 

-

34 """Get processed instance paths.""" 

-

35 return self._instance_paths 

-

36 

-

37 @property 

-

38 def instance_names(self: InstanceSet) -> list[str]: 

-

39 """Get processed instance names for multi-file instances.""" 

-

40 return self._instance_names 

-

41 

-

42 @property 

-

43 def name(self: InstanceSet) -> str: 

-

44 """Get instance set name.""" 

-

45 return self.directory.name 

-

46 

-

47 def get_path_by_name(self: InstanceSet, name: str) -> Path | list[Path]: 

-

48 """Retrieves an instance paths by its name. Returns None upon failure.""" 

-

49 for idx, instance_name in enumerate(self._instance_names): 

-

50 if instance_name == name: 

-

51 return self._instance_paths[idx] 

-

52 return None 

-

53 

-

54 

-

55class FileInstanceSet(InstanceSet): 

-

56 """Object representation of a set of single-file instances.""" 

-

57 

-

58 def __init__(self: FileInstanceSet, target: Path) -> None: 

-

59 """Initialise an InstanceSet, where each instance is a file in the directory. 

-

60 

-

61 Args: 

-

62 target: Path to the instances directory. If multiple files are found, 

-

63 they are assumed to have the same number of instances per file. 

-

64 """ 

-

65 super().__init__(target) 

-

66 self.directory: Path = target 

-

67 self._name: str = target.name if target.is_dir() else target.stem 

-

68 if self.directory.is_file(): 

-

69 # Single instance set 

-

70 self._instance_paths = [self.directory] 

-

71 self._instance_names = [self.directory.name] 

-

72 self.directory = self.directory.parent 

-

73 else: 

-

74 # Default situation, treat each file in the directory as an instance 

-

75 self._instance_paths = [p for p in self.directory.iterdir()] 

-

76 self._instance_names = [p.name for p in self._instance_paths] 

-

77 

-

78 @property 

-

79 def name(self: FileInstanceSet) -> str: 

-

80 """Get instance set name.""" 

-

81 return self._name 

-

82 

-

83 

-

84class MultiFileInstanceSet(InstanceSet): 

-

85 """Object representation of a set of multi-file instances.""" 

-

86 instance_csv = "instances.csv" 

-

87 

-

88 def __init__(self: MultiFileInstanceSet, target: Path | list[str, Path]) -> None: 

-

89 """Initialise an Instances object from a directory. 

-

90 

-

91 Args: 

-

92 target: Path to the instances directory. Will read from instance_list.csv. 

-

93 If directory is a list of [str, Path], create an Instance set of one. 

-

94 """ 

-

95 super().__init__(target) 

-

96 if isinstance(target, list): 

-

97 # A single instance represented as a list of [name, path1, path2, ...] 

-

98 instance_list = target 

-

99 target = target[1].parent 

-

100 elif isinstance(target, Path): 

-

101 # A path pointing to the directory of instances 

-

102 instance_file = target / MultiFileInstanceSet.instance_csv 

-

103 # Read from instance_file 

-

104 instance_list = [line for line in csv.reader(instance_file.open())] 

-

105 

-

106 self.directory = target if target.is_dir() else target.parent 

-

107 self._instance_names, self._instance_paths = [], [] 

-

108 for instance in instance_list: 

-

109 self._instance_names.append(instance[0]) 

-

110 self._instance_paths.append([ 

-

111 (self.directory / f) if isinstance(f, str) else f for f in instance[1:]]) 

-

112 

-

113 @property 

-

114 def all_paths(self: MultiFileInstanceSet) -> list[Path]: 

-

115 """Returns all file paths in the instance set as a flat list.""" 

-

116 return [p for instance in self._instance_paths for p in instance] + [ 

-

117 self.directory / MultiFileInstanceSet.instance_csv] 

-

118 

-

119 

-

120class IterableFileInstanceSet(InstanceSet): 

-

121 """Object representation of files containing multiple instances.""" 

-

122 supported_filetypes = set([".csv", ".npy"]) 

-

123 

-

124 def __init__(self: IterableFileInstanceSet, target: Path) -> None: 

-

125 """Initialise an InstanceSet from a single file. 

-

126 

-

127 Args: 

-

128 target: Path to the instances directory. If multiple files are found, 

-

129 they are assumed to have the same number of instances. 

-

130 """ 

-

131 super().__init__(target) 

-

132 self.directory = target 

-

133 self._instance_paths =\ 

-

134 [p for p in self.directory.iterdir() 

-

135 if p.suffix in IterableFileInstanceSet.supported_filetypes] 

-

136 self._size = len(self._instance_paths[0].open().readlines()) 

-

137 self._instance_names = [p.name for p in self._instance_paths] 

-

138 

-

139 @property 

-

140 def size(self: IterableFileInstanceSet) -> int: 

-

141 """Returns the number of instances in the set.""" 

-

142 return self._size 

-

143 

-

144 @staticmethod 

-

145 def __determine_size__(file: Path) -> int: 

-

146 """Determine the number of instances in a file.""" 

-

147 match file.suffix: 

-

148 case ".csv": 

-

149 return len(file.open().readlines()) 

-

150 case ".npy": 

-

151 return len(np.load(file)) 

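For MultiFileInstanceSet, each row of instances.csv holds an instance name followed by the files that make up that instance. A made-up example of the expected layout:

import csv
import io

# Hypothetical instances.csv contents.
raw = "inst_a,inst_a.model,inst_a.data\ninst_b,inst_b.model,inst_b.data\n"
for row in csv.reader(io.StringIO(raw)):
    print("instance", row[0], "is made up of", row[1:])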
diff --git a/Documentation/source/_static/coverage/z_e7a48d036a5616ad___init___py.html b/Documentation/source/_static/coverage/z_e7a48d036a5616ad___init___py.html
deleted file mode 100644
index 7bd2aa4c5..000000000
--- a/Documentation/source/_static/coverage/z_e7a48d036a5616ad___init___py.html
+++ /dev/null
@@ -1,103 +0,0 @@
-Coverage for sparkle/tools/__init__.py: 100% (5 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
1"""Init for the tools module.""" 

-

2from sparkle.tools.general import get_time_pid_random_string 

-

3from sparkle.tools.pcsparser import PCSParser 

-

4from sparkle.tools.runsolver_parsing import get_solver_args 

-

5from sparkle.tools.slurm_parsing import SlurmBatch 

-

6from sparkle.tools.solver_wrapper_parsing import get_solver_call_params 

diff --git a/Documentation/source/_static/coverage/z_e7a48d036a5616ad_general_py.html b/Documentation/source/_static/coverage/z_e7a48d036a5616ad_general_py.html
deleted file mode 100644
index 82a05c7ab..000000000
--- a/Documentation/source/_static/coverage/z_e7a48d036a5616ad_general_py.html
+++ /dev/null
@@ -1,111 +0,0 @@
-Coverage for sparkle/tools/general.py: 67% (6 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
1"""General tools for Sparkle.""" 

-

2import time 

-

3import random 

-

4import os 

-

5 

-

6 

-

7def get_time_pid_random_string() -> str: 

-

8 """Return a combination of time, Process ID, and random int as string. 

-

9 

-

10 Returns: 

-

11 A random string composed of time, PID and a random positive integer value. 

-

12 """ 

-

13 time_stamp = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime(time.time())) 

-

14 return f"{time_stamp}_{os.getpid()}_{int(random.getrandbits(32))}" 

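A sketch of the kind of identifier this helper produces; the exact value differs on every call:

import os
import random
import time

# Same recipe as above, e.g. "2024-09-27-09:26:09_12345_2890423017".
time_stamp = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime(time.time()))
print(f"{time_stamp}_{os.getpid()}_{int(random.getrandbits(32))}")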
diff --git a/Documentation/source/_static/coverage/z_e7a48d036a5616ad_pcsparser_py.html b/Documentation/source/_static/coverage/z_e7a48d036a5616ad_pcsparser_py.html
deleted file mode 100644
index e0c0759d0..000000000
--- a/Documentation/source/_static/coverage/z_e7a48d036a5616ad_pcsparser_py.html
+++ /dev/null
@@ -1,513 +0,0 @@
-Coverage for sparkle/tools/pcsparser.py: 15% (244 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
1"""The Parameter Configuration Space Parser class.""" 

-

2from __future__ import annotations 

-

3import re 

-

4import sys 

-

5import numpy as np 

-

6from enum import Enum 

-

7from abc import ABC 

-

8from pathlib import Path 

-

9 

-

10 

-

11class PCSObject(ABC): 

-

12 """General data structure to keep the pcs file in. 

-

13 

-

14 Fields are added by functions, such that checks can be conducted. 

-

15 """ 

-

16 def __init__(self: PCSObject) -> None: 

-

17 """Initialize the PCSObject.""" 

-

18 self.params = [] 

-

19 

-

20 def add_param(self: PCSObject, 

-

21 name: str, 

-

22 structure: str = "integer", 

-

23 domain: list = [-sys.maxsize, sys.maxsize], 

-

24 scale: str = "linear", 

-

25 default: str = "0", 

-

26 comment: str = None) -> None: 

-

27 """Add a parameter to the PCSObject.""" 

-

28 if structure not in ["integer", "real", "categorical", "ordinal"]: 

-

29 raise ValueError(f"Parameter structure {structure} not supported.") 

-

30 

-

31 # Domain check 

-

32 if structure in ["integer", "real"]: 

-

33 if len(domain) != 2: 

-

34 raise ValueError(f"Parameter domain {domain} not supported.") 

-

35 pass 

-

36 elif structure == "categorical": 

-

37 # TODO: check categories 

-

38 scale = None 

-

39 

-

40 self.params.append({ 

-

41 "name": name, 

-

42 "structure": structure, 

-

43 "domain": domain, 

-

44 "scale": scale, 

-

45 "default": default, 

-

46 "comment": comment, 

-

47 "type": "parameter", 

-

48 }) 

-

49 

-

50 def add_constraint(self: PCSObject, **kwargs: any) -> None: 

-

51 """Add a constraint to the PCSObject.""" 

-

52 # TODO add checks 

-

53 self.params.append({**kwargs, "type": "constraint"}) 

-

54 

-

55 def add_forbidden(self: PCSObject, **kwargs: any) -> None: 

-

56 """Add a forbidden clause to the PCSObject.""" 

-

57 # TODO add checks 

-

58 self.params.append({**kwargs, "type": "forbidden"}) 

-

59 

-

60 def add_comment(self: PCSObject, **kwargs: any) -> None: 

-

61 """Add a comment to the PCSObject.""" 

-

62 # TODO add checks 

-

63 self.params.append({**kwargs, "type": "comment"}) 

-

64 

-

65 def clear(self: PCSObject) -> None: 

-

66 """Clear the PCSObject.""" 

-

67 self.params = [] 

-

68 

-

69 def get(self: PCSObject, name: str) -> dict: 

-

70 """Get a parameter from the PCSObject based on the name.""" 

-

71 names = {p["name"]: i for i, p in enumerate(self.params) if "name" in p} 

-

72 if name in names: 

-

73 return self.params[names[name]] 

-

74 return None 

-

75 

-

76 

-

77class PCSConvention(Enum): 

-

78 """Internal pcs convention enum.""" 

-

79 unknown = "" 

-

80 SMAC = "smac" 

-

81 ParamILS = "paramils" 

-

82 

-

83 

-

84class PCSParser(ABC): 

-

85 """Base interface object for the parser. 

-

86 

-

87 It loads the pcs files into the generic pcs object. Once a parameter file is loaded, 

-

88 it can be exported to another file 

-

89 """ 

-

90 

-

91 def __init__(self: PCSParser, inherit: PCSParser = None) -> None: 

-

92 """Initialize the PCSParser.""" 

-

93 if inherit is None: 

-

94 self.pcs = PCSObject() 

-

95 else: 

-

96 self.pcs = inherit.pcs 

-

97 

-

98 @staticmethod 

-

99 def _format_string_to_enum(string: str) -> PCSConvention: 

-

100 """Convert string to PCSConvention.""" 

-

101 for form in PCSConvention: 

-

102 if form.value == string: 

-

103 return form 

-

104 raise Exception("ERROR: parameter configuration space format is not supported.") 

-

105 

-

106 def check_validity(self: PCSParser) -> bool: 

-

107 """Check the validity of the pcs.""" 

-

108 # TODO implement 

-

109 

-

110 # check if for all parameters in constraints and forbidden clauses exists 

-

111 # Check for conflict between default values and constraints and forbidden clauses 

-

112 return True 

-

113 

-

114 def load(self: PCSParser, filepath: Path, convention: str = "smac") -> None: 

-

115 """Main import function.""" 

-

116 if isinstance(filepath, str): 

-

117 filepath = Path(filepath) 

-

118 convention = self._format_string_to_enum(convention) 

-

119 

-

120 # TODO check if file actually exists 

-

121 lines = filepath.open().readlines() 

-

122 if convention == PCSConvention.SMAC: 

-

123 parser = SMACParser(self) 

-

124 parser.parse(lines) 

-

125 self.pcs = parser.pcs 

-

126 else: 

-

127 raise Exception(f"ERROR: Importing the pcs convention for {convention.value}" 

-

128 " is not yet implemented.") 

-

129 

-

130 def export(self: PCSParser, 

-

131 convention: str = "smac", 

-

132 destination: Path = None) -> None: 

-

133 """Main export function.""" 

-

134 convention = self._format_string_to_enum(convention) 

-

135 if convention == PCSConvention.ParamILS: 

-

136 pcs = ParamILSParser(self).compile() 

-

137 else: 

-

138 raise Exception(f"ERROR: Exporting the pcs convention for {convention.value}" 

-

139 " is not yet implemented.") 

-

140 destination.open("w").write(pcs) 

-

141 

-

142 

-

143class SMACParser(PCSParser): 

-

144 """The SMAC parser class.""" 

-

145 

-

146 def parse(self: SMACParser, lines: list[str]) -> None: 

-

147 """Parse the pcs file.""" 

-

148 self.pcs.clear() 

-

149 

-

150 # PARAMS 

-

151 for line in lines: 

-

152 # The only forbidden characters in parameter names are: 

-

153 # spaces, commas, quotes, and parentheses 

-

154 regex = (r"(?P<name>[^\s\"',]*)\s+(?P<structure>\w*)\s+(?P<domain>(\[|\{)" 

-

155 r".*(\]|\}))\s*\[(?P<default>.*)\]\s*(?P<scale>log)" 

-

156 r"*\s*#*(?P<comment>.*)") 

-

157 m = re.match(regex, line) 

-

158 if m is not None: 

-

159 fields = m.groupdict() 

-

160 fields["domain"] = re.sub(r"(?:\[|\]|\{|\})", "", fields["domain"]) 

-

161 fields["domain"] = re.split(r"\s*,\s*", fields["domain"]) 

-

162 self.pcs.add_param(**fields) 

-

163 continue 

-

164 

-

165 # CONSTRAINTS 

-

166 regex = (r"(?P<parameter>[^\s\"',]+)\s*\|\s" 

-

167 r"*(?P<conditions>.+)\s*#*(?P<comment>.*)") 

-

168 m = re.match(regex, line) 

-

169 if m is not None: 

-

170 constraint = m.groupdict() 

-

171 constraint["conditions"] = self._parse_conditions( 

-

172 constraint["conditions"]) 

-

173 self.pcs.add_constraint(**constraint) 

-

174 continue 

-

175 

-

176 # FORBIDDEN CLAUSES 

-

177 regex = r"\s*\{(?P<clauses>[^\}]+)\}\s*#*(?P<comment>.*)" 

-

178 m = re.match(regex, line) 

-

179 if m is not None: 

-

180 forbidden = m.groupdict() 

-

181 conditions = [] 

-

182 # Simple clauses 

-

183 # {<parameter name 1>=<value 1>, ..., <parameter name N>=<value N>} 

-

184 if "," in forbidden["clauses"]: 

-

185 forbidden["clause_type"] = "simple" 

-

186 for clause in re.split(r"\s*,\s*", forbidden["clauses"]): 

-

187 m = re.match(r"(?P<param>[^\s\"',=]+)\s*=\s*" 

-

188 r"(?P<value>[^\s\"',]+)", clause) 

-

189 if m is not None: 

-

190 conditions.append(m.groupdict()) 

-

191 else: 

-

192 print(clause, "ERROR") 

-

193 

-

194 else: # Advanced clauses 

-

195 forbidden["clause_type"] = "advanced" 

-

196 # TODO decide if we need to further parse this down 

-

197 conditions = [expr for expr in re.split(r"\s*(?:\|\||&&)\s*", 

-

198 forbidden["clauses"])] 

-

199 

-

200 if len(conditions) == 0: 

-

201 raise Exception(f"ERROR: cannot parse the following line:\n'{line}'") 

-

202 

-

203 forbidden["clauses"] = conditions 

-

204 

-

205 self.pcs.add_forbidden(**forbidden) 

-

206 continue 

-

207 

-

208 # COMMENTLINE 

-

209 regex = r"\s*#(?P<comment>.*)" 

-

210 m = re.match(regex, line) 

-

211 if m is not None: 

-

212 comment = m.groupdict() 

-

213 self.pcs.add_comment(**comment) 

-

214 continue 

-

215 

-

216 # EMTPY LINE 

-

217 regex = r"^\s*$" 

-

218 m = re.match(regex, line) 

-

219 if m is not None: 

-

220 continue 

-

221 

-

222 # RAISE ERROR 

-

223 raise Exception(f"ERROR: cannot parse the following line: \n'{line}'") 

-

224 

-

225 return 

-

226 

-

227 def _parse_conditions(self: SMACParser, conditions: str) -> list[tuple]: 

-

228 """Parse the conditions.""" 

-

229 conditionlist = [] 

-

230 condition = None 

-

231 operator = None 

-

232 nested = 0 

-

233 nested_start = 0 

-

234 condition_start = 0 

-

235 for pos, char in enumerate(conditions): 

-

236 # Nested clauses 

-

237 if char == "(": 

-

238 if nested == 0: 

-

239 nested_start = pos 

-

240 nested += 1 

-

241 elif char == ")": 

-

242 nested -= 1 

-

243 if nested == 0: 

-

244 condition = self._parse_conditions(conditions[nested_start + 1:pos]) 

-

245 conditionlist.append((operator, condition)) 

-

246 if (pos + 1) == len(conditions): 

-

247 return conditionlist 

-

248 

-

249 if pos > 1 and nested == 0: 

-

250 for op in ["||", "&&"]: 

-

251 if conditions[pos - 1: pos + 1] == op: 

-

252 if not isinstance(condition, list): 

-

253 condition = self._parse_condition( 

-

254 conditions[condition_start:pos - 1]) 

-

255 conditionlist.append((operator, condition)) 

-

256 

-

257 operator = op 

-

258 condition_start = pos + 1 

-

259 

-

260 condition = self._parse_condition(conditions[condition_start:len(conditions)]) 

-

261 conditionlist.append((operator, condition)) 

-

262 

-

263 return conditionlist 

-

264 

-

265 @staticmethod 

-

266 def _parse_condition(condition: str) -> dict: 

-

267 """Parse the condition.""" 

-

268 cont = False 

-

269 

-

270 m = re.match(r"\s*(?P<parameter>[^\s\"',]+)\s*(?P<quantifier>==|!=|<|>|<=|>=)" 

-

271 r"\s*(?P<value>[^\s\"',]+)\s*", condition) 

-

272 if m is not None: 

-

273 condition = { 

-

274 **m.groupdict(), 

-

275 "type": "numerical", 

-

276 } 

-

277 cont = True 

-

278 

-

279 if not cont: 

-

280 m = re.match(r"\s*(?P<parameter>[^\s\"',]+)\s+" 

-

281 r"in\s*\{(?P<items>[^\}]+)\}\s*", condition) 

-

282 if m is not None: 

-

283 condition = { 

-

284 **m.groupdict(), 

-

285 "type": "categorical", 

-

286 } 

-

287 condition["items"] = re.split(r",\s*", condition["items"]) 

-

288 cont = True 

-

289 

-

290 if not cont: 

-

291 raise Exception(f"ERROR: Couldn't parse '{condition}'") 

-

292 

-

293 return condition 

-

294 

-

295 def compile(self: SMACParser) -> str: 

-

296 """Compile the PCS.""" 

-

297 # TODO implement 

-

298 pass 

-

299 

-

300 

-

301class ParamILSParser(PCSParser): 

-

302 """PCS parser for ParamILS format.""" 

-

303 

-

304 def parse(self: ParamILSParser, lines: list[str]) -> None: 

-

305 """Parse the PCS.""" 

-

306 # TODO implement 

-

307 pass 

-

308 

-

309 def compile(self: ParamILSParser) -> str: 

-

310 """Compile the PCS.""" 

-

311 # TODO Produce warning if certain specifications cannot be kept in this format 

-

312 # TODO granularity parameter that sets how log and real ranges should be expanded 

-

313 granularity = 20 

-

314 

-

315 lines = [] 

-

316 for item in self.pcs.params: 

-

317 if item["type"] == "parameter": 

-

318 if item["structure"] in ["ordinal", "categorical"]: 

-

319 domain = ",".join(item["domain"]) 

-

320 elif item["structure"] == "integer": 

-

321 if len(item["domain"]) != 2: 

-

322 raise ValueError(f"Domain {item['domain']} not supported.") 

-

323 

-

324 (minval, maxval) = [int(i) for i in item["domain"]] 

-

325 if item["scale"] != "log": 

-

326 # domain = f"{minval}, {(minval + 1)}..{maxval}" 

-

327 domain = list(np.linspace(minval, maxval, granularity)) 

-

328 domain = list(set(np.round(domain).astype(int))) # Cast to int 

-

329 if int(item["default"]) not in domain: 

-

330 domain += [int(item["default"])] 

-

331 domain.sort() 

-

332 

-

333 domain = ",".join([str(i) for i in domain]) 

-

334 else: 

-

335 domain = list(np.unique(np.geomspace(minval, maxval, granularity, 

-

336 dtype=int))) 

-

337 # add default value 

-

338 if int(item["default"]) not in domain: 

-

339 domain += [int(item["default"])] 

-

340 domain.sort() 

-

341 

-

342 domain = ",".join([str(i) for i in domain]) 

-

343 

-

344 elif item["structure"] == "real": 

-

345 if len(item["domain"]) != 2: 

-

346 raise ValueError(f"Domain {item['domain']} not supported.") 

-

347 

-

348 (minval, maxval) = [float(i) for i in item["domain"]] 

-

349 if item["scale"] != "log": 

-

350 domain = list(np.linspace(minval, maxval, granularity)) 

-

351 else: 

-

352 domain = list(np.unique(np.geomspace(minval, maxval, granularity, 

-

353 dtype=float))) 

-

354 # add default value 

-

355 if float(item["default"]) not in domain: 

-

356 domain += [float(item["default"])] 

-

357 domain.sort() 

-

358 

-

359 # Filter duplicated in string format 

-

360 domain = list(set([f"{i}" for i in domain])) 

-

361 domain.sort(key=float) 

-

362 domain = ",".join(domain) 

-

363 

-

364 domain = "{" + domain + "}" 

-

365 line = f"{item['name']} {domain} [{item['default']}]" 

-

366 if item["comment"] != "": 

-

367 line += f" #{item['comment']}" 

-

368 

-

369 lines.append(line) 

-

370 

-

371 for item in self.pcs.params: 

-

372 if item["type"] == "constraint": 

-

373 line = f"{item['parameter']} | " 

-

374 line += self._compile_conditions(item["conditions"]) 

-

375 if item["comment"] != "": 

-

376 line += f" #{item['comment']}" 

-

377 lines.append(line) 

-

378 

-

379 for item in self.pcs.params: 

-

380 if item["type"] == "forbidden": 

-

381 if item["clause_type"] == "simple": 

-

382 clauses = [f"{cls['param']}={cls['value']}" 

-

383 for cls in item["clauses"]] 

-

384 line = "{" + ",".join(clauses) + "}" 

-

385 if item["comment"] != "": 

-

386 line += f"#{item['comment']}" 

-

387 lines.append(line) 

-

388 else: 

-

389 print("WARNING: Advanced forbidden clauses " 

-

390 "are not supported by ParamILS.") 

-

391 pass 

-

392 

-

393 lines = "\n".join(lines) 

-

394 return lines 

-

395 

-

396 def _compile_conditions(self: ParamILSParser, conditions: list[tuple]) -> str: 

-

397 """Compile a list of conditions.""" 

-

398 line = "" 

-

399 for operator, condition in conditions: 

-

400 if operator is not None: 

-

401 line += f" {operator} " 

-

402 

-

403 if isinstance(condition, list): 

-

404 line += f"({self._compile_conditions(condition)})" 

-

405 else: 

-

406 if condition["type"] == "numerical": 

-

407 line += f"{condition['parameter']} in " + "{" 

-

408 param = self.pcs.get(condition["parameter"]) 

-

409 if param["structure"] == "categorical": 

-

410 if condition["value"] in param["domain"]: 

-

411 line += f"{condition['value']}" + "}" 

-

412 # line += "{parameter} {quantifier} {value}".format(**condition) 

-

413 if condition["type"] == "categorical": 

-

414 items = ", ".join(condition["items"]) 

-

415 line += f"{condition['parameter']} in {{{items}}}" 

-

416 return line 

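A round-trip sketch for the parser above, with an invented two-parameter SMAC-style file; per the code, only the SMAC import and ParamILS export directions are implemented:

import tempfile
from pathlib import Path

pcs_text = "alpha integer [1, 100] [10]\nbeta categorical {on, off} [on]\n"
with tempfile.TemporaryDirectory() as tmp:
    source = Path(tmp) / "params.pcs"
    source.write_text(pcs_text)
    parser = PCSParser()
    parser.load(source, convention="smac")
    parser.export(convention="paramils",
                  destination=Path(tmp) / "params.paramils.pcs")
    print((Path(tmp) / "params.paramils.pcs").read_text())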
diff --git a/Documentation/source/_static/coverage/z_e7a48d036a5616ad_runsolver_parsing_py.html b/Documentation/source/_static/coverage/z_e7a48d036a5616ad_runsolver_parsing_py.html
deleted file mode 100644
index a0bc6a08e..000000000
--- a/Documentation/source/_static/coverage/z_e7a48d036a5616ad_runsolver_parsing_py.html
+++ /dev/null
@@ -1,226 +0,0 @@
-Coverage for sparkle/tools/runsolver_parsing.py: 10% (93 statements), coverage.py v7.6.1, created at 2024-09-27 09:26 +0200
1"""Tools to parse runsolver I/O.""" 

-

2import sys 

-

3from pathlib import Path 

-

4import ast 

-

5import re 

-

6 

-

7from sparkle.types import SolverStatus 

-

8 

-

9 

-

10def get_measurements(runsolver_values_path: Path, 

-

11 not_found: float = -1.0) -> tuple[float, float, float]: 

-

12 """Return the CPU and wallclock time reported by runsolver in values log.""" 

-

13 cpu_time, wall_time, memory = not_found, not_found, not_found 

-

14 if runsolver_values_path.exists(): 

-

15 with runsolver_values_path.open("r") as infile: 

-

16 lines = [line.strip().split("=") for line in infile.readlines() 

-

17 if line.count("=") == 1] 

-

18 for keyword, value in lines: 

-

19 if keyword == "WCTIME": 

-

20 wall_time = float(value) 

-

21 elif keyword == "CPUTIME": 

-

22 cpu_time = float(value) 

-

23 elif keyword == "MAXVM": 

-

24 memory = float(int(value) / 1024.0) # MB 

-

25 # Order is fixed, CPU is the last thing we want to read, so break 

-

26 break 

-

27 return cpu_time, wall_time, memory 

-

28 

-

29 

-

30def get_status(runsolver_values_path: Path, runsolver_raw_path: Path) -> SolverStatus: 

-

31 """Get run status from runsolver logs.""" 

-

32 if not runsolver_values_path.exists() and (runsolver_raw_path is not None 

-

33 and not runsolver_raw_path.exists()): 

-

34 # Runsolver logs were not created, job was stopped ''incorrectly'' 

-

35 return SolverStatus.CRASHED 

-

36 # First check if runsolver reported time out 

-

37 if runsolver_values_path.exists(): 

-

38 for line in reversed(runsolver_values_path.open("r").readlines()): 

-

39 if line.strip().startswith("TIMEOUT="): 

-

40 if line.strip() == "TIMEOUT=true": 

-

41 return SolverStatus.TIMEOUT 

-

42 break 

-

43 if runsolver_raw_path is None: 

-

44 return SolverStatus.UNKNOWN 

-

45 if not runsolver_raw_path.exists(): 

-

46 # Runsolver log was not created, job was stopped ''incorrectly'' 

-

47 return SolverStatus.KILLED 

-

48 # Last line of runsolver log should contain the raw sparkle solver wrapper output 

-

49 runsolver_raw_contents = runsolver_raw_path.open("r").read().strip() 

-

50 # cutoff_time = 

-

51 sparkle_wrapper_dict_str = runsolver_raw_contents.splitlines()[-1] 

-

52 solver_regex_filter = re.findall("{.*}", sparkle_wrapper_dict_str)[0] 

-

53 output_dict = ast.literal_eval(solver_regex_filter) 

-

54 status = SolverStatus(output_dict["status"]) 

-

55 # if status == SolverStatus.CRASHED and cpu_time > cutoff_time 

-

56 return status 

-

57 

-

58 

-

59def get_solver_args(runsolver_log_path: Path) -> str: 

-

60 """Retrieves solver arguments dict from runsolver log.""" 

-

61 if runsolver_log_path.exists(): 

-

62 for line in runsolver_log_path.open("r").readlines(): 

-

63 if line.startswith("command line:"): 

-

64 return line.split("sparkle_solver_wrapper.py", 1)[1].strip().strip("'") 

-

65 return "" 

-

66 

-

67 

-

68def get_solver_output(runsolver_configuration: list[str], 

-

69 process_output: str, 

-

70 log_dir: Path) -> dict[str, str | object]: 

-

71 """Decode solver output dictionary when called with runsolver.""" 

-

72 solver_input = None 

-

73 solver_output = None 

-

74 value_data_file = None 

-

75 cutoff_time = sys.maxsize 

-

76 for idx, conf in enumerate(runsolver_configuration): 

-

77 if not isinstance(conf, str): 

-

78 # Looking for arg names 

-

79 continue 

-

80 conf = conf.strip() 

-

81 if conf == "-o" or conf == "--solver-data": 

-

82 # solver output was redirected 

-

83 solver_data_file = Path(runsolver_configuration[idx + 1]) 

-

84 if (log_dir / solver_data_file).exists(): 

-

85 solver_output = (log_dir / solver_data_file).open("r").read() 

-

86 if "-v" in conf or "--var" in conf: 

-

87 value_data_file = Path(runsolver_configuration[idx + 1]) 

-

88 if "--cpu-limit" in conf: 

-

89 cutoff_time = float(runsolver_configuration[idx + 1]) 

-

90 if "-w" in conf or "--watcher-data" in conf: 

-

91 watch_file = Path(runsolver_configuration[idx + 1]) 

-

92 args_str = get_solver_args(log_dir / watch_file) 

-

93 if args_str == "": # Could not find log file or args 

-

94 continue 

-

95 solver_input = re.findall("{.*}", args_str)[0] 

-

96 solver_input = ast.literal_eval(solver_input) 

-

97 cutoff_time = float(solver_input["cutoff_time"]) 

-

98 

-

99 if solver_output is None: 

-

100 # Still empty, try to read from subprocess 

-

101 solver_output = process_output 

-

102 # Format output to only the brackets (dict) 

-

103 # NOTE: It should have only one match, do we want some error logging here? 

-

104 try: 

-

105 solver_regex_filter = re.findall("{.*}", solver_output)[0] 

-

106 output_dict = ast.literal_eval(solver_regex_filter) 

-

107 except Exception: 

-

108 config_str = " ".join(runsolver_configuration) 

-

109 print("WARNING: Solver output decoding failed from RunSolver configuration: " 

-

110 f"'{config_str}'. Setting status to 'UNKNOWN'.") 

-

111 output_dict = {"status": SolverStatus.UNKNOWN} 

-

112 

-

113 output_dict["cutoff_time"] = cutoff_time 

-

114 if value_data_file is not None: 

-

115 cpu_time, wall_time, memory = get_measurements(log_dir / value_data_file) 

-

116 output_dict["cpu_time"] = cpu_time 

-

117 output_dict["wall_time"] = wall_time 

-

118 output_dict["memory"] = memory 

-

119 else: # Could not retrieve cpu and wall time (log does not exist) 

-

120 output_dict["cpu_time"], output_dict["wall_time"] = -1.0, -1.0 

-

121 if output_dict["cpu_time"] > cutoff_time: 

-

122 output_dict["status"] = SolverStatus.TIMEOUT 

-

123 # Add the missing objectives (runtime based) 

-

124 if solver_input is not None and "objectives" in solver_input: 

-

125 objectives = solver_input["objectives"].split(",") 

-

126 for o_name in objectives: 

-

127 if o_name not in output_dict: 

-

128 output_dict[o_name] = None 

-

129 return output_dict 
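
A minimal usage sketch for get_measurements, assuming the module path shown above (sparkle.tools.runsolver_parsing); the values-log contents below are hypothetical but follow the WCTIME/CPUTIME/MAXVM keys the parser reads:

from pathlib import Path
from sparkle.tools.runsolver_parsing import get_measurements

# Hypothetical runsolver values log; keys match those parsed above
values_log = Path("run.val")
values_log.write_text("WCTIME=12.34\nCPUTIME=11.08\nMAXVM=204800\n")

cpu, wall, mem = get_measurements(values_log)
# cpu == 11.08, wall == 12.34, mem == 200.0 (MAXVM is divided by 1024, per the MB comment)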

diff --git a/Documentation/source/_static/coverage/z_e7a48d036a5616ad_slurm_parsing_py.html b/Documentation/source/_static/coverage/z_e7a48d036a5616ad_slurm_parsing_py.html
deleted file mode 100644
index 4c7585bb0..000000000
--- a/Documentation/source/_static/coverage/z_e7a48d036a5616ad_slurm_parsing_py.html
+++ /dev/null
@@ -1,151 +0,0 @@
[Deleted generated coverage page (coverage.py v7.6.1, created 2024-09-27 09:26 +0200) for sparkle/tools/slurm_parsing.py: 24 statements, 42% covered. Recovered source listing:]

"""This module helps to extract and structure information from Slurm I/O."""
from __future__ import annotations
import re
from pathlib import Path


class SlurmBatch:
    """Class to parse a Slurm batch file and get structured information.

    Attributes
    ----------
    sbatch_options: list[str]
        The SBATCH options. Ex.: ["--array=-22%250", "--mem-per-cpu=3000"]
    cmd_params: list[str]
        The parameters to pass to the command
    cmd: str
        The command to execute
    srun_options: list[str]
        A list of arguments to pass to srun. Ex.: ["-n1", "--nodes=1"]
    file: Path
        The loaded file Path
    """
    # Precompiled regexes
    re_sbatch = re.compile("#SBATCH (.*)")
    re_params_all = re.compile(r"params=\( \\\n(?:(.*))\)", re.DOTALL)
    re_params_items = re.compile(r"'(.*)'")
    re_srun_all = re.compile(r"srun (.*)")
    re_srun_split = re.compile(r" (?!-)")

    def __init__(self: SlurmBatch, srcfile: Path) -> None:
        """Parse the data contained in srcfile and locally store the information."""
        self.file = Path(srcfile)

        with Path(self.file).open() as f:
            filestr = f.read()

        self.sbatch_options = SlurmBatch.re_sbatch.findall(filestr)

        # First find the cmd_params block ...
        cmd_block = ""
        if len(SlurmBatch.re_params_all.findall(filestr)) > 0:
            cmd_block = SlurmBatch.re_params_all.findall(filestr)[0]

        # ... then parse it
        self.cmd_params = SlurmBatch.re_params_items.findall(cmd_block)

        srun = SlurmBatch.re_srun_all.findall(filestr)[0]
        srun_args, cmd = SlurmBatch.re_srun_split.split(srun, maxsplit=1)

        self.srun_options = srun_args.split()

        self.cmd = cmd.replace("${params[$SLURM_ARRAY_TASK_ID]}", "").strip()
        self.cmd = self.cmd.replace("${output[$SLURM_ARRAY_TASK_ID]}", "").strip()
        self.cmd = self.cmd.replace(">", "").strip()
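
A usage sketch for SlurmBatch, assuming the module path shown above (sparkle.tools.slurm_parsing); the batch script below is hypothetical but is shaped to match the precompiled regexes:

from pathlib import Path
from sparkle.tools.slurm_parsing import SlurmBatch

# Hypothetical minimal Slurm batch script with a params array and an srun call
script = Path("job.sh")
script.write_text(
    "#!/bin/bash\n"
    "#SBATCH --array=0-1%2\n"
    "#SBATCH --mem-per-cpu=3000\n"
    "params=( \\\n"
    "'--seed 1' \\\n"
    "'--seed 2' \\\n"
    ")\n"
    "srun -n1 --nodes=1 ./solver ${params[$SLURM_ARRAY_TASK_ID]}\n"
)
batch = SlurmBatch(script)
# batch.sbatch_options == ["--array=0-1%2", "--mem-per-cpu=3000"]
# batch.cmd_params == ["--seed 1", "--seed 2"]
# batch.srun_options == ["-n1", "--nodes=1"]
# batch.cmd == "./solver"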

diff --git a/Documentation/source/_static/coverage/z_e7a48d036a5616ad_solver_wrapper_parsing_py.html b/Documentation/source/_static/coverage/z_e7a48d036a5616ad_solver_wrapper_parsing_py.html
deleted file mode 100644
index 5d87e76d3..000000000
--- a/Documentation/source/_static/coverage/z_e7a48d036a5616ad_solver_wrapper_parsing_py.html
+++ /dev/null
@@ -1,164 +0,0 @@
[Deleted generated coverage page (coverage.py v7.6.1, created 2024-09-27 09:26 +0200) for sparkle/tools/solver_wrapper_parsing.py: 30 statements, 23% covered. Recovered source listing:]

"""This module provides tools for the argument parsing for solver wrappers."""
from pathlib import Path
import ast
from typing import Any

from sparkle.types import resolve_objective


def parse_commandline_dict(args: list[str]) -> dict:
    """Parse a command line dictionary string into a dict object."""
    dict_str = " ".join(args)
    dict_str = dict_str[dict_str.index("{"):dict_str.index("}") + 1]  # Slurm script fix
    return ast.literal_eval(dict_str)


def parse_solver_wrapper_args(args: list[str]) -> dict[Any]:
    """Parse the arguments passed to the solver wrapper.

    Args:
        args: a list of arguments passed via the command line. It is ensured by Sparkle
            that this list contains certain keys such as `solver_dir`.

    Returns:
        A dictionary mapping argument names to their currently held values.
    """
    args_dict = parse_commandline_dict(args)

    # Some data needs specific formatting
    args_dict["solver_dir"] = Path(args_dict["solver_dir"])
    args_dict["instance"] = Path(args_dict["instance"])
    args_dict["seed"] = int(args_dict["seed"])
    args_dict["objectives"] = [resolve_objective(name)
                               for name in args_dict["objectives"].split(",")]
    args_dict["cutoff_time"] = float(args_dict["cutoff_time"])

    if "config_path" in args_dict:
        # The arguments were not given directly and must be parsed from a file
        config_str = Path(args_dict["config_path"]).open("r")\
            .readlines()[args_dict["seed"]]
        # Extract the args without any quotes
        config_split = [arg.strip().replace("'", "").replace('"', "").strip("-")
                        for arg in config_str.split(" -") if arg.strip() != ""]
        for arg in config_split:
            varname, value = arg.strip("'").strip('"').split(" ", maxsplit=1)
            args_dict[varname] = value
        del args_dict["config_path"]

    return args_dict


def get_solver_call_params(args_dict: dict) -> list[str]:
    """Gather the additional parameters for the solver call.

    Args:
        args_dict: Dictionary mapping argument names to their currently held values

    Returns:
        A list of parameters for the solver call
    """
    params = []
    # Certain arguments are not relevant/have already been processed
    ignore_args = {"solver_dir", "instance", "cutoff_time", "seed", "objectives"}
    for key in args_dict:
        if key not in ignore_args and args_dict[key] is not None:
            params.extend(["-" + str(key), str(args_dict[key])])

    return params
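
A small sketch of the filtering behaviour of get_solver_call_params; the argument names alpha and beta below are hypothetical:

from sparkle.tools.solver_wrapper_parsing import get_solver_call_params

args = {"solver_dir": "solvers/demo", "instance": "x.cnf", "seed": 42,
        "cutoff_time": 60.0, "objectives": [], "alpha": "0.5", "beta": None}
# Bookkeeping keys and None values are skipped; only real solver parameters remain
print(get_solver_call_params(args))  # ['-alpha', '0.5']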

diff --git a/Documentation/source/_static/junit/assets/style.css b/Documentation/source/_static/junit/assets/style.css
deleted file mode 100644
index 561524c69..000000000
--- a/Documentation/source/_static/junit/assets/style.css
+++ /dev/null
@@ -1,319 +0,0 @@
[Deleted generated pytest-html stylesheet (319 lines): summary layout, test-result colours, results table, log expander, media viewer and sort/filter controls. No project-specific content.]
diff --git a/Documentation/source/_static/junit/index.html b/Documentation/source/_static/junit/index.html
deleted file mode 100644
index 8a3cc626b..000000000
--- a/Documentation/source/_static/junit/index.html
+++ /dev/null
@@ -1,770 +0,0 @@

[Deleted generated pytest-html report page (index.html), generated on 27-Sep-2024 at 09:34:34 by pytest-html v4.1.1: 109 tests took 00:00:15, with 109 passed, 0 failed, 0 skipped, 0 expected failures, 0 unexpected passes, 0 errors, 0 reruns.]
\ No newline at end of file
diff --git a/Documentation/source/index.md b/Documentation/source/index.md
index 218ace180..ea405cb08 100644
--- a/Documentation/source/index.md
+++ b/Documentation/source/index.md
@@ -4,12 +4,20 @@
 % contain the root `toctree` directive.
 
 # _Sparkle_
-
-[![Tests](../../.reports/junit/junit-badge.svg)](_static/coverage/index.html)
-![tests status](https://github.com/ada-research/sparkle/actions/workflows/unittest.yml/badge.svg?event=push)
-![Coverage Status](../../.reports/coverage/coverage-badge.svg)
-![linter](https://github.com/ada-research/sparkle/actions/workflows/linter.yml/badge.svg?event=push)
-![docs](https://github.com/ada-research/sparkle/actions/workflows/documentation.yml/badge.svg?event=push)
+```{eval-rst}
+.. image:: ../../.reports/junit/junit-badge.svg
+   :target: _static/junit/index.html
+   :alt: Unit tests
+.. image:: ../../.reports/coverage/coverage-badge.svg
+   :target: _static/coverage/index.html
+   :alt: Test Coverage
+.. image:: https://github.com/ada-research/sparkle/actions/workflows/unittest.yml/badge.svg?event=push
+   :alt: Tests passing
+.. image:: https://github.com/ada-research/sparkle/actions/workflows/linter.yml/badge.svg?event=push
+   :alt: Linter
+.. image:: https://github.com/ada-research/sparkle/actions/workflows/documentation.yml/badge.svg?event=push
+   :alt: Documentation
+```
 
 > A Programming by Optimisation (PbO)-based problem-solving platform designed to enable the widespread and effective use of PbO techniques for improving the state-of-the-art in solving a broad range of prominent AI problems, including SAT and AI Planning.