diff --git a/testing/results_comparison.py b/testing/results_comparison.py
index 2a8e5501..c749e8ba 100644
--- a/testing/results_comparison.py
+++ b/testing/results_comparison.py
@@ -2,7 +2,22 @@
 import pandas as pd # type: ignore
 from tabulate import tabulate # type: ignore
 from pandas._testing import assert_frame_equal # type: ignore
-
+from testing.utils import assertEqual
+
+def compare_results_pytest(result_diff):
+    val_error_status_ind = False not in result_diff.val_error_status.tolist()
+    type_error_status_ind = False not in result_diff.type_error_status.tolist()
+    df_out = None
+    erroneous_indexes = None
+    select_result_diff = None
+
+    if (val_error_status_ind is False) or (type_error_status_ind is False):
+        erroneous_indexes = list(result_diff.index[result_diff["val_error_status"] == False])
+        select_result_diff = result_diff.iloc[erroneous_indexes]
+        df_out = tabulate(select_result_diff, headers='keys', tablefmt='psql')
+
+    assertEqual(val_error_status_ind, True, "Value Error")
+    assertEqual(type_error_status_ind, True, "Type Error")
 
 def compare_results(result_diff):
     class CompareResults(unittest.TestCase):
diff --git a/testing/tests/cadCAD_exp.py b/testing/tests/cadCAD_exp.py
deleted file mode 100644
index bc167283..00000000
--- a/testing/tests/cadCAD_exp.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import unittest, pandas as pd
-from tabulate import tabulate
-from testing.models import param_sweep
-from testing.results_comparison import dataframe_difference, compare_results
-from cadCAD import configs
-from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-
-exec_mode = ExecutionMode()
-exec_ctx = ExecutionContext(context=exec_mode.local_mode)
-run = Executor(exec_context=exec_ctx, configs=configs)
-raw_result, _, _ = run.execute()
-
-result_df = pd.DataFrame(raw_result)
-expected_df = pd.read_pickle("expected_results/param_sweep_4.pkl")
-result_diff = dataframe_difference(result_df, expected_df)
-print(tabulate(result_diff, headers='keys', tablefmt='psql'))
-
-
-class ParamSweepTest(compare_results(result_diff)):
-    pass
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/testing/tests/import_cadCAD_test.py b/testing/tests/import_cadCAD_test.py
deleted file mode 100644
index 7ad72bf5..00000000
--- a/testing/tests/import_cadCAD_test.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import unittest, os, subprocess, json
-
-class JupyterServerTest(unittest.TestCase):
-    def test_row_count(self):
-        command = f'jupyter nbconvert --to=notebook --ExecutePreprocessor.enabled=True {os.getcwd()}/testing/tests/import_cadCAD.ipynb'
-        process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
-        process.communicate()
-        json_path = f'{os.getcwd()}/testing/tests/cadCAD_memory_address.json'
-        memory_address = json.load(open(json_path))['memory_address']
-        self.assertEqual(type(memory_address) == str, True, "cadCAD is not importable by jupyter server")
-
-if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
diff --git a/testing/tests/multi_model_row_count.py b/testing/tests/multi_model_row_count.py
deleted file mode 100644
index e7a2fad9..00000000
--- a/testing/tests/multi_model_row_count.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import json
-import os
-import unittest, pandas as pd
-from cadCAD.configuration import Experiment
-from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-from testing.models import param_sweep, policy_aggregation
-
-exp = Experiment()
-sys_model_A_id = "sys_model_A"
-exp.append_model(
-    model_id=sys_model_A_id,
-    sim_configs=param_sweep.sim_config,
-    initial_state=param_sweep.genesis_states,
-    env_processes=param_sweep.env_process,
-    partial_state_update_blocks=param_sweep.partial_state_update_blocks
-)
-sys_model_B_id = "sys_model_B"
-exp.append_model(
-    model_id=sys_model_B_id,
-    sim_configs=param_sweep.sim_config,
-    initial_state=param_sweep.genesis_states,
-    env_processes=param_sweep.env_process,
-    partial_state_update_blocks=param_sweep.partial_state_update_blocks
-)
-sys_model_C_id = "sys_model_C"
-exp.append_model(
-    model_id=sys_model_C_id,
-    sim_configs=policy_aggregation.sim_config,
-    initial_state=policy_aggregation.genesis_states,
-    partial_state_update_blocks=policy_aggregation.partial_state_update_block,
-    policy_ops=[lambda a, b: a + b, lambda y: y * 2] # Default: lambda a, b: a + b
-)
-
-simulation = 3
-model_A_sweeps = len(param_sweep.sim_config)
-model_B_sweeps = len(param_sweep.sim_config)
-model_C_sweeps = 1
-# total_sweeps = model_A_sweeps + model_B_sweeps
-
-model_A_runs = param_sweep.sim_config[0]['N']
-model_B_runs = param_sweep.sim_config[0]['N']
-model_C_runs = policy_aggregation.sim_config['N']
-# total_runs = model_A_runs + model_B_runs
-
-model_A_timesteps = len(param_sweep.sim_config[0]['T'])
-model_B_timesteps = len(param_sweep.sim_config[0]['T'])
-model_C_timesteps = len(policy_aggregation.sim_config['T'])
-
-model_A_substeps = len(param_sweep.partial_state_update_blocks)
-model_B_substeps = len(param_sweep.partial_state_update_blocks)
-model_C_substeps = len(policy_aggregation.partial_state_update_block)
-# total_substeps = model_A_substeps + model_B_substeps
-
-model_A_init_rows = model_A_runs * model_A_sweeps
-model_B_init_rows = model_B_runs * model_B_sweeps
-model_C_init_rows = model_C_runs * 1
-model_A_rows = model_A_init_rows + (model_A_sweeps * (model_A_runs * model_A_timesteps * model_A_substeps))
-model_B_rows = model_B_init_rows + (model_B_sweeps * (model_B_runs * model_B_timesteps * model_B_substeps))
-model_C_rows = model_C_init_rows + (model_C_sweeps * (model_C_runs * model_C_timesteps * model_C_substeps))
-
-
-exec_mode = ExecutionMode()
-local_mode_ctx = ExecutionContext(context=exec_mode.local_mode)
-simulation = Executor(exec_context=local_mode_ctx, configs=exp.configs)
-raw_results, _, _ = simulation.execute()
-
-results_df = pd.DataFrame(raw_results)
-param_sweep_df = pd.read_pickle("expected_results/param_sweep_4.pkl")
-policy_agg_df = pd.read_pickle("expected_results/policy_agg_4.pkl")
-param_sweep_df_rows = len(param_sweep_df.index)
-policy_agg_df_rows = len(policy_agg_df.index)
-
-expected_rows = param_sweep_df_rows + param_sweep_df_rows + policy_agg_df_rows
-expected_rows_from_api = model_A_rows + model_B_rows + model_C_rows
-result_rows = len(results_df.index)
-
-
-class RowCountTest(unittest.TestCase):
-    def test_row_count(self):
-        equal_row_count = expected_rows == expected_rows_from_api == result_rows
-        self.assertEqual(equal_row_count, True, "Row Count Mismatch between Expected and Multi-Model simulation results")
-    def test_row_count_from_api(self):
-        self.assertEqual(expected_rows == expected_rows_from_api, True, "API not producing Expected simulation results")
-    def test_row_count_from_results(self):
-        self.assertEqual(expected_rows == result_rows, True, "Engine not producing Expected simulation results")
-    def test_row_count_from_sys_model_A(self):
-        self.assertEqual(model_A_rows == param_sweep_df_rows, True, f"{sys_model_A_id}: Row Count Mismatch with Expected results")
-    def test_row_count_from_sys_model_B(self):
-        self.assertEqual(model_B_rows == param_sweep_df_rows, True, f"{sys_model_B_id}: Row Count Mismatch with Expected results")
-    def test_row_count_from_sys_model_C(self):
-        self.assertEqual(model_C_rows == policy_agg_df_rows, True, f"{sys_model_C_id}: Row Count Mismatch with Expected results")
-    def test_a_b_row_count(self):
-        file_path = f'{os.getcwd()}/testing/tests/a_b_tests/0_4_23_record_count.json'
-        record_count_0_4_23 = json.load(open(file_path))['record_count']
-        record_count_current = result_rows
-        self.assertEqual(record_count_current > record_count_0_4_23, True, "Invalid Row Count for current version")
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/testing/tests/param_sweep.py b/testing/tests/param_sweep.py
deleted file mode 100644
index 84af144d..00000000
--- a/testing/tests/param_sweep.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import unittest, pandas as pd
-from tabulate import tabulate
-from testing.models import param_sweep
-from testing.results_comparison import dataframe_difference, compare_results
-from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-
-exec_mode = ExecutionMode()
-exec_ctx = ExecutionContext(context=exec_mode.local_mode)
-run = Executor(exec_context=exec_ctx, configs=param_sweep.exp.configs)
-raw_result, _, _ = run.execute()
-
-result_df = pd.DataFrame(raw_result)
-expected_df = pd.read_pickle("expected_results/param_sweep_4.pkl")
-result_diff = dataframe_difference(result_df, expected_df)
-print(tabulate(result_diff, headers='keys', tablefmt='psql'))
-
-
-class ParamSweepTest(compare_results(result_diff)):
-    pass
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/testing/tests/policy_aggregation.py b/testing/tests/policy_aggregation.py
deleted file mode 100644
index 30b2b439..00000000
--- a/testing/tests/policy_aggregation.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import unittest, pandas as pd
-from tabulate import tabulate
-from testing.models import policy_aggregation as policy_agg
-from testing.results_comparison import dataframe_difference, compare_results
-from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-
-exec_mode = ExecutionMode()
-exec_ctx = ExecutionContext(context=exec_mode.local_mode)
-run = Executor(exec_context=exec_ctx, configs=policy_agg.exp.configs)
-raw_result, _, _ = run.execute()
-
-result_df = pd.DataFrame(raw_result)
-expected_df = pd.read_pickle("expected_results/policy_agg_4.pkl")
-result_diff = dataframe_difference(result_df, expected_df)
-print(tabulate(result_diff, headers='keys', tablefmt='psql'))
-
-
-class PolicyAggTest(compare_results(result_diff)):
-    pass
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/testing/tests/run1psub0.py b/testing/tests/run1psub0.py
deleted file mode 100644
index bb62b322..00000000
--- a/testing/tests/run1psub0.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import unittest, pandas as pd
-from testing.results_comparison import dataframe_difference, compare_results
-
-expected_df = pd.read_pickle("expected_results/param_sweep_psub0_4.pkl")
-param_sweep_df = pd.read_pickle("expected_results/param_sweep_4.pkl")
-result_df = param_sweep_df[
-    (param_sweep_df.index > 0) & (param_sweep_df['subset'] < 1) & (param_sweep_df['run'] == 1)
-]
-result_diff = dataframe_difference(result_df, expected_df)
-
-
-class run1psub0Test(compare_results(result_diff)):
-    pass
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/testing/tests/runs_not_zero.py b/testing/tests/runs_not_zero.py
deleted file mode 100644
index 972bc340..00000000
--- a/testing/tests/runs_not_zero.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import unittest
-from cadCAD.configuration.utils import config_sim
-
-val_error_indicator = False
-try:
-    sim_config = config_sim(
-        {
-            "N": 0,
-            "T": range(5)
-        }
-    )
-except ValueError:
-    val_error_indicator = True
-
-
-class RunExceptionTest(unittest.TestCase):
-    def test_multi_model(self):
-        self.assertEqual(val_error_indicator, True, "ValueError raised when runs (N) < 1")
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/testing/tests/test_cadCAD_exp.py b/testing/tests/test_cadCAD_exp.py
new file mode 100644
index 00000000..c336c1a6
--- /dev/null
+++ b/testing/tests/test_cadCAD_exp.py
@@ -0,0 +1,22 @@
+import pandas as pd
+from tabulate import tabulate
+from testing.results_comparison import dataframe_difference, compare_results_pytest
+from cadCAD import configs
+from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
+import pytest
+
+
+@pytest.fixture
+def empty_experiment():
+    exec_mode = ExecutionMode()
+    exec_ctx = ExecutionContext(context=exec_mode.local_mode)
+    run = Executor(exec_context=exec_ctx, configs=configs)
+    raw_result, _, _ = run.execute()
+
+    result_df = pd.DataFrame(raw_result)
+    expected_df = pd.read_pickle("expected_results/param_sweep_4.pkl")
+    return dataframe_difference(result_df, expected_df)
+
+
+def test_experiment(empty_experiment):
+    compare_results_pytest(empty_experiment)
diff --git a/testing/tests/test_import_cadCAD_test.py b/testing/tests/test_import_cadCAD_test.py
new file mode 100644
index 00000000..40a65df5
--- /dev/null
+++ b/testing/tests/test_import_cadCAD_test.py
@@ -0,0 +1,10 @@
+import os, subprocess, json
+from testing.utils import assertEqual
+
+def test_jupyter_nbconvert_row_count():
+    command = f'jupyter nbconvert --to=notebook --ExecutePreprocessor.enabled=True {os.getcwd()}/testing/tests/import_cadCAD.ipynb'
+    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
+    process.communicate()
+    json_path = f'{os.getcwd()}/testing/tests/cadCAD_memory_address.json'
+    memory_address = json.load(open(json_path))['memory_address']
+    assertEqual(type(memory_address) == str, True, "cadCAD is not importable by jupyter server")
diff --git a/testing/tests/test_multi_model_row_count.py b/testing/tests/test_multi_model_row_count.py
new file mode 100644
index 00000000..527df596
--- /dev/null
+++ b/testing/tests/test_multi_model_row_count.py
@@ -0,0 +1,138 @@
+import json
+import os
+import pandas as pd # type: ignore
+from cadCAD.configuration import Experiment
+from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
+from testing.models import param_sweep, policy_aggregation
+import pytest
+from dataclasses import dataclass
+from testing.utils import assertEqual
+
+@dataclass
+class MultiModelRowCountResults():
+    expected_rows: object
+    expected_rows_from_api: object
+    result_rows: object
+    param_sweep_df_rows: object
+    policy_agg_df_rows: object
+    sys_model_A_id: object
+    sys_model_B_id: object
+    sys_model_C_id: object
+    model_A_rows: object
+    model_B_rows: object
+    model_C_rows: object
+
+@pytest.fixture
+def multi_model_row_count():
+    exp = Experiment()
+    sys_model_A_id = "sys_model_A"
+    exp.append_model(
+        model_id=sys_model_A_id,
+        sim_configs=param_sweep.sim_config,
+        initial_state=param_sweep.genesis_states,
+        env_processes=param_sweep.env_process,
+        partial_state_update_blocks=param_sweep.partial_state_update_blocks
+    )
+    sys_model_B_id = "sys_model_B"
+    exp.append_model(
+        model_id=sys_model_B_id,
+        sim_configs=param_sweep.sim_config,
+        initial_state=param_sweep.genesis_states,
+        env_processes=param_sweep.env_process,
+        partial_state_update_blocks=param_sweep.partial_state_update_blocks
+    )
+    sys_model_C_id = "sys_model_C"
+    exp.append_model(
+        model_id=sys_model_C_id,
+        sim_configs=policy_aggregation.sim_config,
+        initial_state=policy_aggregation.genesis_states,
+        partial_state_update_blocks=policy_aggregation.partial_state_update_block,
+        policy_ops=[lambda a, b: a + b, lambda y: y * 2] # Default: lambda a, b: a + b
+    )
+
+    simulation = 3
+    model_A_sweeps = len(param_sweep.sim_config)
+    model_B_sweeps = len(param_sweep.sim_config)
+    model_C_sweeps = 1
+    # total_sweeps = model_A_sweeps + model_B_sweeps
+
+    model_A_runs = param_sweep.sim_config[0]['N']
+    model_B_runs = param_sweep.sim_config[0]['N']
+    model_C_runs = policy_aggregation.sim_config['N']
+    # total_runs = model_A_runs + model_B_runs
+
+    model_A_timesteps = len(param_sweep.sim_config[0]['T'])
+    model_B_timesteps = len(param_sweep.sim_config[0]['T'])
+    model_C_timesteps = len(policy_aggregation.sim_config['T'])
+
+    model_A_substeps = len(param_sweep.partial_state_update_blocks)
+    model_B_substeps = len(param_sweep.partial_state_update_blocks)
+    model_C_substeps = len(policy_aggregation.partial_state_update_block)
+    # total_substeps = model_A_substeps + model_B_substeps
+
+    model_A_init_rows = model_A_runs * model_A_sweeps
+    model_B_init_rows = model_B_runs * model_B_sweeps
+    model_C_init_rows = model_C_runs * 1
+    model_A_rows = model_A_init_rows + (model_A_sweeps * (model_A_runs * model_A_timesteps * model_A_substeps))
+    model_B_rows = model_B_init_rows + (model_B_sweeps * (model_B_runs * model_B_timesteps * model_B_substeps))
+    model_C_rows = model_C_init_rows + (model_C_sweeps * (model_C_runs * model_C_timesteps * model_C_substeps))
+
+
+    exec_mode = ExecutionMode()
+    local_mode_ctx = ExecutionContext(context=exec_mode.local_mode)
+    simulation = Executor(exec_context=local_mode_ctx, configs=exp.configs)
+    raw_results, _, _ = simulation.execute()
+
+    results_df = pd.DataFrame(raw_results)
+    param_sweep_df = pd.read_pickle("expected_results/param_sweep_4.pkl")
+    policy_agg_df = pd.read_pickle("expected_results/policy_agg_4.pkl")
+    param_sweep_df_rows = len(param_sweep_df.index)
+    policy_agg_df_rows = len(policy_agg_df.index)
+
+    expected_rows = param_sweep_df_rows + param_sweep_df_rows + policy_agg_df_rows
+    expected_rows_from_api = model_A_rows + model_B_rows + model_C_rows
+    result_rows = len(results_df.index)
+    return MultiModelRowCountResults(expected_rows,
+                                     expected_rows_from_api,
+                                     result_rows,
+                                     param_sweep_df_rows,
+                                     policy_agg_df_rows,
+                                     sys_model_A_id,
+                                     sys_model_B_id,
+                                     sys_model_C_id,
+                                     model_A_rows,
+                                     model_B_rows,
+                                     model_C_rows)
+
+
+def test_row_count(multi_model_row_count: MultiModelRowCountResults):
+    equal_row_count = multi_model_row_count.expected_rows == multi_model_row_count.expected_rows_from_api == multi_model_row_count.result_rows
+    assertEqual(equal_row_count, True, "Row Count Mismatch between Expected and Multi-Model simulation results")
+
+
+def test_row_count_from_api(multi_model_row_count: MultiModelRowCountResults):
+    assertEqual(multi_model_row_count.expected_rows == multi_model_row_count.expected_rows_from_api, True, "API not producing Expected simulation results")
+
+
+def test_row_count_from_results(multi_model_row_count: MultiModelRowCountResults):
+    assertEqual(multi_model_row_count.expected_rows == multi_model_row_count.result_rows, True, "Engine not producing Expected simulation results")
+
+
+def test_row_count_from_sys_model_A(multi_model_row_count: MultiModelRowCountResults):
+    assertEqual(multi_model_row_count.model_A_rows == multi_model_row_count.param_sweep_df_rows, True, f"{multi_model_row_count.sys_model_A_id}: Row Count Mismatch with Expected results")
+
+
+def test_row_count_from_sys_model_B(multi_model_row_count: MultiModelRowCountResults):
+    assertEqual(multi_model_row_count.model_B_rows == multi_model_row_count.param_sweep_df_rows, True, f"{multi_model_row_count.sys_model_B_id}: Row Count Mismatch with Expected results")
+
+
+def test_row_count_from_sys_model_C(multi_model_row_count: MultiModelRowCountResults):
+    assertEqual(multi_model_row_count.model_C_rows == multi_model_row_count.policy_agg_df_rows, True, f"{multi_model_row_count.sys_model_C_id}: Row Count Mismatch with Expected results")
+
+
+def test_a_b_row_count(multi_model_row_count: MultiModelRowCountResults):
+    file_path = f'{os.getcwd()}/testing/tests/a_b_tests/0_4_23_record_count.json'
+    record_count_0_4_23 = json.load(open(file_path))['record_count']
+    record_count_current = multi_model_row_count.result_rows
+    assertEqual(record_count_current > record_count_0_4_23, True, "Invalid Row Count for current version")
+
diff --git a/testing/tests/test_param_sweep.py b/testing/tests/test_param_sweep.py
new file mode 100644
index 00000000..ff2801b9
--- /dev/null
+++ b/testing/tests/test_param_sweep.py
@@ -0,0 +1,21 @@
+from testing.models import param_sweep
+from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
+import pandas as pd # type: ignore
+from testing.results_comparison import dataframe_difference, compare_results_pytest
+import pytest
+
+
+@pytest.fixture
+def ParamSweep():
+    exec_mode = ExecutionMode()
+    exec_ctx = ExecutionContext(context=exec_mode.local_mode)
+    run = Executor(exec_context=exec_ctx, configs=param_sweep.exp.configs)
+    raw_result, _, _ = run.execute()
+
+    result_df = pd.DataFrame(raw_result)
+    expected_df = pd.read_pickle("expected_results/param_sweep_4.pkl")
+    result_diff = dataframe_difference(result_df, expected_df)
+    return result_diff
+
+def test_pytest_compare_results(ParamSweep):
+    compare_results_pytest(ParamSweep)
diff --git a/testing/tests/test_policy_aggregation.py b/testing/tests/test_policy_aggregation.py
new file mode 100644
index 00000000..ee2d2156
--- /dev/null
+++ b/testing/tests/test_policy_aggregation.py
@@ -0,0 +1,22 @@
+import pandas as pd # type: ignore
+from testing.models import policy_aggregation as policy_agg
+from testing.results_comparison import dataframe_difference
+from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
+from testing.results_comparison import dataframe_difference, compare_results_pytest
+import pytest
+
+@pytest.fixture
+def PolicyAggregation():
+    exec_mode = ExecutionMode()
+    exec_ctx = ExecutionContext(context=exec_mode.local_mode)
+    run = Executor(exec_context=exec_ctx, configs=policy_agg.exp.configs)
+    raw_result, _, _ = run.execute()
+
+    result_df = pd.DataFrame(raw_result)
+    expected_df = pd.read_pickle("expected_results/policy_agg_4.pkl")
+    result_diff = dataframe_difference(result_df, expected_df)
+    return result_diff
+
+def test_pytest_compare_results(PolicyAggregation):
+    compare_results_pytest(PolicyAggregation)
+
diff --git a/testing/tests/test_run1psub0.py b/testing/tests/test_run1psub0.py
new file mode 100644
index 00000000..d73dd5a6
--- /dev/null
+++ b/testing/tests/test_run1psub0.py
@@ -0,0 +1,18 @@
+import pandas as pd # type: ignore
+from testing.results_comparison import dataframe_difference, compare_results_pytest
+import pytest
+
+
+@pytest.fixture
+def Run1Psub0():
+    expected_df = pd.read_pickle("expected_results/param_sweep_psub0_4.pkl")
+    param_sweep_df = pd.read_pickle("expected_results/param_sweep_4.pkl")
+    result_df = param_sweep_df[
+        (param_sweep_df.index > 0) & (param_sweep_df['subset'] < 1) & (param_sweep_df['run'] == 1)
+    ]
+    result_diff = dataframe_difference(result_df, expected_df)
+    return result_diff
+
+def test_pytest_compare_results(Run1Psub0):
+    compare_results_pytest(Run1Psub0)
+
diff --git a/testing/tests/test_runs_not_zero.py b/testing/tests/test_runs_not_zero.py
new file mode 100644
index 00000000..31caf689
--- /dev/null
+++ b/testing/tests/test_runs_not_zero.py
@@ -0,0 +1,15 @@
+from cadCAD.configuration.utils import config_sim
+from testing.utils import assertEqual
+
+def test_runs_not_zero():
+    val_error_indicator = False
+    try:
+        sim_config = config_sim(
+            {
+                "N": 0,
+                "T": range(5)
+            }
+        )
+    except ValueError:
+        val_error_indicator = True
+    assertEqual(val_error_indicator, True, "ValueError raised when runs (N) < 1")
diff --git a/testing/tests/test_timestep1psub0.py b/testing/tests/test_timestep1psub0.py
new file mode 100644
index 00000000..4dab94da
--- /dev/null
+++ b/testing/tests/test_timestep1psub0.py
@@ -0,0 +1,19 @@
+import pandas as pd
+from testing.results_comparison import dataframe_difference, compare_results_pytest
+import pytest
+
+
+@pytest.fixture
+def Timestep1Psub0():
+    expected_df = pd.read_pickle("expected_results/param_sweep_timestep1_4.pkl")
+    param_sweep_df = pd.read_pickle("expected_results/param_sweep_4.pkl")
+    result_df = param_sweep_df[
+        (param_sweep_df.index > 0) &
+        (param_sweep_df['subset'] < 1) &
+        (param_sweep_df['timestep'] < 2) &
+        (param_sweep_df['run'] == 1)
+    ]
+    return dataframe_difference(result_df, expected_df)
+
+def test_timestep1psub0(Timestep1Psub0):
+    compare_results_pytest(Timestep1Psub0)
diff --git a/testing/tests/timestep1psub0.py b/testing/tests/timestep1psub0.py
deleted file mode 100644
index 64f20eb9..00000000
--- a/testing/tests/timestep1psub0.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import unittest, pandas as pd
-from testing.results_comparison import dataframe_difference, compare_results
-
-expected_df = pd.read_pickle("expected_results/param_sweep_timestep1_4.pkl")
-param_sweep_df = pd.read_pickle("expected_results/param_sweep_4.pkl")
-result_df = param_sweep_df[
-    (param_sweep_df.index > 0) &
-    (param_sweep_df['subset'] < 1) &
-    (param_sweep_df['timestep'] < 2) &
-    (param_sweep_df['run'] == 1)
-]
-result_diff = dataframe_difference(result_df, expected_df)
-
-
-class timestep1psub0Test(compare_results(result_diff)):
-    pass
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/testing/utils.py b/testing/utils.py
index 0fff73de..fe3b44e4 100644
--- a/testing/utils.py
+++ b/testing/utils.py
@@ -19,3 +19,11 @@ def gen_metric_row(row, cols):
 
 def gen_metric_dict(df, cols):
     return dict([gen_metric_row(row, cols) for index, row in df.iterrows()])
+
+
+
+def assertEqual(_1, _2, _3=None):
+    if _3 == None:
+        assert _1 == _2
+    else:
+        assert _1 == _2, _3
\ No newline at end of file