From b87acddd575fb557fc107418565da715b8e5683f Mon Sep 17 00:00:00 2001 From: Emanuel Lima Date: Thu, 14 Dec 2023 17:30:00 -0300 Subject: [PATCH 1/3] Substitute CircleCI for GH Actions (#314) * Substitute CircleCI for GH Actions Signed-off-by: Emanuel Lima --- .circleci/config.yml | 67 --------------------------------- .github/workflows/cadcad-ci.yml | 61 ++++++++++++++++++++++++++++++ requirements.txt | 30 ++++++--------- 3 files changed, 72 insertions(+), 86 deletions(-) delete mode 100644 .circleci/config.yml create mode 100644 .github/workflows/cadcad-ci.yml diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 8d16cb4f..00000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,67 +0,0 @@ -version: 2.1 - -orbs: - python: circleci/python@0.3.2 -jobs: - test: - executor: python/default - steps: - - checkout - - python/load-cache - - python/install-deps - - python/save-cache - - run: pip install cadCAD==0.4.23 - - run: - command: python testing/tests/a_b_tests/multi_model_row_count_0_4_23.py - name: Multi Model Row Count (ver. 0.4.23) - - run: pip install -r requirements.txt --force-reinstall - - run: python setup.py bdist_wheel - - run: pip install dist/*.whl --force-reinstall - - run: - command: python testing/tests/multi_model_row_count.py - name: Multi Model Row Count - - run: - command: python testing/tests/param_sweep.py - name: Parameter sweep - - run: - command: python testing/tests/policy_aggregation.py - name: Policy Aggregation - - run: - command: python testing/tests/timestep1psub0.py - name: Timestep equals 1 instead of 0 for 1st PSUB - - run: - command: python testing/tests/runs_not_zero.py - name: Value Error thrown when Runs < 1 - - run: - command: python testing/tests/run1psub0.py - name: Run Starts at 1 for PSUB 0 - - run: - command: python testing/tests/append_mod_test.py - name: Auto Append Model ID - - run: - command: python testing/tests/cadCAD_exp.py - name: Package Root Experiment and configs object -# - run: -# command: python -m unittest discover -s testing/tests -p "*_test.py" -# name: Test Suite - jupyterServerTest: - docker: - - image: cimg/python:3.9.5 - steps: - - checkout - - python/load-cache - - run: python --version - - run: pip install --upgrade pip - - run: pip install jupyter - - python/save-cache - - run: python setup.py bdist_wheel - - run: pip install dist/*.whl --force-reinstall - - run: - command: python testing/tests/import_cadCAD_test.py - name: cadCAD importable by Jupyter Server - -workflows: - main: - jobs: - - test - - jupyterServerTest diff --git a/.github/workflows/cadcad-ci.yml b/.github/workflows/cadcad-ci.yml new file mode 100644 index 00000000..9ff30705 --- /dev/null +++ b/.github/workflows/cadcad-ci.yml @@ -0,0 +1,61 @@ +# This workflow will install Python dependencies and run tests with multiple versions of Python + +name: cadCAD CI + +on: + push: + branches: [ "master" ] + pull_request: + branches: [ "master" ] + +permissions: + contents: read + +jobs: + build: + continue-on-error: true + + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + os: [ubuntu-latest, macos-latest] + + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + + - name: Display Python version + run: python -c "import sys; print(sys.version)" + + - name: Install test and build dependencies + run: | + python -m pip install --upgrade pip + 
python -m pip install jupyter + pip install -r requirements.txt + + - name: Build cadCAD + run: | + python setup.py bdist_wheel + python -m pip install dist/*.whl --force-reinstall + + - name: Run tests + run: | + python testing/tests/multi_model_row_count.py + python testing/tests/param_sweep.py + python testing/tests/policy_aggregation.py + python testing/tests/timestep1psub0.py + python testing/tests/runs_not_zero.py + python testing/tests/run1psub0.py + python testing/tests/append_mod_test.py + python testing/tests/cadCAD_exp.py + + - name: Run Jupyter test + run: | + python testing/tests/import_cadCAD_test.py diff --git a/requirements.txt b/requirements.txt index 0890a625..a951734a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,21 +1,13 @@ -i https://pypi.org/simple -matplotlib==3.3.2 -networkx==2.5 -parameterized==0.7.4 -plotly==4.10.0 -pytest==6.0.2 -scikit-learn==0.23.2 -scipy>=1.5.2 -seaborn==0.11.0 -tabulate==0.8.7 -xarray==0.16.0 -wheel==0.38.1 -pandas==1.1.5 -fn==0.4.3 -funcy==1.16 -dill==0.3.4 -pathos==0.2.8 -numpy==1.22.0 -pytz==2021.1 -six>=1.11.0 +parameterized>=0.7.4 +pytest>=6.0.2 +tabulate>=0.8.7 +wheel>=0.38.1 +pandas>=1.1.5 +funcy>=1.16 +dill>=0.3.4 +pathos>=0.2.8 +numpy>=1.22.0 +pytz>=2021.1 +setuptools>=69.0.2 From b9d291873496f5a79a34910dc9e90a48049d2666 Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Thu, 14 Dec 2023 17:35:57 -0300 Subject: [PATCH 2/3] fix tests + rm simulations/ folder (#312) --- simulations/__init__.py | 0 simulations/external_data/output.csv | 27 --- simulations/regression_tests/__init__.py | 1 - .../regression_tests/execs/__init__.py | 0 .../regression_tests/execs/config1_test.py | 16 -- .../regression_tests/execs/config2_test.py | 16 -- .../execs/multi_config_test.py | 17 -- .../execs/multi_config_test2.py | 17 -- .../execs/param_sweep_test.py | 16 -- .../regression_tests/execs/policy_agg_test.py | 17 -- simulations/regression_tests/experiments.py | 3 - .../regression_tests/models/__init__.py | 0 .../regression_tests/models/config1.py | 158 ---------------- .../regression_tests/models/config2.py | 148 --------------- .../regression_tests/models/config_multi_1.py | 172 ------------------ .../regression_tests/models/config_multi_2.py | 158 ---------------- .../regression_tests/models/param_sweep.py | 94 ---------- .../models/policy_aggregation.py | 83 --------- .../regression_tests/models/sweep_config.py | 101 ---------- testing/generic_test.py | 3 +- testing/tests/cadCAD_memory_address.json | 1 + testing/tests/dev/compare_results_dev.py | 40 ---- testing/tests/dev/compare_results_old.py | 79 -------- testing/tests/dev/out_check_dev.py | 28 --- testing/tests/dev/utils_dev.py | 5 - testing/tests/import_cadCAD.ipynb | 6 +- 26 files changed, 5 insertions(+), 1201 deletions(-) delete mode 100644 simulations/__init__.py delete mode 100644 simulations/external_data/output.csv delete mode 100644 simulations/regression_tests/__init__.py delete mode 100644 simulations/regression_tests/execs/__init__.py delete mode 100644 simulations/regression_tests/execs/config1_test.py delete mode 100644 simulations/regression_tests/execs/config2_test.py delete mode 100644 simulations/regression_tests/execs/multi_config_test.py delete mode 100644 simulations/regression_tests/execs/multi_config_test2.py delete mode 100644 simulations/regression_tests/execs/param_sweep_test.py delete mode 100644 simulations/regression_tests/execs/policy_agg_test.py delete mode 100644 simulations/regression_tests/experiments.py delete mode 100644 
simulations/regression_tests/models/__init__.py delete mode 100644 simulations/regression_tests/models/config1.py delete mode 100644 simulations/regression_tests/models/config2.py delete mode 100644 simulations/regression_tests/models/config_multi_1.py delete mode 100644 simulations/regression_tests/models/config_multi_2.py delete mode 100644 simulations/regression_tests/models/param_sweep.py delete mode 100644 simulations/regression_tests/models/policy_aggregation.py delete mode 100644 simulations/regression_tests/models/sweep_config.py create mode 100644 testing/tests/cadCAD_memory_address.json delete mode 100644 testing/tests/dev/compare_results_dev.py delete mode 100644 testing/tests/dev/compare_results_old.py delete mode 100644 testing/tests/dev/out_check_dev.py delete mode 100644 testing/tests/dev/utils_dev.py diff --git a/simulations/__init__.py b/simulations/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/simulations/external_data/output.csv b/simulations/external_data/output.csv deleted file mode 100644 index c28448f2..00000000 --- a/simulations/external_data/output.csv +++ /dev/null @@ -1,27 +0,0 @@ -ds1,ds2,ds3,run,substep,timestep -0,0,1,1,0,0 -1,40,5,1,1,1 -2,40,5,1,2,1 -3,40,5,1,3,1 -4,40,5,1,1,2 -5,40,5,1,2,2 -6,40,5,1,3,2 -7,40,5,1,1,3 -8,40,5,1,2,3 -9,40,5,1,3,3 -10,40,5,1,1,4 -11,40,5,1,2,4 -12,40,5,1,3,4 -0,0,1,2,0,0 -1,40,5,2,1,1 -2,40,5,2,2,1 -3,40,5,2,3,1 -4,40,5,2,1,2 -5,40,5,2,2,2 -6,40,5,2,3,2 -7,40,5,2,1,3 -8,40,5,2,2,3 -9,40,5,2,3,3 -10,40,5,2,1,4 -11,40,5,2,2,4 -12,40,5,2,3,4 diff --git a/simulations/regression_tests/__init__.py b/simulations/regression_tests/__init__.py deleted file mode 100644 index 666406b4..00000000 --- a/simulations/regression_tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from simulations.regression_tests import experiments \ No newline at end of file diff --git a/simulations/regression_tests/execs/__init__.py b/simulations/regression_tests/execs/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/simulations/regression_tests/execs/config1_test.py b/simulations/regression_tests/execs/config1_test.py deleted file mode 100644 index 1e9d5c8c..00000000 --- a/simulations/regression_tests/execs/config1_test.py +++ /dev/null @@ -1,16 +0,0 @@ -from pprint import pprint -import pandas as pd -from tabulate import tabulate - -from cadCAD.engine import ExecutionMode, ExecutionContext, Executor -from simulations.regression_tests.models import config1 - -exec_mode = ExecutionMode() - -local_proc_ctx = ExecutionContext(context=exec_mode.local_mode) -run = Executor(exec_context=local_proc_ctx, configs=config1.exp.configs) - -raw_result, tensor_fields, _ = run.execute() -result = pd.DataFrame(raw_result) -print(tabulate(tensor_fields[0], headers='keys', tablefmt='psql')) -print(tabulate(result, headers='keys', tablefmt='psql')) \ No newline at end of file diff --git a/simulations/regression_tests/execs/config2_test.py b/simulations/regression_tests/execs/config2_test.py deleted file mode 100644 index e745c759..00000000 --- a/simulations/regression_tests/execs/config2_test.py +++ /dev/null @@ -1,16 +0,0 @@ -from pprint import pprint -import pandas as pd -from tabulate import tabulate - -from cadCAD.engine import ExecutionMode, ExecutionContext, Executor -from simulations.regression_tests.models import config2 - -exec_mode = ExecutionMode() - -local_proc_ctx = ExecutionContext(context=exec_mode.local_mode) -run = Executor(exec_context=local_proc_ctx, configs=config2.exp.configs) - -raw_result, tensor_fields, 
_ = run.execute() -result = pd.DataFrame(raw_result) -print(tabulate(tensor_fields[0], headers='keys', tablefmt='psql')) -print(tabulate(result, headers='keys', tablefmt='psql')) \ No newline at end of file diff --git a/simulations/regression_tests/execs/multi_config_test.py b/simulations/regression_tests/execs/multi_config_test.py deleted file mode 100644 index d2329404..00000000 --- a/simulations/regression_tests/execs/multi_config_test.py +++ /dev/null @@ -1,17 +0,0 @@ -from tabulate import tabulate -from pprint import pprint -import pandas as pd - -from cadCAD.engine import ExecutionMode, ExecutionContext, Executor -from simulations.regression_tests.experiments import multi_exp -from simulations.regression_tests.models import config_multi_1, config_multi_2 - -exec_mode = ExecutionMode() - -local_proc_ctx = ExecutionContext(context=exec_mode.local_mode) -run = Executor(exec_context=local_proc_ctx, configs=multi_exp.configs) - -raw_result, tensor_fields, _ = run.execute() -result = pd.DataFrame(raw_result) -print(tabulate(tensor_fields[0], headers='keys', tablefmt='psql')) -print(tabulate(result, headers='keys', tablefmt='psql')) \ No newline at end of file diff --git a/simulations/regression_tests/execs/multi_config_test2.py b/simulations/regression_tests/execs/multi_config_test2.py deleted file mode 100644 index bdf785cd..00000000 --- a/simulations/regression_tests/execs/multi_config_test2.py +++ /dev/null @@ -1,17 +0,0 @@ -from tabulate import tabulate -from pprint import pprint -import pandas as pd - -from cadCAD.engine import ExecutionMode, ExecutionContext, Executor -from simulations.regression_tests.experiments import multi_exp -from simulations.regression_tests.models import config_multi_1, sweep_config - -exec_mode = ExecutionMode() - -local_proc_ctx = ExecutionContext(context=exec_mode.local_mode) -run = Executor(exec_context=local_proc_ctx, configs=multi_exp.configs) - -raw_result, tensor_fields, _ = run.execute() -result = pd.DataFrame(raw_result) -print(tabulate(tensor_fields[0], headers='keys', tablefmt='psql')) -print(tabulate(result, headers='keys', tablefmt='psql')) \ No newline at end of file diff --git a/simulations/regression_tests/execs/param_sweep_test.py b/simulations/regression_tests/execs/param_sweep_test.py deleted file mode 100644 index 6bb5eb2c..00000000 --- a/simulations/regression_tests/execs/param_sweep_test.py +++ /dev/null @@ -1,16 +0,0 @@ -from pprint import pprint - -import pandas as pd -from tabulate import tabulate -from cadCAD.engine import ExecutionMode, ExecutionContext, Executor -from simulations.regression_tests.models import sweep_config - -exec_mode = ExecutionMode() - -local_proc_ctx = ExecutionContext(context=exec_mode.local_mode) -run = Executor(exec_context=local_proc_ctx, configs=sweep_config.exp.configs) - -raw_result, tensor_fields, _ = run.execute() -result = pd.DataFrame(raw_result) -print(tabulate(tensor_fields[0], headers='keys', tablefmt='psql')) -print(tabulate(result, headers='keys', tablefmt='psql')) diff --git a/simulations/regression_tests/execs/policy_agg_test.py b/simulations/regression_tests/execs/policy_agg_test.py deleted file mode 100644 index 6a456db7..00000000 --- a/simulations/regression_tests/execs/policy_agg_test.py +++ /dev/null @@ -1,17 +0,0 @@ -from pprint import pprint - -import pandas as pd -from tabulate import tabulate - -from cadCAD.engine import ExecutionMode, ExecutionContext, Executor -from simulations.regression_tests.models import policy_aggregation - -exec_mode = ExecutionMode() - -local_proc_ctx = 
ExecutionContext(context=exec_mode.local_mode) -run = Executor(exec_context=local_proc_ctx, configs=policy_aggregation.exp.configs) - -raw_result, tensor_fields, _ = run.execute() -result = pd.DataFrame(raw_result) -print(tabulate(tensor_fields[0], headers='keys', tablefmt='psql')) -print(tabulate(result, headers='keys', tablefmt='psql')) diff --git a/simulations/regression_tests/experiments.py b/simulations/regression_tests/experiments.py deleted file mode 100644 index 87e7a234..00000000 --- a/simulations/regression_tests/experiments.py +++ /dev/null @@ -1,3 +0,0 @@ -from cadCAD.configuration import Experiment - -multi_exp = Experiment() \ No newline at end of file diff --git a/simulations/regression_tests/models/__init__.py b/simulations/regression_tests/models/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/simulations/regression_tests/models/config1.py b/simulations/regression_tests/models/config1.py deleted file mode 100644 index f27fddaf..00000000 --- a/simulations/regression_tests/models/config1.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -from datetime import timedelta - -from cadCAD.configuration import Experiment -from cadCAD.configuration.utils import bound_norm_random, config_sim, time_step, env_trigger - -seeds = { - 'z': np.random.RandomState(1), - 'a': np.random.RandomState(2), - 'b': np.random.RandomState(3), - 'c': np.random.RandomState(4) -} - - -# Policies per Mechanism -def p1m1(_g, step, sL, s, **kwargs): - return {'param1': 1} -def p2m1(_g, step, sL, s, **kwargs): - return {'param1': 1, 'param2': 4} - -def p1m2(_g, step, sL, s, **kwargs): - return {'param1': 'a', 'param2': 2} -def p2m2(_g, step, sL, s, **kwargs): - return {'param1': 'b', 'param2': 4} - -def p1m3(_g, step, sL, s, **kwargs): - return {'param1': ['c'], 'param2': np.array([10, 100])} -def p2m3(_g, step, sL, s, **kwargs): - return {'param1': ['d'], 'param2': np.array([20, 200])} - - -# Internal States per Mechanism -def s1m1(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = s['s1'] + 1 - return (y, x) -def s2m1(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - -def s1m2(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = s['s1'] + 1 - return (y, x) -def s2m2(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - -def s1m3(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = s['s1'] + 1 - return (y, x) -def s2m3(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - -def policies(_g, step, sL, s, _input, **kwargs): - y = 'policies' - x = _input - return (y, x) - - -# Exogenous States -proc_one_coef_A = 0.7 -proc_one_coef_B = 1.3 - -def es3(_g, step, sL, s, _input, **kwargs): - y = 's3' - x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B) - return (y, x) - -def es4(_g, step, sL, s, _input, **kwargs): - y = 's4' - x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B) - return (y, x) - -def update_timestamp(_g, step, sL, s, _input, **kwargs): - y = 'timestamp' - return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1)) - - -# Genesis States -genesis_states = { - 's1': 0.0, - 's2': 0.0, - 's3': 1.0, - 's4': 1.0, - 'timestamp': '2018-10-01 15:16:24' -} - - -# Environment Process -trigger_timestamps = ['2018-10-01 15:16:25', '2018-10-01 15:16:27', '2018-10-01 15:16:29'] -env_processes = { - "s3": [lambda _g, x: 5], - "s4": 
env_trigger(3)(trigger_field='timestamp', trigger_vals=trigger_timestamps, funct_list=[lambda _g, x: 10]) -} - - -partial_state_update_block = [ - { - "policies": { - "b1": p1m1, - "b2": p2m1 - }, - "variables": { - "s1": s1m1, - "s2": s2m1, - "s3": es3, - "s4": es4, - "timestamp": update_timestamp - } - }, - { - "policies": { - "b1": p1m2, - "b2": p2m2 - }, - "variables": { - "s1": s1m2, - "s2": s2m2, - # "s3": es3p1, - # "s4": es4p2, - } - }, - { - "policies": { - "b1": p1m3, - "b2": p2m3 - }, - "variables": { - "s1": s1m3, - "s2": s2m3, - # "s3": es3p1, - # "s4": es4p2, - } - } -] - -sim_config_dict = { - "N": 1, - "T": range(5) - } - -sim_config = config_sim(sim_config_dict) - -exp = Experiment() -exp.append_model( - model_id='sys_model_1', - sim_configs=sim_config, - initial_state=genesis_states, - env_processes=env_processes, - partial_state_update_blocks=partial_state_update_block, - policy_ops=[lambda a, b: a + b] -) diff --git a/simulations/regression_tests/models/config2.py b/simulations/regression_tests/models/config2.py deleted file mode 100644 index 88a8aa4e..00000000 --- a/simulations/regression_tests/models/config2.py +++ /dev/null @@ -1,148 +0,0 @@ -import numpy as np -from datetime import timedelta - -from cadCAD.configuration import Experiment -from cadCAD.configuration.utils import bound_norm_random, config_sim, env_trigger, time_step - -seeds = { - 'z': np.random.RandomState(1), - 'a': np.random.RandomState(2), - 'b': np.random.RandomState(3), - 'c': np.random.RandomState(3) -} - - -# Policies per Mechanism -def p1m1(_g, step, sL, s, **kwargs): - return {'param1': 1} -def p2m1(_g, step, sL, s, **kwargs): - return {'param2': 4} - -def p1m2(_g, step, sL, s, **kwargs): - return {'param1': 'a', 'param2': 2} -def p2m2(_g, step, sL, s, **kwargs): - return {'param1': 'b', 'param2': 4} - -def p1m3(_g, step, sL, s, **kwargs): - return {'param1': ['c'], 'param2': np.array([10, 100])} -def p2m3(_g, step, sL, s, **kwargs): - return {'param1': ['d'], 'param2': np.array([20, 200])} - - -# Internal States per Mechanism -def s1m1(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = _input['param1'] - return (y, x) -def s2m1(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - -def s1m2(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = _input['param1'] - return (y, x) -def s2m2(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - -def s1m3(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = _input['param1'] - return (y, x) -def s2m3(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - - -# Exogenous States -proc_one_coef_A = 0.7 -proc_one_coef_B = 1.3 - -def es3(_g, step, sL, s, _input, **kwargs): - y = 's3' - x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B) - return (y, x) - -def es4(_g, step, sL, s, _input, **kwargs): - y = 's4' - x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B) - return (y, x) - -def update_timestamp(_g, step, sL, s, _input, **kwargs): - y = 'timestamp' - return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1)) - - -# Genesis States -genesis_states = { - 's1': 0, - 's2': 0, - 's3': 1, - 's4': 1, - 'timestamp': '2018-10-01 15:16:24' -} - - -# Environment Process -trigger_timestamps = ['2018-10-01 15:16:25', '2018-10-01 15:16:27', '2018-10-01 15:16:29'] -env_processes = { - "s3": [lambda _g, x: 5], - "s4": env_trigger(3)(trigger_field='timestamp', 
trigger_vals=trigger_timestamps, funct_list=[lambda _g, x: 10]) -} - -partial_state_update_block = { - "m1": { - "policies": { - "b1": p1m1, - # "b2": p2m1 - }, - "states": { - "s1": s1m1, - # "s2": s2m1 - "s3": es3, - "s4": es4, - "timestep": update_timestamp - } - }, - "m2": { - "policies": { - "b1": p1m2, - # "b2": p2m2 - }, - "states": { - "s1": s1m2, - # "s2": s2m2 - } - }, - "m3": { - "policies": { - "b1": p1m3, - "b2": p2m3 - }, - "states": { - "s1": s1m3, - "s2": s2m3 - } - } -} - -sim_config_dict = { - "N": 3, - "T": range(5), -} - - -sim_config = config_sim(sim_config_dict) - -exp = Experiment() -exp.append_model( - model_id='sys_model_1', - sim_configs=sim_config, - initial_state=genesis_states, - env_processes=env_processes, - partial_state_update_blocks=partial_state_update_block -) diff --git a/simulations/regression_tests/models/config_multi_1.py b/simulations/regression_tests/models/config_multi_1.py deleted file mode 100644 index 03274368..00000000 --- a/simulations/regression_tests/models/config_multi_1.py +++ /dev/null @@ -1,172 +0,0 @@ -from copy import deepcopy -from pprint import pprint - -import numpy as np -from datetime import timedelta - -# from cadCAD import configs -from cadCAD.configuration import Experiment -from cadCAD.configuration.utils import bound_norm_random, config_sim, time_step, env_trigger -from simulations.regression_tests.experiments import multi_exp - -seeds = { - 'z': np.random.RandomState(1), - 'a': np.random.RandomState(2), - 'b': np.random.RandomState(3), - 'c': np.random.RandomState(4) -} - - -# Policies per Mechanism -def p1m1(_g, step, sL, s, **kwargs): - return {'param1': 1} -def p2m1(_g, step, sL, s, **kwargs): - return {'param1': 1, 'param2': 4} - -def p1m2(_g, step, sL, s, **kwargs): - return {'param1': 'a', 'param2': 2} -def p2m2(_g, step, sL, s, **kwargs): - return {'param1': 'b', 'param2': 4} - -def p1m3(_g, step, sL, s, **kwargs): - return {'param1': ['c'], 'param2': np.array([10, 100])} -def p2m3(_g, step, sL, s, **kwargs): - return {'param1': ['d'], 'param2': np.array([20, 200])} - - -# Internal States per Mechanism -def s1m1(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = s['s1'] + 1 - return (y, x) -def s2m1(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - -def s1m2(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = s['s1'] + 1 - return (y, x) -def s2m2(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - -def s1m3(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = s['s1'] + 1 - return (y, x) -def s2m3(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - -def policies(_g, step, sL, s, _input, **kwargs): - y = 'policies' - x = _input - return (y, x) - - -# Exogenous States -proc_one_coef_A = 0.7 -proc_one_coef_B = 1.3 - -def es3(_g, step, sL, s, _input, **kwargs): - y = 's3' - x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B) - return (y, x) - -def es4(_g, step, sL, s, _input, **kwargs): - y = 's4' - x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B) - return (y, x) - -def update_timestamp(_g, step, sL, s, _input, **kwargs): - y = 'timestamp' - return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1)) - - -# Genesis States -genesis_states = { - 's1': 0.0, - 's2': 0.0, - 's3': 1.0, - 's4': 1.0, - 'timestamp': '2018-10-01 15:16:24' -} - - -# Environment Process -trigger_timestamps = ['2018-10-01 
15:16:25', '2018-10-01 15:16:27', '2018-10-01 15:16:29'] -env_processes = { - "s3": [lambda _g, x: 5], - "s4": env_trigger(3)(trigger_field='timestamp', trigger_vals=trigger_timestamps, funct_list=[lambda _g, x: 10]) -} - - -partial_state_update_block = [ - { - "policies": { - "b1": p1m1, - "b2": p2m1 - }, - "variables": { - "s1": s1m1, - "s2": s2m1, - "s3": es3, - "s4": es4, - "timestamp": update_timestamp - } - }, - { - "policies": { - "b1": p1m2, - "b2": p2m2 - }, - "variables": { - "s1": s1m2, - "s2": s2m2, - # "s3": es3p1, - # "s4": es4p2, - } - }, - { - "policies": { - "b1": p1m3, - "b2": p2m3 - }, - "variables": { - "s1": s1m3, - "s2": s2m3, - # "s3": es3p1, - # "s4": es4p2, - } - } -] - -sim_config_dict = { - "N": 2, - "T": range(5) - } - -sim_config = config_sim(sim_config_dict) - -exp = Experiment() -exp.append_model( - user_id='user_a', - sim_configs=sim_config, - initial_state=genesis_states, - env_processes=env_processes, - partial_state_update_blocks=partial_state_update_block, - policy_ops=[lambda a, b: a + b] -) - -multi_exp.append_model( - model_id='sys_model_1', - sim_configs=sim_config, - initial_state=genesis_states, - env_processes=env_processes, - partial_state_update_blocks=partial_state_update_block, - policy_ops=[lambda a, b: a + b] -) diff --git a/simulations/regression_tests/models/config_multi_2.py b/simulations/regression_tests/models/config_multi_2.py deleted file mode 100644 index e7f15eed..00000000 --- a/simulations/regression_tests/models/config_multi_2.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -from datetime import timedelta - -from cadCAD.configuration import Experiment -from cadCAD.configuration.utils import bound_norm_random, config_sim, env_trigger, time_step -from simulations.regression_tests.experiments import multi_exp - -seeds = { - 'z': np.random.RandomState(1), - 'a': np.random.RandomState(2), - 'b': np.random.RandomState(3), - 'c': np.random.RandomState(3) -} - - -# Policies per Mechanism -def p1m1(_g, step, sL, s, **kwargs): - return {'param1': 1} -def p2m1(_g, step, sL, s, **kwargs): - return {'param2': 4} - -def p1m2(_g, step, sL, s, **kwargs): - return {'param1': 'a', 'param2': 2} -def p2m2(_g, step, sL, s, **kwargs): - return {'param1': 'b', 'param2': 4} - -def p1m3(_g, step, sL, s, **kwargs): - return {'param1': ['c'], 'param2': np.array([10, 100])} -def p2m3(_g, step, sL, s, **kwargs): - return {'param1': ['d'], 'param2': np.array([20, 200])} - - -# Internal States per Mechanism -def s1m1(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = _input['param1'] - return (y, x) -def s2m1(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - -def s1m2(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = _input['param1'] - return (y, x) -def s2m2(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - -def s1m3(_g, step, sL, s, _input, **kwargs): - y = 's1' - x = _input['param1'] - return (y, x) -def s2m3(_g, step, sL, s, _input, **kwargs): - y = 's2' - x = _input['param2'] - return (y, x) - - -# Exogenous States -proc_one_coef_A = 0.7 -proc_one_coef_B = 1.3 - -def es3(_g, step, sL, s, _input, **kwargs): - y = 's3' - x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B) - return (y, x) - -def es4(_g, step, sL, s, _input, **kwargs): - y = 's4' - x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B) - return (y, x) - -def update_timestamp(_g, step, sL, s, _input, **kwargs): - y = 'timestamp' - return y, 
time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1)) - - -# Genesis States -genesis_states = { - 's1': 0, - 's2': 0, - 's3': 1, - 's4': 1, - 'timestamp': '2018-10-01 15:16:24' -} - - -# Environment Process -trigger_timestamps = ['2018-10-01 15:16:25', '2018-10-01 15:16:27', '2018-10-01 15:16:29'] -env_processes = { - "s3": [lambda _g, x: 5], - "s4": env_trigger(3)(trigger_field='timestamp', trigger_vals=trigger_timestamps, funct_list=[lambda _g, x: 10]) -} - -partial_state_update_block = { - "m1": { - "policies": { - "b1": p1m1, - # "b2": p2m1 - }, - "states": { - "s1": s1m1, - # "s2": s2m1 - "s3": es3, - "s4": es4, - "timestep": update_timestamp - } - }, - "m2": { - "policies": { - "b1": p1m2, - # "b2": p2m2 - }, - "states": { - "s1": s1m2, - # "s2": s2m2 - } - }, - "m3": { - "policies": { - "b1": p1m3, - "b2": p2m3 - }, - "states": { - "s1": s1m3, - "s2": s2m3 - } - } -} - -sim_config_dict = { - "N": 3, - "T": range(5), -} - - -sim_config = config_sim(sim_config_dict) - -exp = Experiment() -exp.append_model( - # config_list=configs, - user_id='user_b', - sim_configs=sim_config, - initial_state=genesis_states, - env_processes=env_processes, - partial_state_update_blocks=partial_state_update_block -) - -multi_exp.append_model( - model_id='sys_model_2', - sim_configs=sim_config, - initial_state=genesis_states, - env_processes=env_processes, - partial_state_update_blocks=partial_state_update_block -) diff --git a/simulations/regression_tests/models/param_sweep.py b/simulations/regression_tests/models/param_sweep.py deleted file mode 100644 index 65e3977d..00000000 --- a/simulations/regression_tests/models/param_sweep.py +++ /dev/null @@ -1,94 +0,0 @@ -import pprint -from typing import Dict, List, Any - -# from cadCAD.configuration import append_configs -from cadCAD.configuration import Experiment -from cadCAD.configuration.utils import env_trigger, var_substep_trigger, config_sim, psub_list - -pp = pprint.PrettyPrinter(indent=4) - -def some_function(x): - return x - -# Optional -# dict must contain lists opf 2 distinct lengths -g: Dict[str, List[Any]] = { - 'alpha': [1], - 'beta': [2, some_function], - 'gamma': [3, 4], - 'omega': [7] -} - -psu_steps = ['m1', 'm2', 'm3'] -system_substeps = len(psu_steps) -var_timestep_trigger = var_substep_trigger([0, system_substeps]) -env_timestep_trigger = env_trigger(system_substeps) -env_process = {} - - -# ['s1', 's2', 's3', 's4'] -# Policies per Mechanism -def gamma(_g, step, sL, s, **kwargs): - return {'gamma': _g['gamma']} - - -def omega(_g, step, sL, s, **kwargs): - return {'omega': _g['omega']} - - -# Internal States per Mechanism -def alpha(_g, step, sL, s, _input, **kwargs): - return 'alpha', _g['alpha'] - - -def beta(_g, step, sL, s, _input, **kwargs): - return 'beta', _g['beta'] - - -def policies(_g, step, sL, s, _input, **kwargs): - return 'policies', _input - - -def sweeped(_g, step, sL, s, _input, **kwargs): - return 'sweeped', {'beta': _g['beta'], 'gamma': _g['gamma']} - -psu_block = {k: {"policies": {}, "variables": {}} for k in psu_steps} -for m in psu_steps: - psu_block[m]['policies']['gamma'] = gamma - psu_block[m]['policies']['omega'] = omega - psu_block[m]["variables"]['alpha'] = alpha - psu_block[m]["variables"]['beta'] = beta - psu_block[m]['variables']['policies'] = policies - psu_block[m]["variables"]['sweeped'] = var_timestep_trigger(y='sweeped', f=sweeped) - - -# Genesis States -genesis_states = { - 'alpha': 0, - 'beta': 0, - 'policies': {}, - 'sweeped': {} -} - -# Environment 
Process -env_process['sweeped'] = env_timestep_trigger(trigger_field='timestep', trigger_vals=[5], funct_list=[lambda _g, x: _g['beta']]) - - -sim_config = config_sim( - { - "N": 2, - "T": range(2), - "M": g, # Optional - } -) - -partial_state_update_blocks = psub_list(psu_block, psu_steps) - -exp = Experiment() -exp.append_model( - model_id='sys_model_1', - sim_configs=sim_config, - initial_state=genesis_states, - env_processes=env_process, - partial_state_update_blocks=partial_state_update_blocks -) diff --git a/simulations/regression_tests/models/policy_aggregation.py b/simulations/regression_tests/models/policy_aggregation.py deleted file mode 100644 index 7621f7da..00000000 --- a/simulations/regression_tests/models/policy_aggregation.py +++ /dev/null @@ -1,83 +0,0 @@ -from cadCAD.configuration import Experiment -from cadCAD.configuration.utils import config_sim - -# Policies per Mechanism -def p1m1(_g, step, sL, s, **kwargs): - return {'policy1': 1} -def p2m1(_g, step, sL, s, **kwargs): - return {'policy2': 2} - -def p1m2(_g, step, sL, s, **kwargs): - return {'policy1': 2, 'policy2': 2} -def p2m2(_g, step, sL, s, **kwargs): - return {'policy1': 2, 'policy2': 2} - -def p1m3(_g, step, sL, s, **kwargs): - return {'policy1': 1, 'policy2': 2, 'policy3': 3} -def p2m3(_g, step, sL, s, **kwargs): - return {'policy1': 1, 'policy2': 2, 'policy3': 3} - - -# Internal States per Mechanism -def add(y, x): - return lambda _g, step, sH, s, _input, **kwargs: (y, s[y] + x) - -def policies(_g, step, sH, s, _input, **kwargs): - y = 'policies' - x = _input - return (y, x) - - -# Genesis States -genesis_states = { - 'policies': {}, - 's1': 0 -} - -variables = { - 's1': add('s1', 1), - "policies": policies -} - -partial_state_update_block = { - "m1": { - "policies": { - "p1": p1m1, - "p2": p2m1 - }, - "variables": variables - }, - "m2": { - "policies": { - "p1": p1m2, - "p2": p2m2 - }, - "variables": variables - }, - "m3": { - "policies": { - "p1": p1m3, - "p2": p2m3 - }, - "variables": variables - } -} - - -sim_config = config_sim( - { - "N": 2, - "T": range(3), - } -) - -# Aggregation == Reduce Map / Reduce Map Aggregation -# using env functions (include in reg test using / for env proc) -exp = Experiment() -exp.append_model( - model_id='sys_model_1', - sim_configs=sim_config, - initial_state=genesis_states, - partial_state_update_blocks=partial_state_update_block, - policy_ops=[lambda a, b: a + b, lambda y: y * 2] # Default: lambda a, b: a + b -) diff --git a/simulations/regression_tests/models/sweep_config.py b/simulations/regression_tests/models/sweep_config.py deleted file mode 100644 index 00d7911d..00000000 --- a/simulations/regression_tests/models/sweep_config.py +++ /dev/null @@ -1,101 +0,0 @@ -from pprint import pprint -from typing import Dict, List, Any - -# from cadCAD.configuration import append_configs -from cadCAD.configuration import Experiment -from cadCAD.configuration.utils import env_trigger, var_substep_trigger, config_sim, psub_list -from simulations.regression_tests.experiments import multi_exp - - -def some_function(x): - return x - -# Optional -# dict must contain lists opf 2 distinct lengths -g: Dict[str, List[Any]] = { - 'alpha': [1], - 'beta': [2, some_function], - 'gamma': [3, 4], - 'omega': [7] -} - -psu_steps = ['m1', 'm2', 'm3'] -system_substeps = len(psu_steps) -var_timestep_trigger = var_substep_trigger([0, system_substeps]) -env_timestep_trigger = env_trigger(system_substeps) -env_process = {} - - -# ['s1', 's2', 's3', 's4'] -# Policies per Mechanism -def gamma(_g, step, 
sL, s, **kwargs): - return {'gamma': _g['gamma']} - - -def omega(_g, step, sL, s, **kwargs): - return {'omega': _g['omega']} - - -# Internal States per Mechanism -def alpha(_g, step, sL, s, _input, **kwargs): - return 'alpha', _g['alpha'] - - -def beta(_g, step, sL, s, _input, **kwargs): - return 'beta', _g['beta'] - - -def policies(_g, step, sL, s, _input, **kwargs): - return 'policies', _input - - -def sweeped(_g, step, sL, s, _input, **kwargs): - return 'sweeped', {'beta': _g['beta'], 'gamma': _g['gamma']} - -psu_block = {k: {"policies": {}, "variables": {}} for k in psu_steps} -for m in psu_steps: - psu_block[m]['policies']['gamma'] = gamma - psu_block[m]['policies']['omega'] = omega - psu_block[m]["variables"]['alpha'] = alpha - psu_block[m]["variables"]['beta'] = beta - psu_block[m]['variables']['policies'] = policies - psu_block[m]["variables"]['sweeped'] = var_timestep_trigger(y='sweeped', f=sweeped) - - -# Genesis States -genesis_states = { - 'alpha': 0, - 'beta': 0, - 'policies': {}, - 'sweeped': {} -} - -# Environment Process -env_process['sweeped'] = env_timestep_trigger(trigger_field='timestep', trigger_vals=[5], funct_list=[lambda _g, x: _g['beta']]) - - -sim_config = config_sim( - { - "N": 2, - "T": range(2), - "M": g, # Optional - } -) - -partial_state_update_blocks = psub_list(psu_block, psu_steps) - -exp = Experiment() -exp.append_model( - model_id='sys_model_1', - sim_configs=sim_config, - initial_state=genesis_states, - env_processes=env_process, - partial_state_update_blocks=partial_state_update_blocks -) -multi_exp.append_model( - model_id='sys_model_2', - sim_configs=sim_config, - initial_state=genesis_states, - env_processes=env_process, - partial_state_update_blocks=partial_state_update_blocks -) \ No newline at end of file diff --git a/testing/generic_test.py b/testing/generic_test.py index 5c35cca6..015312ce 100644 --- a/testing/generic_test.py +++ b/testing/generic_test.py @@ -1,9 +1,8 @@ import unittest from functools import reduce -from tabulate import tabulate +from tabulate import tabulate # type: ignore from parameterized import parameterized - def generate_assertions_df(df, expected_results, target_cols, evaluations): test_names = [] for eval_f in evaluations: diff --git a/testing/tests/cadCAD_memory_address.json b/testing/tests/cadCAD_memory_address.json new file mode 100644 index 00000000..859ec289 --- /dev/null +++ b/testing/tests/cadCAD_memory_address.json @@ -0,0 +1 @@ +{"memory_address": "0x10fbedd50"} \ No newline at end of file diff --git a/testing/tests/dev/compare_results_dev.py b/testing/tests/dev/compare_results_dev.py deleted file mode 100644 index b950ae9d..00000000 --- a/testing/tests/dev/compare_results_dev.py +++ /dev/null @@ -1,40 +0,0 @@ -import unittest -import pandas as pd -from copy import deepcopy - -from testing.models import param_sweep -from testing.results_comparison import dataframe_difference, compare_results -from cadCAD.engine import ExecutionMode, ExecutionContext, Executor - -exec_mode = ExecutionMode() -exec_ctx = ExecutionContext(context=exec_mode.local_mode) -run = Executor(exec_context=exec_ctx, configs=param_sweep.exp.configs) - -raw_result, tensor_fields, sessions = run.execute() -result = pd.DataFrame(raw_result) -# print(tabulate(tensor_fields[0], headers='keys', tablefmt='psql')) -# pprint(sessions) -# print(tabulate(result, headers='keys', tablefmt='psql')) - - -result_1 = result -result_2 = deepcopy(result) -result_df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 5]}) -result_df2 = pd.DataFrame({'a': ['hi', 2], 'b': 
[3.0, 4.0]}) - -# print(result_df1.shape) -# exit() - - - -equivalent_result_diff = dataframe_difference(result_1, result_2) -different_result_diff = dataframe_difference(result_df1, result_df2) - -class dfCompareTest(compare_results(different_result_diff)): - pass - -class EquivalentTest(compare_results(equivalent_result_diff)): - pass - -if __name__ == '__main__': - unittest.main() diff --git a/testing/tests/dev/compare_results_old.py b/testing/tests/dev/compare_results_old.py deleted file mode 100644 index 68210670..00000000 --- a/testing/tests/dev/compare_results_old.py +++ /dev/null @@ -1,79 +0,0 @@ -from copy import deepcopy - -import pandas as pd -import numpy as np -# import pandasql -# from tabulate import tabulate -from tabulate import tabulate - -from cadCAD.engine import ExecutionMode, ExecutionContext, Executor -from testing.models import param_sweep - -exec_mode = ExecutionMode() -exec_ctx = ExecutionContext(context=exec_mode.local_mode) -run = Executor(exec_context=exec_ctx, configs=param_sweep.exp.configs) - -raw_result, tensor_fields, sessions = run.execute() -result = pd.DataFrame(raw_result) -# print(tabulate(tensor_fields[0], headers='keys', tablefmt='psql')) -# pprint(sessions) -# print(tabulate(result, headers='keys', tablefmt='psql')) - -# result_1 = result -# result_2 = deepcopy(result) - -# test_df1 = pd.testing.assert_frame_equal(result_1, result_2) -# print(tabulate(test_df1, headers='keys', tablefmt='psql')) - -result_df = pd.DataFrame({'a': [1, 2], 'b': [3, 5]}).reset_index() -df2 = pd.DataFrame({'a': [3.1, 2], 'b': [3.0, 4.0]}).reset_index() - -# test_df2 = pd.testing.assert_frame_equal(df1, df2) -# print(tabulate(test_df2, headers='keys', tablefmt='psql')) - -def dataframe_difference(df1: pd.DataFrame, df2: pd.DataFrame, which=None): - """ - Find rows which are different between two DataFrames. 
- https://hackersandslackers.com/compare-rows-pandas-dataframes/ - """ - comparison_df = df1.merge( - df2, - indicator=True, - how='outer' - ) - if which is None: - diff_df = comparison_df[comparison_df['_merge'] != 'both'] - else: - diff_df = comparison_df[comparison_df['_merge'] == which] - # diff_df.to_csv('data/diff.csv') - return diff_df - - -merge_df = dataframe_difference(result_df, df2) -cols_no__merge = list(filter(lambda col: '_merge' not in col, merge_df.columns.tolist())) -cols_no_index = list(filter(lambda col: 'index' not in col, cols_no__merge)) -aggregation = dict((k, 'unique') for k in cols_no_index) -diff_df = merge_df[cols_no__merge].groupby('index').agg(aggregation) -# print(tabulate(diff_df, headers='keys', tablefmt='psql')) - - -def discrepancies(row): - return dict([ - (col, list(vals)) for col, vals in row.items() - if type(vals) is np.ndarray and len(vals) > 1 - ]) - - -def val_error(val): - if type(val) is dict: - return False - else: - return True - - -diff_df['discrepancies'] = diff_df.apply(discrepancies, axis=1) -discrepancies_df = diff_df[['discrepancies']] -result_diff = result_df.merge(discrepancies_df, how='left', on='index') -result_diff['val_error'] = result_diff['discrepancies'].apply(val_error) -print(tabulate(result_diff, headers='keys', tablefmt='psql')) - diff --git a/testing/tests/dev/out_check_dev.py b/testing/tests/dev/out_check_dev.py deleted file mode 100644 index 99d5fedb..00000000 --- a/testing/tests/dev/out_check_dev.py +++ /dev/null @@ -1,28 +0,0 @@ -from pprint import pprint - -import pandas as pd -from tabulate import tabulate -from cadCAD.engine import ExecutionMode, ExecutionContext, Executor - -from testing.models import param_sweep -from cadCAD import configs - -exec_mode = ExecutionMode() - -exec_ctx = ExecutionContext(context=exec_mode.local_mode) -# exec_ctx = ExecutionContext(context=exec_mode.multi_proc) -run = Executor(exec_context=exec_ctx, configs=configs) - -raw_result, tensor_fields, sessions = run.execute() -result = pd.DataFrame(raw_result) -# print(tabulate(tensor_fields[0], headers='keys', tablefmt='psql')) -# pprint(sessions) -print(tabulate(result, headers='keys', tablefmt='psql')) - -print() - -raw_result, tensor_fields, sessions = run.execute() -result = pd.DataFrame(raw_result) -# print(tabulate(tensor_fields[0], headers='keys', tablefmt='psql')) -# pprint(sessions) -print(tabulate(result, headers='keys', tablefmt='psql')) \ No newline at end of file diff --git a/testing/tests/dev/utils_dev.py b/testing/tests/dev/utils_dev.py deleted file mode 100644 index 63885f81..00000000 --- a/testing/tests/dev/utils_dev.py +++ /dev/null @@ -1,5 +0,0 @@ -def gen_metric_row(row, cols): - return ((row['run'], row['timestep'], row['substep']), {col: row[col] for col in cols}) - -def gen_metric_dict(df, cols): - return dict([gen_metric_row(row, cols) for index, row in df.iterrows()]) diff --git a/testing/tests/import_cadCAD.ipynb b/testing/tests/import_cadCAD.ipynb index 01b8083d..325b2c60 100644 --- a/testing/tests/import_cadCAD.ipynb +++ b/testing/tests/import_cadCAD.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 16, + "execution_count": 2, "id": "15b9b09a", "metadata": {}, "outputs": [], @@ -12,7 +12,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 3, "id": "1b54b99a", "metadata": {}, "outputs": [], @@ -42,7 +42,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.13" + "version": "3.11.0" } }, "nbformat": 4, From 
0a291e8cafc51eeb46c4ee2481a2a2bc02d91c62 Mon Sep 17 00:00:00 2001
From: Danilo Lessa Bernardineli
Date: Thu, 14 Dec 2023 17:36:30 -0300
Subject: [PATCH 3/3] Add Type Hints for cadCAD objects (#313)

* add types.py
---
 cadCAD/types.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
 create mode 100644 cadCAD/types.py

diff --git a/cadCAD/types.py b/cadCAD/types.py
new file mode 100644
index 00000000..5b47964e
--- /dev/null
+++ b/cadCAD/types.py
@@ -0,0 +1,18 @@
+from typing import TypedDict, Callable, Union, Dict, List, Tuple
+
+State = Dict[str, object]
+Parameters = Dict[str, object]
+Substep = int
+StateHistory = List[List[State]]
+PolicyOutput = Dict[str, object]
+StateVariable = object
+
+PolicyFunction = Callable[[Parameters, Substep, StateHistory, State], PolicyOutput]
+StateUpdateFunction = Callable[[Parameters, Substep, StateHistory, State, PolicyOutput], Tuple[str, StateVariable]]
+
+class StateUpdateBlock(TypedDict):
+    policies: Dict[str, PolicyFunction]
+    variables: Dict[str, StateUpdateFunction]
+
+
+StateUpdateBlocks = List[StateUpdateBlock]
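
As a rough illustration of how these aliases could be used once the patch is applied, the sketch below annotates a toy model. The model itself is hypothetical and not part of this change set: the names grow_policy, update_population, the growth_rate parameter and the population state variable are invented for the example, and the signatures follow the Callable aliases defined in cadCAD/types.py rather than any additional keyword arguments a real model might accept.

# Hypothetical usage sketch (assumes cadCAD/types.py from PATCH 3/3 is installed).
from typing import Tuple

from cadCAD.types import (
    Parameters, Substep, StateHistory, State, PolicyOutput,
    StateVariable, StateUpdateBlocks,
)


def grow_policy(params: Parameters, substep: Substep,
                history: StateHistory, state: State) -> PolicyOutput:
    # A policy reads parameters and the current state and returns a signal dict.
    return {'delta': params.get('growth_rate', 1)}


def update_population(params: Parameters, substep: Substep,
                      history: StateHistory, state: State,
                      signal: PolicyOutput) -> Tuple[str, StateVariable]:
    # A state update function returns the (variable name, new value) pair.
    return 'population', state['population'] + signal['delta']


# The partial state update blocks are typed as StateUpdateBlocks, i.e. a list
# of dicts with 'policies' and 'variables' keys as declared in StateUpdateBlock.
psubs: StateUpdateBlocks = [
    {
        'policies': {'grow': grow_policy},
        'variables': {'population': update_population},
    }
]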