Merge pull request #2748 from PrincetonUniversity/devel
Devel
kmantel authored Jul 27, 2023
2 parents 06f3006 + 589e149 commit b8cd10e
Showing 282 changed files with 36,850 additions and 7,778 deletions.
16 changes: 12 additions & 4 deletions .github/actions/install-pnl/action.yml
@@ -44,14 +44,22 @@ runs:
- name: Drop pytorch on x86
shell: bash
run: |
echo > env_constraints.txt
if [ $(python -c 'import struct; print(struct.calcsize("P") * 8)') == 32 ]; then
sed -i /torch/d requirements.txt
sed -i /modeci_mdf/d requirements.txt
# pywinpty is a transitive dependency and v1.0+ removed support for x86 wheels
# terminado >= 0.10.0 pulls in pywinpty >= 1.1.0
echo "pywinpty<1" >> env_constraints.txt
# jupyter_server pulls in jupyter_server_terminals which depends on pywinpty >= 2.0.3
echo "jupyter_server<2" >> env_constraints.txt
# scipy >=1.9.2 doesn't provide win32 wheel and GA doesn't have working fortran on windows
echo "scipy<1.9.2" >> env_constraints.txt
# scikit-learn >= 1.1.3 doesn't provide win32 wheel
[[ ${{ runner.os }} = Windows* ]] && pip install "pywinpty<1" "terminado<0.10" "scipy<1.9.2" "scikit-learn<1.1.3" "statsmodels<0.13.3" "jupyter-server<2" -c requirements.txt
echo "scikit-learn<1.1.3" >> env_constraints.txt
# contourpy >= 1.1.0 doesn't provide win32 wheel
echo "contourpy<1.1.0" >> env_constraints.txt
# pillow >= 10.0.0 doesn't provide win32 wheel
echo "pillow < 10.0.0" >> env_constraints.txt
fi
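
A pip constraints file passed with -c only caps the version of a package that something else asks to install; it never adds packages on its own. A minimal sketch of that mechanism outside this action, using two of the entries built above:

    echo "pillow<10.0.0"   >  env_constraints.txt
    echo "contourpy<1.1.0" >> env_constraints.txt
    # the caps apply only if requirements.txt (or a transitive dependency) pulls these packages in
    pip install -r requirements.txt -c env_constraints.txt
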
- name: Install updated package
@@ -66,7 +74,7 @@ runs:
echo "new_package=$NEW_PACKAGE" >> $GITHUB_OUTPUT
# save a list of all installed packages (including pip, wheel; it's never empty)
pip freeze --all > orig
pip install "$(echo $NEW_PACKAGE | sed 's/[-_]/./g' | xargs grep *requirements.txt -h -e | head -n1)"
pip install "$(echo $NEW_PACKAGE | sed 's/[-_]/./g' | xargs grep *requirements.txt -h -e | head -n1)" -c env_constraints.txt -c broken_trans_deps.txt
pip show "$NEW_PACKAGE" | grep 'Version' | tee new_version.deps
# uninstall new packages but skip those from previous steps (pywinpty, terminado on windows x86)
# the 'orig' list is not empty (includes at least pip, wheel)
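
The freeze into 'orig' exists so the step can later remove only what this trial install added, leaving packages installed by earlier steps untouched. The actual uninstall command sits outside this hunk; a rough sketch of the pattern it relies on (not the action's exact commands):

    pip freeze --all > orig
    pip install "$NEW_PACKAGE" -c env_constraints.txt -c broken_trans_deps.txt
    pip freeze --all > new
    # names that appear only after the new install (assumes the delta is non-empty)
    comm -13 <(sort orig) <(sort new) | cut -d= -f1 | xargs pip uninstall -y
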
@@ -78,7 +86,7 @@ runs:
- name: Python dependencies
shell: bash
run: |
pip install -e .[${{ inputs.features }}]
pip install -e .[${{ inputs.features }}] -c env_constraints.txt -c broken_trans_deps.txt
- name: "Cleanup old wheels"
shell: bash
14 changes: 10 additions & 4 deletions .github/workflows/pnl-ci-docs.yml
@@ -24,7 +24,7 @@ jobs:
# The entire matrix is set up and 'base' builds are pruned based
# on event name and final configuration (ubuntu, python3.7).
matrix:
python-version: [3.7, 3.8, 3.9]
python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
python-architecture: ['x64']
os: [ubuntu-latest, macos-latest, windows-latest]
event:
@@ -37,9 +37,13 @@
pnl-version: 'base'
- os: windows-latest
pnl-version: 'base'
- python-version: 3.8
- python-version: '3.8'
pnl-version: 'base'
- python-version: 3.9
- python-version: '3.9'
pnl-version: 'base'
- python-version: '3.10'
pnl-version: 'base'
- python-version: '3.11'
pnl-version: 'base'

outputs:
@@ -74,7 +78,9 @@ jobs:
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
# Block python3.7.17 on macos. see:
# https://github.com/actions/setup-python/issues/682
python-version: ${{ (matrix.os == 'macos-latest' && matrix.python-version == '3.7') && '3.7.16' || matrix.python-version }}
architecture: ${{ matrix.python-architecture }}

- name: Get pip cache location
55 changes: 46 additions & 9 deletions .github/workflows/pnl-ci.yml
@@ -48,22 +48,51 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: [3.7, 3.8, 3.9]
python-version: ['3.7', '3.11']
python-architecture: ['x64']
extra-args: ['']
os: [ubuntu, macos, windows]
include:
# code-coverage build on macos python 3.9
- python-version: '3.9'
os: macos
extra-args: '--cov=psyneulink'

# --forked run of python only tests
# Python tests are enough to test potential naming issues
- python-version: '3.9'
os: ubuntu
extra-args: '--forked -m "not llvm"'

# add 32-bit build on windows
- python-version: 3.8
- python-version: '3.8'
python-architecture: 'x86'
os: windows
# code-coverage build on macos python 3.9
- python-version: 3.9

# fp32 run on linux python 3.10
- python-version: '3.10'
os: ubuntu
extra-args: '--fp-precision=fp32'

# --benchmark-enable run on macos python 3.10
- python-version: '3.10'
os: macos
extra-args: '--cov=psyneulink'
# pytest needs both '--benchmark-only' and '-m benchmark'
# The former fails the test if benchmarks cannot be enabled
# The latter works around a crash in pytest when collecting tests:
# https://github.com/ionelmc/pytest-benchmark/issues/243
extra-args: '-m benchmark --benchmark-enable --benchmark-only --benchmark-min-rounds=2 --benchmark-max-time=0.001 --benchmark-warmup=off -n0 --dist=no'

# add python 3.8 build on macos since 3.7 is broken
# https://github.com/actions/virtual-environments/issues/4230
- python-version: '3.8'
python-architecture: 'x64'
os: macos

exclude:
# 3.7 is broken on macos-11, https://github.com/actions/virtual-environments/issues/4230
- python-version: 3.7
# 3.7 is broken on macos-11,
# https://github.com/actions/virtual-environments/issues/4230
- python-version: '3.7'
os: macos

steps:
@@ -115,17 +144,25 @@
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Print test machine/env info
shell: bash
run: |
python -c "import numpy; numpy.show_config()"
case "$RUNNER_OS" in
Linux*) lscpu;;
esac
- name: Test with pytest
timeout-minutes: 180
run: pytest --junit-xml=tests_out.xml --verbosity=0 -n auto ${{ matrix.extra-args }}
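
The extra-args slot from the matrix above is appended to this base invocation, so the special configurations can be reproduced locally with plain pytest (pytest-xdist and pytest-forked are assumed installed, as in CI); for example:

    # fp32 run (the --fp-precision option comes from the project's pytest configuration)
    pytest --verbosity=0 -n auto --fp-precision=fp32
    # python-only run in forked subprocesses, skipping llvm-marked tests
    pytest --verbosity=0 -n auto --forked -m "not llvm"
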

- name: Upload test results
uses: actions/upload-artifact@v3
with:
name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}-${{ matrix.extra-args }}
name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}
path: tests_out.xml
retention-days: 5
if: success() || failure()
if: (success() || failure()) && ! contains(matrix.extra-args, 'forked')

- name: Upload coveralls code coverage
if: contains(matrix.extra-args, '--cov=psyneulink')
6 changes: 5 additions & 1 deletion CONVENTIONS.md
@@ -74,7 +74,11 @@ Extensions of Core objects
- arguments_of_constructors, instance_attributes and instance_methods:
lowercase and underscore separator(s) [constructor_arg, method_arg, object_attribute]
- keywords:
all capitals and underscore separator(s) [KEY_WORD]
- all capitals and underscore separator(s) [KEY_WORD]
- assigned values:
- argument of a method or function: lower case [KEY_WORD = 'argument_value']
- names of Components: upper case [KEY_WORD = 'NAME']

DEPRECATED:
- internal keywords:
prepend kw followed by camelCase [kwKeyword]
32 changes: 22 additions & 10 deletions Scripts/Debug/Yotam LCA Model LLVM.py
@@ -22,6 +22,16 @@
LCA_BIN_EXECUTE=os.getenv("LCA", "LLVMRun")
RUN_TOTAL=True

def _get_execution_mode(bin_execute):
if bin_execute.lower() == 'llvmrun':
return pnl.ExecutionMode.LLVMRun
elif bin_execute.lower() == 'pytorch':
return pnl.ExecutionMode.PyTorch
elif bin_execute.lower() == 'python':
return pnl.ExecutionMode.Python

assert False, "Unknown execution mode: {}".format(bin_execute)
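
This helper supports the switch in this script from the old bin_execute string argument to execution_mode: the environment variables still carry strings, and the helper maps them to pnl.ExecutionMode members. A hedged example of selecting a mode from the shell when launching the script from the repository root (LLVMRun remains the default when the variable is unset):

    # force the interpreted Python path for the LCA composition runs
    LCA=Python python "Scripts/Debug/Yotam LCA Model LLVM.py"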

# read in bipartite graph, return graph object, number of possible tasks, number of
# input dimensions and number of output dimensions.
# file format Ni No (input dimension number, output dimension number)
@@ -171,12 +181,12 @@ def get_trained_network(bipartite_graph, num_features=3, num_hidden=200, epochs=
mnet.learn(
inputs=input_set,
minibatch_size=1,
bin_execute=MNET_BIN_EXECUTE,
execution_mode=_get_execution_mode(MNET_BIN_EXECUTE),
patience=patience,
min_delta=min_delt,
)
t2 = time.time()
print("training 1:", MNET_BIN_EXECUTE, t2-t1)
print("training 1 time:", MNET_BIN_EXECUTE, t2 - t1)

# Apply LCA transform (values from Sebastian's code -- supposedly taken from the original LCA paper from Marius & Jay)
if attach_LCA:
@@ -293,12 +303,12 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200,
mnet.learn(
inputs=input_set,
minibatch_size=input_set['epochs'],
bin_execute=MNET_BIN_EXECUTE,
execution_mode=_get_execution_mode(MNET_BIN_EXECUTE),
patience=patience,
min_delta=min_delt,
)
t2 = time.time()
print("training 2:", MNET_BIN_EXECUTE, t2-t1)
print("training 2 time:", MNET_BIN_EXECUTE, t2 - t1)

for projection in mnet.projections:
if hasattr(projection.parameters, 'matrix'):
@@ -487,9 +497,10 @@ def evaluate_net_perf_lca(mnet_lca, test_tasks, all_tasks, num_features, num_inp
}
print('running LCA total')
t1 = time.time()
mnet_lca.run( { mnet_lca.nodes['mnet'] : inputs_total }, bin_execute=LCA_BIN_EXECUTE)
mnet_lca.run({mnet_lca.nodes['mnet']: inputs_total},
execution_mode=_get_execution_mode(LCA_BIN_EXECUTE))
t2 = time.time()
print("LCA total:", LCA_BIN_EXECUTE, t2 - t1)
print("LCA run total time:", LCA_BIN_EXECUTE, t2 - t1)
# Run the outer composition, one point at a time (for debugging purposes)
for i in range(num_test_points):
if RUN_TOTAL:
@@ -504,9 +515,10 @@ def evaluate_net_perf_lca(mnet_lca, test_tasks, all_tasks, num_features, num_inp

print('running LCA', i)
t1 = time.time()
mnet_lca.run( { mnet_lca.nodes['mnet'] : input_set['inputs'] }, bin_execute=LCA_BIN_EXECUTE )
mnet_lca.run({mnet_lca.nodes['mnet']: input_set['inputs']},
execution_mode=_get_execution_mode(LCA_BIN_EXECUTE))
t2 = time.time()
print("LCA:", LCA_BIN_EXECUTE, t2 - t1)
print("LCA time:", LCA_BIN_EXECUTE, t2 - t1)
iterations = mnet_lca.nodes['lca'].num_executions_before_finished if LCA_BIN_EXECUTE == "Python" else ugly_get_compile_param_value(mnet_lca, 'lca', 'num_executions_before_finished')
print("ITERATIONS:", iterations)
print('input: ', input_test_pts[i, :])
@@ -617,9 +629,9 @@ def evaluate_net_perf_mse(mnet, test_tasks, all_tasks, num_features, num_input_d

print("running mnet2:", MNET2_BIN_EXECUTE)
t1 = time.time()
mnet.run(input_set, bin_execute=MNET2_BIN_EXECUTE)
mnet.run(input_set, execution_mode=_get_execution_mode(MNET2_BIN_EXECUTE))
t2 = time.time()
print("mnet2:", MNET2_BIN_EXECUTE, t2-t1)
print("mnet2 time:", MNET2_BIN_EXECUTE, t2 - t1)

# Retrieve results
output_test_pts = np.array(mnet.parameters.results.get(mnet)[-num_test_points:]).reshape(num_test_points, output_layer_size)
