tests/llvm: Add llvm_not_implemented mark.
Tests that carry both the llvm and llvm_not_implemented marks are
automatically skipped.

Signed-off-by: Jan Vesely <[email protected]>
jvesely committed Oct 20, 2024
1 parent 1239e4a commit 719978f
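
For illustration, a minimal sketch of the new mark in use (the test and parameter names here are hypothetical). In this repository the llvm mark typically arrives via the execution-mode fixtures (func_mode, comp_mode, autodiff_mode), so only the compiled-mode instance of a test carries both marks and is skipped:

    import pytest

    @pytest.mark.llvm_not_implemented
    @pytest.mark.parametrize('mode', [
        'Python',
        pytest.param('LLVM', marks=pytest.mark.llvm),
    ])
    def test_feature(mode):
        # The 'LLVM' instance carries both marks and is skipped by the new
        # conftest.py hook; the 'Python' instance still runs.
        assert mode in ('Python', 'LLVM')
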
Showing 8 changed files with 67 additions and 72 deletions.
3 changes: 3 additions & 0 deletions conftest.py
@@ -50,6 +50,9 @@ def pytest_runtest_setup(item):
         if m in item.keywords and not item.config.getvalue(m):
             pytest.skip('{0} tests not requested'.format(m))
 
+    if 'llvm' in item.keywords and 'llvm_not_implemented' in item.keywords:
+        pytest.skip('LLVM implementation not available')
+
     if 'cuda' in item.keywords and not pnlvm.ptx_enabled:
         pytest.skip('PTX engine not enabled/available')
 
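Marks attached through pytest.param land in the generated item's keywords just like function-level marks, so the same two-line check also skips individual parameter sets. A sketch of the per-case form used throughout this commit (the values here are made up):

    import pytest

    @pytest.mark.llvm
    @pytest.mark.parametrize('value, expected', [
        (1, 2),
        # Hypothetical case: only this parameter set is auto-skipped.
        pytest.param(3, 6, marks=pytest.mark.llvm_not_implemented),
    ])
    def test_double(value, expected):
        assert 2 * value == expected
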
1 change: 1 addition & 0 deletions setup.cfg
@@ -32,6 +32,7 @@ markers =
     acnested
     composition: PsyNeuLink Composition tests
     llvm: Tests using LLVM runtime compiler
+    llvm_not_implemented: Tests that should use LLVM runtime compiler but the functionality is not yet implemented
     cuda: Tests using LLVM runtime compiler and CUDA GPGPU backend
     control: Tests including control mechanism and/or control projection
     state_features: Tests for OptimizationControlMechanism state_features specifications
15 changes: 4 additions & 11 deletions tests/composition/test_autodiffcomposition.py
@@ -1331,10 +1331,8 @@ def test_xor_nested_train_then_no_train(self, num_epochs, learning_rate,
             (400, 4, 10, .00001),
         ]
     )
+    @pytest.mark.llvm_not_implemented
     def test_xor_nested_no_train_then_train(self, num_epochs, learning_rate, patience, min_delta, autodiff_mode):
-        if autodiff_mode != pnl.ExecutionMode.PyTorch:
-            pytest.skip("LLVM not available")
-
         xor_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
         xor_targets = np.array([[0], [1], [1], [0]])
 
@@ -2800,15 +2798,12 @@ def test_training_then_processing(self, autodiff_mode):
     @pytest.mark.parametrize(
         'loss, expected', [
             (Loss.CROSS_ENTROPY, [[[0.99330715]], [[0.99933202]], [[0.99933202]], [[0.99985049]]]),
-            (Loss.L1, [[[0.99330641]], [[0.9993319 ]], [[0.9993319 ]], [[0.99985045]]]),
+            pytest.param(Loss.L1, [[[0.99330641]], [[0.9993319 ]], [[0.9993319 ]], [[0.99985045]]], marks=pytest.mark.llvm_not_implemented),
             (Loss.MSE, [[[0.99330509]], [[0.99933169]], [[0.99933169]], [[0.9998504]]]),
-            (Loss.POISSON_NLL, [[[0.99330385]], [[0.99933149]], [[0.99933149]], [[0.99985034]]]),
+            pytest.param(Loss.POISSON_NLL, [[[0.99330385]], [[0.99933149]], [[0.99933149]], [[0.99985034]]], marks=pytest.mark.llvm_not_implemented),
         ]
     )
     def test_loss_specs(self, loss, expected, autodiff_mode):
-        if autodiff_mode is not pnl.ExecutionMode.PyTorch and loss in [Loss.POISSON_NLL, Loss.L1]:
-            pytest.skip("Loss spec not yet implemented!")
-
         xor_in = TransferMechanism(name='xor_in', default_variable=np.zeros(2))
         xor_hid = TransferMechanism(name='xor_hid', default_variable=np.zeros(10), function=Logistic())
         xor_out = TransferMechanism(name='xor_out', default_variable=np.zeros(1), function=Logistic())
@@ -2834,10 +2829,8 @@ def test_loss_specs(self, loss, expected, autodiff_mode):
         tol = {'atol': 2e-6, 'rtol': 2e-6} if loss == Loss.CROSS_ENTROPY else {}
         np.testing.assert_allclose(xor.learning_results, expected, **tol)
 
+    @pytest.mark.llvm_not_implemented
     def test_pytorch_loss_spec(self, autodiff_mode):
-        if autodiff_mode is not pnl.ExecutionMode.PyTorch:
-            pytest.skip("Loss spec not yet implemented!")
-
         import torch
         ls = torch.nn.SoftMarginLoss(reduction='sum')
 
4 changes: 1 addition & 3 deletions tests/composition/test_composition.py
@@ -5569,11 +5569,9 @@ def test_partially_overlapping_local_and_control_mech_control_specs_in_unnested_
 class TestImportComposition:
     @pytest.mark.pytorch
     @pytest.mark.composition
+    @pytest.mark.llvm_not_implemented
     def test_import_composition(self, comp_mode):
 
-        if comp_mode != pnl.ExecutionMode.Python:
-            pytest.skip('Compilation not yet support for Composition.import.')
-
         em = EMComposition(memory_template=(2,5), memory_capacity=4)
 
         i1 = ProcessingMechanism()
4 changes: 1 addition & 3 deletions tests/functions/test_integrator.py
@@ -191,7 +191,7 @@ def DriftOnASphereFun(init, value, iterations, noise, **kwargs):
     (pnl.DriftDiffusionIntegrator, DriftIntFun),
     (pnl.LeakyCompetingIntegrator, LeakyFun),
     (pnl.AccumulatorIntegrator, AccumulatorFun),
-    (pnl.DriftOnASphereIntegrator, DriftOnASphereFun),
+    pytest.param((pnl.DriftOnASphereIntegrator, DriftOnASphereFun), marks=pytest.mark.llvm_not_implemented),
 ], ids=lambda x: x[0])
 @pytest.mark.benchmark
 def test_execute(func, func_mode, variable, noise, params, benchmark):
@@ -209,8 +209,6 @@ def test_execute(func, func_mode, variable, noise, params, benchmark):
 
     if 'DriftOnASphereIntegrator' in func[0].componentName:
         params = {**params, 'dimension': len(variable) + 1}
-        if func_mode != 'Python':
-            pytest.skip("DriftOnASphereIntegrator not yet compiled")
 
     elif issubclass(func_class, pnl.AccumulatorIntegrator):
         params = {**params, 'increment': RAND0_1}
18 changes: 12 additions & 6 deletions tests/functions/test_memory.py
@@ -34,7 +34,9 @@
 #   (Functions.Buffer, test_var, {'rate':RAND1}, [[0.0],[0.0]]),
     pytest.param(Functions.Buffer, test_var[0], {'history':512, 'rate':RAND1, 'initializer':[test_var[0]]},
                  # TODO: Why is the first result using rate^2 ?
-                 [test_var[0] * RAND1 * RAND1, test_var[0] * RAND1], id="Buffer"),
+                 [test_var[0] * RAND1 * RAND1, test_var[0] * RAND1],
+                 marks=pytest.mark.llvm_not_implemented,
+                 id="Buffer"),
 
     # Tests using Mersenne-Twister as function PRNG
     pytest.param(Functions.DictionaryMemory, test_var, {'seed': module_seed},
@@ -71,15 +73,19 @@
     # ContentAddressableMemory
     pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.1, 'seed': module_seed},
                  np.zeros_like(test_var),
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Low Retrieval"),
     pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'storage_prob':0.1, 'seed': module_seed},
                  np.zeros_like(test_var),
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Low Storage"),
     pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed},
                  [test_var[0], test_var[1]],
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory High Storage/Retrieval"),
     pytest.param(Functions.ContentAddressableMemory, test_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed},
                  [test_var[0], test_var[1]],
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Initializer"),
 
     # Tests using philox var
@@ -117,15 +123,19 @@
     # ContentAddressableMemory
     pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.1, 'seed': module_seed},
                  np.zeros_like(philox_var),
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Low Retrieval Philox"),
     pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'storage_prob':0.01, 'seed': module_seed},
                  np.zeros_like(philox_var),
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Low Storage Philox"),
     pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.98, 'storage_prob':0.98, 'seed': module_seed},
                  [philox_var[0], philox_var[1]],
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory High Storage/Retrieval Philox"),
     pytest.param(Functions.ContentAddressableMemory, philox_var, {'initializer':philox_initializer, 'rate':RAND1, 'seed': module_seed},
                  [philox_var[0], philox_var[1]],
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Initializer Philox"),
 ]

@@ -134,11 +144,6 @@
 @pytest.mark.benchmark
 @pytest.mark.parametrize("func, variable, params, expected", test_data)
 def test_basic(func, variable, params, expected, benchmark, func_mode):
-    if func is Functions.Buffer and func_mode != 'Python':
-        pytest.skip("Not implemented")
-    if func is Functions.ContentAddressableMemory and func_mode != 'Python':
-        pytest.skip("Not implemented")
-
     benchmark.group = func.componentName
     f = func(default_variable=variable, **params)
     if variable is philox_var:
@@ -153,6 +158,7 @@ def test_basic(func, variable, params, expected, benchmark, func_mode):
     # "duplicate_keys"
     if len(variable) == 2:
         EX([variable[0], variable[1] * 4])
+
     res = benchmark(EX, variable)
 
     # This still needs to use "allclose" as the key gets manipulated before