Split run test modules into individual per-module runs #3

Merged · 3 commits · Mar 1, 2024
17 changes: 15 additions & 2 deletions .github/workflows/minitorch.yml
@@ -26,30 +26,43 @@ jobs:
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 --ignore "N801, E203, E266, E501, W503, F812, F401, F841, E741, N803, N802, N806" minitorch/ tests/ project/
- name: Test with pytest

- name: Test Module 0
run: |
echo "Module 0"
pytest tests -x -m task0_1
pytest tests -x -m task0_2
pytest tests -x -m task0_3
pytest tests -x -m task0_4

- name: Test Module 1
run: |
echo "Module 1"
pytest tests -x -m task1_1
pytest tests -x -m task1_2
pytest tests -x -m task1_3
pytest tests -x -m task1_4

- name: Test Module 2
run: |
echo "Module 2"
pytest tests -x -m task2_1
pytest tests -x -m task2_2
pytest tests -x -m task2_3
pytest tests -x -m task2_4

- name: Test Module 3
run: |
echo "Module 3"
pytest tests -x -m task3_1
pytest tests -x -m task3_2
pytest tests -x -m task3_3
pytest tests -x -m task3_4

- name: Test Module 4
run: |
echo "Module 4"
pytest tests -x -m task4_1
pytest tests -x -m task4_2
pytest tests -x -m task4_3
pytest tests -x -m task4_4
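
Note for reviewers: the `taskX_Y` labels used above are ordinary pytest markers, so each step simply selects a slice of the suite with `-m`, and `-x` stops that step at the first failure. A minimal sketch of how such a marker is attached to a test (the test name and body are invented for illustration; markers are normally also registered in the project's pytest config so they are not flagged as unknown):

```python
import pytest


@pytest.mark.task0_1
def test_sanity_example():
    # Collected by `pytest tests -x -m task0_1`; excluded by every other -m filter.
    assert 1 + 1 == 2
```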
8 changes: 4 additions & 4 deletions minitorch/autodiff.py
@@ -1,5 +1,5 @@
from dataclasses import dataclass
from typing import Any, Iterable, List, Tuple
from typing import Any, Iterable, List, Tuple # noqa: F401

from typing_extensions import Protocol

@@ -23,7 +23,7 @@ def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6)
An approximation of $f'_i(x_0, \ldots, x_{n-1})$
"""
# TODO: Implement for Task 1.1.
raise NotImplementedError('Need to implement for Task 1.1')
raise NotImplementedError("Need to implement for Task 1.1")


variable_count = 1
@@ -62,7 +62,7 @@ def topological_sort(variable: Variable) -> Iterable[Variable]:
Non-constant Variables in topological order starting from the right.
"""
# TODO: Implement for Task 1.4.
raise NotImplementedError('Need to implement for Task 1.4')
raise NotImplementedError("Need to implement for Task 1.4")


def backpropagate(variable: Variable, deriv: Any) -> None:
@@ -77,7 +77,7 @@ def backpropagate(variable: Variable, deriv: Any) -> None:
No return. Should write its results to the derivative values of each leaf through `accumulate_derivative`.
"""
# TODO: Implement for Task 1.4.
raise NotImplementedError('Need to implement for Task 1.4')
raise NotImplementedError("Need to implement for Task 1.4")


@dataclass
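
The `autodiff.py` stubs are intentionally left for Task 1.1/1.4; this PR only normalizes quote style. For reference, `central_difference` asks for the standard symmetric estimate $f'_i(x) \approx \big(f(\ldots, x_i + \epsilon, \ldots) - f(\ldots, x_i - \epsilon, \ldots)\big) / (2\epsilon)$. A plain-Python sketch of that formula, an illustration rather than the expected MiniTorch solution:

```python
from typing import Any


def central_difference_sketch(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6) -> Any:
    # Perturb only the arg-th input up and down by epsilon and take the symmetric slope.
    upper = list(vals)
    lower = list(vals)
    upper[arg] = upper[arg] + epsilon
    lower[arg] = lower[arg] - epsilon
    return (f(*upper) - f(*lower)) / (2.0 * epsilon)
```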
12 changes: 6 additions & 6 deletions minitorch/cuda_ops.py
@@ -154,7 +154,7 @@ def _map(
in_index = cuda.local.array(MAX_DIMS, numba.int32)
i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
# TODO: Implement for Task 3.3.
raise NotImplementedError('Need to implement for Task 3.3')
raise NotImplementedError("Need to implement for Task 3.3")

return cuda.jit()(_map) # type: ignore

@@ -196,7 +196,7 @@ def _zip(
i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x

# TODO: Implement for Task 3.3.
raise NotImplementedError('Need to implement for Task 3.3')
raise NotImplementedError("Need to implement for Task 3.3")

return cuda.jit()(_zip) # type: ignore

@@ -229,7 +229,7 @@ def _sum_practice(out: Storage, a: Storage, size: int) -> None:
pos = cuda.threadIdx.x

# TODO: Implement for Task 3.3.
raise NotImplementedError('Need to implement for Task 3.3')
raise NotImplementedError("Need to implement for Task 3.3")


jit_sum_practice = cuda.jit()(_sum_practice)
@@ -279,7 +279,7 @@ def _reduce(
pos = cuda.threadIdx.x

# TODO: Implement for Task 3.3.
raise NotImplementedError('Need to implement for Task 3.3')
raise NotImplementedError("Need to implement for Task 3.3")

return cuda.jit()(_reduce) # type: ignore

@@ -316,7 +316,7 @@ def _mm_practice(out: Storage, a: Storage, b: Storage, size: int) -> None:
"""
BLOCK_DIM = 32
# TODO: Implement for Task 3.3.
raise NotImplementedError('Need to implement for Task 3.3')
raise NotImplementedError("Need to implement for Task 3.3")


jit_mm_practice = cuda.jit()(_mm_practice)
@@ -386,7 +386,7 @@ def _tensor_matrix_multiply(
# b) Copy into shared memory for b matrix
# c) Compute the dot product for position c[i, j]
# TODO: Implement for Task 3.4.
raise NotImplementedError('Need to implement for Task 3.4')
raise NotImplementedError("Need to implement for Task 3.4")


tensor_matrix_multiply = cuda.jit(_tensor_matrix_multiply)
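
The CUDA kernels in this file are likewise stubs for Task 3.3/3.4; only the quote style changes here. As background for the `_sum_practice`-style kernels, the usual shared-memory block reduction pattern in Numba CUDA looks roughly like the sketch below. This is a standalone illustration of the pattern under the assumption of a 32-thread block, not MiniTorch's expected implementation; `block_sum` and `THREADS_PER_BLOCK` are made-up names:

```python
import numba
from numba import cuda

THREADS_PER_BLOCK = 32


@cuda.jit
def block_sum(out, a, size):
    # Each block sums up to THREADS_PER_BLOCK elements of `a` into one slot of `out`.
    cache = cuda.shared.array(THREADS_PER_BLOCK, numba.float64)
    i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    pos = cuda.threadIdx.x
    cache[pos] = a[i] if i < size else 0.0
    cuda.syncthreads()
    stride = 1
    while stride < THREADS_PER_BLOCK:
        # Pairwise tree reduction in shared memory; synchronize between levels.
        if pos % (2 * stride) == 0 and pos + stride < THREADS_PER_BLOCK:
            cache[pos] += cache[pos + stride]
        cuda.syncthreads()
        stride *= 2
    if pos == 0:
        out[cuda.blockIdx.x] = cache[0]
```

Launched as `block_sum[num_blocks, THREADS_PER_BLOCK](out_dev, a_dev, size)`, it leaves one partial sum per block in `out`.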
19 changes: 9 additions & 10 deletions minitorch/fast_conv.py
@@ -1,13 +1,12 @@
from typing import Tuple

import numpy as np
from numba import njit, prange
import numpy as np # noqa: F401
from numba import njit, prange # noqa: F401

from .autodiff import Context
from .tensor import Tensor
from .tensor_data import MAX_DIMS, Index # noqa: F401
from .tensor_data import (
MAX_DIMS,
Index,
Shape,
Strides,
broadcast_index,
@@ -77,11 +76,11 @@ def _tensor_conv1d(
and in_channels == in_channels_
and out_channels == out_channels_
)
s1 = input_strides
s2 = weight_strides
s1 = input_strides # noqa: F841
s2 = weight_strides # noqa: F841

# TODO: Implement for Task 4.1.
raise NotImplementedError('Need to implement for Task 4.1')
raise NotImplementedError("Need to implement for Task 4.1")


tensor_conv1d = njit(parallel=True)(_tensor_conv1d)
@@ -203,11 +202,11 @@ def _tensor_conv2d(
s1 = input_strides
s2 = weight_strides
# inners
s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3]
s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3]
s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3] # noqa: F841
s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3] # noqa: F841

# TODO: Implement for Task 4.2.
raise NotImplementedError('Need to implement for Task 4.2')
raise NotImplementedError("Need to implement for Task 4.2")


tensor_conv2d = njit(parallel=True, fastmath=True)(_tensor_conv2d)
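
For orientation on the `_tensor_conv1d`/`_tensor_conv2d` stubs (Task 4.1/4.2): the core operation accumulates `input[t + dt] * weight[dt]` over the kernel width at each output position `t` (or `t - dt` when the kernel is anchored in reverse), treating out-of-range positions as zero. A minimal single-channel NumPy sketch of the unreversed 1D case, purely illustrative and ignoring batching, channels, and strides:

```python
import numpy as np


def conv1d_sketch(signal: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    # Same-length output; the kernel is anchored at the current position and
    # positions past the right edge contribute zero.
    width, kw = signal.shape[0], kernel.shape[0]
    out = np.zeros(width)
    for t in range(width):
        for dt in range(kw):
            if t + dt < width:
                out[t] += signal[t + dt] * kernel[dt]
    return out
```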
18 changes: 10 additions & 8 deletions minitorch/fast_ops.py
@@ -2,11 +2,12 @@

from typing import TYPE_CHECKING

import numpy as np
from numba import njit, prange
import numpy as np # noqa: F401
from numba import njit
from numba import prange # noqa: F401

from .tensor_data import MAX_DIMS # noqa: F401
from .tensor_data import (
MAX_DIMS,
broadcast_index,
index_to_position,
shape_broadcast,
@@ -18,7 +19,8 @@
from typing import Callable, Optional

from .tensor import Tensor
from .tensor_data import Index, Shape, Storage, Strides
from .tensor_data import Index # noqa: F401
from .tensor_data import Shape, Storage, Strides

# TIP: Use `NUMBA_DISABLE_JIT=1 pytest tests/ -m task3_1` to run these tests without JIT.

@@ -160,7 +162,7 @@ def _map(
in_strides: Strides,
) -> None:
# TODO: Implement for Task 3.1.
raise NotImplementedError('Need to implement for Task 3.1')
raise NotImplementedError("Need to implement for Task 3.1")

return njit(parallel=True)(_map) # type: ignore

@@ -199,7 +201,7 @@ def _zip(
b_strides: Strides,
) -> None:
# TODO: Implement for Task 3.1.
raise NotImplementedError('Need to implement for Task 3.1')
raise NotImplementedError("Need to implement for Task 3.1")

return njit(parallel=True)(_zip) # type: ignore

@@ -233,7 +235,7 @@ def _reduce(
reduce_dim: int,
) -> None:
# TODO: Implement for Task 3.1.
raise NotImplementedError('Need to implement for Task 3.1')
raise NotImplementedError("Need to implement for Task 3.1")

return njit(parallel=True)(_reduce) # type: ignore

@@ -283,7 +285,7 @@ def _tensor_matrix_multiply(
b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0

# TODO: Implement for Task 3.2.
raise NotImplementedError('Need to implement for Task 3.2')
raise NotImplementedError("Need to implement for Task 3.2")


tensor_matrix_multiply = njit(parallel=True, fastmath=True)(_tensor_matrix_multiply)
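
The `fast_ops` stubs (Task 3.1/3.2) follow the same Numba pattern: an outer loop over output positions written with `prange` so that `njit(parallel=True)` can distribute it across threads, with each iteration writing one independent output element. A tiny standalone sketch of that pattern, restricted to contiguous same-shape arrays so it skips the broadcasting and index bookkeeping the real `_map` has to do:

```python
import numpy as np
from numba import njit, prange


@njit(parallel=True)
def map_double(out, inp):
    # Every iteration touches exactly one output slot, so the loop is safe to parallelize.
    for i in prange(out.size):
        out[i] = 2.0 * inp[i]


x = np.arange(8, dtype=np.float64)
y = np.empty_like(x)
map_double(y, x)  # y == 2 * x
```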
8 changes: 4 additions & 4 deletions minitorch/module.py
@@ -32,12 +32,12 @@ def modules(self) -> Sequence[Module]:
def train(self) -> None:
"Set the mode of this module and all descendent modules to `train`."
# TODO: Implement for Task 0.4.
raise NotImplementedError('Need to implement for Task 0.4')
raise NotImplementedError("Need to implement for Task 0.4")

def eval(self) -> None:
"Set the mode of this module and all descendent modules to `eval`."
# TODO: Implement for Task 0.4.
raise NotImplementedError('Need to implement for Task 0.4')
raise NotImplementedError("Need to implement for Task 0.4")

def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
"""
@@ -48,12 +48,12 @@ def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
The name and `Parameter` of each ancestor parameter.
"""
# TODO: Implement for Task 0.4.
raise NotImplementedError('Need to implement for Task 0.4')
raise NotImplementedError("Need to implement for Task 0.4")

def parameters(self) -> Sequence[Parameter]:
"Enumerate over all the parameters of this module and its descendents."
# TODO: Implement for Task 0.4.
raise NotImplementedError('Need to implement for Task 0.4')
raise NotImplementedError("Need to implement for Task 0.4")

def add_parameter(self, k: str, v: Any) -> Parameter:
"""
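
`train`, `eval`, `named_parameters`, and `parameters` (Task 0.4) all come down to walking the module tree recursively. The toy sketch below shows that traversal on a made-up `Node` class rather than MiniTorch's `Module`, so the attribute names are assumptions, not the repo's API:

```python
from typing import Dict, List, Tuple


class Node:
    def __init__(self) -> None:
        self.training = True
        self.children: Dict[str, "Node"] = {}
        self.values: Dict[str, float] = {}

    def set_mode(self, training: bool) -> None:
        # Flip this node, then recurse into every descendant.
        self.training = training
        for child in self.children.values():
            child.set_mode(training)

    def named_values(self, prefix: str = "") -> List[Tuple[str, float]]:
        # Collect (dotted-name, value) pairs depth-first, mirroring named_parameters.
        found = [(prefix + key, val) for key, val in self.values.items()]
        for name, child in self.children.items():
            found.extend(child.named_values(prefix + name + "."))
        return found
```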
18 changes: 9 additions & 9 deletions minitorch/nn.py
@@ -4,7 +4,7 @@
from .autodiff import Context
from .fast_ops import FastOps
from .tensor import Tensor
from .tensor_functions import Function, rand, tensor
from .tensor_functions import Function, rand, tensor # noqa: F401


def tile(input: Tensor, kernel: Tuple[int, int]) -> Tuple[Tensor, int, int]:
@@ -24,7 +24,7 @@ def tile(input: Tensor, kernel: Tuple[int, int]) -> Tuple[Tensor, int, int]:
assert height % kh == 0
assert width % kw == 0
# TODO: Implement for Task 4.3.
raise NotImplementedError('Need to implement for Task 4.3')
raise NotImplementedError("Need to implement for Task 4.3")


def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
@@ -40,7 +40,7 @@ def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
"""
batch, channel, height, width = input.shape
# TODO: Implement for Task 4.3.
raise NotImplementedError('Need to implement for Task 4.3')
raise NotImplementedError("Need to implement for Task 4.3")


max_reduce = FastOps.reduce(operators.max, -1e9)
@@ -68,13 +68,13 @@ class Max(Function):
def forward(ctx: Context, input: Tensor, dim: Tensor) -> Tensor:
"Forward of max should be max reduction"
# TODO: Implement for Task 4.4.
raise NotImplementedError('Need to implement for Task 4.4')
raise NotImplementedError("Need to implement for Task 4.4")

@staticmethod
def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]:
"Backward of max should be argmax (see above)"
# TODO: Implement for Task 4.4.
raise NotImplementedError('Need to implement for Task 4.4')
raise NotImplementedError("Need to implement for Task 4.4")


def max(input: Tensor, dim: int) -> Tensor:
@@ -97,7 +97,7 @@ def softmax(input: Tensor, dim: int) -> Tensor:
softmax tensor
"""
# TODO: Implement for Task 4.4.
raise NotImplementedError('Need to implement for Task 4.4')
raise NotImplementedError("Need to implement for Task 4.4")


def logsoftmax(input: Tensor, dim: int) -> Tensor:
@@ -116,7 +116,7 @@ def logsoftmax(input: Tensor, dim: int) -> Tensor:
log of softmax tensor
"""
# TODO: Implement for Task 4.4.
raise NotImplementedError('Need to implement for Task 4.4')
raise NotImplementedError("Need to implement for Task 4.4")


def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
@@ -132,7 +132,7 @@ def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
"""
batch, channel, height, width = input.shape
# TODO: Implement for Task 4.4.
raise NotImplementedError('Need to implement for Task 4.4')
raise NotImplementedError("Need to implement for Task 4.4")


def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor:
@@ -148,4 +148,4 @@ def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor:
tensor with random positions dropped out
"""
# TODO: Implement for Task 4.4.
raise NotImplementedError('Need to implement for Task 4.4')
raise NotImplementedError("Need to implement for Task 4.4")