Format with black and ignore F841 for now
dantp-ai committed Feb 29, 2024
1 parent 330bd63 commit 246f231
Showing 14 changed files with 92 additions and 93 deletions.
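Both kinds of change are mechanical: black normalizes string literals to double quotes as part of its default formatting, and a few intentionally unused locals are marked with `# noqa: F841` so flake8's "local variable is assigned to but never used" check stays quiet until the corresponding TODOs are implemented. A minimal sketch of the quote normalization, using a hypothetical helper rather than code from this repository:

    # Before formatting: black flags the single-quoted literal.
    def task_message(task):
        return 'Need to implement for Task %s' % task

    # After running `black .`: the same literal, rewritten with double quotes.
    def task_message(task):
        return "Need to implement for Task %s" % task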
6 changes: 3 additions & 3 deletions minitorch/autodiff.py
@@ -23,7 +23,7 @@ def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6)
An approximation of $f'_i(x_0, \ldots, x_{n-1})$
"""
# TODO: Implement for Task 1.1.
- raise NotImplementedError('Need to implement for Task 1.1')
+ raise NotImplementedError("Need to implement for Task 1.1")


variable_count = 1
@@ -62,7 +62,7 @@ def topological_sort(variable: Variable) -> Iterable[Variable]:
Non-constant Variables in topological order starting from the right.
"""
# TODO: Implement for Task 1.4.
- raise NotImplementedError('Need to implement for Task 1.4')
+ raise NotImplementedError("Need to implement for Task 1.4")


def backpropagate(variable: Variable, deriv: Any) -> None:
@@ -77,7 +77,7 @@ def backpropagate(variable: Variable, deriv: Any) -> None:
No return. Should write to its results to the derivative values of each leaf through `accumulate_derivative`.
"""
# TODO: Implement for Task 1.4.
- raise NotImplementedError('Need to implement for Task 1.4')
+ raise NotImplementedError("Need to implement for Task 1.4")


@dataclass
12 changes: 6 additions & 6 deletions minitorch/cuda_ops.py
@@ -154,7 +154,7 @@ def _map(
in_index = cuda.local.array(MAX_DIMS, numba.int32)
i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
# TODO: Implement for Task 3.3.
- raise NotImplementedError('Need to implement for Task 3.3')
+ raise NotImplementedError("Need to implement for Task 3.3")

return cuda.jit()(_map) # type: ignore

@@ -196,7 +196,7 @@ def _zip(
i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x

# TODO: Implement for Task 3.3.
- raise NotImplementedError('Need to implement for Task 3.3')
+ raise NotImplementedError("Need to implement for Task 3.3")

return cuda.jit()(_zip) # type: ignore

@@ -229,7 +229,7 @@ def _sum_practice(out: Storage, a: Storage, size: int) -> None:
pos = cuda.threadIdx.x

# TODO: Implement for Task 3.3.
- raise NotImplementedError('Need to implement for Task 3.3')
+ raise NotImplementedError("Need to implement for Task 3.3")


jit_sum_practice = cuda.jit()(_sum_practice)
@@ -279,7 +279,7 @@ def _reduce(
pos = cuda.threadIdx.x

# TODO: Implement for Task 3.3.
- raise NotImplementedError('Need to implement for Task 3.3')
+ raise NotImplementedError("Need to implement for Task 3.3")

return cuda.jit()(_reduce) # type: ignore

@@ -316,7 +316,7 @@ def _mm_practice(out: Storage, a: Storage, b: Storage, size: int) -> None:
"""
BLOCK_DIM = 32
# TODO: Implement for Task 3.3.
- raise NotImplementedError('Need to implement for Task 3.3')
+ raise NotImplementedError("Need to implement for Task 3.3")


jit_mm_practice = cuda.jit()(_mm_practice)
@@ -386,7 +386,7 @@ def _tensor_matrix_multiply(
# b) Copy into shared memory for b matrix
# c) Compute the dot produce for position c[i, j]
# TODO: Implement for Task 3.4.
- raise NotImplementedError('Need to implement for Task 3.4')
+ raise NotImplementedError("Need to implement for Task 3.4")


tensor_matrix_multiply = cuda.jit(_tensor_matrix_multiply)
12 changes: 6 additions & 6 deletions minitorch/fast_conv.py
@@ -76,11 +76,11 @@ def _tensor_conv1d(
and in_channels == in_channels_
and out_channels == out_channels_
)
- s1 = input_strides
- s2 = weight_strides
+ s1 = input_strides  # noqa: F841
+ s2 = weight_strides  # noqa: F841

# TODO: Implement for Task 4.1.
- raise NotImplementedError('Need to implement for Task 4.1')
+ raise NotImplementedError("Need to implement for Task 4.1")


tensor_conv1d = njit(parallel=True)(_tensor_conv1d)
@@ -202,11 +202,11 @@ def _tensor_conv2d(
s1 = input_strides
s2 = weight_strides
# inners
- s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3]
- s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3]
+ s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3]  # noqa: F841
+ s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3]  # noqa: F841

# TODO: Implement for Task 4.2.
- raise NotImplementedError('Need to implement for Task 4.2')
+ raise NotImplementedError("Need to implement for Task 4.2")


tensor_conv2d = njit(parallel=True, fastmath=True)(_tensor_conv2d)
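The `# noqa: F841` annotations above suppress flake8's F841 warning ("local variable is assigned to but never used"): the stride aliases are set up for the eventual convolution loops but stay unread while the kernel bodies still raise NotImplementedError. A minimal sketch of the pattern, using a hypothetical stub rather than the real conv kernel:

    def _conv_stub(input_strides, weight_strides):
        # Assigned now, read only once the inner loops are written;
        # without the annotations flake8 reports F841 for both names.
        s1 = input_strides  # noqa: F841
        s2 = weight_strides  # noqa: F841
        raise NotImplementedError("Need to implement for Task 4.1")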
8 changes: 4 additions & 4 deletions minitorch/fast_ops.py
@@ -162,7 +162,7 @@ def _map(
in_strides: Strides,
) -> None:
# TODO: Implement for Task 3.1.
- raise NotImplementedError('Need to implement for Task 3.1')
+ raise NotImplementedError("Need to implement for Task 3.1")

return njit(parallel=True)(_map) # type: ignore

@@ -201,7 +201,7 @@ def _zip(
b_strides: Strides,
) -> None:
# TODO: Implement for Task 3.1.
- raise NotImplementedError('Need to implement for Task 3.1')
+ raise NotImplementedError("Need to implement for Task 3.1")

return njit(parallel=True)(_zip) # type: ignore

@@ -235,7 +235,7 @@ def _reduce(
reduce_dim: int,
) -> None:
# TODO: Implement for Task 3.1.
- raise NotImplementedError('Need to implement for Task 3.1')
+ raise NotImplementedError("Need to implement for Task 3.1")

return njit(parallel=True)(_reduce) # type: ignore

@@ -285,7 +285,7 @@ def _tensor_matrix_multiply(
b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0

# TODO: Implement for Task 3.2.
- raise NotImplementedError('Need to implement for Task 3.2')
+ raise NotImplementedError("Need to implement for Task 3.2")


tensor_matrix_multiply = njit(parallel=True, fastmath=True)(_tensor_matrix_multiply)
8 changes: 4 additions & 4 deletions minitorch/module.py
@@ -32,12 +32,12 @@ def modules(self) -> Sequence[Module]:
def train(self) -> None:
"Set the mode of this module and all descendent modules to `train`."
# TODO: Implement for Task 0.4.
- raise NotImplementedError('Need to implement for Task 0.4')
+ raise NotImplementedError("Need to implement for Task 0.4")

def eval(self) -> None:
"Set the mode of this module and all descendent modules to `eval`."
# TODO: Implement for Task 0.4.
- raise NotImplementedError('Need to implement for Task 0.4')
+ raise NotImplementedError("Need to implement for Task 0.4")

def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
"""
@@ -48,12 +48,12 @@ def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
The name and `Parameter` of each ancestor parameter.
"""
# TODO: Implement for Task 0.4.
- raise NotImplementedError('Need to implement for Task 0.4')
+ raise NotImplementedError("Need to implement for Task 0.4")

def parameters(self) -> Sequence[Parameter]:
"Enumerate over all the parameters of this module and its descendents."
# TODO: Implement for Task 0.4.
- raise NotImplementedError('Need to implement for Task 0.4')
+ raise NotImplementedError("Need to implement for Task 0.4")

def add_parameter(self, k: str, v: Any) -> Parameter:
"""
16 changes: 8 additions & 8 deletions minitorch/nn.py
@@ -24,7 +24,7 @@ def tile(input: Tensor, kernel: Tuple[int, int]) -> Tuple[Tensor, int, int]:
assert height % kh == 0
assert width % kw == 0
# TODO: Implement for Task 4.3.
- raise NotImplementedError('Need to implement for Task 4.3')
+ raise NotImplementedError("Need to implement for Task 4.3")


def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
@@ -40,7 +40,7 @@ def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
"""
batch, channel, height, width = input.shape
# TODO: Implement for Task 4.3.
- raise NotImplementedError('Need to implement for Task 4.3')
+ raise NotImplementedError("Need to implement for Task 4.3")


max_reduce = FastOps.reduce(operators.max, -1e9)
@@ -68,13 +68,13 @@ class Max(Function):
def forward(ctx: Context, input: Tensor, dim: Tensor) -> Tensor:
"Forward of max should be max reduction"
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")

@staticmethod
def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]:
"Backward of max should be argmax (see above)"
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")


def max(input: Tensor, dim: int) -> Tensor:
@@ -97,7 +97,7 @@ def softmax(input: Tensor, dim: int) -> Tensor:
softmax tensor
"""
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")


def logsoftmax(input: Tensor, dim: int) -> Tensor:
@@ -116,7 +116,7 @@ def logsoftmax(input: Tensor, dim: int) -> Tensor:
log of softmax tensor
"""
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")


def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
@@ -132,7 +132,7 @@ def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
"""
batch, channel, height, width = input.shape
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")


def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor:
@@ -148,4 +148,4 @@ def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor:
tensor with random positions dropped out
"""
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")
22 changes: 11 additions & 11 deletions minitorch/scalar.py
@@ -93,30 +93,30 @@ def __rtruediv__(self, b: ScalarLike) -> Scalar:

def __add__(self, b: ScalarLike) -> Scalar:
# TODO: Implement for Task 1.2.
- raise NotImplementedError('Need to implement for Task 1.2')
+ raise NotImplementedError("Need to implement for Task 1.2")

def __bool__(self) -> bool:
return bool(self.data)

def __lt__(self, b: ScalarLike) -> Scalar:
# TODO: Implement for Task 1.2.
- raise NotImplementedError('Need to implement for Task 1.2')
+ raise NotImplementedError("Need to implement for Task 1.2")

def __gt__(self, b: ScalarLike) -> Scalar:
# TODO: Implement for Task 1.2.
- raise NotImplementedError('Need to implement for Task 1.2')
+ raise NotImplementedError("Need to implement for Task 1.2")

def __eq__(self, b: ScalarLike) -> Scalar: # type: ignore[override]
# TODO: Implement for Task 1.2.
- raise NotImplementedError('Need to implement for Task 1.2')
+ raise NotImplementedError("Need to implement for Task 1.2")

def __sub__(self, b: ScalarLike) -> Scalar:
# TODO: Implement for Task 1.2.
- raise NotImplementedError('Need to implement for Task 1.2')
+ raise NotImplementedError("Need to implement for Task 1.2")

def __neg__(self) -> Scalar:
# TODO: Implement for Task 1.2.
- raise NotImplementedError('Need to implement for Task 1.2')
+ raise NotImplementedError("Need to implement for Task 1.2")

def __radd__(self, b: ScalarLike) -> Scalar:
return self + b
@@ -126,19 +126,19 @@ def __rmul__(self, b: ScalarLike) -> Scalar:

def log(self) -> Scalar:
# TODO: Implement for Task 1.2.
- raise NotImplementedError('Need to implement for Task 1.2')
+ raise NotImplementedError("Need to implement for Task 1.2")

def exp(self) -> Scalar:
# TODO: Implement for Task 1.2.
- raise NotImplementedError('Need to implement for Task 1.2')
+ raise NotImplementedError("Need to implement for Task 1.2")

def sigmoid(self) -> Scalar:
# TODO: Implement for Task 1.2.
- raise NotImplementedError('Need to implement for Task 1.2')
+ raise NotImplementedError("Need to implement for Task 1.2")

def relu(self) -> Scalar:
# TODO: Implement for Task 1.2.
- raise NotImplementedError('Need to implement for Task 1.2')
+ raise NotImplementedError("Need to implement for Task 1.2")

# Variable elements for backprop

@@ -174,7 +174,7 @@ def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]:
assert h.ctx is not None

# TODO: Implement for Task 1.3.
- raise NotImplementedError('Need to implement for Task 1.3')
+ raise NotImplementedError("Need to implement for Task 1.3")

def backward(self, d_output: Optional[float] = None) -> None:
"""
(Diffs for the remaining 7 changed files are not shown.)
