Style - black formatted
jacky1c committed Aug 27, 2024
1 parent 7d35d59 commit 07f46e9
Showing 17 changed files with 111 additions and 110 deletions.
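The changes shown below are black's default string normalization: single-quoted literals become double-quoted, with no change in runtime behavior. As a rough sketch, assuming black's Python API (black.format_str and black.Mode, neither of which appears in this diff), the same rewrite can be reproduced like this:

import black

src = "raise NotImplementedError('Need to implement for Task 1.1')\n"
formatted = black.format_str(src, mode=black.Mode())
print(formatted)
# raise NotImplementedError("Need to implement for Task 1.1")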
6 changes: 3 additions & 3 deletions minitorch/autodiff.py
@@ -23,7 +23,7 @@ def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6)
An approximation of $f'_i(x_0, \ldots, x_{n-1})$
"""
# TODO: Implement for Task 1.1.
- raise NotImplementedError('Need to implement for Task 1.1')
+ raise NotImplementedError("Need to implement for Task 1.1")


variable_count = 1
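For reference, the central difference approximation the stub above is meant to compute is f'_i(x_0, ..., x_{n-1}) ~= (f(..., x_i + epsilon, ...) - f(..., x_i - epsilon, ...)) / (2 * epsilon). A minimal sketch of one possible implementation, not the course's reference solution:

def central_difference(f, *vals, arg=0, epsilon=1e-6):
    # Perturb only the `arg`-th argument in both directions.
    plus = [v + epsilon if i == arg else v for i, v in enumerate(vals)]
    minus = [v - epsilon if i == arg else v for i, v in enumerate(vals)]
    return (f(*plus) - f(*minus)) / (2.0 * epsilon)

# Example: central_difference(lambda x, y: x * y, 2.0, 3.0, arg=0) ~= 3.0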
@@ -62,7 +62,7 @@ def topological_sort(variable: Variable) -> Iterable[Variable]:
Non-constant Variables in topological order starting from the right.
"""
# TODO: Implement for Task 1.4.
- raise NotImplementedError('Need to implement for Task 1.4')
+ raise NotImplementedError("Need to implement for Task 1.4")


def backpropagate(variable: Variable, deriv: Any) -> None:
@@ -77,7 +77,7 @@ def backpropagate(variable: Variable, deriv: Any) -> None:
No return. Should write its results to the derivative values of each leaf through `accumulate_derivative`.
"""
# TODO: Implement for Task 1.4.
- raise NotImplementedError('Need to implement for Task 1.4')
+ raise NotImplementedError("Need to implement for Task 1.4")


@dataclass
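The two Task 1.4 stubs above ask for a reverse topological ordering of the computation graph followed by a chain-rule sweep over it. A sketch of the ordering step, assuming the Variable protocol exposes unique_id, is_constant(), and parents as in the upstream minitorch scaffold; one possible shape, not the assignment's reference solution:

def topological_sort(variable):
    order = []
    visited = set()

    def visit(var):
        # Skip constants and anything already emitted.
        if var.is_constant() or var.unique_id in visited:
            return
        visited.add(var.unique_id)
        for parent in var.parents:
            visit(parent)
        order.append(var)

    visit(variable)
    # Right-most (output) variable first, leaves last.
    return reversed(order)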
12 changes: 6 additions & 6 deletions minitorch/cuda_ops.py
@@ -154,7 +154,7 @@ def _map(
in_index = cuda.local.array(MAX_DIMS, numba.int32)
i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
# TODO: Implement for Task 3.3.
- raise NotImplementedError('Need to implement for Task 3.3')
+ raise NotImplementedError("Need to implement for Task 3.3")

return cuda.jit()(_map) # type: ignore

@@ -196,7 +196,7 @@ def _zip(
i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x

# TODO: Implement for Task 3.3.
- raise NotImplementedError('Need to implement for Task 3.3')
+ raise NotImplementedError("Need to implement for Task 3.3")

return cuda.jit()(_zip) # type: ignore

@@ -229,7 +229,7 @@ def _sum_practice(out: Storage, a: Storage, size: int) -> None:
pos = cuda.threadIdx.x

# TODO: Implement for Task 3.3.
- raise NotImplementedError('Need to implement for Task 3.3')
+ raise NotImplementedError("Need to implement for Task 3.3")


jit_sum_practice = cuda.jit()(_sum_practice)
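_sum_practice is the Task 3.3 warm-up: each thread block sums one BLOCK_DIM-sized slice of `a` into a cell of `out`. A sketch of the usual shared-memory tree reduction, assuming numba.cuda semantics and BLOCK_DIM = 32 as in the stub; illustrative only, not the assignment's reference solution:

from numba import cuda
import numba

BLOCK_DIM = 32

@cuda.jit
def sum_practice_sketch(out, a, size):
    cache = cuda.shared.array(BLOCK_DIM, numba.float64)
    i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    pos = cuda.threadIdx.x

    # Each thread loads one element (0.0 past the end of `a`).
    cache[pos] = a[i] if i < size else 0.0
    cuda.syncthreads()

    # Halve the active range each step, accumulating pairs in shared memory.
    stride = BLOCK_DIM // 2
    while stride > 0:
        if pos < stride:
            cache[pos] += cache[pos + stride]
        cuda.syncthreads()
        stride //= 2

    if pos == 0:
        out[cuda.blockIdx.x] = cache[0]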
@@ -279,7 +279,7 @@ def _reduce(
pos = cuda.threadIdx.x

# TODO: Implement for Task 3.3.
- raise NotImplementedError('Need to implement for Task 3.3')
+ raise NotImplementedError("Need to implement for Task 3.3")

return cuda.jit()(_reduce) # type: ignore

@@ -316,7 +316,7 @@ def _mm_practice(out: Storage, a: Storage, b: Storage, size: int) -> None:
"""
BLOCK_DIM = 32
# TODO: Implement for Task 3.3.
- raise NotImplementedError('Need to implement for Task 3.3')
+ raise NotImplementedError("Need to implement for Task 3.3")


jit_mm_practice = cuda.jit()(_mm_practice)
@@ -386,7 +386,7 @@ def _tensor_matrix_multiply(
# b) Copy into shared memory for b matrix
# c) Compute the dot product for position c[i, j]
# TODO: Implement for Task 3.4.
- raise NotImplementedError('Need to implement for Task 3.4')
+ raise NotImplementedError("Need to implement for Task 3.4")


tensor_matrix_multiply = cuda.jit(_tensor_matrix_multiply)
4 changes: 2 additions & 2 deletions minitorch/fast_conv.py
@@ -81,7 +81,7 @@ def _tensor_conv1d(
s2 = weight_strides

# TODO: Implement for Task 4.1.
- raise NotImplementedError('Need to implement for Task 4.1')
+ raise NotImplementedError("Need to implement for Task 4.1")


tensor_conv1d = njit(parallel=True)(_tensor_conv1d)
@@ -207,7 +207,7 @@ def _tensor_conv2d(
s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3]

# TODO: Implement for Task 4.2.
- raise NotImplementedError('Need to implement for Task 4.2')
+ raise NotImplementedError("Need to implement for Task 4.2")


tensor_conv2d = njit(parallel=True, fastmath=True)(_tensor_conv2d)
8 changes: 4 additions & 4 deletions minitorch/fast_ops.py
@@ -160,7 +160,7 @@ def _map(
in_strides: Strides,
) -> None:
# TODO: Implement for Task 3.1.
- raise NotImplementedError('Need to implement for Task 3.1')
+ raise NotImplementedError("Need to implement for Task 3.1")

return njit(parallel=True)(_map) # type: ignore
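For the Task 3.1 map stub above, the usual pattern is an outer prange loop over flat output positions, converting each one to a broadcast input position with the indexing helpers from tensor_data (to_index, broadcast_index, index_to_position). A sketch under that assumption, wrapped in a tensor_map-style factory so that `fn` is the closed-over scalar function; again a sketch, not the reference solution:

import numpy as np
from numba import prange
from minitorch.tensor_data import MAX_DIMS, broadcast_index, index_to_position, to_index

def tensor_map_sketch(fn):
    def _map(out, out_shape, out_strides, in_storage, in_shape, in_strides):
        for i in prange(len(out)):
            out_index = np.empty(MAX_DIMS, np.int32)
            in_index = np.empty(MAX_DIMS, np.int32)
            to_index(i, out_shape, out_index)
            broadcast_index(out_index, out_shape, in_shape, in_index)
            o = index_to_position(out_index, out_strides)
            j = index_to_position(in_index, in_strides)
            out[o] = fn(in_storage[j])

    return _map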

@@ -199,7 +199,7 @@ def _zip(
b_strides: Strides,
) -> None:
# TODO: Implement for Task 3.1.
- raise NotImplementedError('Need to implement for Task 3.1')
+ raise NotImplementedError("Need to implement for Task 3.1")

return njit(parallel=True)(_zip) # type: ignore

@@ -233,7 +233,7 @@ def _reduce(
reduce_dim: int,
) -> None:
# TODO: Implement for Task 3.1.
- raise NotImplementedError('Need to implement for Task 3.1')
+ raise NotImplementedError("Need to implement for Task 3.1")

return njit(parallel=True)(_reduce) # type: ignore

@@ -283,7 +283,7 @@ def _tensor_matrix_multiply(
b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0

# TODO: Implement for Task 3.2.
- raise NotImplementedError('Need to implement for Task 3.2')
+ raise NotImplementedError("Need to implement for Task 3.2")


tensor_matrix_multiply = njit(parallel=True, fastmath=True)(_tensor_matrix_multiply)
8 changes: 4 additions & 4 deletions minitorch/module.py
@@ -32,12 +32,12 @@ def modules(self) -> Sequence[Module]:
def train(self) -> None:
"Set the mode of this module and all descendent modules to `train`."
# TODO: Implement for Task 0.4.
- raise NotImplementedError('Need to implement for Task 0.4')
+ raise NotImplementedError("Need to implement for Task 0.4")

def eval(self) -> None:
"Set the mode of this module and all descendent modules to `eval`."
# TODO: Implement for Task 0.4.
- raise NotImplementedError('Need to implement for Task 0.4')
+ raise NotImplementedError("Need to implement for Task 0.4")

def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
"""
@@ -48,12 +48,12 @@ def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
The name and `Parameter` of each ancestor parameter.
"""
# TODO: Implement for Task 0.4.
- raise NotImplementedError('Need to implement for Task 0.4')
+ raise NotImplementedError("Need to implement for Task 0.4")

def parameters(self) -> Sequence[Parameter]:
"Enumerate over all the parameters of this module and its descendents."
# TODO: Implement for Task 0.4.
- raise NotImplementedError('Need to implement for Task 0.4')
+ raise NotImplementedError("Need to implement for Task 0.4")

def add_parameter(self, k: str, v: Any) -> Parameter:
"""
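The Task 0.4 stubs above all walk the module tree. A minimal sketch for the two mode switches, assuming the scaffold's `training` flag and `modules()` accessor; how the recursion is written is up to the implementer:

def train(self) -> None:
    self.training = True
    for module in self.modules():
        module.train()

def eval(self) -> None:
    self.training = False
    for module in self.modules():
        module.eval()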
16 changes: 8 additions & 8 deletions minitorch/nn.py
@@ -24,7 +24,7 @@ def tile(input: Tensor, kernel: Tuple[int, int]) -> Tuple[Tensor, int, int]:
assert height % kh == 0
assert width % kw == 0
# TODO: Implement for Task 4.3.
- raise NotImplementedError('Need to implement for Task 4.3')
+ raise NotImplementedError("Need to implement for Task 4.3")
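tile is the reshaping helper behind both pooling operations: it turns a (batch, channel, height, width) tensor into (batch, channel, height // kh, width // kw, kh * kw) so a pooling reduction can run over the last axis. A sketch assuming the Tensor class provides contiguous(), view(), and permute() as in the upstream scaffold, and again not the reference solution:

def tile(input, kernel):
    batch, channel, height, width = input.shape
    kh, kw = kernel
    new_height, new_width = height // kh, width // kw
    # Split each spatial axis into (blocks, within-block), move the two
    # within-block axes to the end, and flatten them into one dimension.
    t = input.contiguous().view(batch, channel, new_height, kh, new_width, kw)
    t = t.permute(0, 1, 2, 4, 3, 5).contiguous()
    t = t.view(batch, channel, new_height, new_width, kh * kw)
    return t, new_height, new_width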


def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
@@ -40,7 +40,7 @@ def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
"""
batch, channel, height, width = input.shape
# TODO: Implement for Task 4.3.
- raise NotImplementedError('Need to implement for Task 4.3')
+ raise NotImplementedError("Need to implement for Task 4.3")


max_reduce = FastOps.reduce(operators.max, -1e9)
@@ -68,13 +68,13 @@ class Max(Function):
def forward(ctx: Context, input: Tensor, dim: Tensor) -> Tensor:
"Forward of max should be max reduction"
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")

@staticmethod
def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]:
"Backward of max should be argmax (see above)"
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")


def max(input: Tensor, dim: int) -> Tensor:
@@ -97,7 +97,7 @@ def softmax(input: Tensor, dim: int) -> Tensor:
softmax tensor
"""
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")


def logsoftmax(input: Tensor, dim: int) -> Tensor:
@@ -116,7 +116,7 @@ def logsoftmax(input: Tensor, dim: int) -> Tensor:
log of softmax tensor
"""
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")


def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
@@ -132,7 +132,7 @@ def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
"""
batch, channel, height, width = input.shape
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")


def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor:
@@ -148,4 +148,4 @@ def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor:
tensor with random positions dropped out
"""
# TODO: Implement for Task 4.4.
- raise NotImplementedError('Need to implement for Task 4.4')
+ raise NotImplementedError("Need to implement for Task 4.4")
42 changes: 21 additions & 21 deletions minitorch/operators.py
@@ -13,49 +13,49 @@
def mul(x: float, y: float) -> float:
"$f(x, y) = x * y$"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def id(x: float) -> float:
"$f(x) = x$"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def add(x: float, y: float) -> float:
"$f(x, y) = x + y$"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def neg(x: float) -> float:
"$f(x) = -x$"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def lt(x: float, y: float) -> float:
"$f(x) =$ 1.0 if x is less than y else 0.0"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def eq(x: float, y: float) -> float:
"$f(x) =$ 1.0 if x is equal to y else 0.0"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def max(x: float, y: float) -> float:
"$f(x) =$ x if x is greater than y else y"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def is_close(x: float, y: float) -> float:
"$f(x) = |x - y| < 1e-2$"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def sigmoid(x: float) -> float:
@@ -71,7 +71,7 @@ def sigmoid(x: float) -> float:
for stability.
"""
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")
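The sigmoid docstring is cut off by this hunk, but the "for stability" note refers to the standard numerically stable evaluation: compute 1 / (1 + e^{-x}) when x >= 0 and e^{x} / (1 + e^{x}) otherwise, so exp() is never called on a large positive argument. A scalar sketch of that:

import math

def sigmoid(x: float) -> float:
    # Never exponentiate a large positive value.
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    return math.exp(x) / (1.0 + math.exp(x))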


def relu(x: float) -> float:
@@ -81,7 +81,7 @@ def relu(x: float) -> float:
(See https://en.wikipedia.org/wiki/Rectifier_(neural_networks) .)
"""
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


EPS = 1e-6
@@ -100,25 +100,25 @@ def exp(x: float) -> float:
def log_back(x: float, d: float) -> float:
r"If $f = log$ as above, compute $d \times f'(x)$"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def inv(x: float) -> float:
"$f(x) = 1/x$"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def inv_back(x: float, d: float) -> float:
r"If $f(x) = 1/x$ compute $d \times f'(x)$"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


def relu_back(x: float, d: float) -> float:
r"If $f = relu$ compute $d \times f'(x)$"
# TODO: Implement for Task 0.1.
- raise NotImplementedError('Need to implement for Task 0.1')
+ raise NotImplementedError("Need to implement for Task 0.1")


# ## Task 0.3
@@ -140,13 +140,13 @@ def map(fn: Callable[[float], float]) -> Callable[[Iterable[float]], Iterable[fl
new list
"""
# TODO: Implement for Task 0.3.
- raise NotImplementedError('Need to implement for Task 0.3')
+ raise NotImplementedError("Need to implement for Task 0.3")


def negList(ls: Iterable[float]) -> Iterable[float]:
"Use `map` and `neg` to negate each element in `ls`"
# TODO: Implement for Task 0.3.
- raise NotImplementedError('Need to implement for Task 0.3')
+ raise NotImplementedError("Need to implement for Task 0.3")


def zipWith(
@@ -166,13 +166,13 @@ def zipWith(
"""
# TODO: Implement for Task 0.3.
- raise NotImplementedError('Need to implement for Task 0.3')
+ raise NotImplementedError("Need to implement for Task 0.3")


def addLists(ls1: Iterable[float], ls2: Iterable[float]) -> Iterable[float]:
"Add the elements of `ls1` and `ls2` using `zipWith` and `add`"
# TODO: Implement for Task 0.3.
- raise NotImplementedError('Need to implement for Task 0.3')
+ raise NotImplementedError("Need to implement for Task 0.3")


def reduce(
@@ -191,16 +191,16 @@ def reduce(
fn(x_1, x_0)))`
"""
# TODO: Implement for Task 0.3.
- raise NotImplementedError('Need to implement for Task 0.3')
+ raise NotImplementedError("Need to implement for Task 0.3")


def sum(ls: Iterable[float]) -> float:
"Sum up a list using `reduce` and `add`."
# TODO: Implement for Task 0.3.
- raise NotImplementedError('Need to implement for Task 0.3')
+ raise NotImplementedError("Need to implement for Task 0.3")


def prod(ls: Iterable[float]) -> float:
"Product of a list using `reduce` and `mul`."
# TODO: Implement for Task 0.3.
- raise NotImplementedError('Need to implement for Task 0.3')
+ raise NotImplementedError("Need to implement for Task 0.3")