Commit 10f0cdc: cnn demo
siAyush committed Mar 20, 2021 (1 parent: 2ba6317)
Showing 6 changed files with 176 additions and 6 deletions.
18 changes: 14 additions & 4 deletions README.md
@@ -44,18 +44,18 @@ Deep Learning
  - Convolutional Layer
  - Dropout Layer
  - Flatten Layer
- - Fully-Connected (Dense) Layer
- - Fully-Connected RNN Layer
+ - Fully-Connected Layer
  - Max Pooling Layer
  - Reshape Layer
+ - RNN Layer
  - Up Sampling Layer
  - Zero Padding Layer

  - Models
    - [Neural Network](https://github.com/siAyush/ScratchML/blob/main/scratch_ml/demo/neural_network.py)
-   - Convolutional Neural Network
+   - [Convolutional Neural Network](https://github.com/siAyush/ScratchML/blob/main/scratch_ml/demo/convolutional_neural_network.py)
    - [Recurrent Neural Network](https://github.com/siAyush/ScratchML/blob/main/scratch_ml/demo/recurrent_neural_network.py)
-   - Generative Adversarial Network
+   - [Generative Adversarial Network](https://github.com/siAyush/ScratchML/blob/main/scratch_ml/demo/generative_adversarial_network.py)

## Installation

@@ -108,3 +108,13 @@ $ python3 scratch_ml/demo/neural_network.py
![Neural Network](./images/neural_network.png)

![Neural Network](./images/neural_network_error.png)

### Convolutional Neural Network

```
$ python3 scratch_ml/demo/convolutional_neural_network.py
```

![CNN](./images/cnn_2.png)

![CNN](./images/cnn.png)
Binary file added images/cnn.png
Binary file added images/cnn_2.png
67 changes: 65 additions & 2 deletions scratch_ml/deep_learning/layers.py
@@ -190,7 +190,7 @@ def forward_pass(self, x, training=True):
            x, self.filter_shape, stride=self.stride, output_shape=self.padding)
        # Turn weights into column shape
        self.w_col = self.w.reshape((self.n_filters, -1))
-       output = self.w_col.dot(self.X_col) + self.w0
+       output = self.w_col.dot(self.x_col) + self.w0
        # Reshape into (n_filters, out_height, out_width, batch_size)
        output = output.reshape(self.output_shape() + (batch_size, ))
        # Redistribute axes so that batch size comes first
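The fix above (`X_col` → `x_col`) touches the im2col-based forward pass: each filter is flattened into a row of `w_col` and each input patch into a column of `x_col`, so the whole convolution becomes one matrix product. A minimal NumPy sketch of that idea with toy shapes, ignoring the stride and padding handling that the repo's `image_to_column` helper provides:

```
import numpy as np

# Toy setup: one 4x4 single-channel image, two 3x3 filters, stride 1, no padding.
image = np.arange(16, dtype=float).reshape(4, 4)
filters = np.random.randn(2, 3, 3)

# im2col: one column per output position, each holding a flattened 3x3 patch.
patches = [image[i:i+3, j:j+3].ravel() for i in range(2) for j in range(2)]
x_col = np.stack(patches, axis=1)           # shape (9, 4)

w_col = filters.reshape(2, -1)              # shape (2, 9)
output = w_col.dot(x_col).reshape(2, 2, 2)  # (n_filters, out_h, out_w)
```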
@@ -332,7 +332,70 @@ def output_shape(self):


class BatchNormalization(Layer):
-   pass
    """Batch normalization"""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.trainable = True
        self.eps = 0.01
        self.running_mean = None
        self.running_var = None

    def initialize(self, optimizer):
        # Learnable scale (gamma) and shift (beta), one optimizer copy each
        self.gamma = np.ones(self.input_shape)
        self.beta = np.zeros(self.input_shape)
        self.gamma_opt = copy.copy(optimizer)
        self.beta_opt = copy.copy(optimizer)

    def parameters(self):
        return np.prod(self.gamma.shape) + np.prod(self.beta.shape)

    def forward_pass(self, x, training=True):
        # Initialize running mean and variance on the first call
        if self.running_mean is None:
            self.running_mean = np.mean(x, axis=0)
            self.running_var = np.var(x, axis=0)

        if training and self.trainable:
            # Use batch statistics and update the running averages
            mean = np.mean(x, axis=0)
            var = np.var(x, axis=0)
            self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * mean
            self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var
        else:
            # At inference time, fall back to the running statistics
            mean = self.running_mean
            var = self.running_var

        # Statistics saved for the backward pass
        self.x_centered = x - mean
        self.stddev_inv = 1 / np.sqrt(var + self.eps)
        x_norm = self.x_centered * self.stddev_inv
        output = self.gamma * x_norm + self.beta

        return output

    def backward_pass(self, gradient):
        # Save gamma as used in the forward pass before it is updated
        gamma = self.gamma
        if self.trainable:
            x_norm = self.x_centered * self.stddev_inv
            grad_gamma = np.sum(gradient * x_norm, axis=0)
            grad_beta = np.sum(gradient, axis=0)
            self.gamma = self.gamma_opt.update(self.gamma, grad_gamma)
            self.beta = self.beta_opt.update(self.beta, grad_beta)
        batch_size = gradient.shape[0]

        # The gradient of the loss with respect to the layer inputs
        gradient = (1 / batch_size) * gamma * self.stddev_inv * (
            batch_size * gradient
            - np.sum(gradient, axis=0)
            - self.x_centered * self.stddev_inv**2 *
            np.sum(gradient * self.x_centered, axis=0))

        return gradient

    def output_shape(self):
        return self.input_shape


class UpSampling2D(Layer):
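For reference, the `backward_pass` above computes the standard batch-normalization input gradient; this is just the code's formula restated, with m the batch size and μ, σ² the batch mean and variance:

```
\hat{x} = \frac{x - \mu}{\sqrt{\sigma^2 + \epsilon}}, \qquad y = \gamma \hat{x} + \beta

\frac{\partial L}{\partial x}
  = \frac{\gamma}{m\sqrt{\sigma^2 + \epsilon}}
    \left( m \frac{\partial L}{\partial y}
         - \sum_{i=1}^{m} \frac{\partial L}{\partial y_i}
         - \frac{x - \mu}{\sigma^2 + \epsilon}
           \sum_{i=1}^{m} \frac{\partial L}{\partial y_i}\,(x_i - \mu) \right)
```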
71 changes: 71 additions & 0 deletions scratch_ml/demo/convolutional_neural_network.py
@@ -0,0 +1,71 @@
import math
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from scratch_ml.deep_learning.optimizers import Adam
from scratch_ml.deep_learning import NeuralNetwork
from scratch_ml.deep_learning.layers import Dense, Dropout, Conv2D, Flatten, Activation, BatchNormalization
from scratch_ml.utils import to_categorical, train_test_split, Plot, CrossEntropy


def main():

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding
    y = to_categorical(y.astype("int"))
    X_train, X_test, y_train, y_test = train_test_split(X, y, seed=1)

    # Reshape X to (n_samples, channels, height, width)
    X_train = X_train.reshape((-1, 1, 8, 8))
    X_test = X_test.reshape((-1, 1, 8, 8))

    optimizer = Adam()
    model = NeuralNetwork(optimizer=optimizer,
                          loss=CrossEntropy,
                          validation_data=(X_test, y_test))

    model.add(Conv2D(n_filters=16, filter_shape=(3, 3),
                     stride=1, input_shape=(1, 8, 8), padding="same"))
    model.add(Activation("relu"))
    model.add(Dropout(0.25))
    model.add(BatchNormalization())
    model.add(Conv2D(n_filters=32, filter_shape=(3, 3),
                     stride=1, padding="same"))
    model.add(Activation("relu"))
    model.add(Dropout(0.25))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation("relu"))
    model.add(Dropout(0.4))
    model.add(BatchNormalization())
    model.add(Dense(10))
    model.add(Activation("softmax"))
    model.summary(name="CNN")

    train_err, val_err = model.fit(X_train, y_train, n_epochs=50, batch_size=256)
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel("Error")
    plt.xlabel("Iterations")
    plt.show()

    _, accuracy = model.test_on_batch(X_test, y_test)
    print("Accuracy:", accuracy)
    y_pred = np.argmax(model.predict(X_test), axis=1)
    X_test = X_test.reshape(-1, 8*8)

    Plot().plot_2d(X_test, y_pred, title="Convolutional Neural Network",
                   accuracy=accuracy, legend_label=range(10))


if __name__ == "__main__":
    main()
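A quick shape check on the stack above: with stride 1 and padding "same", both Conv2D layers preserve the 8x8 spatial size of the digit images, so Flatten hands 32 * 8 * 8 = 2048 features to Dense(256), and Dense(10) plus softmax matches the ten digit classes.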
26 changes: 26 additions & 0 deletions scratch_ml/demo/generative_adversarial_network.py
@@ -0,0 +1,26 @@
import math
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from scratch_ml.deep_learning.optimizers import Adam
from scratch_ml.deep_learning import NeuralNetwork
from scratch_ml.deep_learning.layers import Dense, Dropout, Conv2D, Flatten, Activation, BatchNormalization
from scratch_ml.utils import to_categorical, train_test_split, CrossEntropy


class GAN():
    """Generative Adversarial Network with fully connected neural nets as
    Generator and Discriminator."""

    def __init__(self):
        pass

    def generator(self):
        pass

    def discriminator(self):
        pass


if __name__ == '__main__':
    gan = GAN()
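The class is only a stub in this commit. Purely as an illustration of where it could go, a minimal fully connected generator/discriminator pair built on the same NeuralNetwork API the CNN demo uses might look like the sketch below; the latent size, layer widths, activation names ("leaky_relu", "tanh"), and the assumption that Dense accepts input_shape like Conv2D does are all illustrative guesses, not part of this commit, and the adversarial training loop is omitted.

```
import numpy as np
from scratch_ml.deep_learning.optimizers import Adam
from scratch_ml.deep_learning import NeuralNetwork
from scratch_ml.deep_learning.layers import Dense, Activation, BatchNormalization
from scratch_ml.utils import CrossEntropy

# Illustrative sizes (assumptions): 100-dim noise, flattened 8x8 images.
latent_dim, img_dim = 100, 8 * 8


def build_generator():
    # Maps noise vectors to flattened fake images.
    model = NeuralNetwork(optimizer=Adam(), loss=CrossEntropy)
    model.add(Dense(256, input_shape=(latent_dim,)))
    model.add(Activation("leaky_relu"))
    model.add(BatchNormalization())
    model.add(Dense(img_dim))
    model.add(Activation("tanh"))
    return model


def build_discriminator():
    # Scores flattened images as real vs. fake.
    model = NeuralNetwork(optimizer=Adam(), loss=CrossEntropy)
    model.add(Dense(256, input_shape=(img_dim,)))
    model.add(Activation("leaky_relu"))
    model.add(Dense(2))
    model.add(Activation("softmax"))
    return model
```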
