forked from Schoyen/fys-stk-lecture
-
Notifications
You must be signed in to change notification settings - Fork 0
/
fizzbuzz.py
96 lines (65 loc) · 2.52 KB
/
fizzbuzz.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import numpy as np
import tensorflow as tf
import sklearn.neural_network
import sklearn.model_selection
# Number of binary digits used to encode each integer input.
NUM_DIGITS = 10
# Total count of representable values: 2 ** 10 = 1024.
NUM_VALUES = 2 ** NUM_DIGITS
def binary_encoding(x, num_digits):
    """Return *x* as a little-endian binary digit vector of length *num_digits*.

    Element ``i`` is bit ``i`` of ``x`` (least-significant bit first).
    """
    bits = [(x >> shift) & 1 for shift in range(num_digits)]
    return np.array(bits)
def fizz_buzz_one_hot(x):
    """One-hot encode the fizz-buzz class of *x*.

    Classes, in order: [plain number, fizz, buzz, fizzbuzz].
    """
    by_three = x % 3 == 0
    by_five = x % 5 == 0
    if by_three and by_five:
        return np.array([0, 0, 0, 1])
    if by_five:
        return np.array([0, 0, 1, 0])
    if by_three:
        return np.array([0, 1, 0, 0])
    return np.array([1, 0, 0, 0])
def fizz_buzz(x: int, ind: np.ndarray) -> str:
    """Translate a one-hot solution vector back to a fizz-buzz string.

    Fix: the annotation was ``np.array``, which is a factory function, not a
    type — the correct array type is ``np.ndarray``.

    ``ind`` is a score vector over [number, fizz, buzz, fizzbuzz]; the argmax
    picks the class, so raw (non-one-hot) network outputs work as well.
    """
    labels = (str(x), "fizz", "buzz", "fizzbuzz")
    return labels[int(np.argmax(ind))]
# Sanity check: print the ground-truth fizz-buzz output for the first 100
# integers.  NOTE(review): range(100) starts at 0, and 0 maps to "fizzbuzz"
# here since 0 % 15 == 0 — confirm this is the intended convention.
for number in range(100):
    print(fizz_buzz(number, fizz_buzz_one_hot(number)))
# Show a few example binary encodings to illustrate the input representation.
for sample in (1, 2, 3, 4, 7, 10):
    print(binary_encoding(sample, NUM_DIGITS))
# Create the full dataset: binary-encoded inputs, one-hot fizz-buzz targets,
# one row per value in [0, NUM_VALUES).  dtype=float matches the original
# np.zeros-based construction.
X = np.array(
    [binary_encoding(value, NUM_DIGITS) for value in range(NUM_VALUES)],
    dtype=float,
)
y = np.array(
    [fizz_buzz_one_hot(value) for value in range(NUM_VALUES)],
    dtype=float,
)

# Hold out 30% of the data as a test set.
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
    X, y, test_size=0.3
)
# NOTE(review): the sklearn-classifier steps promised here (train, report
# accuracy, print the first 100 responses) were never implemented — only the
# Keras version below exists.
#
# Train a Tensorflow/Keras neural network classifier.  Keras is an interface
# to tensorflow; unlike sklearn's MLP, it allows a different activation
# function per layer.
#
# A neural network is a low-bias, very-high-variance architecture, so there
# is a real risk of overfitting.  Dropout randomly silences 20% of the
# preceding layer's units on each training batch, which tends to lift the
# test accuracy.
layers = [
    tf.keras.layers.Dense(100, activation="relu"),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(30),  # NOTE(review): no activation -> linear layer; confirm intended
    tf.keras.layers.Dense(4, activation="softmax"),
]
model = tf.keras.models.Sequential(layers)

# Configure training: one-hot targets, hence categorical cross-entropy.
model.compile(
    optimizer="adam",
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)

# Fit on the training split, monitoring the held-out split every epoch.
model.fit(X_train, y_train, epochs=400, batch_size=32, validation_data=(X_test, y_test))
# Print the network's answer for the first 100 numbers next to the ground
# truth.  Fix: the original f-string used square brackets,
# "[fizz_buzz(i, fizz_buzz_one_hot(i)):<10]", which printed that literal
# text instead of the left-padded ground-truth answer; it must be {...}.
for i in range(100):
    pred = model.predict(binary_encoding(i, NUM_DIGITS).reshape(1, -1))
    print(f"{fizz_buzz(i, fizz_buzz_one_hot(i)):<10} || {fizz_buzz(i, pred)}")