tensorkeras1.py
import numpy as np
import pandas as pd
from time import time
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, LSTM
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.callbacks import TensorBoard
pd.set_option('display.max_colwidth', 300)
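# Load the dataset; 'mixed.csv' is assumed to be a semicolon-separated file
# with 'text' and 'sentiment' columns, as used below.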
data = pd.read_csv('mixed.csv', sep=';')
print(data.sample(20))  # preview 20 random rows
X = data["text"]
y = data["sentiment"]
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=123)
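# Tokenize the text: build a vocabulary from the training set, keep only the
# num_words most frequent words, and map each text to a sequence of integer word ids.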
num_words = 10000
tokenizer = Tokenizer(num_words=num_words)
tokenizer.fit_on_texts(x_train)
x_train_tokens = tokenizer.texts_to_sequences(x_train)
x_test_tokens = tokenizer.texts_to_sequences(x_test)
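# Pick a common sequence length: mean + 2 standard deviations of the token
# counts, which covers the large majority of the texts.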
num_tokens = [len(tokens) for tokens in x_train_tokens + x_test_tokens]
num_tokens = np.array(num_tokens)
max_tokens = np.mean(num_tokens) + 2 * np.std(num_tokens)
max_tokens = int(max_tokens)
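# Pad shorter sequences and truncate longer ones at the front ('pre') so every
# input has length max_tokens.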
pad = 'pre'
x_train_pad = pad_sequences(x_train_tokens, maxlen=max_tokens,
                            padding=pad, truncating=pad)
x_test_pad = pad_sequences(x_test_tokens, maxlen=max_tokens,
                           padding=pad, truncating=pad)
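# Model: an 8-dimensional word embedding followed by three Conv1D/MaxPooling1D
# blocks, a Flatten layer and a 3-class softmax. Note that each Conv1D with
# kernel size 5 shortens the sequence by 4 and each MaxPooling1D(5) divides it
# by 5, so max_tokens must be fairly large (roughly 150 tokens) for all three
# blocks to produce non-empty outputs.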
model = Sequential()
embedding_size = 8
model.add(Embedding(input_dim=num_words,
                    output_dim=embedding_size,
                    input_length=max_tokens,
                    name='layer_embedding'))
#model.add(LSTM(units=128))
model.add(layers.Conv1D(128, 5, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(64, 5, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Flatten())
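# Three-class softmax output. The sparse_categorical_crossentropy loss used
# below assumes the 'sentiment' labels are integer class ids (0, 1, 2).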
model.add(Dense(3, activation='softmax'))
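# Write training metrics to a time-stamped log directory for TensorBoard.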
tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
optimizer = Adam(learning_rate=0.008)
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
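# Train for 10 epochs, holding out 5% of the training data for validation.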
model.fit(x_train_pad, y_train, validation_split=0.05, epochs=10, batch_size=5000, callbacks=[tensorboard])
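# Evaluate on the held-out test set; evaluate() returns [loss, accuracy]
# for the metrics configured above.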
result = model.evaluate(x_test_pad, y_test, batch_size=4096)
print(result)
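# Save the trained model (architecture, weights and optimizer state) in HDF5 format.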
model.save('modelwithres_test.h5')