-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpokemon.py
More file actions
343 lines (286 loc) · 13.7 KB
/
pokemon.py
File metadata and controls
343 lines (286 loc) · 13.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
'''
Description: Creating a machine learning model that will predict what type the pokemon is based
on the given stats and body style.
Authors: Isaac Garay and Riley Peters
Links: https://www.kaggle.com/datasets/alopez247/pokemon
https://bulbapedia.bulbagarden.net/wiki/Stat
https://www.graphviz.org/download/
'''
import numpy as np
import argparse
import os
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Input, Dense
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from sklearn.tree import DecisionTreeClassifier, plot_tree
from wordcloud import WordCloud
from sklearn.metrics import confusion_matrix
import pdb
# Root directory of this code (the directory containing this file).
ROOT = os.path.dirname(os.path.abspath(__file__))

# Fraction of the dataset used for training; the remainder is held out for testing.
TRAIN_SPLIT = 0.9

# NOTE(review): these module-level placeholders are shadowed by locals of the
# same names inside the model functions and are never read at module level;
# kept so any external code importing them keeps working.
XTRAIN = np.zeros(1)
YTRAIN = np.zeros(1)
XTEST = np.zeros(1)
YTEST = np.zeros(1)

parser = argparse.ArgumentParser(description="Train a neural network or decision tree to classify which type a pokemon is based on their stats")
# Raw string: the example path contains '\p', an invalid escape sequence that
# triggers a SyntaxWarning on Python 3.12+ in a normal string literal.
parser.add_argument('-model', '--model',
                    help=r'chooses to train either the neural network (nn) or the decision tree (dt) model - Example: python .\pokemon.py -model nn')
def main(args):
    """Load the dataset and run the model selected on the command line.

    Args:
        args: Parsed argparse namespace; ``args.model`` chooses 'nn' or 'dt'.
    """
    # The CSV is loaded entirely as strings because several columns are text
    # fields; numeric columns are converted downstream by each model function.
    csv_path = os.path.expanduser(os.path.join(ROOT, 'data.csv'))
    data = np.loadtxt(csv_path, dtype=str, delimiter=",")

    # Dispatch on the chosen model; anything unrecognized gets usage help.
    dispatch = {'nn': neural_network, 'dt': decision_tree}
    model_fn = dispatch.get(args.model)
    if model_fn is None:
        print("Please run the model with either:\n -model nn\n -model dt")
        return
    model_fn(data)
def decision_tree(data):
    """Train a decision tree to predict a pokemon's type from its stats and body style.

    Prints training/testing accuracy and shows a per-type stacked bar graph of
    correct (type-colored) vs. incorrect (red) test guesses, plus a rendering
    of the fitted tree.

    Args:
        data: 2-D string array loaded from data.csv; columns 5:11 are the six
            base stats (hp, atk, def, spatk, spdef, speed), column 2 is the
            primary type, column 22 the body style.
    """
    # Extracting useful stats (hp, atk, def, spatk, spdef, speed)
    stats = data[:, 5:11].astype(np.int32)
    # Primary type of each pokemon (string labels)
    types = data[:, 2]
    # Body style, encoded as an integer id per unique style
    body_style = data[:, 22]
    _, uniquebodies = np.unique(body_style, return_inverse=True)
    # Combine stats and the encoded body style into one 7-feature input array
    stats = np.hstack((stats, uniquebodies[:, None]))
    # Map each type name to an integer class id
    type_names, uniquetypes = np.unique(types, return_inverse=True)
    num_types = len(type_names)

    # Splitting the data into a training and a testing set
    xtrain, ytrain, xtest, ytest = splitData(stats, uniquetypes, TRAIN_SPLIT, True)

    # Creating and fitting the decision tree
    myTree = DecisionTreeClassifier(criterion='entropy')
    myTree = myTree.fit(xtrain, ytrain)

    # Predict on both splits so training and testing accuracy can be compared
    testPrediction = myTree.predict(xtest)
    trainPrediction = myTree.predict(xtrain)

    # Passing labels= forces both matrices to a fixed num_types x num_types
    # shape. This replaces the previous manual row/column insertion for classes
    # absent from the test split — that loop indexed labels[index - offset] and
    # could raise IndexError when the missing class ids trailed the observed
    # ones, and it never patched the training matrix at all.
    all_labels = np.arange(num_types)
    test = confusion_matrix(ytest, testPrediction, labels=all_labels)
    train = confusion_matrix(ytrain, trainPrediction, labels=all_labels)

    # Collapse each confusion matrix to per-class [correct, error] counts:
    # the diagonal is the correct guesses, the rest of the row is the errors.
    testOutcome = np.stack((np.diag(test), test.sum(axis=1) - np.diag(test)), axis=1)
    trainOutcome = np.stack((np.diag(train), train.sum(axis=1) - np.diag(train)), axis=1)

    # Compare training and test accuracy
    testAccuracy = np.sum(testOutcome[:, 0]) / np.sum(testOutcome)
    trainAccuracy = np.sum(trainOutcome[:, 0]) / np.sum(trainOutcome)
    print("Training Accuracy: " + str(trainAccuracy))
    print("Testing Accuracy: " + str(testAccuracy))

    # Stacked bar graph: correct guesses use the official per-type colors,
    # incorrect guesses are stacked on top in red.
    x = np.arange(num_types)
    correct = testOutcome[:, 0]
    incorrect = testOutcome[:, 1]
    fig, ax = plt.subplots()
    colors = ['#A6B91A', '#705746', '#6F35FC', '#F7D02C', '#D685AD', '#C22E28', '#EE8130', '#A98FF3', '#735797', '#7AC74C', '#E2BF65', '#96D9D6', '#A8A77A', '#A33EA1', '#F95587', '#B6A136', '#B7B7CE', '#6390F0']
    ax.bar(x, correct, width=1, align='edge', edgecolor='white', linewidth=0.7, color=colors)
    ax.bar(x, incorrect, width=1, bottom=correct, align='edge', edgecolor='white', linewidth=0.7, color='r')
    plt.xticks(np.arange(num_types), type_names, color='black', rotation=60, fontsize='12', horizontalalignment='center')
    plt.xlabel('Pokemon Types', fontweight='bold', color='black', fontsize='14', horizontalalignment='right')
    plt.ylabel('Number of Guesses Per Type', fontweight='bold', color='black', fontsize='14', horizontalalignment='center')
    # Scale the y-axis to the tallest stacked bar
    ylim = int(np.max(testOutcome.sum(axis=1)))
    ax.set(xlim=(0, num_types + 1), xticks=np.arange(0, num_types), ylim=(0, ylim + 1), yticks=np.arange(0, num_types))

    # Graphing the decision tree itself (very dense at this size)
    plt.figure()
    plot_tree(myTree, class_names=type_names, filled=True, rounded=True, fontsize=2)
    plt.show()
def neural_network(data):
    """Train a feed-forward network to predict a pokemon's type from stats and body style.

    Trains a 4-hidden-layer softmax classifier, evaluates it on a held-out
    split, then shows a per-type bar graph of correct/incorrect guesses and
    word clouds of the guessed vs. actual type names.

    Args:
        data: 2-D string array loaded from data.csv; columns 5:11 are the six
            base stats, column 2 is the primary type, column 22 the body style.
    """
    # Extracting useful stats (hp, atk, def, spatk, spdef, speed)
    stats = data[:, 5:11].astype(np.int32)
    # Primary type of each pokemon (string labels)
    types = data[:, 2]
    # Body style, encoded as an integer id per unique style
    body_style = data[:, 22]
    _, uniquebodies = np.unique(body_style, return_inverse=True)
    # Combine stats and the encoded body style into one 7-feature input array
    stats = np.hstack((stats, uniquebodies[:, None]))
    # Map each type name to an integer class id, then one-hot encode the ids
    # so the softmax output layer has a target vector per example.
    type_names, uniquetypes = np.unique(types, return_inverse=True)
    maxt = np.max(uniquetypes) + 1
    onehot = np.eye(maxt)[uniquetypes]

    # Splitting the data into a training and a testing set
    xtrain, ytrain, xtest, ytest = splitData(stats, onehot, TRAIN_SPLIT, True)

    # Creating the neural network: 7 inputs -> 100/50/50/25 swish -> 18 softmax
    model = Sequential()
    model.add(Input(shape=(7,)))
    model.add(Dense(units=100, activation='swish', name='hidden1'))
    model.add(Dense(units=50, activation='swish', name='hidden2'))
    model.add(Dense(units=50, activation='swish', name='hidden3'))
    model.add(Dense(units=25, activation='swish', name='hidden4'))
    model.add(Dense(units=18, activation='softmax', name='output'))  # softmax good when you have a neuron for each output
    model.summary()

    """This is strictly for generating the .png file that shows what our model looks like
    Uncomment the line below if you wish to update the .png if you changed the layout
    Of the model. Note: You have to have pydot installed AND graphviz installed
    The graphviz download link is in the Links section at the top of the file"""
    #plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)

    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
    # Stop training if accuracy hasn't improved for 30 epochs
    es = EarlyStopping(monitor='accuracy', mode='max', verbose=1, patience=30)
    # If you wanted to save the model this would allow you to do so
    # mcp_save = ModelCheckpoint('.saved_model.hdf5', save_best_only=True, monitor='accuracy', mode='max')
    # Cut the learning rate by 10x after 10 stagnant epochs
    reduce_lr_loss = ReduceLROnPlateau(monitor='accuracy', factor=0.1, patience=10, verbose=1, mode='max')

    # Train
    history = model.fit(xtrain, ytrain, epochs=2000, batch_size=10, verbose=1, validation_split=0.1, callbacks=[es, reduce_lr_loss])
    # Test
    metrics = model.evaluate(xtest, ytest, verbose=1)

    # y holds the predicted class ids; actual_index the true class ids
    # (column index of the 1 in each one-hot row).
    y = np.argmax(model.predict(xtest, verbose=0), axis=1)
    _, actual_index = np.where(ytest)

    # Convert class ids back to type-name strings for the word clouds
    guessed_types = [type_names[i] for i in y]
    actual_types = [type_names[i] for i in actual_index]
    guessed_text = " ".join(guessed_types)
    actual_text = " ".join(actual_types)

    # Passing labels= forces a fixed maxt x maxt confusion matrix. This
    # replaces the previous manual row/column insertion for classes absent
    # from the test split — that loop indexed labels[index - offset] and could
    # raise IndexError when the missing class ids trailed the observed ones.
    test = confusion_matrix(actual_index, y, labels=np.arange(maxt))

    # Collapse the confusion matrix to per-class [correct, error] counts
    testOutcome = np.stack((np.diag(test), test.sum(axis=1) - np.diag(test)), axis=1)

    # Stacked bar graph: correct guesses use the official per-type colors,
    # incorrect guesses are stacked on top in red.
    num_types = len(type_names)
    x = np.arange(num_types)
    correct = testOutcome[:, 0]
    incorrect = testOutcome[:, 1]
    fig, ax = plt.subplots()
    colors = ['#A6B91A', '#705746', '#6F35FC', '#F7D02C', '#D685AD', '#C22E28', '#EE8130', '#A98FF3', '#735797', '#7AC74C', '#E2BF65', '#96D9D6', '#A8A77A', '#A33EA1', '#F95587', '#B6A136', '#B7B7CE', '#6390F0']
    ax.bar(x, correct, width=1, align='edge', edgecolor='white', linewidth=0.7, color=colors)
    ax.bar(x, incorrect, width=1, bottom=correct, align='edge', edgecolor='white', linewidth=0.7, color='r')
    plt.xticks(np.arange(num_types), type_names, color='black', rotation=60, fontsize='12', horizontalalignment='center')
    plt.xlabel('Pokemon Types', fontweight='bold', color='black', fontsize='14', horizontalalignment='right')
    plt.ylabel('Number of Guesses Per Type', fontweight='bold', color='black', fontsize='14', horizontalalignment='center')
    # Scale the y-axis to the tallest stacked bar
    ylim = int(np.max(testOutcome.sum(axis=1)))
    ax.set(xlim=(0, num_types + 1), xticks=np.arange(0, num_types), ylim=(0, ylim + 1), yticks=np.arange(0, num_types))

    # Word cloud for the guessed types
    plt.figure()
    guessed_wordcloud = WordCloud(background_color='white').generate(guessed_text)
    plt.imshow(guessed_wordcloud, interpolation='bilinear')
    plt.axis(False)
    plt.title("Neural Network Testing Prediction")

    # Word cloud for the actual types
    plt.figure()
    actual_wordcloud = WordCloud(background_color='white').generate(actual_text)
    plt.imshow(actual_wordcloud, interpolation='bilinear')
    plt.axis(False)
    plt.title("Neural Network Testing Actual")

    # Display the model diagram only if it has already been generated — the
    # plot_model call above is commented out, so reading the file
    # unconditionally raised FileNotFoundError when the .png was absent.
    if os.path.exists('model_plot.png'):
        plt.figure()
        plt.imshow(plt.imread('model_plot.png'))
        plt.axis(False)
        plt.title('Visual Representation of the Neural Network Layers')
    plt.show()
def splitData(stats, onehot, train_split, shuffle):
    """Split feature and label arrays into training and testing sets.

    Args:
        stats: 2-D array-like of input features, one row per example.
        onehot: Array-like of labels aligned row-for-row with ``stats``
            (one-hot rows or integer class ids).
        train_split: Fraction (0-1) of examples assigned to the training set.
        shuffle: When True, examples are assigned to the splits at random;
            otherwise the first ``train_split`` fraction becomes the training
            set in original order.

    Returns:
        Tuple ``(XTRAIN, YTRAIN, XTEST, YTEST)`` of numpy arrays containing
        the split feature and label data (not index arrays).
    """
    # Number of examples that go into the training set
    trainNum = int(len(stats) * train_split)
    indexes = np.arange(len(stats))
    # Shuffle the candidate indexes when requested for a varied training set
    if shuffle:
        np.random.shuffle(indexes)
    training_indexes = indexes[:trainNum]
    testing_indexes = indexes[trainNum:]
    # NumPy fancy indexing selects all rows at once, replacing the previous
    # per-index append loops and list-to-array conversions.
    stats = np.asarray(stats)
    onehot = np.asarray(onehot)
    xtrain = stats[training_indexes]
    ytrain = onehot[training_indexes]
    xtest = stats[testing_indexes]
    ytest = onehot[testing_indexes]
    return xtrain, ytrain, xtest, ytest
# Script entry point: parse the command-line arguments and dispatch to the
# chosen model (nn or dt) via main().
if __name__ == "__main__":
    main(parser.parse_args())