This repository was archived by the owner on Dec 24, 2025. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathae.py
More file actions
47 lines (32 loc) · 1.34 KB
/
ae.py
File metadata and controls
47 lines (32 loc) · 1.34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
# https://gist.github.com/saliksyed/593c950ba1a3b9dd08d5
from time import time
from glob import glob
import tensorflow as tf
from clustering.autoencoder import build_autoencoder
from clustering.import_stage import build_import_stage
def deep_test(log_dir='log', learning_rate=0.001, report_interval=10.0):
    """Train the autoencoder on local TFRecord data, printing loss periodically.

    Builds a TF1-style graph: reads gzipped TFRecord batches from ``data/``,
    flattens each 32x32x3 image, feeds it through a [1024, 512, 256, 2]
    autoencoder, and minimizes its loss with Adam under a
    ``tf.train.Supervisor`` (checkpoints and summaries go to *log_dir*).

    Args:
        log_dir: Directory for Supervisor checkpoints and summaries.
        learning_rate: Adam optimizer learning rate.
        report_interval: Minimum seconds between loss printouts.
    """
    with tf.Graph().as_default() as graph:
        files = glob('data/*.tfrecord.gz')
        input_batch = build_import_stage(files)
        # Flatten each 32x32 RGB image into a 3072-dimensional vector.
        x = tf.reshape(input_batch, (-1, 32*32*3), name='reshaped_inputs')
        ae = build_autoencoder(x, [1024, 512, 256, 2])
        global_step = tf.Variable(initial_value=0, trainable=False, name='global_step')
        with tf.name_scope('training'):
            train_step = tf.train.AdamOptimizer(learning_rate).minimize(ae.loss, global_step=global_step)
        tf.summary.scalar('loss', ae.loss)
        sv = tf.train.Supervisor(graph=graph, logdir=log_dir)
        with sv.managed_session() as sess:
            start_time = time()
            # Report the loss once before any training steps run; the
            # Supervisor may restore global_step from a checkpoint, so it
            # need not start at zero.
            loss, step = sess.run([ae.loss, global_step])
            print('%5i: loss %f' % (step, loss))
            # Train until the Supervisor signals a stop, printing at most
            # once per report_interval seconds.
            while not sv.should_stop():
                _, loss, step = sess.run([train_step, ae.loss, global_step])
                if time() - start_time >= report_interval:
                    start_time = time()
                    print('%5i: loss %f' % (step, loss))
# Script entry point: run the autoencoder training loop with default settings.
# (A commented-out call to an undefined `simple_test` was removed as dead code.)
if __name__ == '__main__':
    deep_test()