Visualizing the computation graph and loss function with TensorBoard (basics) #TensorFlow #Python
'''
Visualize the computation graph and loss function with TensorBoard.
Uses the MNIST handwritten digit dataset (http://yann.lecun.com/exdb/mnist/).
Requires TensorFlow 1.x (tf.placeholder and tensorflow.examples.tutorials
were removed in TensorFlow 2.x).
'''
from __future__ import print_function
import tensorflow as tf
# Import the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
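# Optional sanity check (not in the original gist): the loader yields
# flattened 28*28 images and one-hot labels, matching the placeholders below.
print("Train images:", mnist.train.images.shape)  # (55000, 784)
print("Train labels:", mnist.train.labels.shape)  # (55000, 10)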
# Hyperparameters
learning_rate = 0.01  # step size for gradient descent
training_epochs = 25  # number of epochs (one epoch = one full pass over the training set)
batch_size = 100      # number of examples per mini-batch
display_epoch = 1     # print a log line every display_epoch epochs
logs_path = './logs/' # directory for TensorBoard event files
# Graph inputs
# Each MNIST image is 28*28 = 784 pixels, flattened into a vector
x = tf.placeholder(tf.float32, [None, 784], name='InputData')
# Digits 0-9 => 10 classes, one-hot encoded
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
# Initialize the model weights and bias
W = tf.Variable(tf.zeros([784, 10]), name='Weights')
b = tf.Variable(tf.zeros([10]), name='Bias')
# Build the model, wrapping each group of ops in a name scope
# so the graph stays readable in TensorBoard
with tf.name_scope('Model'):
    # Softmax regression: [None, 784] x [784, 10] + [10] -> [None, 10] class probabilities
    pred = tf.nn.softmax(tf.matmul(x, W) + b)
with tf.name_scope('Loss'):
    # Cross-entropy loss averaged over the batch:
    # cost = mean(-sum_j y_j * log(pred_j))
    cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
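# Note (not in the original gist): tf.log(pred) returns NaN once a predicted
# probability underflows to 0. A numerically stable equivalent keeps the raw
# logits and lets TensorFlow fuse the softmax with the cross-entropy:
#   logits = tf.matmul(x, W) + b
#   cost = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))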
with tf.name_scope('SGD'):
    # Gradient descent on the cross-entropy loss
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
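# Alternative (assumption, not in the original gist): any tf.train optimizer
# can be dropped in here, e.g. Adam, which usually converges faster on MNIST:
#   optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)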
with tf.name_scope('Accuracy'):
    # Fraction of examples whose highest-probability class matches the label
    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))
# Op to initialize all variables
init = tf.global_variables_initializer()
# Create a summary to monitor the loss
tf.summary.scalar("loss", cost)
# Create a summary to monitor the accuracy
tf.summary.scalar("accuracy", acc)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
# Start training
with tf.Session() as sess:
    # Run the variable initializer
    sess.run(init)
    # Write TensorBoard event files (including the graph) to logs_path
    summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
    # Training loop
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all mini-batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run the optimizer (backprop), the cost op (to fetch the loss value)
            # and the summary op (to generate events for visualization)
            _, c, summary = sess.run([optimizer, cost, merged_summary_op],
                                     feed_dict={x: batch_xs, y: batch_ys})
            # Write the summaries to the event file at every step
            summary_writer.add_summary(summary, epoch * total_batch + i)
            # Accumulate the average loss over the epoch
            avg_cost += c / total_batch
        # Print a log line every display_epoch epochs
        if (epoch + 1) % display_epoch == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
# 测试模型
# 计算精确度
print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))
print("Run the command line:\n"
"--> tensorboard --logdir=./logs/ "
"\nThen open http://localhost:6006/ into your web browser")
'''
Sample output:
Accuracy: 0.9138
'''