1. Environment
macOS 10.15.2
python 2.7.16
tensorflow 1.3.0
How to install tensorflow 1.3.0 with anaconda:
- Install the anaconda release that matches your python version
- Create a virtual environment for tensorflow:
conda create -n tensorflow python=2.7
- If "conda: command not found" is reported, first open the shell config with
vim ~/.zshrc
, then add the line export PATH=/Users/XXX/opt/anaconda2/bin:$PATH
, save and quit with :wq, and run source ~/.zshrc
to make the change take effect.
- Activate the tensorflow environment:
conda activate tensorflow
- Install tensorflow:
pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0-py2-none-any.whl
- Verify that the installation succeeded:
python
>>> import tensorflow as tf
>>> tf.__version__
'1.3.0'
- Before each use of tensorflow, run
source activate tensorflow
to activate the environment; when finished, run conda deactivate
to close it.
2. Design Notes
The MNIST dataset contains a 60,000-example training set (mnist.train) and a 10,000-example test set (mnist.test). The 28*28 pixels of each image are flattened into a one-dimensional array of length 784, which serves as the input feature vector. Labels are given as one-dimensional arrays in which each element is the probability that the image belongs to the corresponding class (one-hot encoding).
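A quick sanity check of these shapes (a minimal sketch; it assumes the same ./MNIST_data/ directory used in section 3, and note that read_data_sets holds back 5,000 of the 60,000 training images as mnist.validation):

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./MNIST_data/", one_hot=True)
print(mnist.train.images.shape)   # (55000, 784): flattened 28*28 pixels
print(mnist.train.labels.shape)   # (55000, 10): one-hot label per image
print(mnist.test.images.shape)    # (10000, 784)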
This project implements the classifier as a multilayer perceptron (MLP) with two hidden layers (784 → 100 → 100 → 10).
3. Code
#coding:utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Hidden layer sizes
LAYER1_NODE = 100
LAYER2_NODE = 100
# Number of training examples fed per step
BATCH_SIZE = 100
# Number of training steps
STEPS = 10000

def forward(x):
    # Input layer to first hidden layer
    w1 = tf.Variable(tf.truncated_normal(shape=[784, LAYER1_NODE], dtype=tf.float32, stddev=0.1))
    b1 = tf.Variable(tf.zeros([LAYER1_NODE]))
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    # First hidden layer to second hidden layer
    w2 = tf.Variable(tf.truncated_normal(shape=[LAYER1_NODE, LAYER2_NODE], dtype=tf.float32, stddev=0.1))
    b2 = tf.Variable(tf.zeros([LAYER2_NODE]))
    y2 = tf.nn.relu(tf.matmul(y1, w2) + b2)
    # Second hidden layer to output layer
    w3 = tf.Variable(tf.truncated_normal(shape=[LAYER2_NODE, 10], dtype=tf.float32, stddev=0.1))
    b3 = tf.Variable(tf.zeros([10]))
    y = tf.matmul(y2, w3) + b3
    return y

def backward(mnist):
    # Placeholders for the training images x and labels y_
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    # Call forward() to compute the predictions y
    y = forward(x)
    # Loss function: mean squared error
    loss = tf.reduce_mean(tf.square(y - y_))
    # Optimize the model with AdamOptimizer
    train = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    # Accuracy: fraction of predictions matching the labels
    z = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    acc = tf.reduce_mean(tf.cast(z, tf.float32))
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        # Feed BATCH_SIZE training examples per step, for STEPS iterations
        for i in range(STEPS):
            # Draw a batch from the training set
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value = sess.run([train, loss], feed_dict={x: xs, y_: ys})
            if i % 500 == 0:
                # Evaluate on a batch from the test set
                testx, testy = mnist.test.next_batch(BATCH_SIZE)
                # Fetch the accuracy
                accscore = sess.run(acc, feed_dict={x: testx, y_: testy})
                # Report progress
                print("After %s training step(s), loss: %g, test accuracy: %g" % (i, loss_value, accscore))

def main():
    # Load the MNIST dataset (downloads into ./MNIST_data/ on first run)
    mnist = input_data.read_data_sets("./MNIST_data/", one_hot=True)
    backward(mnist)

if __name__ == '__main__':
    main()
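Two possible refinements, neither part of the original code: for classification, softmax cross-entropy usually trains better than the squared-error loss used above, and accuracy is more trustworthy when measured on the full test set rather than a 100-example batch. A minimal sketch of both as drop-in replacements inside backward():

# Assumption: replaces the squared-error loss; y holds raw logits from forward()
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
# Assumption: evaluate on the complete 10,000-image test set instead of one batch
accscore = sess.run(acc, feed_dict={x: mnist.test.images, y_: mnist.test.labels})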