I am trying to implement a version of the GoogLeNet Inception network, but I am getting 10% accuracy on the MNIST data set. This is worrying, because even a simple neural network should reach 97%+ accuracy on this data set. I am therefore fairly confident that I have not implemented the Inception module correctly. My code is included below.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
x = tf.placeholder(dtype = tf.float32, shape = [None,784])
y_ = tf.placeholder(dtype = tf.float32, shape = [None,10])
x_input = tf.reshape(x,[-1,28,28,1])
# 1x1 Convolution
W1x1 = tf.Variable(tf.random_normal([1,1,1,1]))
b1x1 = tf.Variable(tf.random_normal([1]))
output1x1 = tf.add(tf.nn.conv2d(x_input,W1x1, strides = [1,1,1,1], padding = 'SAME'),b1x1)
output1x1 = tf.nn.relu(output1x1)
# 5x5 Convolution
W5x5 = tf.Variable(tf.random_normal([5,5,1,1]))
b5x5 = tf.Variable(tf.random_normal([1]))
output5x5 = tf.add(tf.nn.conv2d(output1x1,W5x5, strides = [1,1,1,1], padding = 'SAME'),b5x5)
output5x5 = tf.nn.relu(output5x5)
# 3x3 Convolution
W3x3 = tf.Variable(tf.random_normal([3,3,1,1]))
b3x3 = tf.Variable(tf.random_normal([1]))
output3x3 = tf.add(tf.nn.conv2d(output1x1,W3x3, strides = [1,1,1,1], padding = 'SAME'),b3x3)
output3x3 = tf.nn.relu(output3x3)
# AveragePooling followed by 1x1 convolution
outputPool = tf.nn.avg_pool(output1x1, ksize = [1,2,2,1], strides = [1,1,1,1], padding = "SAME")
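# (stride 1 with SAME padding keeps the 28x28 spatial size, so this pooled
# branch can be concatenated with the convolution branches below)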
Wo1x1 = tf.Variable(tf.random_normal([1,1,1,1]))
bo1x1 = tf.Variable(tf.random_normal([1]))
outputo1x1 = tf.add(tf.nn.conv2d(outputPool,Wo1x1, strides = [1,1,1,1], padding = 'SAME'),bo1x1)
outputo1x1 = tf.nn.relu(outputo1x1)
# Concatenate the 4 branch outputs along the channel axis
final_output = tf.concat([output1x1, output5x5, output3x3, outputo1x1], 3)
final_output = tf.reshape(final_output, [-1, 7*7*64])
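# Each branch yields a single 28x28 feature map, so the concatenated tensor has
# shape [batch, 28, 28, 4]; 28*28*4 = 3136 = 7*7*64, which is why this reshape works.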
#Add a fully connected layer
W_fc = tf.Variable(tf.random_normal([7*7*64,1024]))
b_fc = tf.Variable(tf.random_normal([1024]))
output_fc = tf.add(tf.matmul(final_output, W_fc), b_fc)
output_fc = tf.nn.relu(output_fc)
output_fc = tf.nn.dropout(output_fc, keep_prob = 0.85)
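# Note: keep_prob is hard-coded, so dropout also stays active when accuracy is
# evaluated below; a placeholder fed 1.0 at test time is the usual fix.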
#Final layer
W_final = tf.Variable(tf.random_normal([1024,10]))
b_final = tf.Variable(tf.random_normal([10]))
predictions = tf.add(tf.matmul(output_fc,W_final), b_final)
# Train the model
cost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels = y_ ,logits = predictions))
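# (tf.reduce_sum sums the per-example losses, so the loss magnitude scales with
# batch size; tf.reduce_mean is the more common reduction)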
optimiser = tf.train.AdamOptimizer(1e-3).minimize(cost)
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1]})
            print('step %d, training accuracy %g' % (i, train_accuracy))
        optimiser.run(feed_dict={x: batch[0], y_: batch[1]})
    print('test accuracy %g' % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
Best answer
The problem is the weight initialization. Weights initialized with tf.random_normal() have a standard deviation of 1, which is too large; reducing it should solve the problem.
Change the weight initialization to:
W** = tf.Variable(tf.random_normal(..., stddev=0.01))
b** = tf.Variable(tf.random_normal(..., stddev=0.001))
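For instance, applied to the first 1x1 branch and the fully connected layer from the question (a minimal sketch; the shapes come from the code above, and the stddev values follow the pattern suggested here):

W1x1 = tf.Variable(tf.random_normal([1,1,1,1], stddev=0.01))
b1x1 = tf.Variable(tf.random_normal([1], stddev=0.001))
W_fc = tf.Variable(tf.random_normal([7*7*64,1024], stddev=0.01))
b_fc = tf.Variable(tf.random_normal([1024], stddev=0.001))

Applying the same change to every W** and b** variable keeps the pre-activation values small early in training, so the softmax does not saturate and gradients can flow. tf.truncated_normal(shape, stddev=0.01) is a common TensorFlow 1.x alternative that additionally re-draws any sample more than two standard deviations from the mean.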
Regarding "python - Poor results with TensorFlow GoogLeNet Inception", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/45420926/