I'm new to TensorFlow and machine learning, but I'm trying to model a DNN with an embedding layer in front of it. For some reason my cost and accuracy keep coming back as a repeating sine-wave pattern. I suspect there's a problem with my code, so here goes:
Here are my model and training routine:
import math
import tensorflow as tf

def neural_network_model(x):
    # Embedding matrix: one embedding_size-dimensional vector per vocabulary word.
    W = tf.Variable(
        tf.truncated_normal([vocab_size, embedding_size],
                            stddev=1 / math.sqrt(vocab_size)),
        name="W")
    embedded = tf.nn.embedding_lookup(W, x)
    # Collapse the sequence dimension: sum the embeddings of all words in each example.
    embedding_aggregated = tf.reduce_sum(embedded, [1])

    hidden_1_layer = {
        'weights': tf.Variable(tf.random_normal([embedding_size, n_nodes_hl1])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))
    }
    hidden_2_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))
    }
    hidden_3_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))
    }
    output_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }

    # Three fully connected ReLU layers followed by a linear output layer.
    l1 = tf.matmul(embedding_aggregated, hidden_1_layer['weights']) + hidden_1_layer['biases']
    l1 = tf.nn.relu(l1)
    l2 = tf.matmul(l1, hidden_2_layer['weights']) + hidden_2_layer['biases']
    l2 = tf.nn.relu(l2)
    l3 = tf.matmul(l2, hidden_3_layer['weights']) + hidden_3_layer['biases']
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output
def train_neural_network(x_batch, y_batch, test_x, test_y):
    global_step = tf.Variable(0, trainable=False, name='global_step')
    logits = neural_network_model(x_batch)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y_batch))
    tf.scalar_summary('cost', cost)
    optimizer = tf.train.AdagradOptimizer(0.01).minimize(cost, global_step=global_step)

    # NOTE: this second call builds a *new* set of variables (see the answer below).
    test_logits = neural_network_model(test_x)
    prediction = tf.nn.softmax(test_logits)
    correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(test_y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    tf.scalar_summary('accuracy', accuracy)

    merged = tf.merge_all_summaries()
    saver = tf.train.Saver()
    model_dir = "model_embedding"
    latest_checkpoint = tf.train.latest_checkpoint(model_dir)

    with tf.Session() as sess:
        train_writer = tf.train.SummaryWriter(model_dir + "/eval", sess.graph)
        if latest_checkpoint is not None:
            print("Restoring: ", latest_checkpoint)
            saver.restore(sess, latest_checkpoint)
        else:
            print("Nothing to restore")
            sess.run(tf.initialize_all_variables())

        # Input pipeline threads: queue runners feed x_batch / y_batch.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            epoch = 1
            while not coord.should_stop():
                epoch_loss = 0
                _, c, summary = sess.run([optimizer, cost, merged])
                # embd = sess.run(emb)
                # for idx in range(xb.size):
                #     print(xb[idx])
                #     print(yb[idx])
                train_writer.add_summary(summary, global_step=global_step.eval())
                epoch_loss += c
                print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
                print("Global step: ", global_step.eval())
                print('Accuracy:', accuracy.eval())
                # saver.save(sess, model_dir + '/model.ckpt', global_step=global_step)  # keeps the last 5 checkpoints by default
                epoch += 1
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
            coord.join(threads)
        sess.close()
My data is a batch of word integer IDs, uniformly padded to a length of 2056 by appending padding tokens at the end, so many of my tensors end with a run of the value vocab_size (the padding ID) to fill them out to 2056.
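In other words, the padding works roughly like this (an illustrative sketch; the vocab_size value and names here are made up, not from my real pipeline):

import tensorflow as tf  # not needed for the sketch itself, shown for context

vocab_size = 10000       # illustrative; the real value comes from my dataset
max_len = 2056
pad_id = vocab_size      # the padding token's ID is vocab_size itself

def pad_example(word_ids):
    # Right-pad the list of word IDs with pad_id until it is max_len long.
    return word_ids + [pad_id] * (max_len - len(word_ids))

padded = pad_example([12, 7, 99])   # -> [12, 7, 99, 10000, 10000, ...]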
Is there anything obviously wrong with my code?
Best answer
For anyone who runs into the same problem:
My mistake was re-using the neural_network_model() function, which created a brand-new set of variables for the test graph. The answer lies in reading up on how to share variables; TF has a good page, Sharing Variables, that describes this.
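Here is a minimal sketch of the fix, in the same pre-1.0 TensorFlow API as the code above (sizes and placeholder names are illustrative): build the model with tf.get_variable inside a variable scope, and reuse the scope on the second call so the test graph shares the training weights instead of getting a fresh, untrained copy.

import math
import tensorflow as tf

# Illustrative sizes, not the real hyperparameters.
vocab_size, embedding_size, n_hidden, n_classes = 10000, 128, 500, 2

def shared_model(x, reuse=False):
    # With reuse=True, tf.get_variable returns the variables created on the
    # first call instead of allocating a second, independent set -- which is
    # exactly what tf.Variable did in the original code.
    with tf.variable_scope("dnn", reuse=reuse):
        W = tf.get_variable(
            "embedding", [vocab_size, embedding_size],
            initializer=tf.truncated_normal_initializer(stddev=1 / math.sqrt(vocab_size)))
        aggregated = tf.reduce_sum(tf.nn.embedding_lookup(W, x), [1])
        w1 = tf.get_variable("w1", [embedding_size, n_hidden])
        b1 = tf.get_variable("b1", [n_hidden])
        l1 = tf.nn.relu(tf.matmul(aggregated, w1) + b1)
        w_out = tf.get_variable("w_out", [n_hidden, n_classes])
        b_out = tf.get_variable("b_out", [n_classes])
        return tf.matmul(l1, w_out) + b_out

x_train = tf.placeholder(tf.int32, [None, 2056])
x_test = tf.placeholder(tf.int32, [None, 2056])
logits = shared_model(x_train)                  # first call creates the variables
test_logits = shared_model(x_test, reuse=True)  # second call reuses the same variables

With this, accuracy is evaluated with the weights actually being trained, and the oscillating cost/accuracy from two disjoint variable sets goes away.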
Original question on Stack Overflow: machine-learning - DNN with embedding layer returning sine wave cost/accuracy: https://stackoverflow.com/questions/39735874/