import tensorflow as tf
import numpy as np
def weight(shape):
    # Weights drawn from a truncated normal to break symmetry
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias(shape):
    # Small positive bias so ReLU units start active
    return tf.Variable(tf.constant(0.1, shape=shape))

def output(input, w, b):
    # Affine layer: input @ w + b
    return tf.matmul(input, w) + b
x_columns = 33
y_columns = 1
layer1_num = 7
layer2_num = 7
epoch_num = 10
train_num = 1000
batch_size = 100
display_size = 1
x = tf.placeholder(tf.float32,[None,x_columns])
y = tf.placeholder(tf.float32,[None,y_columns])
layer1 = tf.nn.relu(output(x, weight([x_columns, layer1_num]), bias([layer1_num])))
layer2 = tf.nn.relu(output(layer1, weight([layer1_num, layer2_num]), bias([layer2_num])))
prediction = output(layer2,weight([layer2_num,y_columns]),bias([y_columns]))
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
train_step = tf.train.AdamOptimizer().minimize(loss)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
for epoch in range(epoch_num):
    avg_loss = 0.
    for i in range(train_num):
        # x_train / y_train are assumed to be loaded earlier as numpy arrays
        index = np.random.choice(len(x_train), batch_size)
        x_train_batch = x_train[index]
        y_train_batch = y_train[index]
        _, c = sess.run([train_step, loss],
                        feed_dict={x: x_train_batch, y: y_train_batch})
        avg_loss += c / train_num
    if epoch % display_size == 0:
        print("Epoch:{0},Loss:{1}".format(epoch + 1, avg_loss))
print("Training Finished")
My model outputs:

Epoch:2,Loss:0.0
Epoch:3,Loss:0.0
Epoch:4,Loss:0.0
Epoch:5,Loss:0.0
Epoch:6,Loss:0.0
Epoch:7,Loss:0.0
Epoch:8,Loss:0.0
Epoch:9,Loss:0.0
Epoch:10,Loss:0.0
Training Finished

How can I fix this?
Best Answer
softmax_cross_entropy_with_logits expects the labels in one-hot format, i.e. with shape [batch_size, num_classes] (for example, class 2 out of 4 classes is encoded as [0, 0, 1, 0]). Here you have y_columns = 1, which means there is only one class, so from the network's point of view it is necessarily both the predicted class and the "ground truth" every time; your output is always "correct" no matter what the weights are. Hence loss = 0.
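You can check this with a quick numpy sketch (the single logit value below is an arbitrary stand-in for whatever one-unit output the network produces):

import numpy as np

logit = np.array([3.7])                  # any single-logit output
p = np.exp(logit) / np.exp(logit).sum()  # softmax over a single class is always [1.0]
assert p[0] == 1.0 and -np.log(p[0]) == 0.0  # cross-entropy with the only possible label: zero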
I suspect that you actually have more than one class and that y_train contains the IDs of the labels. In that case prediction should have shape [batch_size, num_classes], and instead of softmax_cross_entropy_with_logits you should use tf.nn.sparse_softmax_cross_entropy_with_logits.
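A minimal sketch of that fix, assuming (hypothetically) five classes and a y_train holding integer class IDs; only the label placeholder, the last layer, and the loss change:

num_classes = 5  # assumption: replace with your real number of classes
y = tf.placeholder(tf.int64, [None])  # integer class IDs, shape [batch_size]
prediction = output(layer2, weight([layer2_num, num_classes]), bias([num_classes]))
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=prediction))

When feeding each batch, y_train_batch must then be a flat 1-D integer vector (e.g. y_train_batch.reshape(-1) if it is stored as a column).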