Problem description
Here is the code that I am using. I'm trying to get a 1, a 0, or ideally a probability as the result for a real test set. When I just split up the training set and run it on the training portion I get a ~93% accuracy rate, but when I train the program and run it on the actual test set (the one without the 1's and 0's filling in column 1) it returns nothing but NaNs.
import tensorflow as tf
import numpy as np
from numpy import genfromtxt
import sklearn
# Convert to one hot
def convertOneHot(data):
    y = np.array([int(i[0]) for i in data])
    y_onehot = [0]*len(y)
    for i, j in enumerate(y):
        y_onehot[i] = [0]*(y.max() + 1)
        y_onehot[i][j] = 1
    return (y, y_onehot)
data = genfromtxt('cs-training.csv',delimiter=',') # Training data
test_data = genfromtxt('cs-test-actual.csv',delimiter=',') # Actual test data
#This part is to get rid of the nan's at the start of the actual test data
g = 0
for i in test_data:
    i[0] = 1
    test_data[g] = i
    g += 1
x_train=np.array([ i[1::] for i in data])
y_train,y_train_onehot = convertOneHot(data)
x_test=np.array([ i[1::] for i in test_data])
y_test,y_test_onehot = convertOneHot(test_data)
A=data.shape[1]-1 # Number of features, Note first is y
B=len(y_train_onehot[0])
tf_in = tf.placeholder("float", [None, A]) # Features
tf_weight = tf.Variable(tf.zeros([A,B]))
tf_bias = tf.Variable(tf.zeros([B]))
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)
# Training via backpropagation
tf_softmax_correct = tf.placeholder("float", [None,B])
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))
# Train using tf.train.GradientDescentOptimizer
tf_train_step = tf.train.GradientDescentOptimizer(0.01).minimize(tf_cross_entropy)
# Add accuracy checking nodes
tf_correct_prediction = tf.equal(tf.argmax(tf_softmax,1), tf.argmax(tf_softmax_correct,1))
tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))
saver = tf.train.Saver([tf_weight,tf_bias])
# Initialize and run
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
print("...")
# Run the training
for i in range(1):
    sess.run(tf_train_step, feed_dict={tf_in: x_train, tf_softmax_correct: y_train_onehot})
    #print y_train_onehot
saver.save(sess, 'trained_csv_model')
ans = sess.run(tf_softmax, feed_dict={tf_in: x_test})
print ans
#Print accuracy
#result = sess.run(tf_accuracy, feed_dict={tf_in: x_test, tf_softmax_correct: y_test_onehot})
#print result
When I print ans, I get the following.
[[ nan nan]
[ nan nan]
[ nan nan]
...,
[ nan nan]
[ nan nan]
[ nan nan]]
I don't know what I'm doing wrong here. All I want is for ans to yield a 1, a 0, or especially an array of probabilities where every entry inside the array has a length of 2.
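For what it's worth, once ans does come back as real numbers instead of NaNs, I assume turning it into hard 0/1 predictions is just an argmax over each row. This is roughly what I mean (the small array is made up; it just stands in for the [N, 2] output of tf_softmax):

import numpy as np

# Made-up stand-in for the [N, 2] softmax output that sess.run(tf_softmax, ...) should return.
ans = np.array([[0.7, 0.3],
                [0.2, 0.8]])

predicted_labels = np.argmax(ans, axis=1)   # hard 0/1 prediction per row
prob_of_class_1 = ans[:, 1]                 # probability of class 1 per row

print(predicted_labels)   # e.g. [0 1]
print(prob_of_class_1)    # e.g. [0.3 0.8]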
I don't expect that many people are going to be able to answer this question for me, but please try at the very least. I'm stuck here waiting for a stroke of genius that hasn't come in two days now, so I figured I would ask. Thank you!
The test_data comes out looking like this:
[[ 1.00000000e+00 8.85519080e-01 4.30000000e+01 ..., 0.00000000e+00
0.00000000e+00 0.00000000e+00]
[ 1.00000000e+00 4.63295269e-01 5.70000000e+01 ..., 4.00000000e+00
0.00000000e+00 2.00000000e+00]
[ 1.00000000e+00 4.32750360e-02 5.90000000e+01 ..., 1.00000000e+00
0.00000000e+00 2.00000000e+00]
...,
[ 1.00000000e+00 8.15963730e-02 7.00000000e+01 ..., 0.00000000e+00
0.00000000e+00 nan]
[ 1.00000000e+00 3.35456547e-01 5.60000000e+01 ..., 2.00000000e+00
1.00000000e+00 3.00000000e+00]
[ 1.00000000e+00 4.41841663e-01 2.90000000e+01 ..., 0.00000000e+00
0.00000000e+00 0.00000000e+00]]
And the only reason that the first column in the data is equal to 1 is because I got rid of the NaNs that filled that position in order to avoid errors. Note that everything after the first column is a feature. The first column is what I'm trying to be able to predict.
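Also, looking more closely at the dump above, there is still a nan sitting in one of the feature columns (the row ending in 0.00000000e+00 nan), not just in the first column, so presumably I need to check and clean the whole array before building x_test. Something like this is what I have in mind (the small array is a made-up stand-in for test_data):

import numpy as np

# Made-up stand-in for a couple of rows of test_data with a NaN feature.
test_data = np.array([[1.0, 0.0815, 70.0, 0.0, 0.0, np.nan],
                      [1.0, 0.3354, 56.0, 2.0, 1.0, 3.0]])

print(np.isnan(test_data).any())   # True -> there are NaNs beyond column 0

# One simple option: replace any remaining NaNs with 0 before slicing off x_test.
test_data = np.nan_to_num(test_data)
print(np.isnan(test_data).any())   # False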
EDIT:
I changed the code to the following:
import tensorflow as tf
import numpy as np
from numpy import genfromtxt
import sklearn
from sklearn.cross_validation import train_test_split
from tensorflow import Print
# Convert to one hot
def convertOneHot(data):
    y = np.array([int(i[0]) for i in data])
    y_onehot = [0]*len(y)
    for i, j in enumerate(y):
        y_onehot[i] = [0]*(y.max() + 1)
        y_onehot[i][j] = 1
    return (y, y_onehot)
#buildDataFromIris()
data = genfromtxt('cs-training.csv',delimiter=',') # Training data
test_data = genfromtxt('cs-test-actual.csv',delimiter=',') # Test data
#for i in test_data[0]:
# print i
#print test_data
#print test_data
g = 0
for i in test_data:
    i[0] = 1.
    test_data[g] = i
    g += 1
#print 1, test_data
x_train=np.array([ i[1::] for i in data])
y_train,y_train_onehot = convertOneHot(data)
#print len(x_train), len(y_train), len(y_train_onehot)
x_test=np.array([ i[1::] for i in test_data])
y_test,y_test_onehot = convertOneHot(test_data)
#for u in y_test_onehot[0]:
# print u
#print y_test_onehot
#print len(x_test), len(y_test), len(y_test_onehot)
#print x_test[0]
#print '1'
# A number of features, 4 in this example
# B = 3 species of Iris (setosa, virginica and versicolor)
A=data.shape[1]-1 # Number of features, Note first is y
#print A
B=len(y_train_onehot[0])
#print B
#print y_train_onehot
tf_in = tf.placeholder("float", [None, A]) # Features
tf_weight = tf.Variable(tf.zeros([A,B]))
tf_bias = tf.Variable(tf.zeros([B]))
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)
tf_bias = tf.Print(tf_bias, [tf_bias], "Bias: ")
tf_weight = tf.Print(tf_weight, [tf_weight], "Weight: ")
tf_in = tf.Print(tf_in, [tf_in], "TF_in: ")
matmul_result = tf.matmul(tf_in, tf_weight)
matmul_result = tf.Print(matmul_result, [matmul_result], "Matmul: ")
tf_softmax = tf.nn.softmax(matmul_result + tf_bias)
print tf_bias
print tf_weight
print tf_in
print matmul_result
# Training via backpropagation
tf_softmax_correct = tf.placeholder("float", [None,B])
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))
print tf_softmax_correct
# Train using tf.train.GradientDescentOptimizer
tf_train_step = tf.train.GradientDescentOptimizer(0.01).minimize(tf_cross_entropy)
# Add accuracy checking nodes
tf_correct_prediction = tf.equal(tf.argmax(tf_softmax,1), tf.argmax(tf_softmax_correct,1))
tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))
print tf_correct_prediction
print tf_accuracy
#saver = tf.train.Saver([tf_weight,tf_bias])
# Initialize and run
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
print("...")
prediction = []
# Run the training
#probabilities = []
#print y_train_onehot
#print '-----------------------------------------'
for i in range(1):
    sess.run(tf_train_step, feed_dict={tf_in: x_train, tf_softmax_correct: y_train_onehot})
    #print y_train_onehot
#saver.save(sess, 'trained_csv_model')
ans = sess.run(tf_softmax, feed_dict={tf_in: x_test})
print ans
After the printout I see that one of the objects is a Boolean. I don't know if that is the issue, but take a look at the following and see if there is any way that you can help.
Tensor("Print_16:0", shape=TensorShape([Dimension(2)]), dtype=float32)
Tensor("Print_17:0", shape=TensorShape([Dimension(10), Dimension(2)]), dtype=float32)
Tensor("Print_18:0", shape=TensorShape([Dimension(None), Dimension(10)]), dtype=float32)
Tensor("Print_19:0", shape=TensorShape([Dimension(None), Dimension(2)]), dtype=float32)
Tensor("Placeholder_9:0", shape=TensorShape([Dimension(None), Dimension(2)]), dtype=float32)
Tensor("Equal_4:0", shape=TensorShape([Dimension(None)]), dtype=bool)
Tensor("Mean_4:0", shape=TensorShape([]), dtype=float32)
...
[[ nan nan]
[ nan nan]
[ nan nan]
...,
[ nan nan]
[ nan nan]
[ nan nan]]
Answer:
I don't know the direct answer, but I know how I'd approach debugging it: tf.Print. It's an op that prints the value as TensorFlow is executing, and returns the tensor for further computation, so you can just sprinkle them inline in your model.
Try throwing in a few of these. Instead of this line:
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)
Try:
tf_bias = tf.Print(tf_bias, [tf_bias], "Bias: ")
tf_weight = tf.Print(tf_weight, [tf_weight], "Weight: ")
tf_in = tf.Print(tf_in, [tf_in], "TF_in: ")
matmul_result = tf.matmul(tf_in, tf_weight)
matmul_result = tf.Print(matmul_result, [matmul_result], "Matmul: ")
tf_softmax = tf.nn.softmax(matmul_result + tf_bias)
to see what TensorFlow thinks the intermediate values are. If the NaNs are showing up earlier in the pipeline, it should give you a better idea of where the problem lies. Good luck! If you get some data out of this, feel free to follow up and we'll see if we can get you further.
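One more thing worth checking if the Print output looks sane all the way through: with this loss, tf.log(tf_softmax) can hit log(0) once the softmax saturates for some class, and 0 * -inf then turns into NaN. A common workaround is to clip before taking the log; here's a rough sketch against the lines above (the 1e-10 bound is just a conventional choice, not something from your code):

# Clip the softmax output away from exact 0 so tf.log never produces -inf.
# The bounds (1e-10, 1.0) are a conventional choice, not from the original code.
tf_clipped_softmax = tf.clip_by_value(tf_softmax, 1e-10, 1.0)
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct * tf.log(tf_clipped_softmax))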
Updated to add: Here's a stripped-down debugging version to try, where I got rid of the input functions and just generated some random data:
import tensorflow as tf
import numpy as np
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
x_train=np.random.normal(0, 1, [50,10])
y_train=np.random.randint(0, 10, [50])
y_train_onehot = dense_to_one_hot(y_train, 10)
x_test=np.random.normal(0, 1, [50,10])
y_test=np.random.randint(0, 10, [50])
y_test_onehot = dense_to_one_hot(y_test, 10)
# A number of features, 4 in this example
# B = 3 species of Iris (setosa, virginica and versicolor)
A=10
B=10
tf_in = tf.placeholder("float", [None, A]) # Features
tf_weight = tf.Variable(tf.zeros([A,B]))
tf_bias = tf.Variable(tf.zeros([B]))
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)
tf_bias = tf.Print(tf_bias, [tf_bias], "Bias: ")
tf_weight = tf.Print(tf_weight, [tf_weight], "Weight: ")
tf_in = tf.Print(tf_in, [tf_in], "TF_in: ")
matmul_result = tf.matmul(tf_in, tf_weight)
matmul_result = tf.Print(matmul_result, [matmul_result], "Matmul: ")
tf_softmax = tf.nn.softmax(matmul_result + tf_bias)
# Training via backpropagation
tf_softmax_correct = tf.placeholder("float", [None,B])
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))
# Train using tf.train.GradientDescentOptimizer
tf_train_step = tf.train.GradientDescentOptimizer(0.01).minimize(tf_cross_entropy)
# Add accuracy checking nodes
tf_correct_prediction = tf.equal(tf.argmax(tf_softmax,1), tf.argmax(tf_softmax_correct,1))
tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))
print tf_correct_prediction
print tf_accuracy
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1):
    print "Running the training step"
    sess.run(tf_train_step, feed_dict={tf_in: x_train, tf_softmax_correct: y_train_onehot})
    #print y_train_onehot
    #saver.save(sess, 'trained_csv_model')
    print "Running the eval step"
    ans = sess.run(tf_softmax, feed_dict={tf_in: x_test})
    print ans
You should see lines starting with "Bias: ", etc.
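Separately from the NaN hunt: the raw CSV features in your dump span very different scales (ratios around 0.8 sitting next to ages like 43 and 57), and with zero-initialized weights plus plain gradient descent that can drive this softmax regression into saturation quickly. Standardizing the features is a common stabilizer and cheap to try; here's a small numpy sketch (the epsilon is only there to guard against constant columns, and the *_scaled names are mine, not from your code):

import numpy as np

def standardize(train, test):
    # Scale features to zero mean / unit variance using training-set statistics only.
    mean = train.mean(axis=0)
    std = train.std(axis=0) + 1e-8   # epsilon guards against constant columns
    return (train - mean) / std, (test - mean) / std

# Hypothetical usage with the arrays from your code:
# x_train_scaled, x_test_scaled = standardize(x_train, x_test)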