我目前正在Tensorflow中开发一个程序,该程序读取1750 x 1750像素的数据。我通过一个卷积网络运行它:
import os
import sys
import tensorflow as tf
import Input
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 100, "hello")
tf.app.flags.DEFINE_string('data_dir', '/Volumes/Machine_Learning_Data', "hello")
def inputs():
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'Data')
images, labels = Input.inputs(data_dir = data_dir, batch_size = FLAGS.batch_size)
return images, labels
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape = shape)
return tf.Variable(initial)
def conv2d(images, W):
return tf.nn.conv2d(images, W, strides = [1, 1, 1, 1], padding = 'SAME')
def max_pool_5x5(images):
return tf.nn.max_pool(images, ksize = [1, 5, 5, 1], strides = [1, 1, 1, 1], padding = 'SAME')
def forward_propagation(images):
with tf.variable_scope('conv1') as scope:
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
image_matrix = tf.reshape(images, [-1, 1750, 1750, 1])
h_conv1 = tf.nn.sigmoid(conv2d(image_matrix, W_conv1) + b_conv1)
h_pool1 = max_pool_5x5(h_conv1)
with tf.variable_scope('conv2') as scope:
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.sigmoid(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_5x5(h_conv2)
with tf.variable_scope('conv3') as scope:
W_conv3 = weight_variable([5, 5, 64, 128])
b_conv3 = bias_variable([128])
h_conv3 = tf.nn.sigmoid(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = max_pool_5x5(h_conv3)
with tf.variable_scope('local3') as scope:
W_fc1 = weight_variable([10 * 10 * 128, 256])
b_fc1 = bias_variable([256])
h_pool3_flat = tf.reshape(h_pool3, [-1, 10 * 10 * 128])
h_fc1 = tf.nn.sigmoid(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([256, 4])
b_fc2 = bias_variable([4])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
return y_conv
def error(forward_propagation_results, labels):
labels = tf.cast(labels, tf.float32)
mean_squared_error = tf.square(tf.sub(labels, forward_propagation_results))
cost = tf.reduce_mean(mean_squared_error)
train = tf.train.GradientDescentOptimizer(learning_rate = 0.3).minimize(cost)
return train
print cost
不幸的是弹出了一个错误
用于广播的不兼容形状:TensorShape([Dimension(100)])和TensorShape([Dimension(9187500),Dimension(4)])
而且我无法调试它。
矩阵尺寸有什么问题?解释器说错误发生在tf.sub行。
编辑:
这是调用函数的代码的主要部分。
import Input
import Process
import tensorflow as tf
def train():
with tf.Session() as sess:
images, labels = Process.inputs()
forward_propgation_results = Process.forward_propagation(images)
train_loss = Process.error(forward_propgation_results, labels)
init = tf.initialize_all_variables()
sess.run(init)
def main(argv = None):
train()
if __name__ == '__main__':
tf.app.run()
最佳答案
我发现了以下问题:
您的labels
输入是一个简单的一维标签标识符数组,但需要对其进行一热编码,以使其成为尺寸为[batch_size, 4]
且填充为1或0的矩阵。
您的最大池化操作需要具有不同于1的跨度,以实际减小图像的宽度和高度。因此,设置strides=[1, 5, 5, 1]
应该可以。
解决此问题后,您的最大池化操作实际上并没有像您假设的那样将宽度/高度从1750降低到10,而是降低到14(因为1750 / 5 / 5 / 5 == 14
。所以您可能想在这里增加权重矩阵,但是还有其他选择。
您的图像是否有可能以3个频道开头?您在这里假设为灰度,因此您应该重塑image_matrix
使其具有3个通道,或者将图像转换为灰度。
应用这些修复程序之后,网络输出和标签都应具有形状[batch_size, 4]
,并且您应该能够计算出差异。
编辑:在下面的聊天中讨论了代码之后,我已经对此进行了调整。