So far I have been experimenting with TensorFlow and Keras. I took the code from image_ocr.py, which lets me train a printed-text OCR model. I want to see how training is progressing, and I have successfully visualized the accuracy and loss of the model being trained. However, as far as I know, the OCR RNN does not use accuracy for validation; instead it uses the mean edit distance to validate the accuracy of the recognized words. Given that, I have been trying to get the variables named mean_ed and mean_norm_ed from the VizCallback class into Tensorboard. I have already tried the approach from this link, but it still does not work. Can anyone help me visualize the mean edit distance variables? Here is the relevant snippet from my code:

class VizCallback(keras.callbacks.Callback):

    def __init__(self, run_name, test_func, text_img_gen, num_display_words=6):
        self.test_func = test_func
        self.output_dir = os.path.join(
            OUTPUT_DIR, run_name)
        self.text_img_gen = text_img_gen
        self.num_display_words = num_display_words
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

    def on_train_begin(self, logs={}):
        self.med = []
        self.nmed = []

    def show_edit_distance(self, num, logs={}):
        num_left = num
        mean_norm_ed = 0.0
        mean_ed = 0.0
        while num_left > 0:
            word_batch = next(self.text_img_gen)[0]
            num_proc = min(word_batch['the_input'].shape[0], num_left)
            decoded_res = decode_batch(self.test_func, word_batch['the_input'][0:num_proc])
            for j in range(num_proc):
                edit_dist = editdistance.eval(decoded_res[j], word_batch['source_str'][j])
                mean_ed += float(edit_dist)
                mean_norm_ed += float(edit_dist) / len(word_batch['source_str'][j])
            num_left -= num_proc
        mean_norm_ed = mean_norm_ed / num
        mean_ed = mean_ed / num
        # Create scalar summaries for both mean edit distance and normalized mean edit distance
        tf_med_ph = tf.placeholder(tf.float32, shape=None, name='med_summary')
        tf_nmed_ph = tf.placeholder(tf.float32, shape=None, name='nmed_summary')
        tf_med = tf.summary.scalar('med', tf_med_ph)
        tf_nmed = tf.summary.scalar('nmed', tf_nmed_ph)
        performance_summaries = tf.summary.merge([tf_med, tf_nmed])

        # Create a session for displaying the summary
        config = tf.ConfigProto(allow_soft_placement=True)
        session = tf.InteractiveSession(config=config)
        summ_writer = tf.summary.FileWriter(os.path.join('summaries', 'first'), session.graph)

        # Execute the summaries defined above
        summ = session.run(performance_summaries, feed_dict={tf_med_ph: mean_ed, tf_nmed_ph: mean_norm_ed})

        # Write the obtained summaries to the file, so it can be displayed in the TensorBoard
        summ_writer.add_summary(summ, epoch)

        session.close()
        print('\nOut of %d samples:  Mean edit distance: %.3f Mean normalized edit distance: %0.3f'
              % (num, mean_ed, mean_norm_ed))

    def on_epoch_end(self, epoch, logs={}):
        self.model.save_weights(os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
        self.show_edit_distance(256)
        word_batch = next(self.text_img_gen)[0]
        res = decode_batch(self.test_func, word_batch['the_input'][0:self.num_display_words])
        if word_batch['the_input'][0].shape[0] < 256:
            cols = 2
        else:
            cols = 1
        for i in range(self.num_display_words):
            plt.subplot(self.num_display_words // cols, cols, i + 1)
            if K.image_data_format() == 'channels_first':
                the_input = word_batch['the_input'][i, 0, :, :]
            else:
                the_input = word_batch['the_input'][i, :, :, 0]
            plt.imshow(the_input.T, cmap='Greys_r')
            plt.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' % (word_batch['source_str'][i], res[i]))
        fig = plt.gcf()
        fig.set_size_inches(10, 13)
        plt.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
        plt.close()

def train(run_name, start_epoch, stop_epoch, img_w):
    # Input Parameters
    img_h = 64
    words_per_epoch = 16000
    val_split = 0.2
    val_words = int(words_per_epoch * (val_split))

    # Network parameters
    conv_filters = 16
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 32
    rnn_size = 512
    minibatch_size = 32

    if K.image_data_format() == 'channels_first':
        input_shape = (1, img_w, img_h)
    else:
        input_shape = (img_w, img_h, 1)

    fdir = os.path.dirname(get_file('wordlists.tgz',
                                    origin='http://test.com/wordlist.tgz', untar=True))

    img_gen = TextImageGenerator(monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
                                 bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
                                 minibatch_size=minibatch_size,
                                 img_w=img_w,
                                 img_h=img_h,
                                 downsample_factor=(pool_size ** 2),
                                 val_split=words_per_epoch - val_words
                                 )
    act = 'relu'
    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)

    conv_to_rnn_dims = (img_w // (pool_size ** 2), (img_h // (pool_size ** 2)) * conv_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

    # cuts down input size going into RNN:
    inner = Dense(time_dense_size, activation=act, name='dense1')(inner)

    # Two layers of bidirectional GRUs
    # GRU seems to work as well, if not better than LSTM:

    gru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru1')(inner)
    gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(inner)
    gru1_merged = add([gru_1, gru_1b])
    gru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)

    # transforms RNN output to character activations:
    inner = Dense(img_gen.get_output_size(), kernel_initializer='he_normal',
                  name='dense2')(concatenate([gru_2, gru_2b]))
    y_pred = Activation('softmax', name='softmax')(inner)
    Model(inputs=input_data, outputs=y_pred).summary()

    labels = Input(name='the_labels', shape=[img_gen.absolute_max_string_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    # Keras doesn't currently support loss funcs with extra parameters,
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])

    # clipnorm seems to speed up convergence
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)

    model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)

    # Make Tensorboard instance
    init_op = tf.initialize_all_variables()

    sess = tf.Session()
    sess.run(init_op)
    tbname = "tensorboard-of-{}".format(int(time.time()))
    tensorboard = keras.callbacks.TensorBoard(
        log_dir="logs/{}".format(tbname),
        histogram_freq=0,
        write_images=True)

    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd,
                  metrics=['accuracy'])
    if start_epoch > 0:
        weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
        model.load_weights(weight_file)
    # captures output of softmax so we can decode the output during visualization
    test_func = K.function([input_data], [y_pred])

    viz_cb = VizCallback(run_name, test_func, img_gen.next_val())

    model.fit_generator(generator=img_gen.next_train(),
                        steps_per_epoch=(words_per_epoch - val_words) // minibatch_size,
                        epochs=stop_epoch,
                        validation_data=img_gen.next_val(),
                        validation_steps=val_words // minibatch_size,
                        callbacks=[tensorboard, viz_cb, img_gen],
                        initial_epoch=start_epoch)

Any help would be greatly appreciated. Thanks!

P.S. I am using Tensorflow 1.9.0 and Python 3.6.8

Update
The only thing left now would be to pass the performance_summaries variable from the VizCallback class to the metrics in the train function. Any help with that?

Best answer

You can modify show_edit_distance to add the summaries every time this function is called:

def show_edit_distance(self, num, epoch):
    ...
    summary = tf.Summary()
    summary.value.add(tag='mean_ed', simple_value=mean_ed)
    summ_writer.add_summary(summary, epoch)

    summary = tf.Summary()
    summary.value.add(tag='mean_norm_ed', simple_value=mean_norm_ed)
    summ_writer.add_summary(summary, epoch)
    ...

Note that you will need an extra argument, epoch:

def on_epoch_end(self, epoch, logs={}):
    ...
    self.show_edit_distance(256, epoch)
    ...

The Tensorboard callback should pick up these summaries automatically, since they are added to the GraphKeys.SUMMARIES collection.

Note: unfortunately, I was not able to test this solution. Let me know if I am missing something.
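
For reference, the writing step from the answer can be exercised on its own. Below is a minimal, untested sketch for TensorFlow 1.x; the helper name log_edit_distance, the logs/edit_distance_demo directory and the dummy scalar values are illustrative only, not part of the original answer. In the actual callback, the FileWriter would typically be created once (for example in __init__) and reused from show_edit_distance(num, epoch):

import tensorflow as tf

def log_edit_distance(summ_writer, mean_ed, mean_norm_ed, epoch):
    # Build plain Summary protos and write them for this epoch;
    # no placeholders, merge ops or extra sessions are required.
    summary = tf.Summary()
    summary.value.add(tag='mean_ed', simple_value=mean_ed)
    summ_writer.add_summary(summary, epoch)

    summary = tf.Summary()
    summary.value.add(tag='mean_norm_ed', simple_value=mean_norm_ed)
    summ_writer.add_summary(summary, epoch)
    summ_writer.flush()

if __name__ == '__main__':
    # Illustrative usage: one writer per run, reused every epoch
    writer = tf.summary.FileWriter('logs/edit_distance_demo')
    log_edit_distance(writer, mean_ed=3.2, mean_norm_ed=0.41, epoch=0)
    writer.close()

Building the Summary proto directly avoids recreating placeholders, summary ops and sessions on every call to show_edit_distance, which is what the snippet in the question was doing.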

For "python - How to visualize mean edit distance in Tensorboard using a Keras callback?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/56264958/
