这是 Keras(Python)的代码。当以下代码在 Linux Mint 上运行时,所有批次均未完成,训练总是在类似第 32 批处停止:
1/100 .............. ETA 30:00 Loss ..
2/100 =........... ETA 29:59 Loss ..
3/100 ==......... ETA 29:58 Loss ..
.
.
.
32/100 ==....... ETA 25:00 Loss ..
(由于未知原因,训练停止了)
但是,当此代码在Windows上运行时,它将按预期运行,直到第100批为止。是什么导致此行为?
from __future__ import print_function
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.layers.noise import AlphaDropout
from keras.preprocessing.image import ImageDataGenerator
from keras.models import model_from_json
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.text import Tokenizer
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
import os.path
f_log = './log'
f_model = './model/dogvscat'
model_yaml = 'dogvscat_model.yaml'
model_filename = 'dogvscat_model.json'
weights_filename = 'dogvscat_model_weights.hdf5'
batch_size = 64
epochs = 15
nb_validation_samples = 100
print('Building model...')
if os.path.isfile(os.path.join(f_model, model_filename)):
    # Resume: rebuild the architecture from the saved JSON and load weights.
    print('Saved parameters found. I will use this file...')
    # FIX: the original used open(...).read() without closing the handle;
    # a context manager closes the file deterministically.
    with open(os.path.join(f_model, model_filename)) as json_file:
        json_string = json_file.read()
    model = model_from_json(json_string)
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.load_weights(os.path.join(f_model, weights_filename))
else:
    # Fresh start: two conv/pool stages, then a small SELU classifier head.
    print('Saved parameters Not found. Creating new model...')
    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=(128, 128, 3)))
    model.add(Activation('selu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('selu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    # lecun_normal + SELU + AlphaDropout is the self-normalizing combination.
    model.add(Dense(64, kernel_initializer='lecun_normal'))
    model.add(Activation('selu'))
    model.add(AlphaDropout(0.1))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
# Training images get light augmentation; validation images are only rescaled.
train_datagen = ImageDataGenerator(rescale=1.0 / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)

# Both directory iterators yield identically shaped (image, one-hot) batches,
# so the flow settings are shared.
_flow_kwargs = dict(target_size=(128, 128),
                    batch_size=batch_size,
                    class_mode='categorical',
                    shuffle=True)
train_generator = train_datagen.flow_from_directory('data/train',
                                                    **_flow_kwargs)
validation_generator = test_datagen.flow_from_directory('data/validation',
                                                        **_flow_kwargs)
# TensorBoard logging plus checkpointing of the best weights by val_loss.
tb_cb = keras.callbacks.TensorBoard(log_dir=f_log, histogram_freq=0)
cp_cb = keras.callbacks.ModelCheckpoint(
    filepath=os.path.join(f_model, weights_filename),
    monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
cbks = [tb_cb, cp_cb]

# BUG FIX: steps_per_epoch / validation_steps are *batch* counts, not sample
# counts.  The original passed nb_validation_samples (100) for both, asking
# the generators for more batches per epoch than the directories contain —
# which is why training stalled partway through an epoch (batch 32) on
# Linux.  Derive the correct number of batches from each generator's actual
# sample count.
steps_per_epoch = int(np.ceil(train_generator.samples / batch_size))
validation_steps = int(np.ceil(validation_generator.samples / batch_size))

history = model.fit_generator(
    train_generator,
    steps_per_epoch=steps_per_epoch,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_steps,
    callbacks=cbks
)
# Evaluate over the whole validation set.  FIX: the steps argument is a
# batch count, not a sample count, so derive it from the generator rather
# than passing nb_validation_samples (100) directly.
eval_steps = int(np.ceil(validation_generator.samples / batch_size))
score = model.evaluate_generator(validation_generator, eval_steps)
print('')
print('Test score:', score[0])
print('Test accuracy:', score[1])

# Persist the architecture (JSON and YAML) and the final weights.
# FIX: use context managers so the file handles are closed deterministically
# (the original left them open).
with open(os.path.join(f_model, model_filename), 'w') as json_file:
    json_file.write(model.to_json())
with open(os.path.join(f_model, model_yaml), 'w') as yaml_file:
    yaml_file.write(model.to_yaml())
print('save weights')
model.save_weights(os.path.join(f_model, weights_filename))
最佳答案
您的代码中有错误:应按如下所示更改 steps_per_epoch 和 validation_steps。
# NOTE(review): the original answer used an undefined name
# `nb_training_samples` (NameError as written).  The sample counts are
# available directly on the iterators returned by flow_from_directory, so
# the batch counts can be computed from those.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=int(np.ceil(train_generator.samples / batch_size)),
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=int(np.ceil(validation_generator.samples / batch_size)),
    callbacks=cbks
)
每一步都会消耗 batch_size 个样本,因此每个 epoch 需要 np.ceil(nb_training_samples/batch_size) 个步骤。

关于 machine-learning - Keras:批次未完成,我们在 Stack Overflow 上找到一个类似的问题:https://stackoverflow.com/questions/47329767/