I have the following neural network in Keras (you probably don't need to review it in detail to answer my question):
Short summary:
This is a neural network that takes an image as input and outputs an image. The network is mostly convolutional. I use generators. I also have two callbacks: one for TensorBoard and one for saving checkpoints.
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, Conv2D, Conv2DTranspose,
                                     MaxPooling2D, ZeroPadding2D, concatenate)
from tensorflow.keras.callbacks import ModelCheckpoint

class modelsClass(object):
    def __init__(self, img_rows=272, img_cols=480):
        self.img_rows = img_rows
        self.img_cols = img_cols

    def addPadding(self, layer, level):
        # Pad `layer` so its spatial size matches the encoder feature map
        # at the given level (1 = full resolution, 4 = 1/8 resolution).
        w1, h1 = self.img_cols, self.img_rows
        w2, h2 = int(w1/2), int(h1/2)
        w3, h3 = int(w2/2), int(h2/2)
        w4, h4 = int(w3/2), int(h3/2)
        h = [h1, h2, h3, h4]
        w = [w1, w2, w3, w4]
        # Target width and height
        tw = w[level-1]
        th = h[level-1]
        # Source width and height
        lsize = K.int_shape(layer)
        sh = lsize[1]
        sw = lsize[2]
        # Pad only on the bottom/right so the tensors can be concatenated
        pw = (0, tw - sw)
        ph = (0, th - sh)
        layer = ZeroPadding2D(padding=(ph, pw), data_format="channels_last")(layer)
        return layer
    def getmodel(self):
        input_blurred = Input((self.img_rows, self.img_cols, 3))

        # Encoder: two 3x3 convolutions per level, then 2x2 max pooling
        conv1 = Conv2D(64, (3, 3), activation='relu', padding='same')(input_blurred)
        conv1 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool1)
        conv2 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool2)
        conv3 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool3)
        conv4 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        # Bottleneck
        conv5 = Conv2D(1024, (3, 3), activation='relu', padding='same')(pool4)
        conv5 = Conv2D(1024, (3, 3), activation='relu', padding='same')(conv5)

        # Decoder: transposed convolutions with skip connections to the encoder
        up6 = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(conv5)
        up6 = self.addPadding(up6, level=4)
        up6 = concatenate([up6, conv4], axis=3)
        conv6 = Conv2D(512, (3, 3), activation='relu', padding='same')(up6)
        conv6 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv6)

        up7 = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv6)
        up7 = self.addPadding(up7, level=3)
        up7 = concatenate([up7, conv3], axis=3)
        conv7 = Conv2D(256, (3, 3), activation='relu', padding='same')(up7)
        conv7 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv7)

        up8 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv7)
        up8 = self.addPadding(up8, level=2)
        up8 = concatenate([up8, conv2], axis=3)
        conv8 = Conv2D(128, (3, 3), activation='relu', padding='same')(up8)
        conv8 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv8)

        up9 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv8)
        up9 = self.addPadding(up9, level=1)
        up9 = concatenate([up9, conv1], axis=3)
        conv9 = Conv2D(64, (3, 3), activation='relu', padding='same')(up9)
        conv9 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv9)

        # Final 1x1 convolution maps back to a 3-channel image
        conv10 = Conv2D(3, (1, 1), activation='linear')(conv9)

        model = Model(inputs=input_blurred, outputs=conv10)
        return model
Then the code is:
models = modelsClass(720, 1280)
model = models.getmodel()
model.compile(optimizer='adam', loss='mean_absolute_error')

model_checkpoint = ModelCheckpoint('checkpoints/cp.ckpt', monitor='val_loss',
                                   verbose=0, save_best_only=False,
                                   save_weights_only=False, mode='auto',
                                   save_freq='epoch')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir='some_dir',
                                                      histogram_freq=1)

model_history = model.fit_generator(generator_train, epochs=3,
                                    steps_per_epoch=900,
                                    callbacks=[tensorboard_callback, model_checkpoint],
                                    validation_data=generator_val,
                                    validation_steps=100)
where generator_train.__len__ = 900 and generator_val.__len__ = 100, and both use a batch size of 1. Epoch 1 takes 10 minutes, while epoch 2 takes 3 hours. I'd like to know what the problem could be.
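For context, here is a minimal sketch of what such a generator might look like, assuming it is a keras.utils.Sequence (the actual generator definition was not posted; the class name and data arrays below are hypothetical):

import numpy as np
from tensorflow import keras

class ImagePairSequence(keras.utils.Sequence):
    # Hypothetical generator yielding (blurred, sharp) image batches.
    def __init__(self, blurred, sharp, batch_size=1):
        self.blurred = blurred        # input images, shape (N, H, W, 3)
        self.sharp = sharp            # target images, same shape
        self.batch_size = batch_size

    def __len__(self):
        # Batches per epoch: 900 for generator_train, 100 for generator_val
        return len(self.blurred) // self.batch_size

    def __getitem__(self, idx):
        s = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.blurred[s], self.sharp[s]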
Best Answer
Here are some general things that can slow a program down:

- CPU/GPU being used by other programs
- Memory swapping: the machine moves memory from RAM to disk because it does not have enough RAM. This may be because your script tries to keep everything in memory (for example a list of previous outputs, possibly even with their gradients), or because another program has also started using a lot of RAM (one way to check this is shown in the sketch after this list)
- The computer overheating (it may only warm up after the first epoch)
- Battery saving (possible if it is a laptop running unplugged)
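To check the swapping hypothesis concretely, one option is a small callback that logs RAM and swap usage at each epoch boundary. This is only a sketch; psutil and the MemoryLogger name are assumptions, not part of the original code:

import psutil
import tensorflow as tf

class MemoryLogger(tf.keras.callbacks.Callback):
    # Logs system memory pressure after every epoch; a large jump in swap
    # usage between epoch 1 and epoch 2 would point to memory swapping.
    def on_epoch_end(self, epoch, logs=None):
        vm = psutil.virtual_memory()
        sw = psutil.swap_memory()
        print(f"epoch {epoch}: RAM {vm.percent}% used, swap {sw.percent}% used")

Adding MemoryLogger() to the callbacks list passed to fit_generator would then print one line per epoch.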
A similar question on Stack Overflow: "python - Why does epoch 2 take 18 times as long as epoch 1?" https://stackoverflow.com/questions/59917535/