Problem Description
System information: Colab, TensorFlow 2.2.0
Describe the current behavior: I faced this error when I tried to train on my own data, which is a multi-label semantic segmentation problem.
Below is the code:
import tensorflow as tf
import tensorflow.keras.backend as K
IMG_WIDTH = 512
IMG_HEIGHT = 512
IMG_CHANNELS = 3
# batch_shape=(512,512,3)
# inputs = Input(batch_shape=(4, 512, 512, 3))
#Build the model
inputs = tf.keras.layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
#s = tf.keras.layers.Lambda(lambda x: x / 255)(inputs)
#Contraction path
c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(inputs)
c1 = tf.keras.layers.Dropout(0.1)(c1)
c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)
p1 = tf.keras.layers.MaxPooling2D((2, 2))(c1)
c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
c2 = tf.keras.layers.Dropout(0.1)(c2)
c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
p2 = tf.keras.layers.MaxPooling2D((2, 2))(c2)
c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
c3 = tf.keras.layers.Dropout(0.2)(c3)
c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
p3 = tf.keras.layers.MaxPooling2D((2, 2))(c3)
c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
c4 = tf.keras.layers.Dropout(0.2)(c4)
c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
p4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(c4)
c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
c5 = tf.keras.layers.Dropout(0.3)(c5)
c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
#Expansive path
u6 = tf.keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
u6 = tf.keras.layers.concatenate([u6, c4])
c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
c6 = tf.keras.layers.Dropout(0.2)(c6)
c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
u7 = tf.keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
u7 = tf.keras.layers.concatenate([u7, c3])
c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
c7 = tf.keras.layers.Dropout(0.2)(c7)
c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
u8 = tf.keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
u8 = tf.keras.layers.concatenate([u8, c2])
c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
c8 = tf.keras.layers.Dropout(0.1)(c8)
c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
u9 = tf.keras.layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
u9 = tf.keras.layers.concatenate([u9, c1], axis=3)
c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
c9 = tf.keras.layers.Dropout(0.1)(c9)
c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
outputs = tf.keras.layers.Conv2D(6, (1, 1), activation='softmax')(c9)
model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
# define optimizer
optim = tf.keras.optimizers.Adam()
smooth = 1.
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f * y_true_f) + K.sum(y_pred_f * y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
    return 1. - dice_coef(y_true, y_pred)
loss= tf.keras.losses.CategoricalCrossentropy()
model.compile(optim, loss, metrics=[dice_coef,'accuracy'])
#model.compile(optim, metrics, loss)
model.summary()
#SET UP FOR DATA TRAINING
BATCH_SIZE = 4
CLASSES = ['0', '1','2','3','4','5']
LR = 0.0001
EPOCHS = 40
n_classes = len(CLASSES)
# Dataset for train images
train_dataset = Dataset(
    x_train_dir,
    y_train_dir,
    classes=CLASSES,
    augmentation=get_training_augmentation(),
    preprocessing=get_preprocessing(),
    with_shape_assert=True,
)
# Dataset for validation images
valid_dataset = Dataset(
    x_valid_dir,
    y_valid_dir,
    classes=CLASSES,
    augmentation=get_validation_augmentation(),
    preprocessing=get_preprocessing(),
    with_shape_assert=True,
)
train_dataloader = Dataloader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_dataloader = Dataloader(valid_dataset, batch_size=4, shuffle=False)
# check shapes for errors
assert train_dataloader[0][0].shape == (BATCH_SIZE, 512, 512, 3)
assert train_dataloader[0][1].shape == (BATCH_SIZE, 512, 512, n_classes)
# define callbacks for learning rate scheduling and best checkpoints saving
callbacks = [
    tf.keras.callbacks.ModelCheckpoint('./best_model.h5', save_weights_only=False, save_best_only=True, mode='min'),
    tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
]
results_2704 = model.fit(
    train_dataloader,
    steps_per_epoch=len(train_dataloader),
    epochs=EPOCHS,
    validation_data=valid_dataloader,
    callbacks=callbacks,
    validation_steps=len(valid_dataloader),
    verbose=1
)
This gives the error:
ValueError: No gradients provided for any variable: ['conv2d/kernel:0', 'conv2d/bias:0', 'conv2d_1/kernel:0', 'conv2d_1/bias:0', 'conv2d_2/kernel:0', 'conv2d_2/bias:0', 'conv2d_3/kernel:0', 'conv2d_3/bias:0', 'conv2d_4/kernel:0', 'conv2d_4/bias:0', 'conv2d_5/kernel:0', 'conv2d_5/bias:0', 'conv2d_6/kernel:0', 'conv2d_6/bias:0', 'conv2d_7/kernel:0', 'conv2d_7/bias:0', 'conv2d_8/kernel:0', 'conv2d_8/bias:0', 'conv2d_9/kernel:0', 'conv2d_9/bias:0', 'conv2d_transpose/kernel:0', 'conv2d_transpose/bias:0', 'conv2d_10/kernel:0', 'conv2d_10/bias:0', 'conv2d_11/kernel:0', 'conv2d_11/bias:0', 'conv2d_transpose_1/kernel:0', 'conv2d_transpose_1/bias:0', 'conv2d_12/kernel:0', 'conv2d_12/bias:0', 'conv2d_13/kernel:0', 'conv2d_13/bias:0', 'conv2d_transpose_2/kernel:0', 'conv2d_transpose_2/bias:0', 'conv2d_14/kernel:0', 'conv2d_14/bias:0', 'conv2d_15/kernel:0', 'conv2d_15/bias:0', 'conv2d_transpose_3/kernel:0', 'conv2d_transpose_3/bias:0', 'conv2d_16/kernel:0', 'conv2d_16/bias:0', 'conv2d_17/kernel:0', 'conv2d_17/bias:0', 'conv2d_18/kernel:0', 'conv2d_18/bias:0'].
I know it is maybe due to dead gradients, and I have been trying to solve this problem for a month (I also posted on the TensorFlow GitHub), but until now I still have not figured out the solution. So I am posting here to seek help from other TensorFlow experts who may give me some hints while I wait for an update from the TensorFlow support team. I searched around and learned that using tf.GradientTape() may help solve the issue, but I still could not figure out the right way to do it myself.
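For context, my understanding is that a custom training loop with tf.GradientTape() roughly follows the pattern below. This is only an illustrative sketch that assumes train_dataloader yields (images, masks) batch tuples and reuses the model, loss and optim objects defined above; it is not something I have verified on my data:

# Illustrative sketch only: custom training loop with tf.GradientTape.
# Assumes train_dataloader yields (images, masks) batch tuples.
for epoch in range(EPOCHS):
    for images, masks in train_dataloader:
        with tf.GradientTape() as tape:
            preds = model(images, training=True)   # forward pass
            loss_value = loss(masks, preds)        # categorical cross-entropy
        grads = tape.gradient(loss_value, model.trainable_variables)
        optim.apply_gradients(zip(grads, model.trainable_variables))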
Really looking forward to any advice. Thank you very much.
Recommended Answer
You get this error when you pass only the training data and miss passing the labels in model.fit(). I was able to recreate your error using the code below. You can download the dataset I am using in the program from here.
Code to recreate the issue -
%tensorflow_version 2.x
# MLP for Pima Indians Dataset saved to single file
import numpy as np
from numpy import loadtxt
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# load pima indians dataset
dataset = np.loadtxt("/content/pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# define model
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Model Summary
#model.summary()
# Fit the model
model.fit(X, epochs=150, batch_size=10, verbose=0)
Output -
2.2.0
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-7ddca8f2992e> in <module>()
28
29 # Fit the model
---> 30 model.fit(X, epochs=150, batch_size=10, verbose=0)
10 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:571 train_function *
outputs = self.distribute_strategy.run(
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:951 run **
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2290 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2649 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:541 train_step **
self.trainable_variables)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1804 _minimize
trainable_variables))
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:521 _aggregate_gradients
filtered_grads_and_vars = _filter_grads(grads_and_vars)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:1219 _filter_grads
([v.name for _, v in grads_and_vars],))
ValueError: No gradients provided for any variable: ['dense_5/kernel:0', 'dense_5/bias:0', 'dense_6/kernel:0', 'dense_6/bias:0', 'dense_7/kernel:0', 'dense_7/bias:0'].
Solution - Pass the training labels in model.fit() and your error will be fixed.
Changed
model.fit(X , epochs=150, batch_size=10, verbose=0)
to
model.fit(X , Y, epochs=150, batch_size=10, verbose=0)
Code -
%tensorflow_version 2.x
# MLP for Pima Indians Dataset saved to single file
import numpy as np
from numpy import loadtxt
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# load pima indians dataset
dataset = np.loadtxt("/content/pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# define model
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Model Summary
#model.summary()
# Fit the model
model.fit(X , Y, epochs=150, batch_size=10, verbose=0)
Output -
2.2.0
<tensorflow.python.keras.callbacks.History at 0x7f9208433eb8>
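Coming back to the U-Net code in your question, the same principle applies: the Dataloader you pass to model.fit() has to yield (images, masks) batch tuples, not images alone, otherwise the loss has no labels to differentiate against. Below is a minimal sketch of such a loader based on tf.keras.utils.Sequence; the Dataset indexing and attribute names are assumptions based on your snippet, since that class is not shown.

# Hypothetical sketch: a Dataloader built on tf.keras.utils.Sequence that
# returns (images, masks) batch tuples so model.fit() receives labels.
# `dataset` is assumed to return an (image, mask) pair per index.
import numpy as np
import tensorflow as tf

class Dataloader(tf.keras.utils.Sequence):
    def __init__(self, dataset, batch_size=4, shuffle=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indexes = np.arange(len(dataset))

    def __len__(self):
        return len(self.dataset) // self.batch_size

    def __getitem__(self, i):
        idx = self.indexes[i * self.batch_size:(i + 1) * self.batch_size]
        images, masks = zip(*(self.dataset[j] for j in idx))
        # Returning an (x, y) tuple is what lets Keras compute gradients.
        return np.stack(images), np.stack(masks)

    def on_epoch_end(self):
        if self.shuffle:
            np.random.shuffle(self.indexes)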
Hope this answers your question. Happy Learning.