Problem Description
So big picture, I'm trying to make a keras w2v auto-encoder. I tried to follow the CustomVariationalLayer class from this official example.
My class looks like this:
class custom_ae_layer(Layer):
    """custom keras layer to handle looking up wv inputs
    example from https://github.com/fchollet/keras/blob/master/examples/variational_autoencoder.py
    """
    def __init__(self, **kwargs):
        self.is_placeholder = True
        super(custom_ae_layer, self).__init__(**kwargs)

    def ae_loss(self, reconstruction, emb_lookup):
        loss = K.sum(emb_lookup - reconstruction, axis=-1)
        return K.mean(loss)

    def call(self, inputs):
        reconstruction = inputs[1]
        emb_lookup = inputs[0]
        loss = self.ae_loss(emb_lookup, reconstruction)
        self.add_loss(loss)
        return emb_lookup
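For reference, here is a minimal, self-contained sketch of the add_loss pattern this class follows, modeled on the cited variational_autoencoder.py example. The toy layer name, shapes, and squared-error loss below are illustrative only, not taken from the post:

# Minimal sketch of the add_loss custom-layer pattern (illustrative
# names and shapes): the layer registers its own loss via add_loss,
# so the model is compiled with loss=None.
from keras import backend as K
from keras.layers import Input, Dense, Layer
from keras.models import Model

class ReconLossLayer(Layer):  # hypothetical toy layer
    def __init__(self, **kwargs):
        self.is_placeholder = True
        super(ReconLossLayer, self).__init__(**kwargs)

    def call(self, inputs):
        original, reconstruction = inputs
        # register a reconstruction loss on the layer itself
        self.add_loss(K.mean(K.sum(K.square(original - reconstruction), axis=-1)))
        return original

x = Input(shape=(16,))
h = Dense(8, activation="relu")(x)
recon = Dense(16, activation="tanh")(h)
y = ReconLossLayer()([x, recon])
model = Model(x, y)
model.compile(optimizer="adamax", loss=None)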
This error occurs regardless of whether I return emb_lookup or reconstruction. The major difference between my layer and the official example is that I use an embedding lookup as an input (the output of a keras.layers.Embedding layer), and reconstruction is
recon_layer = Dense(outshape, activation="tanh", kernel_regularizer=l2(in_args.l2_rate))(deconv_input)
s_recon_layer = K.squeeze(recon_layer, 2)
The full error message is:
Traceback (most recent call last):
  File "semi_sup_cnn_big_data_test.py", line 166, in <module>
    main()
  File "semi_sup_cnn_big_data_test.py", line 84, in main
    args,run_time,micro,macro = basic_cnn_train_val_test(args)
  File "semi_sup_cnn_big_data_test.py", line 100, in basic_cnn_train_val_test
    clf,args = init_export_network(args)
  File "/home/qqi/git/MPI_CNN/models/auto_encoder_multilayer_cnn.py", line 257, in init_export_network
    model = Model(model_input, y)
  File "/usr/local/lib/python3.5/dist-packages/keras/legacy/interfaces.py", line 88, in wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1705, in __init__
    build_map_of_graph(x, finished_nodes, nodes_in_progress)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1695, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1665, in build_map_of_graph
    layer, node_index, tensor_index = tensor._keras_history
AttributeError: 'Tensor' object has no attribute '_keras_history'
As requested, here is the full init_export_network function:
def init_export_network(in_args):
    import_dir = os.path.join('cv_data',
                              in_args.data_name,
                              in_args.label_name,
                              in_args.this_fold)
    # set output dir as models/[model_name]/[data_name]/[label_file_name]/[this_fold]
    output_dir = os.path.join("initialized_models",
                              in_args.model_name,
                              in_args.data_name,
                              in_args.label_name,
                              in_args.this_fold)
    print("exporting to", output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    else:
        print(output_dir, "data dir identified but will be re-populated")
        shutil.rmtree(output_dir)
        os.makedirs(output_dir)
    "returns base cnn architecture and placeholder/untrained weights"
    # unpckl wv_matrix, class_names
    wv_matrix = unpckl(os.path.join(import_dir, 'wv_matrix.pickle'))
    print("valid pre-processed data found in", import_dir)
    # define network layers ----------------------------------------------------
    input_shape = (in_args.seq_len,)
    output_shape = (in_args.seq_len, len(wv_matrix[0]),)
    emb_size = len(wv_matrix[0])
    model_input = Input(shape=input_shape)
    emb_lookup = Embedding(len(wv_matrix),
                           len(wv_matrix[0]),
                           embeddings_regularizer=l2(in_args.emb_l2_rate),
                           input_length=in_args.seq_len, name="embedding")(model_input)
    #emb_lookup = Embedding(len(wv_matrix), len(wv_matrix[0]), input_length=in_args.seq_len, name="embedding", )(model_input)
    if in_args.emb_dropout:
        emb_lookup = Dropout(in_args.emb_dropout)(emb_lookup)
    conv_blocks = []
    # conv blocks --------------------------------------------------------------
    print("emb_lookup shape!!!!", emb_lookup.shape)
    for ith_conv, sz in enumerate(in_args.filter_sizes):
        if ith_conv == 0:
            conv_input = emb_lookup
        else:
            conv_input = conv
        conv = Convolution1D(filters=in_args.feat_maps[ith_conv],
                             kernel_size=sz,
                             padding="valid",
                             activation="relu",
                             kernel_initializer='lecun_uniform',
                             kernel_regularizer=l2(in_args.l2_rate),
                             strides=1,
                             name="{}_conv".format(ith_conv))(conv_input)
        print("{}_conv".format(ith_conv), conv.shape)
    # deconv blocks with dimensions reverse of multilayer_cnn ------------------
    deconv_blocks = []
    deconv_filter_sizes = in_args.filter_sizes
    deconv_filter_sizes.reverse()
    #print("conv_shape!!!", conv.shape)
    conv_input = conv
    print("conv_upsampling_shape!!!", conv_input.shape)
    #unpool_shape = ((conv[1],-1,conv[2]))
    #conv_input = Reshape((1,conv_input[1],conv_input[2]))(conv_input)
    #print("conv_input_shape!!!", conv_input.shape)
    #conv_input = Reshape(unpool_shape)(conv_input)
    deconv_input = K.expand_dims(conv_input, 2)
    print("conv_reshape_shape!!!", conv_input)
    for ith_conv, sz in enumerate(deconv_filter_sizes):
        print("{}_deconv input shape!!!".format(ith_conv), deconv_input)
        deconv = Conv2DTranspose(filters=in_args.feat_maps[ith_conv],
                                 kernel_size=(sz, 1),
                                 #kernel_size=sz,
                                 padding="valid",
                                 activation="relu",
                                 kernel_initializer='lecun_uniform',
                                 kernel_regularizer=l2(in_args.l2_rate),
                                 strides=(1, 1),
                                 name="{}_deconv".format(ith_conv))(deconv_input)
        deconv_input = deconv
        print("{}_deconv input shape!!!".format(ith_conv), deconv_input)
    print("deconv_output shape", deconv)
    #z = Flatten()(conv)
    #deconv_out = Flatten(deconv)
    #outshape = (in_args.seq_len,len(wv_matrix[0]))
    outshape = len(wv_matrix[0])
    recon_layer = Dense(outshape, activation="tanh", kernel_regularizer=l2(in_args.l2_rate))(deconv_input)
    print("recon_layer shape", recon_layer)
    #s_recon_layer = K.squeeze(recon_layer,2)
    s_recon_layer = Lambda(lambda x: K.squeeze(x, 2))(recon_layer)
    print("squeezed recon_layer shape", s_recon_layer)
    # end define network layers ------------------------------------------------
    #model_output = Dense(outshape, activation="elu",kernel_regularizer=l2(in_args.l2_rate))(z)
    y = custom_ae_layer()([model_input, emb_lookup, s_recon_layer])
    model = Model(model_input, y)
    # finished network layers definition - compile network
    opt = optimizers.Adamax()
    model.compile(loss=None, optimizer='adamax')
    # load wv_matrix into embedding layer
    embedding_layer = model.get_layer("embedding")
    embedding_layer.set_weights([wv_matrix])
    print("Initializing embedding layer with word2vec weights, shape", wv_matrix.shape)
    # save model architecture as json
    open(os.path.join(output_dir, "structure.json"), "w").write(model.to_json())
    # save initialized model weights as .hdf5
    model.save_weights(os.path.join(output_dir, "weights" + ".hdf5"))
    print("multilayer network/initial weights successfully saved in", output_dir)
    print(in_args)
    #print(model.summary())
    return model, in_args
Recommended Answer
The error message looks very similar to the one in this question: https://stackoverflow.com/a/45309816/1531463
In short, I think you need to wrap this line:
s_recon_layer = K.squeeze(recon_layer,2)
(or any other backend function calls) in a Lambda layer.
Specifically,
s_recon_layer = Lambda(lambda x: K.squeeze(x, 2))(recon_layer)
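The underlying issue is that backend ops like K.squeeze return plain TensorFlow tensors without the _keras_history metadata that Keras attaches to layer outputs, so Model(model_input, y) fails as soon as it walks the graph back through such a tensor. Note that init_export_network contains a second raw backend call, deconv_input = K.expand_dims(conv_input, 2), which would trigger the same error by this reasoning. A sketch of both fixes, reusing the tensor names from the question's code:

# Sketch: wrap every raw backend op in a Lambda layer so its output
# stays a Keras tensor with _keras_history attached
# (tensor names taken from the question's code).
from keras.layers import Lambda
from keras import backend as K

# instead of: deconv_input = K.expand_dims(conv_input, 2)
deconv_input = Lambda(lambda x: K.expand_dims(x, 2))(conv_input)

# instead of: s_recon_layer = K.squeeze(recon_layer, 2)
s_recon_layer = Lambda(lambda x: K.squeeze(x, 2))(recon_layer)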