I am using Keras and have a custom layer, but when I use it the following error occurs and I cannot figure out what the problem is. Can you help me solve it? The strange part is that when I run the same code on another system, this error does not appear!

import os
import time
from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import scipy.io as sio
from scipy.misc import imread
import cv2
import skimage.transform as imgTrans
from skimage.measure import compare_ssim, compare_psnr
import PIL
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from tqdm import tqdm
import tensorflow as tf
keras = tf.keras
layers = keras.layers
from my_circular_layer_new import Conv2D_circular
import Various_Functions as vf
from scipy.ndimage.filters import convolve, median_filter
from scipy.ndimage.filters import gaussian_filter
def multiply_scalar(x, scalar):
    # Helper referenced by the 'strenght_factor' Lambda below; it is not shown in
    # the original post, so this is a plausible reconstruction: broadcast the
    # (batch, 1) strength factor over the feature map.
    return x * tf.reshape(scalar, [-1, 1, 1, 1])

def buildModel(model_path, patch_rows=32, patch_cols=32, channels=1, block_size=8, num_bitplane=1, use_circular=True):

    conv2d_layer = Conv2D_circular if use_circular else layers.Conv2D

    w_rows = int((patch_rows) / block_size)
    w_cols = int((patch_cols) / block_size)

    input_img = layers.Input(shape=(patch_rows, patch_cols, 1), name='input_img')
    input_strenght_alpha = layers.Input(shape=(1,), name='strenght_factor_alpha')
    input_watermark = layers.Input(shape=(w_rows, w_cols, num_bitplane), name='input_watermark')

    # Rearrange input
    rearranged_img = l1 = layers.Lambda(tf.space_to_depth, arguments={'block_size':block_size}, name='rearrange_img')(input_img)


    dct_layer = layers.Conv2D(64, (1, 1), activation='linear', padding='same', use_bias=False, trainable=False, name='dct1')
    dct_layer2 = layers.Conv2D(64, (1, 1), activation='linear', padding='same', use_bias=False, trainable=False, name='dct2')
    idct_layer = layers.Conv2D(64, (1, 1), activation='linear', padding='same', use_bias=False, trainable=False, name='idct')
    dct_layer_img = dct_layer(rearranged_img)

    # Concatenating The Image's dct coefs and watermark
    encoder_input = layers.Concatenate(axis=-1, name='encoder_input')([dct_layer_img, input_watermark])

    # Encoder
    encoder_model = layers.Conv2D(64, (1, 1), dilation_rate=1, activation='elu', padding='same', name='enc_conv1')(encoder_input)
    encoder_model = conv2d_layer(64, (2, 2), dilation_rate=1, activation='elu', padding='same', name='enc_conv2')(encoder_model)
    encoder_model = conv2d_layer(64, (2, 2), dilation_rate=1, activation='elu', padding='same', name='enc_conv3')(encoder_model)
    encoder_model = conv2d_layer(64, (2, 2), dilation_rate=1, activation='elu', padding='same', name='enc_conv4')(encoder_model)
    encoder_model = conv2d_layer(64, (2, 2), dilation_rate=1, activation='elu', padding='same', name='enc_conv5')(encoder_model)
    encoder_model = idct_layer(encoder_model)

    # Strength
    encoder_model = layers.Lambda(multiply_scalar, arguments={'scalar':input_strenght_alpha}, name='strenght_factor')(encoder_model)

    encoder_model = layers.Add(name='residual_add')([encoder_model, l1])
    encoder_model = x = layers.Lambda(tf.depth_to_space, arguments={'block_size':block_size}, name='enc_output_depth2space')(encoder_model)

    # Attack (the attacks occur in the test phase)

    # Watermark decoder
    input_attacked_img = layers.Input(shape=(patch_rows, patch_cols, 1), name='input_attacked_img')
    decoder_model = layers.Lambda(tf.space_to_depth, arguments={'block_size':block_size}, name='dec_input_space2depth')(input_attacked_img)
    decoder_model = dct_layer2(decoder_model)
    decoder_model = layers.Conv2D(64, (1, 1), dilation_rate=1, activation='elu', padding='same', name='dec_conv1')(decoder_model)
    decoder_model = conv2d_layer(64, (2, 2), dilation_rate=1, activation='elu', padding='same', name='dec_conv2')(decoder_model)
    decoder_model = conv2d_layer(64, (2, 2), dilation_rate=1, activation='elu', padding='same', name='dec_conv3')(decoder_model)
    decoder_model = conv2d_layer(64, (2, 2), dilation_rate=1, activation='elu', padding='same', name='dec_conv4')(decoder_model)
    decoder_model = layers.Conv2D(num_bitplane, (1, 1), dilation_rate=1, activation='sigmoid', padding='same', name='dec_output_depth2space')(decoder_model)

    # Whole model
    embedding_net = tf.keras.models.Model(inputs=[input_img, input_watermark, input_strenght_alpha], outputs=[x])
    extractor_net = tf.keras.models.Model(inputs=[input_attacked_img], outputs=[decoder_model])

    # Set weights
    DCT_MTX = sio.loadmat('./Weights/Transforms/DCT_coef.mat')['DCT_coef']
    dct_mtx = np.reshape(DCT_MTX, [1,1,64,64])
    embedding_net.get_layer('dct1').set_weights([dct_mtx])
    extractor_net.get_layer('dct2').set_weights([dct_mtx])

    IDCT_MTX = sio.loadmat('./Weights/Transforms/IDCT_coef.mat')['IDCT_coef']
    idct_mtx = np.reshape(IDCT_MTX, [1,1,64,64])
    embedding_net.get_layer('idct').set_weights([idct_mtx])

    embedding_net.load_weights(model_path, by_name=True)
    extractor_net.load_weights(model_path, by_name=True)
    return embedding_net, extractor_net


> TypeError: The following are legacy tf.layers.Layers:
> <my_circular_layer_new.Conv2D_circular object at 0x000001F773AC2160>
> <my_circular_layer_new.Conv2D_circular object at 0x000001F773B1F978>
> <my_circular_layer_new.Conv2D_circular object at 0x000001F773B1F9E8>
> <my_circular_layer_new.Conv2D_circular object at 0x000001F773B1F630>
> To use keras as a framework (for instance using the Network, Model, or
> Sequential classes), please use the tf.keras.layers implementation
> instead. (Or, if writing custom layers, subclass from tf.keras.layers
> rather than tf.layers)
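
Since the same code works on another machine, a quick check is to compare the TensorFlow version and the custom layer's actual base classes on both systems. A minimal diagnostic sketch (my_circular_layer_new is the module from the question):

import tensorflow as tf
from my_circular_layer_new import Conv2D_circular

print(tf.__version__)           # compare this across the two systems
print(Conv2D_circular.__mro__)  # shows whether the layer descends from
                                # tf.keras.layers.Layer or the legacy tf.layers base class

If the method resolution order shows the legacy tf.layers base class rather than tf.keras.layers.Layer, that machine is hitting exactly the case the error message describes.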



  

Best answer

I think you may have two different versions of the same library, and the system that gives you the error has a newer version than the other one. Newer TensorFlow releases refuse to build tf.keras models out of layers that subclass the legacy tf.layers API, which is exactly what the error message says. Either downgrade one system or upgrade the other so both run the same version.
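
Alternatively, the error message itself names a code-side fix: make Conv2D_circular subclass tf.keras.layers instead of tf.layers. Below is a minimal sketch of what a tf.keras-based circular-padding convolution could look like; it is an assumption about what Conv2D_circular does (stride 1, dilation rate 1), not the author's actual implementation:

import tensorflow as tf

class Conv2D_circular(tf.keras.layers.Conv2D):
    # Hypothetical re-implementation: subclass tf.keras.layers.Conv2D (not
    # tf.layers), wrap-pad the input manually, then convolve with VALID padding.
    def __init__(self, filters, kernel_size, **kwargs):
        kwargs['padding'] = 'valid'  # the circular padding is applied in call()
        super(Conv2D_circular, self).__init__(filters, kernel_size, **kwargs)

    def call(self, inputs):
        kh, kw = self.kernel_size
        x = inputs
        if kh > 1:  # wrap the first kh-1 rows around to the bottom edge
            x = tf.concat([x, x[:, :kh - 1, :, :]], axis=1)
        if kw > 1:  # wrap the first kw-1 columns around to the right edge
            x = tf.concat([x, x[:, :, :kw - 1, :]], axis=2)
        return super(Conv2D_circular, self).call(x)

With a subclass like this, buildModel above needs no changes: the layer is a genuine tf.keras layer, so it can be mixed freely with the built-in tf.keras.layers.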

On python - Why do I get the error "The following are legacy tf.layers.Layers" when I have a custom layer?, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/57946626/
