# import the necessary packages
from keras.initializers import glorot_uniform
from keras.layers import AveragePooling2D, Input, Add
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense


class SmallerVGGNet:
    @staticmethod
    def build(width, height, depth, classes, finalact):

        X1 = Input(shape=(height, width, depth))
        output = -1

        # CONV => RELU => POOL
        X = Conv2D(16, (3, 3), padding="same", strides=(1, 1), name="con_layer1")(X1)
        X = BatchNormalization(axis=output)(X)
        X = Activation("relu")(X)
        X = MaxPooling2D(pool_size=(3, 3))(X)

        X = Conv2D(32, (3, 3), padding="same", strides=(2, 2), name="con_layer2")(X)
        X = BatchNormalization(axis=output)(X)
        X = Activation("relu")(X)
        X = Conv2D(32, (3, 3), padding="same", strides=(1, 1), name="con_layer3")(X)
        X = Activation("relu")(X)
        X = BatchNormalization(axis=output)(X)
        X = MaxPooling2D(pool_size=(3, 3))(X)

        # First component
        X0 = Conv2D(256, (3, 3), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
        X0 = BatchNormalization(axis=3)(X0)
        X0 = Activation("relu")(X0)

        # (CONV => RELU) * 2 => POOL
        X = Conv2D(64, (3, 3), padding="same", strides=(2, 2), name="con_layer4")(X0)
        X = BatchNormalization(axis=output)(X)
        X = Activation("relu")(X)
        X = Conv2D(64, (3, 3), padding="same", strides=(1, 1), name="con_layer5")(X)
        X = BatchNormalization(axis=output)(X)
        X = Activation("relu")(X)
        X = AveragePooling2D(pool_size=(2, 2))(X)

        # Second Component
        X0 = Conv2D(512, (3, 3), strides=(2, 2), padding='valid', kernel_initializer=glorot_uniform(seed=0))(X)
        X0 = BatchNormalization(axis=3)(X0)
        X0 = Activation("relu")(X0)

        # (CONV => RELU) * 2 => POOL
        X = Conv2D(128, (3, 3), padding="same", strides=(1, 1), name="con_layer6")(X1)
        X = BatchNormalization(axis=3)(X)
        X = Activation("relu")(X)
        X = Conv2D(128, (3, 3), padding="same", strides=(2, 2), name="con_layer7")(X)
        X = BatchNormalization(axis=output)(X)
        X = Activation("relu")(X)
        X = MaxPooling2D(pool_size=(2, 2))(X)

        # Third Component
        X0 = Conv2D(1024, (3, 3), strides=(2, 2), padding='valid', kernel_initializer=glorot_uniform(seed=0))(X1)
        X0 = BatchNormalization(axis=3)(X0)
        X0 = Dense(128, activation="relu")(X0)
        X0 = Activation("relu")(X0)

        X = Flatten()(X1)
        X = Dense(128)(X)
        X = BatchNormalization()(X)
        X = Dropout(0.5)(X)
        output = Dense(classes, activation=finalact)(X)

        model = Model(inputs=[X1], outputs=output)

        print(model.summary())
        return model

ValueError: Negative dimension size caused by subtracting 3 from 2 for 'conv2d_2/convolution' (op: 'Conv2D') with input shapes: [?,2,2,64], [3,3,64,512].

The error comes from the second-component layer, X0 = Conv2D(512, (3, 3), strides=(2, 2), padding='valid', ...)(X). It occurs when I pass (X) to that layer; if I change the input to (X1) it runs, but I need to pass (X).
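
To see where the [?,2,2,64] shape comes from, trace the spatial size through the layers using Keras's rules: 'valid' gives floor((n - k) / s) + 1 and 'same' gives ceil(n / s), with pooling strides defaulting to the pool size. A minimal sketch, assuming a 160x160 input (the question does not state the input size; 160 is one value that reproduces the reported shape):

import math

def out_dim(n, k, s, padding):
    # Keras spatial output size for a conv/pool layer
    if padding == 'valid':
        return (n - k) // s + 1
    return math.ceil(n / s)  # 'same'

n = 160                        # assumed input size
n = out_dim(n, 3, 1, 'same')   # con_layer1         -> 160
n = out_dim(n, 3, 3, 'valid')  # MaxPooling2D(3, 3) -> 53
n = out_dim(n, 3, 2, 'same')   # con_layer2         -> 27
n = out_dim(n, 3, 1, 'same')   # con_layer3         -> 27
n = out_dim(n, 3, 3, 'valid')  # MaxPooling2D(3, 3) -> 9
n = out_dim(n, 3, 1, 'same')   # first component    -> 9
n = out_dim(n, 3, 2, 'same')   # con_layer4         -> 5
n = out_dim(n, 3, 1, 'same')   # con_layer5         -> 5
n = out_dim(n, 2, 2, 'valid')  # AveragePooling2D   -> 2
print(n)  # 2: too small for the 3x3 'valid' kernel of the 512-filter Conv2D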

Best answer

The simplest fix is to add padding='same' to all of the pooling layers: with the default 'valid' padding, each pooling step shrinks the feature map until it is smaller than the next 3x3 'valid' kernel, whereas 'same' padding keeps the spatial size from collapsing that fast. Here is the updated code, which resolves the negative-dimension problem.

# import the necessary packages
from keras.initializers import glorot_uniform
from keras.layers import AveragePooling2D, Input, Add
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense


class SmallerVGGNet:
    @staticmethod
    def build(width, height, depth, classes, finalact):

        X1 = Input(shape=(height, width, depth))
        output = -1

        # CONV => RELU => POOL
        X = Conv2D(16, (3, 3), padding='same', strides=(1, 1), name="con_layer1")(X1)
        X = BatchNormalization(axis=output)(X)
        X = Activation("relu")(X)
        X = MaxPooling2D(pool_size=(3, 3), padding='same')(X)

        X = Conv2D(32, (3, 3), padding='same', strides=(2, 2), name="con_layer2")(X)
        X = BatchNormalization(axis=output)(X)
        X = Activation("relu")(X)
        X = Conv2D(32, (3, 3), padding='same', strides=(1, 1), name="con_layer3")(X)
        X = Activation("relu")(X)
        X = BatchNormalization(axis=output)(X)
        X = MaxPooling2D(pool_size=(3, 3), padding='same')(X)

        # First component
        X0 = Conv2D(256, (3, 3), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
        X0 = BatchNormalization(axis=3)(X0)
        X0 = Activation("relu")(X0)

        # (CONV => RELU) * 2 => POOL
        X = Conv2D(64, (3, 3), padding='same', strides=(2, 2), name="con_layer4")(X0)
        X = BatchNormalization(axis=output)(X)
        X = Activation("relu")(X)
        X = Conv2D(64, (3, 3), padding='same', strides=(1, 1), name="con_layer5")(X)
        X = BatchNormalization(axis=output)(X)
        X = Activation("relu")(X)
        X = AveragePooling2D(pool_size=(2, 2), padding='same')(X)

        # Second Component
        X0 = Conv2D(512, (3, 3), strides=(2, 2), padding='valid', kernel_initializer=glorot_uniform(seed=0))(X)
        X0 = BatchNormalization(axis=3)(X0)
        X0 = Activation("relu")(X0)

        # (CONV => RELU) * 2 => POOL
        X = Conv2D(128, (3, 3), padding='same', strides=(1, 1), name="con_layer6")(X1)
        X = BatchNormalization(axis=3)(X)
        X = Activation("relu")(X)
        X = Conv2D(128, (3, 3), padding='same', strides=(2, 2), name="con_layer7")(X)
        X = BatchNormalization(axis=output)(X)
        X = Activation("relu")(X)
        X = MaxPooling2D(pool_size=(2, 2), padding='same')(X)

        # Third Component
        X0 = Conv2D(1024, (3, 3), strides=(2, 2), padding='valid', kernel_initializer=glorot_uniform(seed=0))(X1)
        X0 = BatchNormalization(axis=3)(X0)
        X0 = Dense(128, activation="relu")(X0)
        X0 = Activation("relu")(X0)

        X = Flatten()(X1)
        X = Dense(128)(X)
        X = BatchNormalization()(X)
        X = Dropout(0.5)(X)
        output = Dense(classes, activation=finalact)(X)

        model = Model(inputs=[X1], outputs=output)

        print(model.summary())
        return model
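
As a quick sanity check, the fixed model can be built and summarized. The input size and class count below are assumptions for illustration (the question does not state them); with a 160x160 input, the original code reproduces the [?,2,2,64] error while the updated code builds cleanly:

# Hypothetical sizes, chosen only for illustration
model = SmallerVGGNet.build(width=160, height=160, depth=3,
                            classes=10, finalact="softmax")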

On python - ValueError: Negative dimension size caused by subtracting 3 from 2 for 'conv2d_2/convolution', we found a similar question on Stack Overflow: https://stackoverflow.com/questions/59960818/
