I'm trying to train a model with TensorFlow 2.

I get the error:

ValueError: Attempt to convert a value (<tensorflow.python.keras.engine.training.Model object at 0x7f1ab822ecc0>) with an unsupported type (<class 'tensorflow.python.keras.engine.training.Model'>) to a Tensor.


when I call

return loss_object(y_true=y, y_pred=ypred)

inside the loss function.

The type of ypred is

<class 'tensorflow.python.keras.engine.training.Model'>

but it should be a tensor.

import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split


df = pd.DataFrame({'A': np.array([100, 105.4, 108.3, 111.1, 113, 114.7]),
                   'B': np.array([11, 11.8, 12.3, 12.8, 13.1,13.6]),
                   'C': np.array([55, 56.3, 57, 58, 59.5, 60.4]),
                   'Target': np.array([4000, 4200.34, 4700, 5300, 5800, 6400])})



def data():
    X_train, X_test, y_train, y_test  = train_test_split(df.iloc[:, :3].values,
                                                         df.iloc[:, 3].values,
                                                         test_size=0.2,
                                                         random_state=134)

    return X_train, X_test, y_train, y_test



X_train, X_test, y_train, y_test = data()



features = {'A': X_train[:, 0],
            'B': X_train[:, 1],
            'C': X_train[:, 2]}

labels = y_train


batch_size = 1
def train_input_fn(features, labels, batch_size):
    train_dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    train_dataset = train_dataset.shuffle(1000).repeat().batch(batch_size)
    return train_dataset


def pack_features_vector(features, labels):
    '''Pack the features into a single array'''
    features = tf.stack(list(features.values()), axis=1)
    return features, labels


train_dataset = train_input_fn(features, labels, batch_size).map(pack_features_vector)


class Model():

    def __init__(self):
        pass

    def build_model(self, features):
        inputs = tf.keras.Input(shape=(features.shape[1],))

        x = tf.keras.layers.Dense(2, activation='relu')(inputs)
        preds = tf.keras.layers.Dense(1)(x)

        model = tf.keras.Model(inputs=inputs, outputs=preds)

        return model

    def loss(self, loss_object, X, y):
        ypred = self.build_model(X)
        print(type(ypred))
        print(ypred)
        return loss_object(y_true=y, y_pred=ypred)

    def grad(self, loss_object, X, y):
        with tf.GradientTape() as tape:
            loss_value = self.loss(loss_object, X, y)
        return loss_value, tape.gradient(loss_value,  self.build_model(X).trainable_variables)

    def train(self, X, y, optimizer, loss_object):
        loss_value, grads = self.grad(loss_object,  X, y)
        optimizer.apply_gradients(zip(grads,  self.build_model(X).trainable_variables))


learning_rate = 0.001
optimizer=tf.optimizers.RMSprop(learning_rate)
loss_object=tf.keras.losses.mean_squared_error


epochs = 1

for epoch in range(epochs):
    epoch_loss_avg = tf.keras.metrics.Mean()
    epoch_acc = tf.keras.metrics.MeanSquaredError()

    for X, y in train_dataset:
        Model().train(X, y, optimizer, loss_object)


If I skip the class and run this instead:

inputs = tf.keras.Input(shape=(3,))

x = tf.keras.layers.Dense(2, activation='relu')(inputs)

preds = tf.keras.layers.Dense(1)(x)

model = tf.keras.Model(inputs=inputs, outputs=preds)

for x, y in train_dataset:
    ypred = model(x)
    print(type(ypred))
    loss_object(y, ypred)


it works fine!

The type of model(x) is <class 'tensorflow.python.framework.ops.EagerTensor'>,

but in the class code, self.build_model(X) is a Model, not a tensor.
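To illustrate the difference in isolation (the 1x3 dummy input here is only for demonstration):

import tensorflow as tf

inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(2, activation='relu')(inputs)
preds = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inputs, outputs=preds)

sample = tf.ones((1, 3))
print(type(model))          # a Model object, not a tensor
print(type(model(sample)))  # an EagerTensor -- only calling the model yields predictions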

Best answer

In the loss method, change the first line from ypred = self.build_model(X) to ypred = self.build_model(X)(X). build_model only constructs and returns a tf.keras.Model; it never runs it on the batch, so you still have to call the returned model on X to get a prediction tensor. (Building a fresh model on every call also re-initializes the weights each time, which is why the versions below create the layers once.)
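For reference, here is one way the class could be reorganized so the model is built once and reused; this is a minimal sketch using the question's names and the imports above, not the only possible layout:

class Model():

    def __init__(self, n_features):
        # build the Keras model once so its weights persist across training steps
        inputs = tf.keras.Input(shape=(n_features,))
        x = tf.keras.layers.Dense(2, activation='relu')(inputs)
        preds = tf.keras.layers.Dense(1)(x)
        self.model = tf.keras.Model(inputs=inputs, outputs=preds)

    def loss(self, loss_object, X, y):
        ypred = self.model(X)  # calling the model returns a prediction tensor
        return loss_object(y_true=y, y_pred=ypred)

    def grad(self, loss_object, X, y):
        with tf.GradientTape() as tape:
            loss_value = self.loss(loss_object, X, y)
        return loss_value, tape.gradient(loss_value, self.model.trainable_variables)

    def train(self, X, y, optimizer, loss_object):
        loss_value, grads = self.grad(loss_object, X, y)
        optimizer.apply_gradients(zip(grads, self.model.trainable_variables))

With this layout you would create one instance before the training loop (for example m = Model(3)) and call m.train(X, y, optimizer, loss_object) inside it, rather than constructing Model() on every batch.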

Another approach that works with the data setup above:

import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split


df = pd.DataFrame({'A': np.array([100, 105.4, 108.3, 111.1, 113, 114.7]),
                   'B': np.array([11, 11.8, 12.3, 12.8, 13.1,13.6]),
                   'C': np.array([55, 56.3, 57, 58, 59.5, 60.4]),
                   'Target': np.array([4000, 4200.34, 4700, 5300, 5800, 6400])})



def data():
    X_train, X_test, y_train, y_test  = train_test_split(df.iloc[:, :3].values,
                                                         df.iloc[:, 3].values,
                                                         test_size=0.2,
                                                         random_state=134)

    return X_train, X_test, y_train, y_test



X_train, X_test, y_train, y_test = data()



features = {'A': X_train[:, 0],
            'B': X_train[:, 1],
            'C': X_train[:, 2]}

labels = y_train


batch_size = 1
def train_input_fn(features, labels, batch_size):
    train_dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    train_dataset = train_dataset.shuffle(1000).repeat().batch(batch_size)
    return train_dataset


def pack_features_vector(features, labels):
    '''Pack the features into a single array'''
    features = tf.stack(list(features.values()), axis=1)
    return features, labels


train_dataset = train_input_fn(features, labels, batch_size).map(pack_features_vector)


class Model(tf.keras.Model):

    def __init__(self):
        super(Model, self).__init__()
        # create the layers once; their weights persist across training steps
        self.l1 = tf.keras.layers.Dense(2, activation='relu')
        self.out = tf.keras.layers.Dense(1)

    def call(self, x):
        # forward pass, invoked as model(x); returns a prediction tensor
        x = self.l1(x)
        return self.out(x)



learning_rate = 1
optimizer=tf.optimizers.RMSprop(learning_rate)
loss_object=tf.keras.losses.mean_squared_error
model = Model()
train_loss = tf.keras.metrics.Mean(name='train_loss')
# note: categorical accuracy is not a meaningful metric for this regression target
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')


# train_dataset repeats indefinitely (because of .repeat()), so cap the number of steps
for x, y in train_dataset.take(100):
    with tf.GradientTape() as tape:
        y_ = model(x)
        loss = loss_object(y, y_)
    # differentiate the loss w.r.t. the model weights and apply one optimizer step
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    print("loss", train_loss(loss), "accuracy", train_accuracy(y, y_))
