I want to predict the next category in a sequence from a set of categories labelled with numbers. I have modelled this on a text generator (hence the random title!).

I assigned a number to each category via the enumerate function, so that keras and tensorflow can interpret the data as numeric information. Keras throws an error telling me I should use OneHotEncoding on the output, and I don't know how to proceed.

I have experimented with OneHotEncoding the data, but I don't know how to apply it within the main body of the code, or conversely how to change my code so that it works without OneHotEncoded input.
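For reference, a minimal sketch of the kind of encoding I mean (it uses the same category-to-index mapping that appears in the code below; note that to_categorical expects integer class indices rather than the raw category strings):

from keras.utils import to_categorical

chars = ['10001426', '5121550', '5431000', '10001466']
char_to_ix = {char: ix for ix, char in enumerate(chars)}    # category string -> integer index

sample = ['10001426', '5121550', '5431000']
indices = [char_to_ix[c] for c in sample]                   # e.g. [0, 1, 2]
encoded = to_categorical(indices, num_classes=len(chars))   # one-hot rows, shape (3, 4)
print(encoded)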

I don't think I know enough about M/c learning yet; I am teaching myself.

import numpy as np
from numpy import array
from numpy import argmax

import tensorflow as tf

import keras
from keras.utils import to_categorical
from keras.utils import np_utils
from keras.layers import LSTM
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import Input, Dense
from keras.layers import TimeDistributed
from keras.models import Model

data= ['10001426', '10001426','10001426','5121550', '5431000', '10001426', '10001426', '10001466','10001426','5121550', '10001426', '10001426', '10001426','10001426','5431000', '10001426', '10001426', '10001466','10001426','5121550', '5431000', '10001426', '10001426', '10001466','10001426','5121550', '5431000', '10001426', '10001426', '10001466','10001426','5121550', '5431000', '10001426', '10001426', '10001466','10001426','5121550']
data= array(data)

chars=['10001426','5121550','5431000','10001466']
chars= array(chars)
"""
#OneHotEncode - turns the category into an encoded array
encoded = to_categorical(data)
print(encoded)
encoded2 = to_categorical(chars)
print(encoded2)

#Invert OneHotEncode

inverted = argmax(encoded[0])
print(inverted)
inverted2 = argmax(encoded2[0])
print(inverted2)
"""
#Parameters
SEQ_LENGTH = 2 # Learn in steps of 2
VOCAB_SIZE = len(chars) # number of features - how many categories of fault

#Prepare training data

ix_to_char={ix:char for ix, char in enumerate(chars)}
char_to_ix={char:ix for ix, char in enumerate(chars)}

X = np.zeros((len(data)//SEQ_LENGTH, SEQ_LENGTH, VOCAB_SIZE))
y = np.zeros((len(data)//SEQ_LENGTH, SEQ_LENGTH, VOCAB_SIZE))


for i in range(len(data)//SEQ_LENGTH):
    if (i+1)*SEQ_LENGTH < len(data):
        X_sequence = data[i*SEQ_LENGTH:(i+1)*SEQ_LENGTH]

        X_sequence_ix = [char_to_ix[value] for value in X_sequence]
        input_sequence = np.zeros((SEQ_LENGTH, VOCAB_SIZE))

        print((i+1)*SEQ_LENGTH, len(data))
        print(input_sequence)

    for j in range(SEQ_LENGTH):
        input_sequence[j][X_sequence_ix[j]] = 1.
    X[i] = input_sequence

    y_sequence = data[i*SEQ_LENGTH+1:(i+1)*(SEQ_LENGTH+1)]
    y_sequence_ix = [char_to_ix[value] for value in y_sequence]
    target_sequence= np.zeros((SEQ_LENGTH, VOCAB_SIZE))

    for j in range(SEQ_LENGTH):
        if (i+1)*(SEQ_LENGTH+1)<(SEQ_LENGTH):
           target_sequence[j][y_sequence_ix[j]]=1
    y[i]=target_sequence
    print(y[i])


#Create the network
HIDDEN_DIM=1
LAYER_NUM= 1

model = Sequential()
model.add(LSTM(HIDDEN_DIM, input_shape=(None, VOCAB_SIZE), return_sequences=True))

for i in range(LAYER_NUM-1):
    model.add(LSTM(HIDDEN_DIM, return_sequences=True))
model.add(Activation('softmax'))
model.compile(loss="categorical_crossentropy",optimizer="rmsprop")


#Train the network

nb_epoch = 0
BATCH_SIZE = 5
GENERATE_LENGTH = 7

while True:
    print ('\n\n')
    model.fit(X,y,batch_size=BATCH_SIZE,verbose=1, epochs=1)
    nb_epoch +=1
    generate_text(model, GENERATE_LENGTH)

    if nb_epoch %5==0:
        model.save_weights('checkpoint_{}_epoch_{}.hdf5'.format(HIDDEN_DIM, nb_epoch))

model.summary()

Best Answer

You forgot that the output of the last layer should have size VOCAB_SIZE, so that each timestep produces one value per category to match the one-hot targets in y. You can do this by adding an extra Dense layer:

for i in range(LAYER_NUM-1):
    model.add(LSTM(HIDDEN_DIM, return_sequences=True))
model.add(Dense(VOCAB_SIZE))
model.add(Activation('softmax'))
model.compile(loss="categorical_crossentropy",optimizer="rmsprop")


Or by setting an appropriate output size on the last LSTM layer itself (I'll skip the code for this part, since it's a bit more tedious).
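For completeness, a rough sketch of what that alternative might look like (not part of the original answer; it reuses the imports and variables defined above, and simply gives the final LSTM VOCAB_SIZE units so each timestep already emits one value per category):

model = Sequential()
# Earlier layers keep HIDDEN_DIM units and return the full sequence
model.add(LSTM(HIDDEN_DIM, input_shape=(None, VOCAB_SIZE), return_sequences=True))
# The last LSTM outputs VOCAB_SIZE values per timestep, so no extra Dense layer is needed
model.add(LSTM(VOCAB_SIZE, return_sequences=True))
model.add(Activation('softmax'))
model.compile(loss="categorical_crossentropy", optimizer="rmsprop")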
