# coding: utf-8

# In[1]:
import urllib.request
import os
import tarfile

# In[2]:
# Download the IMDb movie-review dataset archive if it is not already on disk.
url = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
filepath = "example/data/aclImdb_v1.tar.gz"
if not os.path.isfile(filepath):
    result = urllib.request.urlretrieve(url, filepath)
    print('downloaded:', result)
# Extract the archive so that the guard path below exists afterwards.
# The original mixed "data/" and "example/data/" prefixes: it opened
# "data/aclImdb_v1.tar.gz" (not where it downloaded to) and extracted
# into "data/", so the "example/data/aclImdb_v1/aclImdb" check never
# became true. Use `filepath` and extract under example/data/aclImdb_v1/.
if not os.path.exists("example/data/aclImdb_v1/aclImdb"):
    tfile = tarfile.open(filepath, 'r:gz')
    result = tfile.extractall('example/data/aclImdb_v1/')

# In[3]:
# from keras.datasets import imdb  # notebook cell import; unused in this script
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer

# In[4]:
import re
import re  # restored: the notebook-export left `import re` inside a comment


def rm_tags(text):
    """Return *text* with every HTML tag (e.g. ``<br />``) removed.

    The raw IMDb review files contain HTML markup; this strips anything
    of the form ``<...>`` before tokenization.
    """
    re_tag = re.compile(r'<[^>]+>')
    return re_tag.sub('', text)

# In[5]:
# (the original cell re-imported os here; it is already imported above)
def read_files(filetype):
    """Read the IMDb review files for *filetype* ('train' or 'test').

    Returns ``(all_labels, all_texts)`` where label 1 marks a positive
    review and 0 a negative one; positives come first, matching the
    order in which the file paths are collected.
    """
    path = "example/data/aclImdb_v1/aclImdb/"
    file_list = []
    positive_path = path + filetype + "/pos/"
    for f in os.listdir(positive_path):
        file_list += [positive_path + f]
    # Remember how many positive files there are so the label list can be
    # derived from the real counts instead of the hard-coded 12500+12500
    # of the original (which silently mislabelled partial datasets).
    pos_count = len(file_list)
    negative_path = path + filetype + "/neg/"
    for f in os.listdir(negative_path):
        file_list += [negative_path + f]
    print('read', filetype, 'files:', len(file_list))
    all_labels = [1] * pos_count + [0] * (len(file_list) - pos_count)
    all_texts = []
    for fi in file_list:
        with open(fi, encoding='utf8') as file_input:
            all_texts += [rm_tags(" ".join(file_input.readlines()))]
    return all_labels, all_texts

# In[6]:
y_train, train_text = read_files("train")
# In[7]:
y_test, test_text = read_files("test")
# In[8]:
train_text[0]
# In[9]:
y_train[0]
# In[10]:
train_text[12500]
# In[11]:
y_train[12500]
# In[12]:
# Keep only the 2000 most frequent words in the vocabulary.
token = Tokenizer(num_words=2000)
# In[13]:
# Build the word index from the training texts only.
token.fit_on_texts(train_text)
print(token.document_count)
print(token.word_index)

# In[14]:
# Convert each review into a sequence of word indices.
x_train_seq = token.texts_to_sequences(train_text)
x_test_seq = token.texts_to_sequences(test_text)

# In[15]:
print(x_train_seq[0])

# In[16]:
# Pad/truncate every review to exactly 100 tokens so the model sees
# fixed-length input (matches Embedding(input_length=100) below).
x_train = sequence.pad_sequences(x_train_seq, maxlen=100)
x_test = sequence.pad_sequences(x_test_seq, maxlen=100)

# In[17]:
x_train[0]
# In[18]: (model imports follow on the next lines)
from keras.layers.core import Dense, Dropout, Activation,Flatten
from keras.layers.embeddings import Embedding
# MLP baseline: Embedding -> Flatten -> Dense(256, relu) -> Dense(1, sigmoid).
model = Sequential()
model.add(Embedding(output_dim=32,    # each word id -> 32-dim vector
                    input_dim=2000,   # vocabulary size (num_words above)
                    input_length=100))  # padded review length
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(units=256,
                activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=1,
                activation='sigmoid'))  # binary positive/negative output
model.summary()

# In[19]:
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
train_history = model.fit(x_train, y_train,
                          batch_size=100,
                          epochs=10,
                          verbose=2,
                          validation_split=0.2)

# In[20]:
# get_ipython().magic('pylab inline')  # notebook magic; not valid in a plain script
import matplotlib.pyplot as plt


def show_train_history(train_history, train, validation):
    """Plot one training metric and its validation counterpart per epoch.

    *train* / *validation* are keys into ``train_history.history``
    (e.g. 'acc' / 'val_acc').
    """
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()

# In[21]:
show_train_history(train_history, 'acc', 'val_acc')
show_train_history(train_history, 'loss', 'val_loss')

# In[22]:
scores = model.evaluate(x_test, y_test, verbose=1)
scores[1]

# In[23]:
probility = model.predict(x_test)
probility[:10]
probility[12500:12510]

# In[26]:
predict = model.predict_classes(x_test)
predict_classes = predict.reshape(-1)

# In[28]:
# 1 -> "positive", 0 -> "negative" (display strings kept in Chinese).
SentimentDict = {1: '正面的', 0: '负面的'}


def display_test_Sentiment(i):
    """Print test review *i*, its true label and the model's prediction."""
    print(test_text[i])
    print('标签label:', SentimentDict[y_test[i]],
          '预测结果:', SentimentDict[predict_classes[i]])

# In[29]:
display_test_Sentiment(2)
# In[30]:
display_test_Sentiment(12505)
# In[31]: (RNN model imports follow on the next lines)
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import SimpleRNN

# RNN model: Embedding -> SimpleRNN(16) -> Dense(256, relu) -> Dense(1, sigmoid).
model = Sequential()
model.add(Embedding(output_dim=32,
                    input_dim=2000,
                    input_length=100))
model.add(Dropout(0.35))
model.add(SimpleRNN(units=16))
model.add(Dense(units=256,
                activation='relu'))
model.add(Dropout(0.35))
model.add(Dense(units=1,
                activation='sigmoid'))
model.summary()

# In[32]:
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
train_history = model.fit(x_train, y_train,
                          batch_size=100,
                          epochs=10,
                          verbose=2,
                          validation_split=0.2)

# In[33]:
scores = model.evaluate(x_test, y_test, verbose=1)
scores[1]

# In[34]:
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM

# LSTM model: Embedding -> LSTM(32) -> Dense(256, relu) -> Dense(1, sigmoid).
model = Sequential()
model.add(Embedding(output_dim=32,
                    input_dim=2000,
                    input_length=100))
model.add(Dropout(0.2))
model.add(LSTM(32))
model.add(Dense(units=256,
                activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=1,
                activation='sigmoid'))
model.summary()

# In[35]:
model.compile(loss='binary_crossentropy',
              # optimizer='rmsprop',
              optimizer='adam',
              metrics=['accuracy'])
train_history = model.fit(x_train, y_train,
                          batch_size=100,
                          epochs=10,
                          verbose=2,
                          validation_split=0.2)

# In[36]:
show_train_history(train_history, 'acc', 'val_acc')
show_train_history(train_history, 'loss', 'val_loss')
scores = model.evaluate(x_test, y_test, verbose=1)
scores[1]

# In[ ]:
文本来源于IMDb网络电影数据集。下载后放到合适的路径下,然后开始。
过滤掉HTML标签。因为数据集中有相关标签。:
之后读取所有数据和目标标签,然后建立字典:
将文本转化为数字串:
格式化数字串长度为100
建立MLP模型,其中嵌入层将每个长度为100的数字串转为100个32维的向量,将文字映射成多维的几何空间向量,让每一个文字有上下文的关联性。
编译,训练,绘图,评估后的准确率:
建立RNN模型,有关RNN模型的介绍:https://www.cnblogs.com/bai2018/p/10466418.html
测试评估:
建立LSTM模型,相关介绍:https://www.cnblogs.com/bai2018/p/10466497.html
准确率: