Importing the required sklearn modules

from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import MinMaxScaler, StandardScaler, Imputer
from sklearn.feature_selection import VarianceThreshold
from sklearn.decomposition import PCA
import jieba
import numpy as np
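
Note: these snippets target an older scikit-learn release. In scikit-learn 0.22+ the Imputer class was removed, and in recent versions get_feature_names() has been replaced by get_feature_names_out(). On a newer version the imputer import would roughly be:

# Assumption: scikit-learn >= 0.22, where sklearn.preprocessing.Imputer no longer exists
from sklearn.impute import SimpleImputer  # close replacement; no axis parameter, expects missing_values=np.nan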

1. Dictionary feature extraction

def dictvec():
    """
    Dictionary feature extraction
    :return: None
    """
    # sparse=False returns a dense ndarray instead of a sparse matrix
    dict = DictVectorizer(sparse=False)
    # call fit_transform
    data = dict.fit_transform([{"city": "北京", "temperature": 100}])
    print(dict.get_feature_names())
    print(dict.inverse_transform(data))
    print(data)
    return None
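
DictVectorizer one-hot encodes string-valued features (city becomes a city=北京 column) and passes numeric features through unchanged, so the three prints should produce roughly:

['city=北京', 'temperature']
[{'city=北京': 1.0, 'temperature': 100.0}]
[[  1. 100.]]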

2. Text feature extraction

(1) English

def countvec():
    """
    Vectorize English text
    :return: None
    """
    cv = CountVectorizer()
    data = cv.fit_transform(["life is short i like python", "life is too long, i dislike python"])
    print(cv.get_feature_names())
    print(data.toarray())
    return None
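
The default tokenizer only keeps tokens of two or more characters, so the single-letter "i" is dropped; the vocabulary is sorted alphabetically and each row counts the tokens of one sentence. The output should look roughly like:

['dislike', 'is', 'life', 'like', 'long', 'python', 'short', 'too']
[[0 1 1 1 0 1 1 0]
 [1 1 1 0 1 1 0 1]]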

(2) Chinese

def cutword():
    """
    Word segmentation for Chinese text
    :return: two space-separated strings of tokens
    """
    con1 = jieba.cut("这是一个什么样的时代,这是一个以互联网时代为代表的时代\n")
    con2 = jieba.cut("看到这些我们都想到了什么,什么才能让我们想起不该想起的东西")
    # convert the generators to lists if needed
    # content1 = list(con1)
    # content2 = list(con2)
    # join the tokens into space-separated strings
    c1 = " ".join(con1)
    c2 = " ".join(con2)
    print(c1, c2)
    return c1, c2


def hanzivec():
    """
    Vectorize Chinese text with word counts
    :return: None
    """
    c1, c2 = cutword()
    # print(c1, c2)
    cv = CountVectorizer()
    data = cv.fit_transform([c1, c2])
    print(cv.get_feature_names())
    print(data.toarray())
    return None


def tfidfvec():
    """
    Vectorize Chinese text with TF-IDF weights
    :return: None
    """
    c1, c2 = cutword()
    # print(c1, c2)
    tf = TfidfVectorizer()
    data = tf.fit_transform([c1, c2])
    # print(data)
    print(tf.get_feature_names())
    print(data.toarray())
    return None
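
CountVectorizer only counts occurrences, while TfidfVectorizer reweights each count by how rare the word is across the corpus. With scikit-learn's defaults (smooth_idf=True, norm='l2') the weight is roughly tf * (ln((1 + n) / (1 + df)) + 1), and each row is then L2-normalized. A minimal sketch of that computation (my own illustration, not from the original post):

import numpy as np

# assumed toy values: term frequency tf, document frequency df, corpus size n
n = 2    # number of documents in the corpus
df = 1   # the word appears in 1 of the 2 documents
tf = 3   # the word appears 3 times in the current document

idf = np.log((1 + n) / (1 + df)) + 1   # smoothed idf, as in TfidfVectorizer's defaults
print(tf * idf)                        # unnormalized tf-idf weight for this word
# the full row is then divided by its Euclidean (L2) norm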

3. Normalization (min-max scaling)

def mm():
    """
    Min-max normalization
    :return: None
    """
    mm = MinMaxScaler(feature_range=(4, 5))
    data = mm.fit_transform([[60, 2, 40], [90, 4, 30], [75, 6, 50]])
    print(data)
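
MinMaxScaler works per column: X' = (x - min) / (max - min) * (upper - lower) + lower, with (lower, upper) given by feature_range. For the first column (60, 90, 75) that gives 4.0, 5.0 and 4.5. A quick check with NumPy (my own sketch, not from the original):

import numpy as np

col = np.array([60, 90, 75], dtype=float)
lower, upper = 4, 5                    # the feature_range used above
scaled = (col - col.min()) / (col.max() - col.min()) * (upper - lower) + lower
print(scaled)                          # expected: [4.   5.   4.5]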

4. Standardization

def ss():
    """
    Standardization
    :return: None
    """
    ss = StandardScaler()
    data = ss.fit_transform([[1, -1, 4], [2, 1, 0], [9, 2, 3]])
    print(data)
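
StandardScaler transforms each column to (x - mean) / std, using the population standard deviation (ddof=0). For the first column (1, 2, 9) the mean is 4 and the std is about 3.56, so the scaled values are roughly -0.84, -0.56 and 1.40. A NumPy check (my own sketch):

import numpy as np

col = np.array([1, 2, 9], dtype=float)
standardized = (col - col.mean()) / col.std()   # np.std defaults to ddof=0, like StandardScaler
print(standardized)                             # roughly [-0.84 -0.56  1.40]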

5. Missing-value imputation

def im():
    """
    Missing-value imputation
    :return: None
    """
    # Imputer was removed in scikit-learn 0.22; see the SimpleImputer note above
    im = Imputer(missing_values="NaN", strategy="mean", axis=0)
    data = im.fit_transform([[1, 2], [np.nan, 3], [7, 6]])
    print(data)
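
strategy="mean" with axis=0 fills each NaN with the mean of the non-missing values in its column, so the missing entry in column 0 becomes (1 + 7) / 2 = 4. A NumPy check (my own sketch):

import numpy as np

X = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=float)
print(np.nanmean(X, axis=0))   # column means ignoring NaN: [4.         3.66666667]
# so the imputed matrix should be [[1. 2.], [4. 3.], [7. 6.]]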

6. Feature selection: removing low-variance features

def var():
    """
    Feature selection: remove low-variance features
    :return: None
    """
    var = VarianceThreshold(threshold=0.0)
    data = var.fit_transform([[0, 3, 5, 4], [0, 2, 9, 4], [0, 8, 3, 4], [0, 8, 1, 4]])
    print(data)
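
VarianceThreshold keeps only features whose variance is strictly greater than the threshold. Columns 0 and 3 above are constant (variance 0), so with threshold=0.0 only columns 1 and 2 survive, giving a 4x2 result. A quick variance check (my own sketch):

import numpy as np

X = np.array([[0, 3, 5, 4], [0, 2, 9, 4], [0, 8, 3, 4], [0, 8, 1, 4]], dtype=float)
print(np.var(X, axis=0))   # [0.     7.6875 8.75   0.    ] -> columns 0 and 3 get dropped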

7. Dimensionality reduction (PCA)

def pca():
    """
    Dimensionality reduction with PCA
    :return: None
    """
    # a float n_components keeps enough components to explain that fraction of the variance
    pca = PCA(n_components=0.9)
    data = pca.fit_transform([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [7, 8, 9]])
    print(data)
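
Passing a float to n_components keeps the smallest number of components whose explained variance reaches that fraction (90% here). Every sample above has the form [a, a+1, a+2], so the data varies along a single direction and one component should be enough; the transformed array should have shape (5, 1). A quick check (my own sketch):

import numpy as np
from sklearn.decomposition import PCA

X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [7, 8, 9]], dtype=float)
p = PCA(n_components=0.9)
reduced = p.fit_transform(X)
print(reduced.shape)                 # expected: (5, 1)
print(p.explained_variance_ratio_)   # expected: approximately [1.]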
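
If all of the functions above are saved into one script, they can be exercised with a simple entry point (my own addition, not part of the original post):

if __name__ == "__main__":
    dictvec()
    countvec()
    hanzivec()
    tfidfvec()
    mm()
    ss()
    im()
    var()
    pca()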