The basic idea of logistic regression
Logistic regression is a classification method for binary (two-class) problems. Its basic idea is:
a. Find a suitable hypothesis function, i.e. the classification function, to predict the outcome for a given input;
b. Construct a cost function, i.e. the loss function, to measure the deviation between the predicted output and the actual class labels of the training data;
c. Minimize the cost function to obtain the optimal model parameters, as sketched in the formulas below.
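For reference, a standard formulation consistent with the code below (the notation θ, x, y, α is ours, not spelled out in the original): the hypothesis is the sigmoid of a linear combination of the features, the cost is the negative log-likelihood of the training data, and ascending the log-likelihood gradient gives exactly the update used in gradAscent:

    h_\theta(x) = \frac{1}{1 + e^{-\theta^T x}}

    J(\theta) = -\frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log h_\theta(x^{(i)}) + (1 - y^{(i)}) \log\left(1 - h_\theta(x^{(i)})\right) \right]

    \theta \leftarrow \theta + \alpha \, X^T \left( y - h_\theta(X) \right)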
import numpy
from numpy import *
import matplotlib.pyplot as plt
import random

def loadDataSet(filename):
    fr = open(filename)
    dataMat = []
    labelMat = []
    for line in fr.readlines():
        lineArr = line.strip().split()
        # prepend a constant 1.0 so that weights[0] acts as the intercept
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat

# sigmoid function (a smooth approximation of the step function)
def sigmoid(inX):
    return 1.0 / (1 + numpy.exp(-inX))

# logistic regression classifier based on gradient ascent
def gradAscent(dataMatIn, classLabels):
    dataMatrix = mat(dataMatIn)
    labelMatrix = mat(classLabels).transpose()
    m, n = shape(dataMatrix)
    alpha = 0.001  # step size
    maxCycles = 500
    weights = ones((n, 1))
    # run maxCycles iterations of gradient ascent on the regression coefficients
    for i in range(maxCycles):
        h = sigmoid(dataMatrix * weights)
        error = labelMatrix - h
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights

# analyze the data: plot the decision boundary
def plotBestFit(weights):
    dataMat, labelMat = loadDataSet('test.txt')
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    # best-fit line: the boundary is where weights[0] + weights[1]*x1 + weights[2]*x2 = 0,
    # so solving for x2 gives the line plotted below
    x = arange(-3.0, 3.0, 0.1)
    y = (-weights[0] - weights[1] * x) / weights[2]
    ax.plot(x, y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()

# stochastic gradient ascent
def stocGradAscent0(dataMatrix, classLabels):
    m, n = numpy.shape(dataMatrix)
    alpha = 0.01  # step size
    weights = numpy.ones(n)
    # one pass over the data, updating the weights one sample at a time
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i] * weights))
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights

# improved stochastic gradient ascent
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))  # reset the sampling pool on every pass
        for i in range(m):
            # alpha is adjusted on every iteration: it decreases over time
            # but never reaches 0 thanks to the constant term
            alpha = 4 / (1.0 + j + i) + 0.1
            randIndex = int(random.uniform(0, len(dataIndex)))
            chosen = dataIndex[randIndex]  # draw a sample without replacement
            h = sigmoid(sum(dataMatrix[chosen] * weights))
            error = classLabels[chosen] - h
            weights = weights + alpha * error * dataMatrix[chosen]
            del dataIndex[randIndex]
    return weights

if __name__ == '__main__':
    dataArr, labelMat = loadDataSet('test.txt')
    weights = stocGradAscent1(array(dataArr), labelMat)
    # weights = gradAscent(dataArr, labelMat)
    plotBestFit(weights)
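Note that the commented-out batch alternative, gradAscent, returns an (n, 1) numpy matrix rather than a 1-D array, while plotBestFit indexes weights[0], weights[1], weights[2] as scalars. A minimal sketch of that variant (getA() is the numpy.matrix method that returns the underlying ndarray):

    # hypothetical alternative main block using the batch trainer
    dataArr, labelMat = loadDataSet('test.txt')
    weights = gradAscent(dataArr, labelMat)    # (n, 1) numpy matrix
    plotBestFit(weights.getA().flatten())      # flatten to a 1-D array before plotting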
Application: predicting the mortality of sick horses from colic symptoms
import numpy
from numpy import *
import matplotlib.pyplot as plt
import random

# sigmoid function (a smooth approximation of the step function)
def sigmoid(inX):
    return 1.0 / (1 + numpy.exp(-inX))

# classification function: threshold the sigmoid output at 0.5
def classifyVector(inX, weights):
    prob = sigmoid(sum(inX * weights))
    if prob > 0.5:
        return 1.0
    else:
        return 0.0
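As a quick sanity check of classifyVector, here is a hypothetical example (the weights and features below are made up for illustration, not taken from the horse colic data):

    # hypothetical 3-feature example; sum(inX * weights) = 2.5, sigmoid(2.5) ≈ 0.92 > 0.5
    example_weights = numpy.array([1.0, -0.5, 0.25])
    example_features = numpy.array([2.0, 1.0, 4.0])
    print(classifyVector(example_features, example_weights))  # prints 1.0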
# improved stochastic gradient ascent algorithm
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))  # reset the sampling pool on every pass
        for i in range(m):
            # alpha is adjusted on every iteration: it decreases over time
            # but never reaches 0 thanks to the constant term
            alpha = 4 / (1.0 + j + i) + 0.1
            randIndex = int(random.uniform(0, len(dataIndex)))
            chosen = dataIndex[randIndex]  # draw a sample without replacement
            h = sigmoid(sum(dataMatrix[chosen] * weights))
            error = classLabels[chosen] - h
            weights = weights + alpha * error * dataMatrix[chosen]
            del dataIndex[randIndex]
    return weights

# test the classifier and return the error rate
def colicTest():
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    # each line holds 21 features followed by the class label
    for line in frTrain.readlines():
        curLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(curLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(curLine[21]))
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
    errorCount = 0
    numTestVec = 0
    for line in frTest.readlines():
        numTestVec += 1.0
        curLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(curLine[i]))
        if int(classifyVector(array(lineArr), trainWeights)) != int(curLine[21]):
            errorCount += 1
    errorRate = float(errorCount) / numTestVec
    print("error rate:", errorRate)
    return errorRate

# run colicTest several times and average the error rates
def multiTest():
    numTests = 10
    errorSum = 0.0
    for i in range(numTests):
        errorSum += colicTest()
    print("after %d runs, the average error rate is %f" % (numTests, errorSum / float(numTests)))

multiTest()