Foreword: this article gives a concrete walkthrough of fitting a logistic regression model with numpy; the underlying theory of logistic regression is not covered in depth.

Dataset Preparation

Prepare a dataset for binary classification. The block below can be copied directly into the txt file referenced by filename later; each line holds one sample: two features followed by a 0/1 label.
-0.017612 14.053064 0
-1.395634 4.662541 1
-0.752157 6.538620 0
-1.322371 7.152853 0
0.423363 11.054677 0
0.406704 7.067335 1
0.667394 12.741452 0
-2.460150 6.866805 1
0.569411 9.548755 0
-0.026632 10.427743 0
0.850433 6.920334 1
1.347183 13.175500 0
1.176813 3.167020 1
-1.781871 9.097953 0
-0.566606 5.749003 1
0.931635 1.589505 1
-0.024205 6.151823 1
-0.036453 2.690988 1
-0.196949 0.444165 1
1.014459 5.754399 1
1.985298 3.230619 1
-1.693453 -0.557540 1
-0.576525 11.778922 0
-0.346811 -1.678730 1
-2.124484 2.672471 1
1.217916 9.597015 0
-0.733928 9.098687 0
-3.642001 -1.618087 1
0.315985 3.523953 1
1.416614 9.619232 0
-0.386323 3.989286 1
0.556921 8.294984 1
1.224863 11.587360 0
-1.347803 -2.406051 1
1.196604 4.951851 1
0.275221 9.543647 0
0.470575 9.332488 0
-1.889567 9.542662 0
-1.527893 12.150579 0
-1.185247 11.309318 0
-0.445678 3.297303 1
1.042222 6.105155 1
-0.618787 10.320986 0
1.152083 0.548467 1
0.828534 2.676045 1
-1.237728 10.549033 0
-0.683565 -2.166125 1
0.229456 5.921938 1
-0.959885 11.555336 0
0.492911 10.993324 0
0.184992 8.721488 0
-0.355715 10.325976 0
-0.397822 8.058397 0
0.824839 13.730343 0
1.507278 5.027866 1
0.099671 6.835839 1
-0.344008 10.717485 0
1.785928 7.718645 1
-0.918801 11.560217 0
-0.364009 4.747300 1
-0.841722 4.119083 1
0.490426 1.960539 1
-0.007194 9.075792 0
0.356107 12.447863 0
0.342578 12.281162 0
-0.810823 -1.466018 1
2.530777 6.476801 1
1.296683 11.607559 0
0.475487 12.040035 0
-0.783277 11.009725 0
0.074798 11.023650 0
-1.337472 0.468339 1
-0.102781 13.763651 0
-0.147324 2.874846 1
0.518389 9.887035 0
1.015399 7.571882 0
-1.658086 -0.027255 1
1.319944 2.171228 1
2.056216 5.019981 1
-0.851633 4.375691 1
-1.510047 6.061992 0
-1.076637 -3.181888 1
1.821096 10.283990 0
3.010150 8.401766 1
-1.099458 1.688274 1
-0.834872 -1.733869 1
-0.846637 3.849075 1
1.400102 12.628781 0
1.752842 5.468166 1
0.078557 0.059736 1
0.089392 -0.715300 1
1.825662 12.693808 0
0.197445 9.744638 0
0.126117 0.922311 1
-0.679797 1.220530 1
0.677983 2.556666 1
0.761349 10.693862 0
-2.168791 0.143632 1
1.388610 9.341997 0
0.317029 14.739025 0

Reading the Data

Build two structures from the file: a feature list of the form [[1.0, x1, x2], ...] and a label list of the form [label, ...].

from numpy import *
filename = 'Resources/LogisticRegressionSet.txt'
def loadDataSet():
    dataMat = []
    labelMat = []  # two empty lists for the features and the labels
    fr = open(filename)
    for line in fr.readlines():
        lineArr = line.strip().split()  # split the line into its fields
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        # the leading 1.0 is the constant term: with two features x1 and x2,
        # the model has three parameters, w0 + w1*x1 + w2*x2
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat
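
A quick sanity check of the two returned structures, a minimal sketch assuming the dataset above was saved to the path in filename:

dataMat, labelMat = loadDataSet()
print(len(dataMat), len(labelMat))  # 100 100: one [1.0, x1, x2] row and one label per sample
print(dataMat[0], labelMat[0])      # [1.0, -0.017612, 14.053064] 0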

Building the Logistic (Sigmoid) Function

def sigmoid(inX):  # the sigmoid (logistic) function
    return 1.0 / (1 + exp(-inX))
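
For large negative inputs, exp(-inX) overflows a float and numpy emits a RuntimeWarning, although the result still saturates correctly to 0. If that warning is a concern, a numerically stable variant can be swapped in; this is my own sketch, not part of the original post, and it returns a plain ndarray:

def stableSigmoid(inX):
    inX = atleast_1d(asarray(inX, dtype=float))
    out = empty_like(inX)
    pos = inX >= 0
    out[pos] = 1.0 / (1.0 + exp(-inX[pos]))  # exponent <= 0, cannot overflow
    expX = exp(inX[~pos])                    # exponent < 0, cannot overflow
    out[~pos] = expX / (1.0 + expX)          # algebraically equal to 1/(1+exp(-x))
    return out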

Building the Gradient Ascent Functions

Batch Gradient Ascent

Multiply the data matrix by the current weights, pass the result through the sigmoid to get the predictions h, and use the error (labels minus predictions) to update the weights over repeated iterations. The transpose is what makes the update a gradient step: the gradient of the log-likelihood is X^T(y - h), so multiplying the transposed m x n data matrix by the m x 1 error vector yields an n x 1 step with the same shape as the weight vector.

def gradAscent(dataMat, labelMat):  # batch gradient ascent for the optimal parameters
    dataMatrix = mat(dataMat)  # convert the feature lists to a numpy matrix
    classLabels = mat(labelMat).transpose()  # convert the labels to an m x 1 column matrix
    m, n = shape(dataMatrix)
    alpha = 0.001  # learning rate: the larger it is, the bigger each ascent step
    maxCycles = 500  # iteration count; tune it to the data, sometimes 200 passes suffice
    weights = ones((n, 1))  # initialize the three parameters to 1, stored as an n x 1 column
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)  # m x 1 vector of predictions
        error = (classLabels - h)  # y - h, the error term from the gradient
        weights = weights + alpha * dataMatrix.transpose() * error  # gradient ascent update
    return weights
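
The update direction in gradAscent is exactly the gradient of the log-likelihood l(w) = sum_i [y_i*log(h_i) + (1 - y_i)*log(1 - h_i)], namely X^T(y - h). A finite-difference check can confirm this; the helpers below are my own sketch, not part of the original post:

def logLikelihood(dataMatrix, classLabels, weights):
    # l(w) = sum(y*log(h) + (1-y)*log(1-h)) with h = sigmoid(X*w)
    h = sigmoid(dataMatrix * weights)
    return float(classLabels.T * log(h) + (1 - classLabels).T * log(1 - h))

def numericalGradient(dataMatrix, classLabels, weights, eps=1e-6):
    # central-difference approximation of dl/dw, one coordinate at a time
    n = shape(weights)[0]
    grad = zeros((n, 1))
    for i in range(n):
        step = zeros((n, 1))
        step[i] = eps
        upper = logLikelihood(dataMatrix, classLabels, mat(weights) + step)
        lower = logLikelihood(dataMatrix, classLabels, mat(weights) - step)
        grad[i] = (upper - lower) / (2 * eps)
    return grad

dataMat, labelMat = loadDataSet()
X, y = mat(dataMat), mat(labelMat).transpose()
w = ones((3, 1))
print((X.transpose() * (y - sigmoid(X * w))).T)  # the analytic term used in gradAscent
print(numericalGradient(X, y, w).T)              # the two should agree closely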

Stochastic Gradient Ascent

The only difference from the batch version is that each weight update uses a single row of data instead of the whole matrix.

def stocGradAscent0(dataMat, labelMat):
    # Stochastic gradient ascent: when the dataset is large, computing over all
    # rows on every iteration is expensive, so each weight update uses one row.
    dataMatrix = mat(dataMat)
    classLabels = labelMat
    m, n = shape(dataMatrix)
    alpha = 0.01
    maxCycles = 500
    weights = ones((n, 1))
    for k in range(maxCycles):
        for i in range(m):  # visit each row in turn
            h = sigmoid(sum(dataMatrix[i] * weights))  # scalar prediction for row i
            error = classLabels[i] - h
            weights = weights + alpha * error * dataMatrix[i].transpose()
    return weights

Improved Stochastic Gradient Ascent

This algorithm differs from the previous one in two ways:

  • the step size shrinks as the number of iterations grows (illustrated right after this list)
  • samples are drawn by random sampling, without replacement within each pass
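
A quick numeric illustration of the decay schedule, computed directly from the alpha formula used in the code below (outer pass j, inner step i):

for j in (0, 10, 499):
    print([round(4 / (1 + j + i) + 0.0001, 4) for i in (0, 50, 99)])
# pass 0:   [4.0001, 0.0785, 0.0401]
# pass 10:  [0.3637, 0.0657, 0.0365]
# pass 499: [0.0081, 0.0074, 0.0068]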
def stocGradAscent1(dataMat, labelMat):
    # Improved stochastic gradient ascent: each update uses a randomly chosen
    # sample, and the step size decays as the iteration count grows.
    dataMatrix = mat(dataMat)
    classLabels = labelMat
    m, n = shape(dataMatrix)
    weights = ones((n, 1))
    maxCycles = 500
    for j in range(maxCycles):  # outer passes
        dataIndex = [i for i in range(m)]  # row indices not yet used in this pass
        for i in range(m):
            alpha = 4 / (1 + j + i) + 0.0001  # decaying step size with a small floor
            randIndex = int(random.uniform(0, len(dataIndex)))  # random position among the remaining indices
            sampleIndex = dataIndex[randIndex]  # map the draw to an actual row index
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex].transpose()
            del dataIndex[randIndex]  # remove the sampled index: without replacement
    return weights
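
With any of the three trainers, classification is just thresholding the sigmoid at 0.5, i.e. predicting 1 whenever w . x > 0. A minimal sketch of a training-accuracy check; the helpers classify and trainingAccuracy are my own names, not from the original post:

def classify(inX, weights):
    # predict 1 when sigmoid(w . x) > 0.5, i.e. when w . x > 0
    return 1 if sigmoid(sum(inX * weights)) > 0.5 else 0

def trainingAccuracy(trainer):
    dataMat, labelMat = loadDataSet()
    weights = trainer(dataMat, labelMat)
    dataMatrix = mat(dataMat)
    hits = 0
    for i in range(len(labelMat)):
        if classify(dataMatrix[i], weights) == labelMat[i]:
            hits += 1
    return hits / float(len(labelMat))

print(trainingAccuracy(gradAscent))
print(trainingAccuracy(stocGradAscent1))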

Plotting the Result

def plotBestFit(weights):  # plot the data and the fitted decision boundary
    import matplotlib.pyplot as plt
    dataMat, labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []
    ycord1 = []
    xcord2 = []
    ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)  # one row, one column, first subplot
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')  # scatter the two classes
    x = arange(-3.0, 3.0, 0.1)  # grid of x1 values
    y = (-weights[0] - weights[1] * x) / weights[2]  # decision boundary: solve w0 + w1*x1 + w2*x2 = 0 for x2
    ax.plot(x, y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()

Defining the Main Function

def main():
    dataMat, labelMat = loadDataSet()
    weights = gradAscent(dataMat, labelMat).getA()  # getA() turns the matrix into an ndarray for plotting
    plotBestFit(weights)

Calling the Main Function

if __name__ == '__main__':
    main()
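
To reproduce the other two result plots, swap the training call inside main. Note that the stochastic trainers as written also end up returning a numpy matrix (each update multiplies by a transposed matrix row, which promotes weights from an ndarray to a matrix), so .getA() is still needed before plotting:

def main():
    dataMat, labelMat = loadDataSet()
    # weights = gradAscent(dataMat, labelMat).getA()
    weights = stocGradAscent1(dataMat, labelMat).getA()  # or stocGradAscent0
    plotBestFit(weights)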

Results

  • Batch gradient ascent (decision-boundary plot)
  • Stochastic gradient ascent (decision-boundary plot)
  • Improved stochastic gradient ascent (decision-boundary plot)

The source code in this article is adapted from 逻辑回归原理(python 代码实现).