Classification - Example 5: Linear and Quadratic Discriminant Analysis with confidence ellipsoid



A comparison of linear discriminant analysis and quadratic discriminant analysis.

http://scikit-learn.org/stable/auto_examples/classification/plot_lda_qda.html

(1) Data generation functions

The packages imported by this example are, in brief:

  1. scipy.linalg: linear algebra routines; here we mainly use linalg.eigh for eigenvalue problems
  2. matplotlib.colors: used to handle the color mapping when plotting
  3. LinearDiscriminantAnalysis: the linear discriminant algorithm
  4. QuadraticDiscriminantAnalysis: the quadratic discriminant algorithm
    %matplotlib inline
    from scipy import linalg
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    from matplotlib import colors
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

Next we set up a linearly varying colormap. LinearSegmentedColormap(name, segmentdata) by default returns a color mapping with 256 levels, configured by a dict segmentdata with three entries. Taking 'red': [(0, 1, 1), (1, 0.7, 0.7)] as an example: as the input value goes from 0 to 1, the red channel varies linearly from 1 down to 0.7.

    cmap = colors.LinearSegmentedColormap(
        'red_blue_classes',
        {'red': [(0, 1, 1), (1, 0.7, 0.7)],
         'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
         'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
    plt.cm.register_cmap(cmap=cmap)

We can observe this with the code below, which prints how the RGB values change when the input is np.arange(0, 1.1, 0.1), i.e. 0, 0.1, ..., 1.0.

    import pandas as pd

    values = np.arange(0, 1.1, 0.1)
    cmap_values = mpl.cm.get_cmap('red_blue_classes')(values)
    pd.set_option('display.precision', 2)
    df = pd.DataFrame(np.hstack((values.reshape(11, 1), cmap_values)))
    df.columns = ['Value', 'R', 'G', 'B', 'Alpha']
    print(df)
        Value    R    G    B  Alpha
    0     0.0  1.0  0.7  0.7      1
    1     0.1  1.0  0.7  0.7      1
    2     0.2  0.9  0.7  0.8      1
    3     0.3  0.9  0.7  0.8      1
    4     0.4  0.9  0.7  0.8      1
    5     0.5  0.8  0.7  0.9      1
    6     0.6  0.8  0.7  0.9      1
    7     0.7  0.8  0.7  0.9      1
    8     0.8  0.8  0.7  0.9      1
    9     0.9  0.7  0.7  1.0      1
    10    1.0  0.7  0.7  1.0      1
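
As an aside on the segmentdata format: each tuple is (x, y0, y1), where y0 is the channel value approached from just below the anchor x and y1 the value just above it, so a channel may have more than two anchors (or even jumps). A minimal sketch, using a hypothetical three-anchor colormap that is not part of the original example:

    # Hypothetical three-anchor map: red ramps from 0 to 1 over [0, 0.5],
    # then stays at 1; green and blue stay at 0 throughout.
    demo = colors.LinearSegmentedColormap(
        'demo_red',
        {'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)],
         'green': [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)],
         'blue':  [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)]})
    print(demo(0.25))  # red channel roughly 0.5, halfway up the ramp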

Next we generate two datasets. Each contains 600 samples with 2 features, X: 600x2, and 600 class labels, y: 600 (the first 300 elements are 0, the rest are 1):

  1. dataset_fixed_cov(): the features of the two classes share the same covariance
  2. dataset_cov(): the features of the two classes have different covariances

The difference lies in how X is generated: np.dot(np.random.randn(n, dim), C) versus np.dot(np.random.randn(n, dim), C.T). np.dot(np.random.randn(n, dim), C) produces a 300x2 matrix whose spread is controlled by the matrix C. In dataset_fixed_cov(), both blocks of 300 samples are shaped by the same C, while dataset_cov() shapes the second block with C.T instead (see the covariance check after the code below). In the result figure at the bottom, the top row (same covariance) shows the red points (first class) and the blue points (second class) spread in similar shapes, whereas in the bottom row (different covariances) the shapes differ. In each panel, the horizontal and vertical axes are the first and second feature. As an exercise, try decreasing the value 0.83, or changing C.T to C, and see how the final figure changes.
    def dataset_fixed_cov():
        '''Generate 2 Gaussians samples with the same covariance matrix'''
        n, dim = 300, 2
        np.random.seed(0)
        C = np.array([[0., -0.23], [0.83, .23]])
        X = np.r_[np.dot(np.random.randn(n, dim), C),
                  np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]  # + np.array([1, 1]) shifts the second class away from the first
        y = np.hstack((np.zeros(n), np.ones(n)))  # 300 zeros followed by 300 ones
        return X, y

    def dataset_cov():
        '''Generate 2 Gaussians samples with different covariance matrices'''
        n, dim = 300, 2
        np.random.seed(0)
        C = np.array([[0., -1.], [2.5, .7]]) * 2.
        X = np.r_[np.dot(np.random.randn(n, dim), C),
                  np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
        y = np.hstack((np.zeros(n), np.ones(n)))
        return X, y
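
Why does swapping C for C.T change the shape of a class? For a matrix Z of i.i.d. standard normal rows, the covariance of Z @ C is C.T @ C, while that of Z @ C.T is C @ C.T, so the two halves of dataset_cov() end up with different covariance matrices. A quick check (a sketch, reusing the functions and imports above):

    X, y = dataset_cov()
    print(np.cov(X[y == 0], rowvar=False).round(1))  # close to C.T @ C
    print(np.cov(X[y == 1], rowvar=False).round(1))  # close to C @ C.T; the [1, 4] shift does not affect covariance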

(2) Plotting functions

The plotting function plot_data() proceeds in three steps:

  1. Identify the correctly classified points and the misclassified points
  2. Plot the points classified as 0 in red and those classified as 1 in blue, with misclassified points in dark red and dark blue
  3. Use lda.predict_proba() to draw the class probability distribution (see Example 3)

(To display nicely in the IPython notebook environment, the functions below have been slightly adjusted.)

    def plot_data(lda, X, y, y_pred, fig_index):
        splot = plt.subplot(2, 2, fig_index)
        if fig_index == 1:
            plt.title('Linear Discriminant Analysis', fontsize=28)
            plt.ylabel('Data with fixed covariance', fontsize=28)
        elif fig_index == 2:
            plt.title('Quadratic Discriminant Analysis', fontsize=28)
        elif fig_index == 3:
            plt.ylabel('Data with varying covariances', fontsize=28)
        # Step 1: identify the correctly and incorrectly classified points
        tp = (y == y_pred)                 # mask of correctly classified samples
        tp0, tp1 = tp[y == 0], tp[y == 1]  # tp0: class-0 samples that were classified correctly
        X0, X1 = X[y == 0], X[y == 1]
        X0_tp, X0_fp = X0[tp0], X0[~tp0]
        X1_tp, X1_fp = X1[tp1], X1[~tp1]
        # Step 2: plot the classes in red and blue, misclassified points in dark red and dark blue
        # class 0: dots
        plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
        plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000')  # dark red
        # class 1: dots
        plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
        plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099')  # dark blue
        # Step 3: draw the class probability distribution (see Example 3)
        # class 0 and 1 : areas
        nx, ny = 200, 100
        x_min, x_max = plt.xlim()
        y_min, y_max = plt.ylim()
        xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
                             np.linspace(y_min, y_max, ny))
        Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
        Z = Z[:, 1].reshape(xx.shape)
        plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
                       norm=colors.Normalize(0., 1.))
        plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
        # means
        plt.plot(lda.means_[0][0], lda.means_[0][1],
                 'o', color='black', markersize=10)
        plt.plot(lda.means_[1][0], lda.means_[1][1],
                 'o', color='black', markersize=10)
        return splot
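
A side note on the grid-evaluation pattern in step 3: np.meshgrid produces 2-D coordinate arrays, np.c_[xx.ravel(), yy.ravel()] flattens them into a single (n_points, 2) sample matrix that predict_proba accepts, and reshape(xx.shape) turns the probabilities back into a grid for pcolormesh. A tiny sketch of the shape round-trip:

    xx, yy = np.meshgrid(np.linspace(0, 1, 3), np.linspace(0, 1, 2))
    grid = np.c_[xx.ravel(), yy.ravel()]  # one row per grid point
    print(xx.shape, grid.shape)           # (2, 3) (6, 2)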
    def plot_ellipse(splot, mean, cov, color):
        v, w = linalg.eigh(cov)
        u = w[0] / linalg.norm(w[0])
        angle = np.arctan(u[1] / u[0])
        angle = 180 * angle / np.pi  # convert to degrees
        # filled Gaussian at 2 standard deviation
        ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
                                  180 + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)
        splot.set_xticks(())
        splot.set_yticks(())
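
On the geometry in plot_ellipse(): linalg.eigh returns the eigenvalues v of the covariance matrix in ascending order together with its eigenvectors, and the spread of a Gaussian along an eigenvector is the square root of the corresponding eigenvalue. Note that matplotlib's Ellipse takes a full width and height, so passing 2 * v ** 0.5 yields semi-axes of sqrt(v). A small sketch with a hypothetical diagonal covariance:

    cov = np.array([[2.0, 0.0], [0.0, 0.5]])  # hypothetical: variance 2 along x, 0.5 along y
    v, w = linalg.eigh(cov)                   # eigenvalues in ascending order
    print(np.sqrt(v))                         # approx [0.71 1.41]: spread along each axis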

(3) Fitting the data and plotting

    def plot_lda_cov(lda, splot):
        plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
        plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')

    def plot_qda_cov(qda, splot):
        plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
        plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')

    ###############################################################################
    figure = plt.figure(figsize=(30, 20), dpi=300)
    for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
        # Linear Discriminant Analysis
        lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
        y_pred = lda.fit(X, y).predict(X)
        splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
        plot_lda_cov(lda, splot)
        plt.axis('tight')
        # Quadratic Discriminant Analysis
        qda = QuadraticDiscriminantAnalysis(store_covariances=True)
        y_pred = qda.fit(X, y).predict(X)
        splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
        plot_qda_cov(qda, splot)
        plt.axis('tight')
    plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis', fontsize=28)
    plt.show()
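
A compatibility note, which depends on your scikit-learn version: newer releases renamed the QDA parameter store_covariances to store_covariance (and the fitted attribute covariances_ to covariance_). If the constructor call above fails, a version-tolerant sketch:

    from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

    try:
        qda = QuadraticDiscriminantAnalysis(store_covariance=True)   # newer scikit-learn
    except TypeError:
        qda = QuadraticDiscriminantAnalysis(store_covariances=True)  # older scikit-learn, as used here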

(Figure: a 2x2 grid comparing Linear Discriminant Analysis, left column, and Quadratic Discriminant Analysis, right column, on the fixed-covariance data, top row, and the varying-covariance data, bottom row, with decision boundaries and covariance ellipses.)

Python source code: plot_lda_qda.py

http://scikit-learn.org/stable/_downloads/plot_lda_qda.py

    print(__doc__)

    from scipy import linalg
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    from matplotlib import colors

    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

    ###############################################################################
    # colormap
    cmap = colors.LinearSegmentedColormap(
        'red_blue_classes',
        {'red': [(0, 1, 1), (1, 0.7, 0.7)],
         'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
         'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
    plt.cm.register_cmap(cmap=cmap)

    ###############################################################################
    # generate datasets
    def dataset_fixed_cov():
        '''Generate 2 Gaussians samples with the same covariance matrix'''
        n, dim = 300, 2
        np.random.seed(0)
        C = np.array([[0., -0.23], [0.83, .23]])
        X = np.r_[np.dot(np.random.randn(n, dim), C),
                  np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
        y = np.hstack((np.zeros(n), np.ones(n)))
        return X, y

    def dataset_cov():
        '''Generate 2 Gaussians samples with different covariance matrices'''
        n, dim = 300, 2
        np.random.seed(0)
        C = np.array([[0., -1.], [2.5, .7]]) * 2.
        X = np.r_[np.dot(np.random.randn(n, dim), C),
                  np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
        y = np.hstack((np.zeros(n), np.ones(n)))
        return X, y

    ###############################################################################
    # plot functions
    def plot_data(lda, X, y, y_pred, fig_index):
        splot = plt.subplot(2, 2, fig_index)
        if fig_index == 1:
            plt.title('Linear Discriminant Analysis')
            plt.ylabel('Data with fixed covariance')
        elif fig_index == 2:
            plt.title('Quadratic Discriminant Analysis')
        elif fig_index == 3:
            plt.ylabel('Data with varying covariances')

        tp = (y == y_pred)  # True Positive
        tp0, tp1 = tp[y == 0], tp[y == 1]
        X0, X1 = X[y == 0], X[y == 1]
        X0_tp, X0_fp = X0[tp0], X0[~tp0]
        X1_tp, X1_fp = X1[tp1], X1[~tp1]

        # class 0: dots
        plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
        plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000')  # dark red

        # class 1: dots
        plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
        plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099')  # dark blue

        # class 0 and 1 : areas
        nx, ny = 200, 100
        x_min, x_max = plt.xlim()
        y_min, y_max = plt.ylim()
        xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
                             np.linspace(y_min, y_max, ny))
        Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
        Z = Z[:, 1].reshape(xx.shape)
        plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
                       norm=colors.Normalize(0., 1.))
        plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')

        # means
        plt.plot(lda.means_[0][0], lda.means_[0][1],
                 'o', color='black', markersize=10)
        plt.plot(lda.means_[1][0], lda.means_[1][1],
                 'o', color='black', markersize=10)

        return splot

    def plot_ellipse(splot, mean, cov, color):
        v, w = linalg.eigh(cov)
        u = w[0] / linalg.norm(w[0])
        angle = np.arctan(u[1] / u[0])
        angle = 180 * angle / np.pi  # convert to degrees
        # filled Gaussian at 2 standard deviation
        ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
                                  180 + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)
        splot.set_xticks(())
        splot.set_yticks(())

    def plot_lda_cov(lda, splot):
        plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
        plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')

    def plot_qda_cov(qda, splot):
        plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
        plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')

    ###############################################################################
    for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
        # Linear Discriminant Analysis
        lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
        y_pred = lda.fit(X, y).predict(X)
        splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
        plot_lda_cov(lda, splot)
        plt.axis('tight')

        # Quadratic Discriminant Analysis
        qda = QuadraticDiscriminantAnalysis(store_covariances=True)
        y_pred = qda.fit(X, y).predict(X)
        splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
        plot_qda_cov(qda, splot)
        plt.axis('tight')
    plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
    plt.show()