Summary:
1. The Adaline algorithm is an improvement on the Rosenblatt perceptron;
2. The two differ mainly in how they update their weights (a minimal perceptron sketch contrasting the two update rules appears at the end of this post):
(Figure: Rosenblatt perceptron — weights updated one sample at a time from the quantized unit-step output)
(Figure: Adaline algorithm — weights updated by gradient descent on the continuous linear activation)
3. The Adaline program for classifying Iris is as follows:
Run results: (figure not preserved: two-panel plot of log sum-squared error vs. epochs for eta = 0.01 and eta = 0.0001)
Results after feature scaling: (figure not preserved. Feature scaling helps convergence, but it by no means guarantees that a previously diverging run will converge!)
##############################################################################
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
##############################################################################
# Load the Iris data set
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
                 header=None)
print(df.tail())

# Plot all sample points: rows 0-49 are setosa, rows 50-99 are versicolor;
# columns 0 and 2 are sepal length and petal length
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0, 2]].values
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
plt.xlabel('sepal length [cm]')   # column 0 is sepal length
plt.ylabel('petal length [cm]')   # column 2 is petal length
plt.legend(loc='upper left')
plt.show()
##############################################################################
class AdalineGD(object):
    """ADAptive LInear NEuron classifier, trained by batch gradient descent."""
    def __init__(self, eta=0.01, n_iter=50):
        self.eta = eta          # learning rate
        self.n_iter = n_iter    # passes over the training set

    def fit(self, X, y):
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            output = self.net_input(X)
            errors = y - output
            # One batch gradient step computed from ALL samples at once
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors ** 2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        # Adaline's activation is the identity (linear) function
        return self.net_input(X)

    def predict(self, X):
        return np.where(self.activation(X) >= 0.0, 1, -1)
##############################################################################
# Compare two learning rates eta
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
ada1 = AdalineGD(eta=0.01, n_iter=10).fit(X, y)
ax[0].plot(range(1, len(ada1.cost_) + 1), np.log10(ada1.cost_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-squared-error)')
ax[0].set_title('Adaline - learning rate 0.01')
ada2 = AdalineGD(eta=0.0001, n_iter=10).fit(X, y)
ax[1].plot(range(1, len(ada2.cost_) + 1), np.log10(ada2.cost_), marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('log(Sum-squared-error)')
ax[1].set_title('Adaline - learning rate 0.0001')
plt.show()
##############################################################################
# Feature scaling (standardization) helps convergence, but a run that
# diverged before scaling is not guaranteed to converge afterwards.
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
ada3 = AdalineGD(eta=0.01, n_iter=10).fit(X_std, y)
ax[0].plot(range(1, len(ada3.cost_) + 1), np.log10(ada3.cost_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-squared-error)')
ax[0].set_title('Adaline - learning rate 0.01, standardized')
ada4 = AdalineGD(eta=0.0001, n_iter=10).fit(X_std, y)
ax[1].plot(range(1, len(ada4.cost_) + 1), np.log10(ada4.cost_), marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('log(Sum-squared-error)')
ax[1].set_title('Adaline - learning rate 0.0001, standardized')
plt.show()
##############################################################################
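For reference, the manual standardization above can equivalently be done with scikit-learn's StandardScaler. This is a minimal sketch, not part of the original program; it assumes X, y, and AdalineGD are already defined as in the listing above:

##############################################################################
# Minimal sketch: standardization via scikit-learn's StandardScaler instead
# of the manual mean/std arithmetic above. Assumes X, y, and AdalineGD from
# the listing above are in scope.
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
X_std2 = sc.fit_transform(X)   # per column: z = (x - mean) / std
ada = AdalineGD(eta=0.01, n_iter=15).fit(X_std2, y)
print('cost per epoch:', ada.cost_)   # expected to decrease across epochs
##############################################################################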
# Adapted from "Python Machine Learning" by Sebastian Raschka, China Machine Press.
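To make the contrast in item 2 concrete, here is a minimal Rosenblatt perceptron sketch (my own illustration of the standard perceptron rule, not code from the post above): the perceptron updates its weights one sample at a time from the thresholded unit-step prediction, whereas AdalineGD takes one batch gradient step per epoch based on the continuous linear activation.

##############################################################################
import numpy as np

class Perceptron(object):
    """Rosenblatt perceptron: per-sample updates from the unit-step output."""
    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        self.w_ = np.zeros(1 + X.shape[1])
        self.errors_ = []
        for _ in range(self.n_iter):
            errors = 0
            for xi, target in zip(X, y):
                # The update uses the THRESHOLDED prediction (+1/-1),
                # not the raw linear output as in Adaline
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)   # misclassifications per epoch
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, -1)
##############################################################################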