===============================================================
Summary:
1. Stochastic gradient descent can be viewed as an approximation of gradient descent, but it usually converges faster;
2. Stochastic gradient descent updates the weights from a single training sample at a time (gradient descent updates from all samples at once!), so it escapes shallow local minima more easily; its error curve, however, is not as smooth as that of gradient descent;
3. Because the samples must be visited in random order, the training set is usually reshuffled before every iteration to prevent cycles;
4. In stochastic gradient descent, an adaptive learning rate that decays over time is often used in place of the fixed learning rate eta (see the sketch after this summary);
5. Stochastic gradient descent does not necessarily reach the global optimum, but it approaches it; with an adaptive learning rate, it approaches the global optimum even more closely;
6. Stochastic gradient descent lends itself to online learning, especially with massive data sets (see the partial_fit sketch after the code below);
===============================================================
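A minimal sketch of the adaptive learning rate mentioned in point 4, using the common decay schedule eta_t = c1 / (t + c2); the function name adaptive_eta and the constants c1, c2 are illustrative choices, not part of the book's code:

# Hypothetical decay schedule: the learning rate shrinks as more samples are processed.
def adaptive_eta(t, c1=0.1, c2=10.0):
    # t: number of samples (or iterations) processed so far
    return c1 / (t + c2)

# The rate starts near c1/c2 and decays toward zero:
for t in (0, 10, 100, 1000):
    print(t, adaptive_eta(t))

Such a schedule lets early updates take large steps while later updates settle near the optimum.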
The simulation results (the scatter plot of the samples, the average-cost curve, and the decision regions) are produced by the code below.
The code is as follows:
##############################################################################
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from numpy.random import seed
from matplotlib.colors import ListedColormap
##############################################################################
# Load the dataset
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
#print(df.tail())

# Plot all sample points
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0, 2]].values
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.legend(loc='upper left')
plt.show()
##############################################################################
class AdalineSGD(object):
    def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.w_initialized = False
        self.shuffle = shuffle
        if random_state:
            seed(random_state)

    def fit(self, X, y):
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            # Reshuffle the training set before every epoch to avoid cycles
            if self.shuffle:
                X, y = self._shuffle(X, y)
            cost = []
            for xi, target in zip(X, y):
                cost.append(self._update_weights(xi, target))
            avg_cost = sum(cost) / len(y)
            self.cost_.append(avg_cost)
        return self

    def partial_fit(self, X, y):
        # Update the weights without reinitializing them (online learning)
        if not self.w_initialized:
            self._initialize_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weights(xi, target)
        else:
            self._update_weights(X, y)
        return self

    def _shuffle(self, X, y):
        r = np.random.permutation(len(y))
        return X[r], y[r]

    def _initialize_weights(self, m):
        self.w_ = np.zeros(1 + m)
        self.w_initialized = True

    def _update_weights(self, xi, target):
        # Adaline learning rule applied to a single sample
        output = self.net_input(xi)
        error = (target - output)
        self.w_[1:] += self.eta * xi.dot(error)
        self.w_[0] += self.eta * error
        cost = 0.5 * error**2
        return cost

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        return self.net_input(X)

    def predict(self, X):
        return np.where(self.activation(X) >= 0.0, 1, -1)
##############################################################################
# Feature scaling helps convergence, but it does not guarantee that a case
# which previously failed to converge will converge after scaling.
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

ada = AdalineSGD(n_iter=15, eta=0.01, random_state=1)
ada.fit(X_std, y)
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Average Cost')
plt.show()
##############################################################################
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    # Set up marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # Plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # Plot all samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8,
                    c=cmap(idx), marker=markers[idx], label=cl)

    # Highlight test samples
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        plt.scatter(X_test[:, 0], X_test[:, 1], facecolors='none',
                    edgecolor='black', alpha=1.0, linewidth=1,
                    marker='o', s=55, label='test set')
##############################################################################
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Stochastic Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.show()
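To tie point 6 of the summary to the code: partial_fit above updates the weights without reinitializing them, which is exactly what online learning needs. A minimal usage sketch (the split into an initial batch of 50 samples followed by a simulated one-at-a-time stream is made up for illustration):

# Train on an initial batch, then keep learning as new samples arrive.
ada_online = AdalineSGD(eta=0.01, random_state=1)
ada_online.partial_fit(X_std[:50], y[:50])     # initial batch
for xi, target in zip(X_std[50:], y[50:]):     # simulated data stream
    ada_online.partial_fit(xi, target)         # update from one sample at a time
print(ada_online.predict(X_std[:5]))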
Supplement:
1. numpy.random.permutation: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.permutation.html
2. numpy.random.seed: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.seed.html
3. numpy.ravel: https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.ravel.html
4. zip: http://www.cnblogs.com/frydsh/archive/2012/07/10/2585370.html
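A small self-contained demo of items 1 and 4 above, showing how np.random.permutation and zip work together in _shuffle and fit (the array values here are made up):

import numpy as np
X_demo = np.array([[1, 2], [3, 4], [5, 6]])
y_demo = np.array([-1, 1, -1])
r = np.random.permutation(len(y_demo))   # a shuffled index array, e.g. [2, 0, 1]
X_demo, y_demo = X_demo[r], y_demo[r]    # the same permutation reorders X and y consistently
for xi, target in zip(X_demo, y_demo):   # zip pairs each sample with its label
    print(xi, target)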
Excerpted from "Python Machine Learning" (《Python 机器学习》) by Sebastian Raschka, China Machine Press.