Teaching notes, kept for future reference.
Golden Section Search for a Minimum
function [xmin,fmin,k] = goldsearch(fun,a,b,eps,itmax)
%Golden section search for the minimum of a unimodal function
% ---input
% fun    objective function handle
% a      lower bound of the search interval
% b      upper bound of the search interval
% eps    minimum interval length (tolerance)
% itmax  maximum number of iterations
% ---output
% xmin   minimizer
% fmin   minimum value fun(xmin)
% k      number of iterations performed
if nargin<5, itmax = 1000; end
if nargin<4, eps = 1e-6; end
k = 0; % iteration counter
x1 = a + 0.382*(b-a); % interior points at the golden-ratio fractions
x2 = a + 0.618*(b-a);
f1 = fun(x1); f2 = fun(x2);
while (abs(b-a)>eps)&&(k<itmax)
    if f1>f2 % minimum lies in [x1,b]
        a = x1; x1 = x2;
        x2 = a + 0.618*(b-a);
        f1 = f2; f2 = fun(x2);
    else % minimum lies in [a,x2]
        b = x2; x2 = x1;
        x1 = a + 0.382*(b-a);
        f2 = f1; f1 = fun(x1);
    end
    k = k+1; % update iteration count
end
xmin = (b+a)/2; % minimizer: midpoint of the final interval
fmin = fun(xmin); % minimum value
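A quick sanity check (a minimal usage sketch; the quadratic test function here is illustrative, not part of the original notes):
% Minimize (x-3)^2 on [0,10]; the exact minimizer is x = 3
f = @(x) (x-3).^2;
[xmin,fmin,k] = goldsearch(f,0,10) % expect xmin ≈ 3, fmin ≈ 0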
Gradient Descent (Steepest Descent)
function [x0,fval,k] = gradientdescent(fun,gfun,x0,eps,itmax)
%Gradient (steepest) descent with a golden-section line search
% ---input
% fun    objective function handle
% gfun   gradient of the objective function
% x0     initial point
% eps    tolerance on the gradient norm
% itmax  maximum number of iterations
% ---output
% x0     minimizer found
% fval   objective value at the minimizer
% k      number of iterations performed
% % Test code:
% fun = @(x)x(1)^2+x(2)^2+x(3)^2-10*cos(2*pi*x(1))-10*cos(2*pi*x(2))-10*cos(2*pi*x(3));
% gfun = @(x)[2*x(1)+20*pi*sin(2*pi*x(1)); 2*x(2)+20*pi*sin(2*pi*x(2)); 2*x(3)+20*pi*sin(2*pi*x(3))];
% x0 = rand(3,1); [xs,fval,k] = gradientdescent(fun,gfun,x0)
% Ideal answer: xs = [0 0 0], fval = -30. On this multimodal (Rastrigin-type)
% function the method basically never finds the global minimum; it fails for
% this problem and is extremely sensitive to the initial point.
if nargin<5, itmax = 300; end
if nargin<4, eps = 1e-8; end
[m,n] = size(x0); if m<n, x0 = x0'; end % force the variable to be a column vector
grad = gfun(x0); [m1,n1] = size(grad); if m1<n1, grad = grad'; end % force the gradient to be a column vector
temp0 = norm(grad); % initial gradient norm
k = 0; % iteration counter
while (temp0>eps)&&(k<itmax)
    goldf = @(u)(fun(x0-u*grad)); % anonymous function of the step length u (older versions can use inline)
    lambda = goldsearch(goldf,0,0.5); % find the optimal step length
    x0 = x0 - lambda*grad;
    grad = gfun(x0); % update the gradient
    temp0 = norm(grad); % updated gradient norm; iteration stops when it is small enough
    k = k+1; % update iteration count
end
fval = fun(x0);
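On a smooth convex problem, where steepest descent is reliable, a minimal usage sketch (the quadratic and its gradient below are illustrative assumptions, not from the notes):
% Convex quadratic with its unique minimum at [0;0] (illustrative example)
fun  = @(x) x(1)^2 + 4*x(2)^2;
gfun = @(x) [2*x(1); 8*x(2)];
[xs,fval,k] = gradientdescent(fun,gfun,[1;1]) % expect xs ≈ [0;0], fval ≈ 0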
Conjugate Gradient Method
function [x0,fval,k] = conjugategradient(fun,gfun,x0,eps,itmax)
%Fletcher-Reeves conjugate gradient with a golden-section line search
% ---input
% fun    objective function handle
% gfun   gradient of the objective function
% x0     initial point
% eps    tolerance on the gradient norm
% itmax  maximum number of iterations
% ---output
% x0     minimizer found
% fval   objective value at the minimizer
% k      number of iterations performed
% % Test code:
% fun = @(x)x(1)^2+x(2)^2+x(3)^2-10*cos(2*pi*x(1))-10*cos(2*pi*x(2))-10*cos(2*pi*x(3));
% gfun = @(x)[2*x(1)+20*pi*sin(2*pi*x(1)); 2*x(2)+20*pi*sin(2*pi*x(2)); 2*x(3)+20*pi*sin(2*pi*x(3))];
% x0 = rand(3,1); [xs,fval,k] = conjugategradient(fun,gfun,x0)
% Ideal answer: xs = [0 0 0], fval = -30. As with steepest descent, the method
% basically never finds the global minimum of this multimodal function and is
% extremely sensitive to the initial point.
if nargin<5, itmax = 300; end
if nargin<4, eps = 1e-8; end
[m,n] = size(x0); if m<n, x0 = x0'; end % force the variable to be a column vector
grad0 = gfun(x0); [m1,n1] = size(grad0); if m1<n1, grad0 = grad0'; end % force the gradient to be a column vector
temp0 = norm(grad0); % initial gradient norm
d = grad0; % negated search direction; the first step is plain steepest descent
k = 0; % iteration counter
while (temp0>eps)&&(k<itmax)
    goldf = @(u)(fun(x0-u*d)); % anonymous function of the step length u (older versions can use inline)
    lambda = goldsearch(goldf,0,0.5); % find the optimal step length
    x0 = x0 - lambda*d; % move to the new point
    grad1 = gfun(x0); % update the gradient
    temp0 = norm(grad1); % updated gradient norm; iteration stops when it is small enough
    b0 = (grad1'*grad1)/(grad0'*grad0); % Fletcher-Reeves beta = ||g_{k+1}||^2/||g_k||^2
    d = grad1 + b0*d; % new negated search direction
    grad0 = grad1; % keep the current gradient for the next beta
    k = k+1; % update iteration count
end
fval = fun(x0);
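The same illustrative quadratic can serve as a sanity check for the conjugate gradient routine; on such problems it typically needs fewer iterations than steepest descent:
% Same convex quadratic as above (illustrative example)
fun  = @(x) x(1)^2 + 4*x(2)^2;
gfun = @(x) [2*x(1); 8*x(2)];
[xs,fval,k] = conjugategradient(fun,gfun,[1;1]) % expect xs ≈ [0;0], fval ≈ 0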
A More Challenging Test Case
Objective function
function f = objectivefcn1(x)
% Objective for the test case; the summand does not depend on k, so this
% equals 55*exp(-(x(1)-x(2))^2-2*x(1)^2)*cos(x(2))*sin(2*x(2))
f = 0;
for k = 1:10
    f = f + k*exp(-(x(1)-x(2))^2 - 2*x(1)^2)*cos(x(2))*sin(2*x(2));
end
Objective function gradient
function fg = objectivefcn1grad(x)
% Analytic gradient of objectivefcn1
fg = [0;0];
for k = 1:10
    fg = fg + k*[-sin(2*x(2))*exp(-(x(1) - x(2))^2 - 2*x(1)^2)*cos(x(2))*(6*x(1) - 2*x(2));...
        2*cos(2*x(2))*exp(- (x(1) - x(2))^2 - 2*x(1)^2)*cos(x(2)) - sin(2*x(2))*exp(- (x(1) - x(2))^2 - 2*x(1)^2)*sin(x(2)) + sin(2*x(2))*exp(- (x(1) - x(2))^2 - 2*x(1)^2)*cos(x(2))*(2*x(1) - 2*x(2))];
end
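Before feeding a hand-coded gradient to the solvers above, it is worth checking it against a finite-difference approximation; a minimal sketch (the step size h and the random test point are arbitrary choices):
% Compare the analytic gradient with central differences at a random point
x = randn(2,1); h = 1e-6;
fd = zeros(2,1);
for i = 1:2
    e = zeros(2,1); e(i) = h;
    fd(i) = (objectivefcn1(x+e) - objectivefcn1(x-e))/(2*h);
end
norm(fd - objectivefcn1grad(x)) % expect a tiny residual, roughly 1e-6 or smaller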
Main script
clear,clc
x0 = [-0.2,-0.5];
[x1,fval1] = fminsearch(@objectivefcn1,x0)
[x2,fval2] = fminunc(@objectivefcn1,x0)
% Conjugate gradient: very sensitive to the initial point; the gradient norm
% is tiny at the solution, but it is a local minimum, not the global minimum
[x3,fval3,k1] = conjugategradient(@objectivefcn1,@objectivefcn1grad,x0)
% Steepest descent: also very sensitive to the initial point
[x4,fval4,k2] = gradientdescent(@objectivefcn1,@objectivefcn1grad,x0)
Results
x1 =
-0.1695 -0.5086
fval1 =
-34.3907
警告: Gradient must be provided for trust-region algorithm; using
quasi-newton algorithm instead.
> In fminunc (line 395)
In objectivefcn1_main (line 4)
Local minimum found.
Optimization completed because the size of the gradient is less than
the default value of the optimality tolerance.
<stopping criteria details>
x2 =
-0.1696 -0.5087
fval2 =
-34.3907
x3 =
-0.1696
-0.5087
fval3 =
-34.3907
k1 =
16
x4 =
-0.1696
-0.5087
fval4 =
-34.3907
k2 =
56
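All four solvers agree on the same local minimum near (-0.17, -0.51) with f ≈ -34.39, with conjugate gradient taking 16 iterations against 56 for steepest descent. To see the multimodal landscape that makes the starting point matter, one can plot the objective surface (a minimal sketch using standard MATLAB plotting; the plotting window is an arbitrary choice):
% Visualize the objective surface around the found minimizer
[X,Y] = meshgrid(-1:0.02:1, -2:0.02:2);
Z = arrayfun(@(a,b) objectivefcn1([a,b]), X, Y);
surf(X,Y,Z,'EdgeColor','none'); xlabel('x_1'); ylabel('x_2'); zlabel('f')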