最优化共轭梯度法matlab代码
时间: 2023-12-29 19:03:53 浏览: 86
以下是两种MATLAB编写的最优化共轭梯度法的代码:
1. 使用黄金分割法精确一维搜索的最优化共轭梯度法代码:
```matlab
function [x, fval] = conjugate_gradient_golden(x0, epsilon)
% Fletcher-Reeves conjugate gradient method using an exact golden-section
% line search, applied to the objective defined in objective_function.
%
% Inputs:
%   x0      - initial point (vector of length 2)
%   epsilon - convergence tolerance on the gradient norm
% Outputs:
%   x    - approximate minimizer
%   fval - objective value at x

max_iter = 10000;   % safeguard: stop even if the tolerance is never reached
iter = 0;

x = x0;
fval = objective_function(x);
grad = gradient(x);
d = -grad;          % first direction is steepest descent

while norm(grad) > epsilon && iter < max_iter
    alpha = golden_section_search(x, d);   % exact step length along d
    x = x + alpha * d;
    grad_new = gradient(x);
    % Fletcher-Reeves coefficient: ||g_{k+1}||^2 / ||g_k||^2
    beta = norm(grad_new)^2 / norm(grad)^2;
    d = -grad_new + beta * d;
    grad = grad_new;
    fval = objective_function(x);
    iter = iter + 1;
end
end
function fval = objective_function(x)
% Quadratic test objective f(x) = x1^2 + 2*x2^2 (global minimum at the origin).
x1 = x(1);
x2 = x(2);
fval = x1 * x1 + 2 * (x2 * x2);
end
function grad = gradient(x)
% Forward-difference numerical gradient of objective_function at x.
% NOTE: this local function shadows MATLAB's built-in GRADIENT; the name
% is kept because the other functions in this file call it.
h = 1e-6;                      % finite-difference step size
f0 = objective_function(x);    % hoisted: base-point value is loop-invariant
grad = zeros(size(x));
for i = 1:length(x)
    x_step = x;
    x_step(i) = x_step(i) + h;
    grad(i) = (objective_function(x_step) - f0) / h;
end
end
function alpha = golden_section_search(x, d)
% Golden-section line search: minimizes phi(a) = f(x + a*d) over [0, 1].
% Reuses one interior function value per iteration (the defining property
% of golden-section search), so each step costs a single objective
% evaluation instead of the two the previous version spent.
a = 0;
b = 1;              % search interval is fixed to [0, 1]
rho = 0.618;        % golden-ratio fraction
epsilon = 1e-6;     % interval-width tolerance

x1 = a + (1 - rho) * (b - a);
x2 = a + rho * (b - a);
f1 = objective_function(x + x1 * d);
f2 = objective_function(x + x2 * d);
while abs(b - a) > epsilon
    if f1 < f2
        % Minimum lies in [a, x2]; the old x1 becomes the new x2.
        b = x2;
        x2 = x1;
        f2 = f1;
        x1 = a + (1 - rho) * (b - a);
        f1 = objective_function(x + x1 * d);
    else
        % Minimum lies in [x1, b]; the old x2 becomes the new x1.
        a = x1;
        x1 = x2;
        f1 = f2;
        x2 = a + rho * (b - a);
        f2 = objective_function(x + x2 * d);
    end
end
alpha = (a + b) / 2;
end
```
2. 使用Wolfe-Powell非精确一维搜索的最优化共轭梯度法代码:
```matlab
function [x, fval] = conjugate_gradient_wolfe_powell(x0, epsilon)
% Fletcher-Reeves conjugate gradient method using an inexact Wolfe-Powell
% line search, applied to the objective defined in objective_function.
%
% Inputs:
%   x0      - initial point (vector of length 2)
%   epsilon - convergence tolerance on the gradient norm
% Outputs:
%   x    - approximate minimizer
%   fval - objective value at x

max_iter = 10000;   % safeguard: stop even if the tolerance is never reached
iter = 0;

x = x0;
fval = objective_function(x);
grad = gradient(x);
d = -grad;          % first direction is steepest descent

while norm(grad) > epsilon && iter < max_iter
    alpha = wolfe_powell_search(x, d);     % inexact step length along d
    x = x + alpha * d;
    grad_new = gradient(x);
    % Fletcher-Reeves coefficient: ||g_{k+1}||^2 / ||g_k||^2
    beta = norm(grad_new)^2 / norm(grad)^2;
    d = -grad_new + beta * d;
    grad = grad_new;
    fval = objective_function(x);
    iter = iter + 1;
end
end
function fval = objective_function(x)
% Quadratic test objective f(x) = x1^2 + 2*x2^2; minimized at [0, 0].
weights = [1; 2];
squares = [x(1); x(2)].^2;
fval = weights.' * squares;
end
function grad = gradient(x)
% Forward-difference numerical gradient of objective_function at x.
% NOTE: this local function shadows MATLAB's built-in GRADIENT; the name
% is kept because the other functions in this file call it.
h = 1e-6;                      % finite-difference step size
f0 = objective_function(x);    % hoisted: base-point value is loop-invariant
grad = zeros(size(x));
for i = 1:length(x)
    x_step = x;
    x_step(i) = x_step(i) + h;
    grad(i) = (objective_function(x_step) - f0) / h;
end
end
function alpha = wolfe_powell_search(x, d)
% Inexact line search returning a step satisfying the strong Wolfe
% (Wolfe-Powell) conditions along descent direction d:
%   (sufficient decrease)  f(x+a*d) <= f(x) + c1*a*g0'*d
%   (curvature)            |g(x+a*d)'*d| <= -c2*g0'*d
%
% Fixes over the previous version:
%   * The rejection test compared f1 >= objective_function(x + alpha*d),
%     i.e. f1 against itself, which is always true.
%   * The trial step was shrunk (alpha = rho*alpha) during bracketing,
%     so a minimizer beyond the initial step could never be bracketed;
%     the step is now expanded until an upper bracket end is found.
%   * f(x) and the gradient at x are loop invariants, now computed once.
c1 = 0.1;            % sufficient-decrease (Armijo) constant
c2 = 0.9;            % curvature constant
max_iter = 100;      % safeguard against non-termination

f0 = objective_function(x);
g0 = gradient(x);
slope0 = g0' * d;    % directional derivative at the start point

a_lo = 0;            % lower bracket end (always satisfies Armijo)
a_hi = inf;          % upper bracket end (unknown until a trial fails)
alpha = 1;

for iter = 1:max_iter
    f1 = objective_function(x + alpha * d);
    if f1 > f0 + c1 * alpha * slope0
        a_hi = alpha;                 % step too long: tighten from above
    else
        g1 = gradient(x + alpha * d);
        if abs(g1' * d) <= -c2 * slope0
            return;                   % both Wolfe conditions hold: accept
        end
        if g1' * d >= 0
            a_hi = alpha;             % slope nonnegative: minimizer below
        else
            a_lo = alpha;             % still descending: minimizer above
        end
    end
    if isinf(a_hi)
        alpha = 2 * alpha;            % no upper bound yet: keep expanding
    else
        alpha = (a_lo + a_hi) / 2;    % bisect the current bracket
    end
end
end
function alpha = zoom(x, d, alpha_lo)
% Refinement ("zoom") phase of the Wolfe-Powell line search: bisects the
% interval [alpha_lo, 2*alpha_lo] until a step satisfying the strong
% Wolfe conditions is found or the interval width falls below epsilon.
%
% Fixes over the previous version:
%   * The second rejection test compared f1 >= objective_function(x +
%     alpha*d), i.e. f1 against itself, so the whole condition was
%     always true and the else-branch was dead code, degenerating the
%     routine to pure downward bisection; it now compares against the
%     objective value at alpha_lo, which is tracked across iterations.
%   * f(x) and the gradient at x are loop invariants, now computed once.
alpha_hi = alpha_lo * 2;
c1 = 0.1;            % sufficient-decrease (Armijo) constant
c2 = 0.9;            % curvature constant
epsilon = 1e-6;      % interval-width tolerance

f0 = objective_function(x);
g0 = gradient(x);
slope0 = g0' * d;
f_lo = objective_function(x + alpha_lo * d);   % value at the low end

while true
    alpha = (alpha_lo + alpha_hi) / 2;
    f1 = objective_function(x + alpha * d);
    if f1 > f0 + c1 * alpha * slope0 || f1 >= f_lo
        alpha_hi = alpha;      % trial rejected: shrink from above
    else
        g1 = gradient(x + alpha * d);
        if abs(g1' * d) <= -c2 * slope0
            break;             % strong Wolfe conditions hold: accept
        end
        if g1' * d * (alpha_hi - alpha_lo) >= 0
            alpha_hi = alpha_lo;
        end
        alpha_lo = alpha;      % trial becomes the new low end
        f_lo = f1;
    end
    if abs(alpha_hi - alpha_lo) < epsilon
        break;
    end
end
end
```
阅读全文