Convert the following source code into pseudocode:

import numpy as np
from numpy import asarray


def levenberg_marquardt(fun, grad, jacobian, x0, iterations, tol):
    """
    Minimization of a scalar function of one or more variables using the
    Levenberg-Marquardt algorithm.

    Parameters
    ----------
    fun : function
        Objective function.
    grad : function
        Gradient function of the objective function.
    jacobian : function
        Jacobian function of the objective function.
    x0 : numpy.array, size=9
        Initial value of the parameters to be estimated.
    iterations : int
        Maximum iterations of the optimization algorithm.
    tol : float
        Tolerance of the optimization algorithm.

    Returns
    -------
    xk : numpy.array, size=9
        Parameters estimated by the optimization algorithm.
    fval : float
        Objective function value at xk.
    grad_val : float
        Gradient value of the objective function at xk.
    grad_log : numpy.array
        Record of the gradient of the objective function at each iteration.
    """
    fval = None       # final objective value
    grad_val = None   # value of the last recorded step
    x_log = []        # array of x iterates, n*9 (9 parameters)
    y_log = []        # array of objective-value iterates, 1-D
    grad_log = []     # record of the step norms per iteration
    x0 = asarray(x0).flatten()
    if x0.ndim == 0:
        x0.shape = (1,)
    # iterations = len(x0) * 200
    k = 1
    xk = x0
    updateJ = 1
    lamda = 0.01
    old_fval = fun(x0)
    gfk = grad(x0)
    gnorm = np.amax(np.abs(gfk))
    J = [None]
    H = [None]
    while (gnorm > tol) and (k < iterations):
        if updateJ == 1:
            x_log = np.append(x_log, xk.T)
            yk = fun(xk)
            y_log = np.append(y_log, yk)
            J = jacobian(x0)
            H = np.dot(J.T, J)
        H_lm = H + (lamda * np.eye(9))
        gfk = grad(xk)
        pk = - np.linalg.inv(H_lm).dot(gfk)
        pk = pk.A.reshape(1, -1)[0]  # 2-D matrix to 1-D array
        xk1 = xk + pk
        fval = fun(xk1)
        if fval < old_fval:
            lamda = lamda / 10
            xk = xk1
            old_fval = fval
            updateJ = 1
        else:
            updateJ = 0
            lamda = lamda * 10
        gnorm = np.amax(np.abs(gfk))
        k = k + 1
        grad_log = np.append(grad_log, np.linalg.norm(xk - x_log[-1:]))
    fval = old_fval
    grad_val = grad_log[-1]
    return xk, fval, grad_val, x_log, y_log, grad_log
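For reference, a minimal sketch of how the routine above could be called, assuming the definition and imports shown. The toy least-squares problem (matrix A, vector b, and the three lambda wrappers) is purely illustrative and not part of the original code. The Jacobian is wrapped in np.matrix because the routine accesses pk.A, and x0 must have 9 entries because np.eye(9) is hard-coded.

import numpy as np

A = np.random.default_rng(0).standard_normal((20, 9))   # illustrative data
b = np.random.default_rng(1).standard_normal(20)

fun = lambda x: 0.5 * np.sum((A @ x - b) ** 2)           # objective 0.5*||Ax - b||^2
grad = lambda x: A.T @ (A @ x - b)                        # its gradient
jacobian = lambda x: np.matrix(A)                         # residual Jacobian, as np.matrix

xk, fval, grad_val, x_log, y_log, grad_log = levenberg_marquardt(
    fun, grad, jacobian, np.zeros(9), iterations=200, tol=1e-8)
print(xk, fval)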
FUNCTION levenberg_marquardt(fun, grad, jacobian, x0, iterations, tol):
    fval = None
    grad_val = None
    x_log = []
    y_log = []
    grad_log = []
    x0 = asarray(x0).flatten()
    IF x0.ndim == 0 THEN
        x0.shape = (1,)
    END IF
    k = 1
    xk = x0
    updateJ = 1
    lamda = 0.01
    old_fval = fun(x0)
    gfk = grad(x0)
    gnorm = np.amax(np.abs(gfk))
    J = [None]
    H = [None]
    WHILE (gnorm > tol) AND (k < iterations) DO
        IF updateJ == 1 THEN
            x_log = np.append(x_log, xk.T)
            yk = fun(xk)
            y_log = np.append(y_log, yk)
            J = jacobian(x0)
            H = np.dot(J.T, J)
        END IF
        H_lm = H + (lamda * np.eye(9))
        gfk = grad(xk)
        pk = - np.linalg.inv(H_lm).dot(gfk)
        pk = pk.A.reshape(1, -1)[0]
        xk1 = xk + pk
        fval = fun(xk1)
        IF fval < old_fval THEN
            lamda = lamda / 10
            xk = xk1
            old_fval = fval
            updateJ = 1
        ELSE
            updateJ = 0
            lamda = lamda * 10
        END IF
        gnorm = np.amax(np.abs(gfk))
        k = k + 1
        grad_log = np.append(grad_log, np.linalg.norm(xk - x_log[-1:]))
    END WHILE
    fval = old_fval
    grad_val = grad_log[-1]
    RETURN xk, fval, grad_val, x_log, y_log, grad_log
END FUNCTION
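The pseudocode mirrors the structure of the source. As a standalone illustration of the same damping scheme (divide lamda by 10 after an accepted step, multiply it by 10 after a rejected one), here is a small self-contained Python sketch that fits y = a*exp(b*t) with Levenberg-Marquardt. The model, data, and tolerances are invented for the example and do not come from the original post.

import numpy as np

def residuals(p, t, y):
    a, b = p
    return a * np.exp(b * t) - y               # r(p)

def jac(p, t, y):
    a, b = p
    e = np.exp(b * t)
    return np.column_stack((e, a * t * e))     # dr/da, dr/db

t = np.linspace(0, 1, 20)
y = 2.0 * np.exp(-1.5 * t)                     # noise-free target data
p = np.array([1.0, 0.0])                       # initial guess
lam = 0.01
old_cost = 0.5 * np.sum(residuals(p, t, y) ** 2)

for _ in range(100):
    r = residuals(p, t, y)
    J = jac(p, t, y)
    g = J.T @ r                                # gradient of 0.5*||r||^2
    if np.max(np.abs(g)) < 1e-8:
        break
    H_lm = J.T @ J + lam * np.eye(len(p))      # damped Gauss-Newton matrix
    step = -np.linalg.solve(H_lm, g)
    cost = 0.5 * np.sum(residuals(p + step, t, y) ** 2)
    if cost < old_cost:                        # accept step, reduce damping
        p, old_cost, lam = p + step, cost, lam / 10
    else:                                      # reject step, damp harder
        lam *= 10

print(p)                                       # should be close to [2.0, -1.5]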