% Normalise the gradient and store the current cost.
NormGrad = sum(abs(GradNew));
GradNew  = GradNew/NormGrad;
CostOld  = CostNew;

% Pick the base coordinate used to enforce the equality constraint.
switch option.firstbasevariable
    case 'first'
        [val,coord] = max(SigmaNew);
    case 'random'
        % Pick at random among the coordinates attaining the maximum.
        [val,coord] = max(SigmaNew);
        coord   = find(SigmaNew==val);
        indperm = randperm(length(coord));
        coord   = coord(indperm(1));
    case 'fullrandom'
        % Among the active (nonzero) weights, pick the one with smallest gradient.
        indzero = find(SigmaNew~=0);
        if ~isempty(indzero)
            [mini,coord] = min(GradNew(indzero));
            coord = indzero(coord);
        else
            [val,coord] = max(SigmaNew);
        end
end

% Reduced gradient: subtracting the base component keeps sum(Sigma) constant;
% directions that would push a zero weight negative are masked out.
GradNew = GradNew - GradNew(coord);
desc = -GradNew .* ( (SigmaNew>0) | (GradNew<0) );
desc(coord) = -sum(desc);

% Largest feasible step before some weight reaches zero.
stepmin = 0;
costmin = CostOld;
costmax = 0;
ind     = find(desc<0);
stepmax = min(-(SigmaNew(ind))./desc(ind));
deltmax = stepmax;
if isempty(stepmax) || stepmax==0
    Sigma = SigmaNew;
    return
end
if stepmax > 0.1
    stepmax = 0.1;
end

% Bracketing phase: move along desc while the cost keeps decreasing.
while costmax < costmin
    [costmax, S] = costgraph(KH,stepmax,desc,SigmaNew);
    if costmax < costmin
        costmin  = costmax;
        SigmaNew = SigmaNew + stepmax*desc;
        % Drop directions for weights that have (numerically) hit zero,
        % then restore the zero-sum property of desc.
        desc        = desc .* ( (SigmaNew>option.numericalprecision) | (desc>0) );
        desc(coord) = -sum(desc([1:coord-1, coord+1:end]));
        ind = find(desc<0);
        if ~isempty(ind)
            stepmax = min(-(SigmaNew(ind))./desc(ind));
            deltmax = stepmax;
            costmax = 0;
        else
            stepmax = 0;
            deltmax = 0;
        end
    end
end

% Golden-section search refining the step size on [stepmin, stepmax].
Step = [stepmin stepmax];
Cost = [costmin costmax];
[val,coord] = min(Cost);
while (stepmax-stepmin) > option.goldensearch_deltmax*abs(deltmax) && stepmax > eps
    stepmedr = stepmin + (stepmax-stepmin)/gold;   % right interior point
    stepmedl = stepmin + (stepmedr-stepmin)/gold;  % left interior point
    [costmedr, S1] = costgraph(KH,stepmedr,desc,SigmaNew);
    [costmedl, S2] = costgraph(KH,stepmedl,desc,SigmaNew);
    Step = [stepmin stepmedl stepmedr stepmax];
    Cost = [costmin costmedl costmedr costmax];
    [val,coord] = min(Cost);
    switch coord
        case 1   % minimum at stepmin: shrink the interval from the right
            stepmax = stepmedl;
            costmax = costmedl;
            S = S2;
        case 2   % minimum at stepmedl
            stepmax = stepmedr;
            costmax = costmedr;
            S = S2;
        case 3   % minimum at stepmedr
            stepmin = stepmedl;
            costmin = costmedl;
            S = S2;
        case 4   % minimum at stepmax: shrink the interval from the left
            stepmin = stepmedr;
            costmin = costmedr;
            S = S1;
    end
end
This looks like one descent step of an optimization routine. Without the surrounding file I cannot say for certain which algorithm it belongs to, but the structure closely matches the reduced-gradient update with golden-section line search used in SimpleMKL-style multiple kernel learning code (the option names firstbasevariable and goldensearch_deltmax are characteristic). The logic is: normalize the gradient; pick a base coordinate so that the descent direction desc keeps the weight vector SigmaNew on the simplex (entries nonnegative, sum constant); compute the largest feasible step before some weight hits zero; expand the bracket while the cost keeps decreasing; then refine the step size with a golden-section search over [stepmin, stepmax], evaluating the objective through costgraph(KH, ...). Edge cases are handled explicitly: the initial step is capped at 0.1, and the routine returns early when no feasible descent step exists. Pinning down exactly what is being optimized would require the definitions of costgraph and KH.
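To make the line-search logic concrete, here is a minimal, self-contained golden-section search in the same MATLAB style. It uses the same interior-point placement as the loop above (divide the interval by the golden ratio twice); the function name goldensection, the tolerance argument tol, and the quadratic test objective are illustrative assumptions, not part of the original code:

function [xbest, fbest] = goldensection(f, a, b, tol)
% Minimise a unimodal function f on [a, b] by golden-section search.
% (Hypothetical helper for illustration; not from the original file.)
gold = (sqrt(5)+1)/2;              % golden ratio, ~1.618
fa = f(a); fb = f(b);
while (b - a) > tol
    xr = a + (b - a)/gold;         % right interior point
    xl = a + (xr - a)/gold;        % left interior point
    fr = f(xr); fl = f(xl);
    [~, coord] = min([fa fl fr fb]);
    switch coord
        case {1, 2}                % minimum on the left: shrink from the right
            b = xr; fb = fr;
        case {3, 4}                % minimum on the right: shrink from the left
            a = xl; fa = fl;
    end
end
xbest = (a + b)/2;
fbest = f(xbest);
end

For example, [x, fx] = goldensection(@(t) (t-0.3).^2, 0, 1, 1e-6) returns x close to 0.3. Note that, like the loop in the snippet, this variant re-evaluates both interior points on every pass; the classic golden-section scheme reuses one of them per iteration to save a function evaluation.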