ReFRACtor
nlls_solver_lm.cc
#include <nlls_solver_lm.h>
#include <algorithm>
#include <cassert>
#include <iomanip>


using namespace FullPhysics;
using namespace blitz;
using namespace std;
using namespace Eigen;


#define MAP_BRM_ERM( BMatrix ) Map< const Matrix<double, Dynamic, Dynamic, RowMajor> >(BMatrix.data(), BMatrix.rows(), BMatrix.cols())
#define MAP_BV_ECV( BVector ) Map< const VectorXd >(BVector.data(), BVector.size())
#define MAP_ECV_BV( EVector ) blitz::Array<double, 1>(EVector.data(), shape(EVector.size()), neverDeleteData)

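// The first two macros above create read-only Eigen views of Blitz++ matrix
// and vector data; the third wraps Eigen vector data in a Blitz++ array, all
// without copying. Illustrative sketch (the variable names below are
// hypothetical and not part of this file):
//
//   blitz::Array<double, 1> bv(3);                  // Blitz++ vector
//   bv = 1.0, 2.0, 3.0;
//   Eigen::VectorXd ev = MAP_BV_ECV(bv);            // copies the mapped view of bv
//   blitz::Array<double, 1> bview(MAP_ECV_BV(ev));  // Blitz++ view of ev's data
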
// Given val1 and val2, this function computes the cosine (c)
// and sine (s) of a rotation that rotates val2 completely
// onto val1, i.e. s*val1 + c*val2 = 0.
//
//    --       --     --      --     --   --
//    |  c   -s  |     | val1 |       |  r  |
//    |          |  *  |      |   =   |     |
//    |  s    c  |     | val2 |       |  0  |
//    --       --     --      --     --   --
//
void givens(double val1, double val2, double &c, double &s)
{
  double tempDbl;

  if (!val2) {
    c = 1.0;
    s = 0.0;
  } else {
    if (fabs(val2) > fabs(val1)) {
      tempDbl = -val1 / val2;
      s = 1.0 / sqrt(1.0 + tempDbl * tempDbl);
      c = s * tempDbl;
    } else {
      tempDbl = -val2 / val1;
      c = 1.0 / sqrt(1.0 + tempDbl * tempDbl);
      s = c * tempDbl;
    }
  }
}
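
// Example usage (illustrative sketch, not called anywhere in this file;
// the numbers are made up): rotate the pair (3, 4) so that the second
// component is annihilated.
//
//   double c, s;
//   givens(3.0, 4.0, c, s);        // c == -0.6, s == 0.8
//   double r    = c*3.0 - s*4.0;   // |r| == 5, the norm of (3, 4)
//   double zero = s*3.0 + c*4.0;   // == 0 up to round-off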


NLLSSolverLM::NLLSSolverLM(
      const boost::shared_ptr<NLLSProblem>& p, int max_cost_function_calls,
      const NLLSSolverLM::Options& opt, double dx_tol_abs, double dx_tol_rel,
      double g_tol_abs, double g_tol_rel, bool vrbs )
  : NLLSSolver(p, max_cost_function_calls, vrbs),
    Opt(opt),
    Dx_tol_abs(dx_tol_abs), Dx_tol_rel(dx_tol_rel),
    G_tol_abs(g_tol_abs), G_tol_rel(g_tol_rel),
    CR_ratio(0), Lambda(0)
{
}
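
// Typical use (illustrative sketch; SomeNLLSProblem stands in for any
// concrete NLLSProblem implementation and is not defined in this file):
//
//   boost::shared_ptr<NLLSProblem> nlls_problem(new SomeNLLSProblem(/* ... */));
//   NLLSSolverLM::Options opts;                    // default trust-region options
//   NLLSSolverLM solver(nlls_problem, 100, opts);  // budget of 100 cost function calls
//   solver.solve();
//   blitz::Array<double, 1> x_best = nlls_problem->parameters();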


NLLSSolverLM::status_t NLLSSolverLM::test_dx_rel(
      const VectorXd& dx, const VectorXd& x, double dx_tol_rel )
{
  //return (dx.cwiseAbs().array() >= dx_tol_rel*(dx_tol_rel+x.cwiseAbs().array())).any()?CONTINUE:SUCCESS;

  for(VectorXd::Index i=0; i<dx.size(); i++)
    if( abs(dx(i)) >= dx_tol_rel*(dx_tol_rel + abs(x(i))) )
      return CONTINUE;

  return SUCCESS;
}
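
// For example (values made up for illustration): with dx_tol_rel = 1e-3,
// x = (100, 0.01) and dx = (0.05, 1e-6), the loop above finds no component
// with |dx(i)| >= 1e-3*(1e-3 + |x(i)|), so the test returns SUCCESS.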


NLLSSolverLM::status_t NLLSSolverLM::test_dx_abs(
      const VectorXd& dx, double dx_tol_abs )
{
  //return (dx.lpNorm<Infinity>() < dx_tol_abs)?SUCCESS:CONTINUE;
  return (dx.lpNorm<1>() < dx_tol_abs)?SUCCESS:CONTINUE;
}


NLLSSolverLM::status_t NLLSSolverLM::test_dx(
      const Eigen::VectorXd& dx, const Eigen::VectorXd& x, double dx_tol_rel, double dx_tol_abs )
{
  // The values of the step-test tolerances determine
  // which tests are performed. If both tolerances are
  // greater than zero, then both tests must be successful
  // to have convergence.
  //
  status_t stat_rel = test_dx_rel(dx, x, dx_tol_rel);
  status_t stat_abs = test_dx_abs(dx, dx_tol_abs);
  status_t status = CONTINUE;
  if( (dx_tol_abs>0) && (dx_tol_rel>0) )
    status = ((stat_rel==SUCCESS) && (stat_abs==SUCCESS))?SUCCESS:CONTINUE;
  else if(dx_tol_rel>0)
    status = stat_rel;
  else if(dx_tol_abs>0)
    status = stat_abs;
  return status;
}


NLLSSolverLM::status_t NLLSSolverLM::test_grad_rel(
      const VectorXd& g, const VectorXd& x, double cost, double g_tol_rel )
{
  return (g.cwiseProduct( x.cwiseAbs().cwiseMax(1.0) ).lpNorm<Infinity>() < abs(g_tol_rel*max(cost,1.0)))
         ?SUCCESS:CONTINUE;
}
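
// In other words, the relative gradient test succeeds when
//   max_i | g(i) * max(|x(i)|, 1) |  <  | g_tol_rel * max(cost, 1) |,
// i.e. when the gradient, scaled by the parameter magnitudes and measured
// against the cost, is small in the infinity norm.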


NLLSSolverLM::status_t NLLSSolverLM::test_grad_abs(
      const VectorXd& g, double g_tol_abs )
{
  //return (g.lpNorm<Infinity>() < g_tol_abs)?SUCCESS:CONTINUE;
  return (g.lpNorm<1>() < g_tol_abs)?SUCCESS:CONTINUE;
}


void NLLSSolverLM::solve()
{
  W = MAP_BRM_ERM( P->jacobian() ).colwise().stableNorm().cwiseMax(Opt.min_W);

  VectorXd X( MAP_BV_ECV(P->parameters()) );

  stat = UNTRIED;
  //
  // Any code that may cause an error before trying
  // to take a step should be placed here.
  //
  stat = CONTINUE;

  do {

    // The following three lines are only for recording purposes.
    // Info at the initial guess (the starting point) is also
    // recorded here.
    //
    record_cost_at_accepted_point(P->cost());
    record_accepted_point(P->parameters());
    record_gradient_at_accepted_point(P->gradient());

    // Print state right after recording.
    //
    if(verbose)
      print_state();

    // After iterate() returns, Dx (step) can have a size of zero (no step
    // taken) for one of the following reasons:
    //
    //   iterate() returns ERROR, and no step is computed.
    //
    //   iterate() returns STALLED, and no step is computed.
    //
    //   iterate() returns SUCCESS, and the last computed step was
    //   small enough to assume convergence, but the step was rejected
    //   because it increased the value of the cost function.
    //
    // In any of the above cases just return; however, if a step is
    // taken, just break out of the loop so that the information at
    // the new point is recorded at the end of this method.
    //
    // Moreover, when iterate() is called, it will evaluate the cost
    // function of the problem as many times as needed (almost never
    // more than just a few times) until an error is encountered or a
    // good step is found, even if the number of cost function
    // evaluations exceeds the limit max_cost_f_calls.
    //
    stat = iterate();
    if(Dx.size() == 0)
      return;
    else if(stat == SUCCESS)
      break;

    // The values of the gradient-test tolerances determine which
    // tests are performed. If both tolerances are greater than
    // zero, then both tests must be successful to have convergence.
    //
    // The gradient-based convergence test could be done before
    // calling iterate() just because the initial guess could be a
    // minimum. However, the initial guess could also be a maximum,
    // where the gradient is also zero or very close to zero. Therefore,
    // it is better to go through at least one iteration to see
    // whether or not the cost function can be decreased.
    //
    status_t stat_rel = test_grad_rel(MAP_BV_ECV(P->gradient()), MAP_BV_ECV(P->parameters()), P->cost(), G_tol_rel);
    status_t stat_abs = test_grad_abs(MAP_BV_ECV(P->gradient()), G_tol_abs);
    if( (G_tol_abs>0) && (G_tol_rel>0) )
      stat = ((stat_rel==SUCCESS) && (stat_abs==SUCCESS))?SUCCESS:CONTINUE;
    else if(G_tol_rel>0)
      stat = stat_rel;
    else if(G_tol_abs>0)
      stat = stat_abs;
    if(stat == SUCCESS)
      break;

    // Perform a relative and/or absolute step convergence test.
    //
    stat = test_dx(Dx, MAP_BV_ECV(P->parameters()), Dx_tol_rel, Dx_tol_abs);
    if(stat == SUCCESS)
      break;

  } while( (stat == CONTINUE)
           && (P->num_cost_evaluations() < max_cost_f_calls)
           && (P->message() == NLLSProblem::NONE) );

  // The following three lines are only for recording purposes.
  //
  record_cost_at_accepted_point(P->cost());
  record_accepted_point(P->parameters());
  record_gradient_at_accepted_point(P->gradient());

  // Print state right after recording.
  //
  if(verbose)
    print_state();
}


void NLLSSolverLM::print_state(ostream &ostr)
{
  std::ios oldState(nullptr);
  oldState.copyfmt(ostr);

  ostr << "Solver hm_lmder; at point # " << num_accepted_steps()
       << "; (|f(x)|^2)/2 = " << P->cost() << "; status = " << status_str() << endl;

  (void) ostr.precision(15);

  ostr << "Where the point x is" << endl << fixed << setw(25) << P->parameters() << endl;
  ostr << "The gradient g(x) is" << endl << fixed << setw(25) << P->gradient() << endl << endl;

  ostr.copyfmt(oldState);
}


NLLSSolverLM::status_t NLLSSolverLM::iterate()
{

  // Dx (step) will get populated if a SUCCESSful step is computed.
  //
  Dx.resize(0);

  // Perform the rank-revealing QR decomposition on the Jacobian.
  //
  ColPivHouseholderQR< Matrix<double, Dynamic, Dynamic, RowMajor> > j_QR( MAP_BRM_ERM(P->jacobian()) );

  // Just for convenience and shorter expressions.
  //
  uint32_t n = P->expected_parameter_size();
  uint32_t r = j_QR.rank();

  // In practice it is not very likely, but in theory it is possible to
  // get a Jacobian that is zero. The Jacobian can be zero at a minimum,
  // at a maximum or at a saddle point. A zero Jacobian is either valid
  // or an error due to the implementation of the problem. Here, we cannot
  // assume that it is an error in the implementation of some other code.
  // However, a zero Jacobian gives no sense of direction, and the solver
  // will not be able to compute a step; hence iterate() returns STALLED
  // when the Jacobian is zero at the starting point.
  //
  if(r <= 0)
    return STALLED;

  double res_norm_sqrd = MAP_BV_ECV(P->residual()).squaredNorm();
  VectorXd QTResidual = j_QR.householderQ().transpose() * MAP_BV_ECV(P->residual());
  VectorXd perm_W = j_QR.colsPermutation().transpose()*W;
  MatrixXd Rrn = j_QR.matrixR().topRows(r).template triangularView<Upper>();
  MatrixXd TRrn = Rrn*perm_W.cwiseInverse().asDiagonal();

  // Compute the Gauss-Newton step. If the linear system is full
  // rank, then the unique solution to the linear least-squares
  // problem is the Gauss-Newton step. If the system is rank
  // deficient, then solve the system for the unique minimum-norm
  // scaled step. It is very important that in the case of rank
  // deficiency, when there are infinitely many solutions for the
  // step, we solve not for the unique smallest-norm solution but
  // for the unique smallest-norm scaled solution.
  //
  VectorXd step;
  VectorXd scaled_step;
  double scaled_step_norm;
  if(r == n) {
    step = j_QR.solve(-MAP_BV_ECV(P->residual()));
    scaled_step = W.cwiseProduct(step);
    scaled_step_norm = scaled_step.stableNorm();
  } else {
    HouseholderQR<MatrixXd> TRrnT_QR(TRrn.transpose());
    MatrixXd TrrT( TRrnT_QR.matrixQR().topRows(r).template triangularView<Upper>() );
    VectorXd y = TrrT.transpose().triangularView<Lower>().solve(-QTResidual.head(r));
    scaled_step_norm = y.stableNorm();
    VectorXd y_augmented(n);
    y_augmented << y, VectorXd::Zero(n-r);
    scaled_step = j_QR.colsPermutation()*(TRrnT_QR.householderQ()*y_augmented);
    step = W.cwiseInverse().cwiseProduct(scaled_step);
  }
  Lambda = 0;
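
  // Note: for Lambda > 0 the loop below computes the damped step solving
  // (J^T*J + Lambda*diag(W)^2)*step = -J^T*residual, and Lambda is adjusted
  // until the scaled step length ||W*step|| matches the trust-region radius
  // Opt.tr_rad to within the relative tolerance Opt.tr_rad_tol.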

  // This is the loop that tries to find an acceptable step. An
  // acceptable step is one that significantly reduces the value
  // of the cost function. A significant reduction is a reduction
  // that is larger than data noise, round-off and truncation errors.
  //
  bool cost_reducing_step=true;
  do {

    // If the step is not within the trust region, then compute a smaller
    // Levenberg-Marquardt step. On the first iteration of the loop, the
    // step at this point is the Gauss-Newton step and its corresponding
    // Levenberg-Marquardt parameter is zero.
    //
    if( scaled_step_norm > (1.0 + Opt.tr_rad_tol)*Opt.tr_rad ) {

      // Compute an upper bound for updating (increasing the value of)
      // the Lev-Mar parameter.
      //
      double upper = (TRrn.transpose()*QTResidual.head(r)).stableNorm()/Opt.tr_rad;

      // Compute a lower bound for updating (increasing the value of)
      // the Lev-Mar parameter.
      //
      double lower = 0;
      if(r == n) {
        double phi_0 = scaled_step_norm-Opt.tr_rad;
        VectorXd temp = j_QR.colsPermutation().transpose()*(W.asDiagonal()*scaled_step/scaled_step_norm);
        VectorXd z = Rrn.transpose().triangularView<Lower>().solve(temp);
        double phi_prime_0 = -scaled_step_norm * z.squaredNorm();
        lower = -phi_0/phi_prime_0;
      }
      if( !cost_reducing_step )
        lower = max(Lambda, lower);

      // At this point (lower >= 0) && (upper > lower) must be true.
      assert( (0 <= lower) && (lower < upper) );
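
      // Note on the bounds, in the spirit of More's Levenberg-Marquardt
      // parameter update: with phi(lambda) = ||W*step(lambda)|| - Opt.tr_rad,
      // phi is decreasing and convex in lambda; lower = -phi(0)/phi'(0) is
      // the Newton estimate taken at lambda = 0 (available only in the
      // full-rank case), and upper = ||diag(W)^-1 * J^T * residual||/Opt.tr_rad
      // bounds the root of phi from above.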

      bool step_in_trust_region_found=false;
      do {

        // Levenberg-Marquardt parameter (lambda) limit check. If lambda is
        // not in the open interval (lower, upper), then use the algorithm
        // to choose a value in the open interval.
        //
        if( (Lambda <= lower) || (Lambda >= upper) )
          Lambda = max(0.001*upper, sqrt(lower*upper));

        // Compute a smaller step for lambda > 0. Givens rotations form a
        // major part of this step.
        //
        VectorXd QTRes_head = QTResidual.head(n);
        VectorXd QTRes_extra = VectorXd::Zero(n);
        MatrixXd Rnn = MatrixXd::Zero(n,n);
        Rnn.topRows(r) = Rrn;
        MatrixXd diag = (sqrt(Lambda)*(j_QR.colsPermutation().transpose()*W)).asDiagonal();
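
        // Note: the elimination loop below applies Givens rotations to reduce
        // the 2n-by-n matrix formed by stacking Rnn on top of diag (the
        // permuted scaling diagonal times sqrt(Lambda)) to an upper-triangular
        // n-by-n factor stored back in Rnn, updating the right-hand side
        // [QTRes_head; QTRes_extra] accordingly. The back substitution after
        // the loop then yields, in exact arithmetic, the damped step solving
        // (J^T*J + Lambda*diag(W)^2)*step = -J^T*residual.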

        for(uint32_t i=0; i<n; i++) {
          double cs, sn;

          for(uint32_t j=i; j>0; j--) {
            givens( diag(j-1,i), diag(j,i), cs, sn);
            //
            RowVectorXd tempv = diag.block(j-1,i,1,n-i);
            diag.block(j-1,i,1,n-i) = cs*tempv - sn*diag.block(j,i,1,n-i);
            diag.block(j,i,1,n-i) = sn*tempv + cs*diag.block(j,i,1,n-i);
            //
            double temps = QTRes_extra[j-1];
            QTRes_extra[j-1] = cs*temps - sn*QTRes_extra[j];
            QTRes_extra[j] = sn*temps + cs*QTRes_extra[j];
          }

          givens( Rnn(i,i), diag(0,i), cs, sn );
          //
          RowVectorXd tempv = Rnn.block(i,i,1,n-i);
          Rnn.block(i,i,1,n-i) = cs*tempv - sn*diag.block(0,i,1,n-i);
          diag.block(0,i,1,n-i) = sn*tempv + cs*diag.block(0,i,1,n-i);
          //
          double temps = QTRes_head[i];
          QTRes_head[i] = cs*temps - sn*QTRes_extra[0];
          QTRes_extra[0] = sn*temps + cs*QTRes_extra[0];
        }
        VectorXd y = Rnn.triangularView<Upper>().solve(-QTRes_head);
        step = j_QR.colsPermutation()*y;

        // After finding the smaller Levenberg-Marquardt step, scale the
        // step and find its norm.
        //
        scaled_step = W.cwiseProduct(step);
        scaled_step_norm = scaled_step.stableNorm();

        // True if the norm of the new scaled step is almost equal to the
        // size of the trust-region radius, false otherwise.
        //
        step_in_trust_region_found =
          (scaled_step_norm >= ((1.0 - Opt.tr_rad_tol)*Opt.tr_rad)) && (scaled_step_norm <= ((1.0 + Opt.tr_rad_tol)*Opt.tr_rad));

        // If the step is still outside of the trust region, then update
        // lower, lambda and upper for another iteration to find a smaller
        // step.
        //
        if(!step_in_trust_region_found) {
          double phi = scaled_step_norm-Opt.tr_rad;
          VectorXd temp = j_QR.colsPermutation().transpose()*(W.asDiagonal()*scaled_step/scaled_step_norm);
          VectorXd z = Rnn.transpose().triangularView<Lower>().solve(temp);
          double phi_prime = -scaled_step_norm * z.squaredNorm();

          // Update the upper limit of the Levenberg-Marquardt parameter.
          //
          if(phi < 0)
            upper = Lambda;

          // Update the lower limit of the Levenberg-Marquardt parameter.
          //
          lower = max(lower, (Lambda-phi/phi_prime));

          // Update the Levenberg-Marquardt parameter.
          //
          Lambda = Lambda-((phi+Opt.tr_rad)/Opt.tr_rad)*(phi/phi_prime);
        }

      } while(!step_in_trust_region_found);

    }

    double jacob_step_norm_sqrd =
      (Rrn.triangularView<Upper>()*(j_QR.colsPermutation().transpose()*step)).squaredNorm();

    // Here, we save the current state of the problem before moving
    // the problem to the next point. In this way, if the step is not
    // acceptable, then we can easily restore the state of the problem
    // before taking the step.
    //
    blitz::Array<double, 1> x = P->parameters();

    // Go to the next point for trial.
    //
    blitz::Array<double, 1> x_next(x+MAP_ECV_BV(step));
    P->parameters( blitz::Array<double,1>(x_next));
    double next_res_norm_sqrd = MAP_BV_ECV(P->residual()).squaredNorm();

    // The update of the trust-region radius depends on next_res_norm_sqrd
    // whether or not the step is acceptable. Therefore, if the problem
    // encounters an error, it will not be possible to continue solving
    // the problem. On error, go back to the previous point and return
    // ERROR.
    //
    if(P->message() == NLLSProblem::ERROR) {
      P->parameters(x);
      return ERROR;
    }

    // Compute the ratio of the actual to the predicted reduction
    // in the value of the cost function with the current step.
    //
    if( (next_res_norm_sqrd >= res_norm_sqrd) || (scaled_step_norm <= 0) ) {
      CR_ratio = 0;
    } else {
      CR_ratio = (1.0 - next_res_norm_sqrd/res_norm_sqrd) /
                 ( (jacob_step_norm_sqrd/res_norm_sqrd)
                   + (2.0*Lambda*scaled_step_norm*scaled_step_norm/res_norm_sqrd) );
    }
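
    // Note: the numerator above is the actual relative reduction,
    // 1 - ||residual_new||^2/||residual||^2, while the denominator is the
    // reduction predicted by the local linear model,
    // (||J*step||^2 + 2*Lambda*||W*step||^2) / ||residual||^2.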

    // If there is no significant reduction in the value of the cost
    // function, then the step is not good.
    //
    cost_reducing_step = (CR_ratio > Opt.cr_ratio_tol);
    if(!cost_reducing_step) {

      // The step does not reduce the value of the cost function;
      // therefore, go back to the previous point.
      //
      P->parameters(x);

    } else {

      // The step is accepted; therefore, update the scaling matrix
      // using the Jacobian at the new point.
      //
      W = W.cwiseMax( MAP_BRM_ERM(P->jacobian()).colwise().stableNorm().transpose() );

      // However, if the problem encounters an error when evaluating
      // the Jacobian at the new point, then go back to the previous
      // point and return ERROR.
      //
      if(P->message() == NLLSProblem::ERROR) {
        P->parameters(x);
        return ERROR;
      }

      // The new step is good. It reduces the value of the cost
      // function, and there is no Jacobian evaluation error at
      // the new point; therefore, save the step.
      //
      Dx = step;

    }

    // If the step gets small enough to assume convergence, then
    // return SUCCESS regardless of whether or not the step is accepted.
    //
    if(test_dx(step, MAP_BV_ECV(x_next), Dx_tol_rel, Dx_tol_abs) == SUCCESS)
      return SUCCESS;

    // Update the trust-region radius.
    //
    double tr_scaling_factor = 1.0;
    if(CR_ratio <= 0.25) {
      if(next_res_norm_sqrd <= res_norm_sqrd)
        tr_scaling_factor = 0.5;
      else if(next_res_norm_sqrd >= 100*res_norm_sqrd)
        tr_scaling_factor = 0.1;
      else {
        double temp = -( jacob_step_norm_sqrd/res_norm_sqrd
                         + Lambda*scaled_step_norm*scaled_step_norm/res_norm_sqrd );
        tr_scaling_factor = temp / (2.0*temp + 1.0 - next_res_norm_sqrd/res_norm_sqrd);
        if(tr_scaling_factor < 0.1)
          tr_scaling_factor = 0.1;
        else if(tr_scaling_factor > 0.5)
          tr_scaling_factor = 0.5;
      }
      Opt.tr_rad *= tr_scaling_factor;
    } else if( (CR_ratio > 0.25 && CR_ratio < 0.75 && Lambda <= 0) || (CR_ratio >= 0.75) ) {
      tr_scaling_factor = 2.0;
      Opt.tr_rad = tr_scaling_factor*scaled_step_norm;
    }
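
    // Note: when the gain ratio is poor (CR_ratio <= 0.25) the radius is
    // shrunk by a factor between 0.1 and 0.5 derived from the predicted
    // reduction; when the model predicts well (CR_ratio >= 0.75, or a
    // moderate ratio with a pure Gauss-Newton step, Lambda <= 0) the radius
    // is reset to twice the length of the current scaled step.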

    // If at this point the implementor of the problem believes
    // the problem is solved, then just return SUCCESS.
    //
    if(P->message() == NLLSProblem::SOLVED)
      return SUCCESS;

  } while(!cost_reducing_step && (P->message() == NLLSProblem::NONE));

  return CONTINUE;
}
