// UltraScan III
// us_fit_worker.cpp
1 
3 #include "us_fit_worker.h"
4 #include "us_settings.h"
5 #include "us_gui_settings.h"
6 #include "us_sleep.h"
7 #include <cfloat>
8 
// Float-range limits expressed as doubles; used throughout this file both
// as an epsilon for near-zero comparisons and as an overflow guard.
const double dflt_min = (double)FLT_MIN;
const double dflt_max = (double)FLT_MAX;
11 
12 // Construct fit worker thread
14  QObject* parent )
15  : QThread( parent ),
16  emath ( emath ),
17  fitpars( fitpars )
18 {
20  abort = false;
21  paused = false;
22 
23  redefine_work();
24 }
25 
26 // Destroy fit worker thread
28 {
29  wait();
30 }
31 
32 // Redefine work
34 {
37  k_iter = 0;
44 
45  fitpars.k_iter = 0;
46  fitpars.k_step = 0;
47  fitpars.ndecomps = 0;
48  fitpars.nfuncev = 0;
49  fitpars.variance = 0.0;
50  fitpars.std_dev = 0.0;
51  fitpars.improve = 0.0;
52 DbgLv(1) << "Define improve=0";
54  fitpars.completed = false;
55  fitpars.converged = false;
56  fitpars.aborted = false;
57  fitpars.infomsg = "???";
58 }
59 
60 // Run fit iterations
62 {
63  fit_iterations(); // do all the work here
64 
65  quit();
66  exec();
67 
68  emit work_complete(); // signal that the thread's work is done
69 }
70 
// Flag that work should be paused or resumed.
// check_paused() polls this flag and sleeps in a loop while it is set.
void US_FitWorker::flag_paused( bool pauseit )
{
   paused = pauseit;
}
76 
77 // Flag that work should be aborted
79 {
80  abort = true;
81 }
82 
83 // Do the work of the thread: run fit iterations
85 {
86  int stati = 0;
87  int stats = 0;
88  bool autocnvg = fitpars.autocnvg;
89  double fltolr = tolerance * 1e-6;
90 
91  // With autoconverge, start method is either Lev.-Marquardt or Quasi-Newton
92  nlsmeth = autocnvg ? ( ( nlsmeth == 0 ) ? 0 : 3 ) : nlsmeth;
93  variance = tolerance * 2.0;
94  k_iter = 0;
95 DbgLv(1) << "FWk: fit_iterations";
96 DbgLv(1) << "FWk: mxiters" << mxiters << " tolerance" << tolerance;
97  stati = emath->calc_model( fitpars.guess );
99 
100  // Iterate fitting for max iter count or until convergence
101  while( k_iter < mxiters && variance > tolerance )
102  {
103  stati = 0;
104  old_vari = variance;
105 
106  // Do an interation with a selected method
107  switch( nlsmeth )
108  {
109  case 0: // Levenberg-Marquardt
110  stati = fit_iter_LM();
111  break;
112  case 1: // Modified Gauss-Newton
113  stati = fit_iter_MGN();
114  break;
115  case 2: // Hybrid Method
116  stati = fit_iter_HM();
117  break;
118  case 3: // Quasi-Newton
119  stati = fit_iter_QN();
120  break;
121  case 4: // Generalized Linear LS
122  stati = fit_iter_GLLS();
123  break;
124  case 5: // NonNegative LS
125  stati = fit_iter_NNLS();
126  break;
127  }
128 
129  fitpars.k_iter = ++k_iter;
131  fitpars.std_dev = ( variance > 0.0 ) ? sqrt( variance ) : variance;
133 DbgLv(1) << "EOI improve v ov" << fitpars.improve << variance << old_vari;
135  fitpars.completed = ( k_iter >= mxiters );
136 
137  if ( stati != 0 )
138  {
139  fitpars.aborted = true;
140  stats = stati;
141  }
142 
143  emit work_progress( fitpars.k_step );
144 
145  if ( abort )
146  {
147  fitpars.aborted = true;
148  fitpars.infomsg = "User Aborted";
149  }
150 
152  {
153  if ( fitpars.completed )
154  fitpars.infomsg = tr( "%1 iterations reached" ).arg( mxiters );
155 
156  break;
157  }
158 
159  check_paused();
160 
161  // With autoconverge, flip-flop between Lev.-Marquardt and Quasi-Newton
162  nlsmeth = ( autocnvg &&
163  qAbs( fitpars.improve ) < fltolr &&
164  ( nlsmeth == 0 || nlsmeth == 3 ) ) ?
165  ( 3 - nlsmeth ) : nlsmeth;
166 DbgLv(1) << "EOI autocnvg" << autocnvg << " nlsmeth ftol" << nlsmeth << fltolr;
167  }
168 
169  return stats;
170 }
171 
172 // Run an iteration: Levenberg-Marquardt
174 {
175  int stati = 0;
176  int lamloop = 0;
177  double old_vari = variance;
178  double dv_toler = tolerance * 1e-12;
179 
180  // Create the jacobian matrix ( points rows by parameters columns )
181  emath->calc_jacobian();
182 
183  // Get the (parameters by parameters) info matrix: J' * J
185 
186  // Add Lambda to the info matrix diagonal
187  for ( int ii = 0; ii < nfpars; ii++ )
188  fitpars.info[ ii ][ ii ] += lambda;
189 
190  // Compute the B vector: Jacobian times yDelta vector
191  emath->calc_B();
192 
193  // LLtranspose matrix is a copy of the info matrix
194  US_Matrix::mcopy( fitpars.info, fitpars.LLtr, nfpars, nfpars );
195 
196  fitpars.ndecomps++;
197  check_paused();
198 
199  // Cholesky decomposition of the LLtr matrix
201  ? 0 : -2;
202  check_paused();
203 
204  // Cholesky Solve: replace B vector
206  ? 0 : -3;
207  check_paused();
208 DbgLv(1) << "FWk: ChoSS stati" << stati << " LLtr0" << fitpars.LLtr[0][0]
209  << "lambda" << lambda << fitpars.lambda
210  << "lam start,step" << fitpars.lam_start << fitpars.lam_step;
211 
212  // Test guess vector is original guess + B vector
214 DbgLv(1) << "FWk: ii tguess guess BB" << 0
215  << fitpars.tguess[ 0 ] << fitpars.guess[ 0 ] << fitpars.BB[ 0 ];
216 DbgLv(1) << "FWk: ii tguess guess BB" << 1
217  << fitpars.tguess[ 1 ] << fitpars.guess[ 1 ] << fitpars.BB[ 1 ];
218 
219  // Y-Guess is modified based on test-guess
220  stati = emath->calc_model( fitpars.tguess );
221  check_paused();
222 
223  // Compute new Y-Delta and determine variance
225 
226  if ( variance < 0.0 )
227  {
228 DbgLv(1) << "FWk: *** VARIANCE<0 ***" << variance;
229  double dlt1 = qAbs( fitpars.y_delta[ 0 ] );
230  double dltn = qAbs( fitpars.y_delta[ ntpts - 1 ] );
231  int lgdl = qRound( log10( max( dlt1, dltn ) ) );
232  double dlmx = pow( 10.0, lgdl );
233  fitpars.infomsg = tr( "Huge Variance w/ ydelta max %1" ).arg( dlmx );
234 DbgLv(1) << "FWk: *** VARI<0: lgd dlm dl1 dln" << lgdl << dlmx << dlt1 << dltn;
235  return -101;
236  }
237 
238  double dv = variance - old_vari;
239  if ( qAbs( dv ) < dv_toler )
240  old_vari = variance;
241 
242 DbgLv(1) << "FWk: old,new vari" << old_vari << variance << dv << dv_toler;
243 
244  // Modify guess based on direction of iteration variance change
245  if ( variance < old_vari )
246  { // Reduction: guess is test-guess; Lambda reduced
248 
250  }
251 
252  else if ( variance == old_vari )
253  { // No change: convergence!
254  fitpars.converged = true;
255  fitpars.improve = dv;
256 DbgLv(1) << "CNV improve nv ov" << fitpars.improve << variance << old_vari;
257  fitpars.infomsg = tr( "Variance change " ) +
258  ( ( dv == 0.0 ) ? tr( "zero" ) : tr( "near zero" ) );
259  }
260 
261  else // variance > old_vari
262  { // Greater variance: increase Lambda
263  US_Matrix::add_diag( fitpars.info, (-lambda), nfpars );
264 
265  lamloop++;
266  lambda *= pow( (double)fitpars.lam_step, (double)lamloop );
267 
268  if ( lambda < 1.0e10 )
269  {
271  for ( int ii = 0; ii < nfpars; ii++ )
272  fitpars.info[ ii ][ ii ] += lambda;
273  }
274 
275  else
276  {
277  lambda = 1.0e6;
278  fitpars.converged = true;
279  fitpars.infomsg = "Lambda large";
280  }
281 
282  // Y-guess based on current Guess vector
283  stati = emath->calc_model( fitpars.guess );
284  }
285 
286  // Bump step count
287  fitpars.k_step++;
288 
289  return stati;
290 }
291 
292 // Run an iteration: Modified Gauss-Newton
294 {
295  int stati = 0;
296 
297  return stati;
298 }
299 
300 // Run an iteration: Hybrid Method
302 {
303  int stati = 0;
304 
305  return stati;
306 }
307 
308 // Run an iteration: Quasi-Newton
310 {
311  int stati = 0;
312  QVector< double > vsearch( nfpars );
313  QVector< double > vgamma ( nfpars );
314  QVector< double > vdelta ( nfpars );
315  double* search = vsearch.data();
316  double* gamma = vgamma .data();
317  double* delta = vdelta .data();
318 
319  if ( k_iter == 0 )
320  {
321  QVector< double* > vminf;
322  QVector< double > vdinf;
323  double** wminf = US_Matrix::construct( vminf, vdinf, nfpars, nfpars );
324 
325  // Set up the Hessian matrix, initialized to an identity matrix
327 
328  // Create the (points x parameters) jacobian matrix
329 DbgLv(1) << "FW:QN: calc_jac";
330  emath->calc_jacobian();
331 
332  // Get the (parameters x parameters) info matrix: J' * J
333 DbgLv(1) << "FW:QN: calc_AtA";
335 
336  // Compute the B vector: Jacobian-transpose times yDelta vector: J' * d
337  emath->calc_B();
338 
339  // Create a work matrix that is a copy of the info matrix
341 
342  // Cholesky Invert from info to LL-transpose
343 DbgLv(1) << "FW:QN: ChoInv";
345 
346  // Copy LL-transpose to info matrix
348 DbgLv(1) << "FW:QN: End iter0";
349 
351  }
352 
353  check_paused();
354 
355  // Get a test variance value from B array; return now if convergence
356  double test_vari = US_Matrix::dotproduct( fitpars.BB, nfpars );
357 
358  test_vari = ( test_vari > 0.0 ) ? sqrt( test_vari ) : test_vari;
359 
360 DbgLv(1) << "FW:QN: test_vari" << test_vari;
361  if ( test_vari > 0.0 && sqrt( test_vari ) < tolerance )
362  stati = 0;
363 
364  else
365  {
366  // Build search array ( info * B ) and gamma ( B copy )
368 
369  US_Matrix::vcopy( fitpars.BB, gamma, nfpars );
370 DbgLv(1) << "FW:QN: search0 gamma0" << search[0] << gamma[0];
371 
372  double resids = variance * (double)nfpars;
373  double alpha = linesearch( search, resids );
374 DbgLv(1) << "FW:QN: linesearch return - alpha" << alpha;
375 
376  if ( alpha == 0.0 )
377  stati = 0;
378 
379  if ( alpha < 0.0 )
380  {
381  stati = -410;
382  }
383 
384  else
385  {
386  for ( int ii = 0; ii < nfpars; ii++ )
387  fitpars.guess[ ii ] += ( search[ ii ] * alpha );
388 
389  // Update solution, needed to calculate y_delta
390  stati = emath->calc_model( fitpars.guess );
391  check_paused();
392 DbgLv(1) << "FW:QN: calc_model return";
393 
394  // Calculate the Jacobian matrix
395  emath->calc_jacobian();
396  check_paused();
397 DbgLv(1) << "FW:QN: calc_jac return";
398 
399  // Compute new Y-Delta and determine variance
401 
402  // Compute the B vector: Jacobian-transpose times yDelta vector
403  emath->calc_B();
404  check_paused();
405 DbgLv(1) << "FW:QN: calc_B return";
406 
407  // Update gamma and delta arrays
408  for ( int ii = 0; ii < nfpars; ii++ )
409  {
410  gamma[ ii ] -= fitpars.BB[ ii ];
411  delta[ ii ] = search[ ii ] * alpha;
412  }
413 
414  // Update Quasi-Newton
415  updateQN( gamma, delta );
416 DbgLv(1) << "FW:QN: updateQN return";
417  } // END: alpha > 0.0
418  } // END: test_vari >= tolerance
419 
420  // Bump step count
421  fitpars.k_step++;
422 
423  return stati;
424 }
425 
426 // Run an iteration: Generalized Linear Least Squares
428 {
429  int stati = 0;
430 
431  return stati;
432 }
433 
434 // Run an iteration: NonNegative Least Squares
436 {
437  int stati = 0;
438 
439  return stati;
440 }
441 
442 
443 // Block/Resume based on paused flag setting
445 {
446  while ( paused )
447  { // If paused, loop to sleep and re-check
448  US_Sleep::msleep( 10 );
449  qApp->processEvents();
450  }
451 }
452 
453 // Line search
454 double US_FitWorker::linesearch( double* search, double f0 )
455 {
456  double alpha = 0.0;
457 
458  double x0 = 0.0;
459  double x1 = 0.5;
460  double x2 = 1.0;
461  double old_f0 = 0.0;
462  double old_f1 = 0.0;
463  double old_f2 = 0.0;
464  double hh = 0.1;
465  int iter = 0;
466 
467  double f1 = calc_testParameter( search, x1 );
468  if ( f1 < 0.0 ) return( 0.0 );
469 
470  double f2 = calc_testParameter( search, x2 );
471  if ( f2 < 0.0 ) return( 0.0 );
472 
473  while( f0 >= 10000.0 || f0 < 0.0 ||
474  f1 >= 10000.0 || f1 < 0.0 ||
475  f2 >= 10000.0 || f2 < 0.0 )
476  {
477  x1 /= 10.0;
478  x2 /= 10.0;
479 
480  f1 = calc_testParameter( search, x1 );
481  if ( f1 < 0.0 ) return( 0.0 );
482 
483  f2 = calc_testParameter( search, x2 );
484  if ( f2 < 0.0 ) return( 0.0 );
485 
486  if ( x1 < dflt_min ) return( -1.0 );
487  }
488 
489  bool check_flag = true;
490 
491  while( check_flag )
492  {
493  if ( ( isNan( f0 ) && isNan( f1 ) ) ||
494  ( isNan( f1 ) && isNan( f2 ) ) ||
495  ( isNan( f0 ) && isNan( f2 ) ) )
496  return( -2.0 );
497 
498  if ( ( qAbs( f2 - old_f2 ) < dflt_min ) &&
499  ( qAbs( f1 - old_f1 ) < dflt_min ) &&
500  ( qAbs( f0 - old_f0 ) < dflt_min ) )
501  return( 0.0 );
502 
503  old_f0 = f0;
504  old_f1 = f1;
505  old_f2 = f2;
506 
507  if ( ( ( qAbs( f2 - f0 ) < dflt_min ) &&
508  ( qAbs( f1 - f0 ) < dflt_min ) ) ||
509  ( ( qAbs( f2 - f1 ) < dflt_min ) &&
510  f0 > f1 ) )
511  return( 0.0 );
512 
513  if ( ( qAbs( x0 ) < dflt_min ) &&
514  ( qAbs( x1 ) < dflt_min ) &&
515  ( qAbs( x2 ) < dflt_min ) )
516  return( 0.0 );
517 
518  if ( ( ( qAbs( f0 - f1 ) < dflt_min ) &&
519  ( qAbs( f1 - f2 ) < dflt_min ) ) ||
520  ( ( qAbs( f0 - f1 ) < dflt_min ) &&
521  f2 > f1 ) )
522  return( 0.0 );
523 
524  if ( f0 > f1 && f2 > f1 )
525  {
526  check_flag = false;
527  break;
528  }
529 
530  else if ( ( f2 > f1 && f1 > f0 ) ||
531  ( f1 > f0 && f1 > f2 ) ||
532  ( f1 == f1 && f1 > f0 ) )
533  { // Shift left
534  x2 = x1;
535  f2 = f1;
536  x1 = ( x0 + x2 ) * 0.5;
537 
538  f1 = calc_testParameter( search, x1 );
539  if ( f1 < 0.0 ) return( 0.0 );
540  }
541 
542  else if ( f0 > f1 && f1 > f2 )
543  { // Shift right
544  x0 = x1;
545  f0 = f1;
546  x1 = x2;
547  f1 = f2;
548  x2 = x2 + ( pow( 2.0, (double)( iter + 2 ) ) * hh );
549 
550  f2 = calc_testParameter( search, x2 );
551  if ( f2 < 0.0 ) return( 0.0 );
552  }
553 
554  iter++;
555  }
556 
557  x1 = ( x0 + x2 ) * 0.5;
558  hh = x1 - x0;
559 
560  f1 = calc_testParameter( search, x1 );
561  if ( f1 < 0.0 ) return( 0.0 );
562 
563  while ( true )
564  {
565  if ( f0 < f1 )
566  { // Shift left
567  x2 = x1;
568  f2 = f1;
569  x1 = x0;
570  f1 = f0;
571  x0 = x1 - hh;
572 
573  f0 = calc_testParameter( search, x0 );
574  if ( f0 < 0.0 ) break;
575  }
576 
577  if ( f2 < f1 )
578  { // Shift right
579  x0 = x1;
580  f0 = f1;
581  x1 = x2;
582  f1 = f2;
583  x2 = x1 + hh;
584 
585  f2 = calc_testParameter( search, x2 );
586  if ( f2 < 0.0 ) break;
587  }
588 
589  if ( qAbs( f0 - f1 * 2.0 + f2 ) < dflt_min )
590  break;
591  double xmin = x1 + ( hh * ( f0 - f2 ) ) / ( 2. * ( f0 - f1 * 2. + f2 ) );
592  double fmin = calc_testParameter( search, xmin );
593  if ( fmin < 0.0 ) break;
594 
595  if ( fmin < f1 )
596  {
597  x1 = xmin;
598  f1 = fmin;
599  }
600 
601  hh /= 2.0;
602  if ( hh < tolerance ) { alpha = x1; break; }
603 
604  x0 = x1 - hh;
605  x2 = x1 + hh;
606 
607  f0 = calc_testParameter( search, x0 );
608  if ( f0 < 0.0 ) break;
609  f2 = calc_testParameter( search, x2 );
610  if ( f2 < 0.0 ) break;
611  }
612 
613  return alpha;
614 }
615 
// Calculate a test-parameter residual:  evaluate the model at
// guess + search * step and return its residual value.
//   search  parameter-space search direction
//   step    scale factor applied to the search direction
// Returns the residual from emath->calc_residuals().
double US_FitWorker::calc_testParameter( double* search, double step )
{
   double resid = 0.0;

   // Build the test-guess vector:  current guess plus scaled search step
   for ( int ii = 0; ii < nfpars; ii++ )
      fitpars.tguess[ ii ] = fitpars.guess[ ii ] + search[ ii ] * step;

   if ( emath->calc_model( fitpars.tguess ) < 0.0 )
   {  // Model computation failed:  fall back to the unmodified guess
      for ( int ii = 0; ii < nfpars; ii++ )
         fitpars.tguess[ ii ] = fitpars.guess[ ii ];

      // NOTE(review): one source line (original 629) is missing from this
      // scraped listing at this point -- confirm against the repository.
   }

   resid = emath->calc_residuals();

   return resid;
}
636 
637 // Update information matrix for Quasi-Newton
638 void US_FitWorker::updateQN( double* gamma, double* delta )
639 {
640  QVector< double > vhgamma( nfpars );
641  QVector< double > vvv ( nfpars );
642  QVector< double* > mvvtrns( nfpars );
643  QVector< double* > mhgtrns( nfpars );
644  QVector< double* > mdatrns( nfpars );
645  QVector< double > dvvtrns( nfpars * nfpars );
646  QVector< double > dhgtrns( nfpars * nfpars );
647  QVector< double > ddatrns( nfpars * nfpars );
648  double* hgamma = vhgamma.data();
649  double* vv = vvv .data();
650 
651  // Construct the work matrices, all parameters x parameters
652  double** vvtrns = US_Matrix::construct( mvvtrns, dvvtrns, nfpars, nfpars );
653  double** hgtrns = US_Matrix::construct( mhgtrns, dhgtrns, nfpars, nfpars );
654  double** datrns = US_Matrix::construct( mdatrns, ddatrns, nfpars, nfpars );
655 
656  // Compute the h-gamma array: info-matrix times gamma
657  US_Matrix::mvv( fitpars.info, gamma, hgamma, nfpars, nfpars );
658 
659  // Compute scalars: lambda = gamma dot hgamma; deltgam = delta dot gamma
660  double lambda = US_Matrix::dotproduct( gamma, hgamma, nfpars );
661  double deltgam = US_Matrix::dotproduct( delta, gamma, nfpars );
662 
663  // Compute vv array: scaled delta minus scaled hgamma
664  for ( int ii = 0; ii < nfpars; ii++ )
665  vv[ ii ] = ( delta[ ii ] / deltgam ) - ( hgamma[ ii ] / lambda );
666 
667  // Compute the 3 transpose matrices, each Mij = Vi1 * V1j
668  for ( int ii = 0; ii < nfpars; ii++ )
669  {
670  double rowvv = vv [ ii ];
671  double rowda = delta [ ii ];
672  double rowhg = hgamma[ ii ];
673 
674  for ( int jj = 0; jj < nfpars; jj++ )
675  {
676  vvtrns[ ii ][ jj ] = vv [ jj ] * rowvv;
677  datrns[ ii ][ jj ] = delta [ jj ] * rowda;
678  hgtrns[ ii ][ jj ] = hgamma[ jj ] * rowhg;
679  }
680  }
681 
682  // Update the info matrix by add/subtract of 3 scaled transpose matrices
683  for ( int ii = 0; ii < nfpars; ii++ )
684  for ( int jj = 0; jj < nfpars; jj++ )
685  fitpars.info[ ii ][ jj ] = fitpars.info[ ii ][ jj ]
686  - ( hgtrns [ ii ][ jj ] / lambda )
687  + ( datrns [ ii ][ jj ] / deltgam )
688  + ( vvtrns [ ii ][ jj ] * lambda );
689 }
690 
691 
692 // Convenience function: is a double value NAN (Not A valid Number)?
693 bool US_FitWorker::isNan( double value )
694 {
695  if ( value != value ) return true; // NAN: one case where val!=val
696  double aval = qAbs( value ); // mark NAN if beyond float range
697  return ( aval < dflt_min || aval > dflt_max );
698 }
699