JGradient.hh
1 #ifndef __JFIT__JGRADIENT__
2 #define __JFIT__JGRADIENT__
3 
4 #include <limits>
5 #include <vector>
6 #include <cmath>
7 #include <memory>
8 #include <ostream>
9 #include <iomanip>
10 
11 #include "JLang/JManip.hh"
12 #include "JLang/JException.hh"
13 #include "Jeep/JMessage.hh"
14 
15 
16 /**
17  * \author mdejong
18  */
19 
20 namespace JFIT {}
21 namespace JPP { using namespace JFIT; }
22 
23 namespace JFIT {
24 
25  /**
26  * Auxiliary data structure for fit parameter.
27  */
28  struct JParameter_t {
29  /**
30  * Virtual destructor.
31  */
32  virtual ~JParameter_t()
33  {}
34 
35  /**
36  * Apply step.
37  *
38  * \param step step
39  */
40  virtual void apply(const double step) = 0;
41  };
42 
43 
44  /**
45  * Auxiliary data structure for editable parameter.
46  */
47  struct JModifier_t :
48  public std::shared_ptr<JParameter_t>
49  {
50  /**
51  * Constructor.
52  *
53  * \param name name
54  * \param parameter parameter
55  * \param value value
56  */
57  JModifier_t(const std::string& name,
58  JParameter_t* parameter,
59  const double value) :
60  std::shared_ptr<JParameter_t>(parameter),
61  name (name),
62  value(value)
63  {}
64 
65  std::string name;
66  double value;
67  };
68 
69 
70  /**
71  * Conjugate gradient fit.
72  */
73  struct JGradient :
74  public std::vector<JModifier_t>
75  {
76  /**
77  * Constructor.
78  *
79  * The number of iterations and epsilon refer to the number of steps and
80  * the distance to the minimum, respectively.\n
81  * The number of extra steps can be used to overcome a possible hurdle on the way.
82  *
83  * \param Nmax maximum number of iterations
84  * \param Nextra maximum number of extra steps
85  * \param epsilon epsilon
86  * \param debug debug
87  */
88  JGradient(const size_t Nmax = std::numeric_limits<size_t>::max(),
89  const size_t Nextra = 0,
90  const double epsilon = 1.0e-4,
91  const int debug = 3) :
92  Nmax (Nmax),
93  Nextra (Nextra),
94  epsilon(epsilon),
95  debug  (debug)
96  {}
97 
98 
99  /**
100  * Fit.
101  *
102  * The template parameter should provide for the following function operator.
103  * <pre>
104  * double operator()(int option);
105  * </pre>
106  * The value of the option corresponds to the following cases.
107  * - 0 => step wise improvement of the chi2;
108  * - 1 => evaluation of the chi2 before the determination of the gradient of the chi2; and
109  * - 2 => evaluation of the derivative of the chi2 to each fit parameter.
110  *
111  * \param getChi2 chi2 function
112  */
113  template<class T>
114  double operator()(const T& getChi2)
115  {
116  using namespace std;
117  using namespace JPP;
118 
119  if (this->empty()) {
120  return numeric_limits<double>::max();
121  }
122 
123  this->evaluate(getChi2);
124 
125  const size_t N = this->size();
126 
127  vector<double> H(N);
128  vector<double> G(N);
129 
130  for (size_t i = 0; i != N; ++i) {
131  G[i] = -1.0 * gradient[i];
132  H[i] = G[i];
133  gradient[i] = H[i];
134  }
135 
136  size_t number_of_iterations = 0;
137 
138  for ( ; number_of_iterations != Nmax; ++number_of_iterations) {
139 
140  DEBUG("chi2[0] " << setw(4) << number_of_iterations << ' ' << FIXED(12,5) << chi2[0] << endl);
141 
142  // minimise chi2 in direction of gradient
143 
144  chi2[1] = chi2[0];
145  chi2[2] = chi2[1];
146 
147  size_t m = 0;
148 
149  for (double ds = 1.0; ds > 1.0e-3; ) {
150 
151  this->move(+1.0 * ds);
152 
153  chi2[3] = getChi2(0);
154 
155  DEBUG("chi2[3] " << setw(4) << m << ' ' << FIXED(12,5) << chi2[3] << ' ' << FIXED(12,5) << ds << endl);
156 
157  if (chi2[3] < chi2[2]) {
158 
159  chi2[1] = chi2[2];
160  chi2[2] = chi2[3];
161 
162  m = 0;
163 
164  continue;
165  }
166 
167  if (ds == 1.0) {
168 
169  if (m == 0) {
170  chi2[4] = chi2[3];
171  }
172 
173  if (m != Nextra) {
174 
175  ++m;
176 
177  continue;
178 
179  } else {
180 
181  for ( ; m != 0; --m) {
182  this->move(-1.0 * ds);
183  }
184 
185  chi2[3] = chi2[4];
186  }
187  }
188 
189  this->move(-1.0 * ds);
190 
191  if (chi2[2] < chi2[3]) {
192 
193  // final step based on parabolic interpolation through following points
194  //
195  // x1 = -1 * ds -> chi2[1]
196  // x2 = 0 * ds -> chi2[2]
197  // x3 = +1 * ds -> chi2[3]
198 
199  const double f21 = chi2[2] - chi2[1]; // f(x2) - f(x1)
200  const double f23 = chi2[2] - chi2[3]; // f(x2) - f(x3)
201 
202  const double xs = 0.5 * (f21 - f23) / (f23 + f21);
203 
204  this->move(+1.0 * xs * ds);
205 
206  chi2[3] = getChi2(0);
207 
208  if (chi2[3] < chi2[2]) {
209 
210  chi2[2] = chi2[3];
211 
212  } else {
213 
214  this->move(-1.0 * xs * ds);
215 
216  chi2[2] = getChi2(0);
217  }
218 
219  DEBUG("chi2[2] " << setw(4) << number_of_iterations << ' ' << FIXED(12,5) << chi2[2] << ' ' << SCIENTIFIC(12,5) << ds << endl);
220 
221  break;
222 
223  } else {
224 
225  ds *= 0.5;
226  }
227  }
228 
229  if (fabs(chi2[2] - chi2[0]) < epsilon * 0.5 * (fabs(chi2[0]) + fabs(chi2[2]))) {
230 
231  chi2[0] = chi2[2];
232 
233  break;
234  }
235 
236  this->evaluate(getChi2);
237 
238  double gg = 0.0;
239  double dgg = 0.0;
240 
241  for (size_t i = 0; i != N; ++i){
242  gg += G[i]*G[i];
243  dgg += (gradient[i] + G[i]) * gradient[i];
244  }
245 
246  if (gg == 0.0) {
247  break;
248  }
249 
250  dgg /= gg;
251 
252  for (size_t i = 0; i != N; ++i){
253  G[i] = -1.0 * gradient[i];
254  H[i] = G[i] + dgg * H[i];
255  gradient[i] = H[i];
256  }
257  }
258 
259  DEBUG("chi2[0] " << setw(4) << number_of_iterations << ' ' << FIXED(12,5) << chi2[0] << endl);
260 
261  return chi2[0];
262  }
263 
264 
265  size_t Nmax; //!< maximum number of iterations
266  size_t Nextra; //!< maximum number of extra steps
267  double epsilon; //!< epsilon
268  int debug; //!< debug
269 
270  private:
271  /**
272  * Evaluate gradient.
273  */
274  template<class T>
275  void evaluate(const T& getChi2)
276  {
277  using namespace std;
278  using namespace JPP;
279 
280  const size_t N = this->size();
281 
282  gradient.resize(N);
283 
284  for (std::vector<double>::iterator i = gradient.begin(); i != gradient.end(); ++i) {
285  *i = 0.0;
286  }
287 
288  chi2[0] = getChi2(1);
289 
290  size_t width = 1;
291 
292  for (size_t i = 0; i != N; ++i) {
293  if ((*this)[i].name.size() > width) {
294  width = (*this)[i].name.size();
295  }
296  }
297 
298  double V = 0.0;
299 
300  for (size_t i = 0; i != N; ++i) {
301 
302  if ((*this)[i].value != 0.0) {
303 
304  (*this)[i]->apply(+0.5 * (*this)[i].value);
305 
306  chi2[1] = getChi2(2);
307 
308  (*this)[i]->apply(-0.5 * (*this)[i].value);
309  (*this)[i]->apply(-0.5 * (*this)[i].value);
310 
311  chi2[2] = getChi2(2);
312 
313  gradient[i] = chi2[1] - chi2[2];
314 
315  (*this)[i]->apply(+0.5 * (*this)[i].value);
316 
317  DEBUG(setw(width) << left << (*this)[i].name << right << ' ' << FIXED(12,5) << (*this)[i].value << ' ' << FIXED(12,5) << gradient[i] << endl);
318 
319  } else {
320 
321  gradient[i] = 0.0;
322  }
323 
324  V += gradient[i] * gradient[i];
325  }
326 
327  DEBUG(setw(width) << left << "|gradient|" << right << ' ' << FIXED(12,5) << sqrt(V) << endl);
328  }
329 
330 
331  /**
332  * Move.
333  *
334  * \param factor factor
335  */
336  void move(const double factor)
337  {
338  if (factor > 0.0) {
339  for (size_t i = 0; i != this->size(); ++i) {
340  (*this)[ i ]->apply((*this)[ i ].value * gradient[ i ] * factor);
341  }
342  } else if (factor < 0.0) {
343  for (size_t i = this->size(); i != 0; --i) {
344  (*this)[i-1]->apply((*this)[i-1].value * gradient[i-1] * factor);
345  }
346  }
347  }
348 
349  std::vector<double> gradient;
350  double chi2[5];
351  };
352 }
353 
354 #endif
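
A note on the line search inside operator() (listing lines 191-221): once a step of size ds no longer improves the chi2, the three samples chi2[1], chi2[2] and chi2[3] correspond to the equidistant points x1 = -ds, x2 = 0 and x3 = +ds along the current direction, as stated in the comment at lines 193-197. Writing f21 = chi2[2] - chi2[1] and f23 = chi2[2] - chi2[3] as in the code, the vertex of the parabola through these three points lies at

\[
x_{\min} \;=\; \frac{ds}{2}\,\frac{f_{21}-f_{23}}{f_{21}+f_{23}} \;=\; xs \cdot ds ,
\]

which is exactly the step applied by this->move(+1.0 * xs * ds) at line 204.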
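
For reference, a minimal usage sketch (not part of JGradient.hh; the helper type parameter_t, the target function, the step value 0.01 and the include path are illustrative assumptions): a single parameter x is fitted to minimise (x - 3)^2 by deriving from JParameter_t and registering a JModifier_t.

    #include <iostream>

    #include "JFit/JGradient.hh"   // include path assumed; adjust to the local Jpp layout

    // Fit parameter that modifies a double in place (illustrative helper).
    struct parameter_t : public JFIT::JParameter_t {
      parameter_t(double& x) : x(x) {}
      virtual void apply(const double step) override { x += step; }
      double& x;
    };

    int main()
    {
      double x = 0.0;   // start value

      // chi2 functor; the option (0, 1 or 2) may be used for caching,
      // here the chi2 is simply recomputed at every call.
      auto getChi2 = [&x](const int option) -> double {
        return (x - 3.0) * (x - 3.0);
      };

      JFIT::JGradient fit(1000, 0, 1.0e-4, 0);                          // Nmax, Nextra, epsilon, debug

      fit.push_back(JFIT::JModifier_t("x", new parameter_t(x), 0.01));  // name, parameter, step value

      const double chi2 = fit(getChi2);

      std::cout << "x = " << x << " chi2 = " << chi2 << std::endl;      // x should approach 3

      return 0;
    }

The JModifier_t value acts as the step scale: the numerical gradient in evaluate() is taken with offsets of ±0.5 times this value, and move() advances each parameter by value * gradient * factor.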