GeneralizationQuotient.h
/*!
 *
 * \brief Stopping criterion monitoring the quotient of generalization loss and training progress
 *
 *
 * \author O. Krause
 * \date 2010
 *
 *
 * \par Copyright 1995-2017 Shark Development Team
 *
 * <BR><HR>
 * This file is part of Shark.
 * <http://shark-ml.org/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef SHARK_TRAINERS_STOPPINGCRITERA_GENERALIZATION_QUOTIENT__H
#define SHARK_TRAINERS_STOPPINGCRITERA_GENERALIZATION_QUOTIENT__H

#include "AbstractStoppingCriterion.h" // base class of GeneralizationQuotient
#include <shark/Core/ResultSets.h>
#include <queue>
#include <numeric>
#include <algorithm>
#include <shark/LinAlg/Base.h>

namespace shark{

/// \brief Stopping criterion monitoring the quotient of generalization loss and training progress
///
/// The GeneralizationQuotient is, as the name suggests, the quotient of two quantities that are also used
/// as stopping criteria on their own, namely the generalization loss and the training progress.
///
/// This stopping criterion is based on the empirical fact that the generalization error does not have a smooth surface.
/// It is normal that during periods of fast learning the generalization loss might increase first and then decrease again.
/// This class therefore calculates the quotient of generalization loss and training progress and stops if it is bigger
/// than maxLoss > 0.
///
/// Terminology for this and other stopping criteria is taken from (and also see):
///
/// Lutz Prechelt. Early Stopping - but when? In Genevieve B. Orr and
/// Klaus-Robert Müller: Neural Networks: Tricks of the Trade, volume
/// 1524 of LNCS, Springer, 1997.
///
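/// Writing \f$ E_{va}(t) \f$ for the current validation error, \f$ E_{tr}^{opt}(t) \f$ for the smallest training error
/// seen so far and \f$ \bar E_{tr}(t) \f$ for the mean training error over the last intervalSize iterations, the
/// criterion implemented below stops as soon as
/// \f[ \frac{GL(t)}{P(t)} = \frac{E_{va}(t)/E_{tr}^{opt}(t) - 1}{\bar E_{tr}(t)/E_{tr}^{opt}(t) - 1} > \text{maxLoss}. \f]
///
/// The following is a minimal usage sketch, not part of the original documentation. The training loop, the variables
/// trainingError, validationError and point, and the (value, validation, point) constructor order of
/// ValidatedSingleObjectiveResultSet are assumptions made purely for illustration:
/// \code
/// GeneralizationQuotient<RealVector> criterion(10, 3.0); // average over 10 iterations, stop when quotient > 3
/// while(/* optimization is running */){
///     // trainingError, validationError and point are assumed to come from the current iteration
///     ValidatedSingleObjectiveResultSet<RealVector> result(trainingError, validationError, point);
///     if(criterion.stop(result))
///         break; // early stopping triggered
/// }
/// \endcode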
template<class PointType = RealVector>
class GeneralizationQuotient: public AbstractStoppingCriterion< ValidatedSingleObjectiveResultSet<PointType> >{
public:
	typedef ValidatedSingleObjectiveResultSet<PointType> ResultSet;

	GeneralizationQuotient(std::size_t intervalSize, double maxLoss){
		SHARK_ASSERT( intervalSize > 0 );
		m_maxLoss = maxLoss;
		m_intervalSize = intervalSize;
		reset();
	}
	/// returns true if training should stop
	bool stop(ResultSet const& set){
		// generalization loss: relative increase of the validation error over the best training error seen so far
		m_minTraining = std::min(m_minTraining, set.value);
		double gl = set.validation/m_minTraining - 1;

		// sliding mean of the training error over the last m_intervalSize iterations
		m_meanPerformance += set.value/m_intervalSize;
		m_interval.push(set.value/m_intervalSize);

		if(m_interval.size() > m_intervalSize){
			m_meanPerformance -= m_interval.front();
			m_interval.pop();
		}
		else
			return false; // not enough iterations observed yet
		// training progress: relative distance of the recent mean training error to the best training error
		double progress = (m_meanPerformance/m_minTraining) - 1;

		return gl/progress > m_maxLoss;
	}
	void reset(){
		m_interval = std::queue<double>();
		m_minTraining = std::numeric_limits<double>::max();
		m_meanPerformance = 0;
	}
protected:
	double m_maxLoss;
	double m_minTraining;
	double m_meanPerformance;

	std::queue<double> m_interval;
	std::size_t m_intervalSize;
};
}

#endif