//===========================================================================
/*!
 *
 * \brief Derivative of a C-SVM hypothesis w.r.t. its hyperparameters.
 *
 * \par
 * This class provides two main member functions for computing the
 * derivative of a C-SVM hypothesis w.r.t. its hyperparameters. First,
 * the derivative is prepared in general. Then, the derivative can be
 * computed comparatively cheaply for any input sample. Needs to be
 * supplied with pointers to a KernelExpansion and CSvmTrainer.
 *
 * \author M. Tuma, T. Glasmachers
 * \date 2007-2012
 *
 * \par Copyright 1995-2017 Shark Development Team
 *
 * <BR><HR>
 * This file is part of Shark.
 * <http://shark-ml.org/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
//===========================================================================


#ifndef SHARK_MODELS_CSVMDERIVATIVE_H
#define SHARK_MODELS_CSVMDERIVATIVE_H

#include <shark/Core/INameable.h>
#include <shark/Core/ISerializable.h>
#include <shark/Models/Kernels/KernelExpansion.h>
#include <shark/Algorithms/Trainers/CSvmTrainer.h>

namespace shark {

/// \brief Derivative of a C-SVM hypothesis w.r.t. its hyperparameters.
///
/// This class provides two main member functions for computing the
/// derivative of a C-SVM hypothesis w.r.t. its hyperparameters.
/// The constructor takes a pointer to a KernelClassifier and an SvmTrainer,
/// on the assumption that the former was trained by the latter. It heavily
/// accesses their members to calculate the derivative of the alpha and offset
/// values w.r.t. the SVM hyperparameters, that is, the regularization
/// parameter C and the kernel parameters. This is done in the member function
/// prepareCSvmParameterDerivative, which is called by the constructor. After this
/// initial, heavier computation step, modelCSvmParameterDerivative can be called
/// on an input sample to the SVM model, and the method will yield the derivative
/// of the hypothesis w.r.t. the SVM hyperparameters.
///
/// \tparam InputType Same input type as that of the kernel expansion.
/// \tparam CacheType While the basic cache type defaults to float in the QP algorithms,
///         it here defaults to double, because the SVM derivative benefits a lot from
///         higher precision.
///
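/// \par Example
/// A minimal usage sketch (with hypothetical data/variable names, assuming an RBF
/// kernel; the classifier must be trained with offset by the trainer, as required above):
/// \code
/// GaussianRbfKernel<> kernel( 0.5 );
/// CSvmTrainer<RealVector> trainer( &kernel, 1.0, true ); // C = 1, with offset b
/// KernelClassifier<RealVector> svm;
/// trainer.train( svm, trainingData );
/// CSvmDerivative<RealVector> svmDeriv( &svm, &trainer );
/// RealVector d;
/// svmDeriv.modelCSvmParameterDerivative( x, d ); // kernel parameter derivatives first, C last
/// \endcode
///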
template< class InputType, class CacheType = double >
class CSvmDerivative : public ISerializable, public INameable
{
public:
    typedef CacheType QpFloatType;
    typedef AbstractKernelFunction<InputType> KernelType;
    typedef KernelClassifier<InputType> KeType;
    typedef CSvmTrainer<InputType, QpFloatType> TrainerType;

protected:

    // key external members through which main information is obtained
    KernelExpansion<InputType>* mep_ke; ///< pointer to the KernelExpansion, which has to have been trained by the SvmTrainer below
    TrainerType* mep_tr; ///< pointer to the SvmTrainer with which the above KernelExpansion has to have been trained
    KernelType* mep_k; ///< convenience pointer to the underlying kernel function
    RealMatrix& m_alpha; ///< convenience reference to the alpha values of the KernelExpansion
    const Data<InputType>& m_basis; ///< convenience reference to the underlying data of the KernelExpansion
    const RealVector& m_db_dParams_from_solver; ///< convenience access to the correction term from the solver, for the rare case that there are no free SVs

    // convenience copies from the CSvmTrainer and the underlying kernel function
    double m_C; ///< the regularization parameter value with which the SvmTrainer trained the KernelExpansion
    bool m_unconstrained; ///< is the unconstrained flag of the SvmTrainer set? Influences the derivatives!
    std::size_t m_nkp; ///< convenience member holding the number of kernel parameters
    std::size_t m_nhp; ///< convenience member holding the number of hyperparameters (the kernel parameters plus one for C)

    // information calculated from the KernelExpansion state in the prepareDerivative step
    std::size_t m_noofFreeSVs; ///< number of free SVs
    std::size_t m_noofBoundedSVs; ///< number of bounded SVs
    std::vector< std::size_t > m_freeAlphaIndices; ///< indices of free SVs
    std::vector< std::size_t > m_boundedAlphaIndices; ///< indices of bounded SVs
    RealVector m_freeAlphas; ///< alpha values of the free SVs
    RealVector m_boundedAlphas; ///< alpha values of the bounded SVs
    RealVector m_boundedLabels; ///< labels of the bounded SVs

    /// Main member and result, computed in the prepareDerivative step:
    /// Stores the derivative of the **free** alphas and the offset b w.r.t. the SVM
    /// hyperparameters, as obtained through the CSvmTrainer (for C) and through the
    /// kernel (for the kernel parameters). Each row corresponds to one **free** alpha,
    /// except for the last row, which corresponds to the offset b; each column corresponds
    /// to one hyperparameter. The **last** column is the derivative of (free_alphas, b)
    /// w.r.t. C; all **previous** columns are w.r.t. the kernel parameters.
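    /// \par
    /// For illustration (a sketch assuming two free SVs and a single kernel parameter
    /// \f$ \gamma \f$, i.e., m_nhp = 2), the layout is
    /// \f[ \begin{pmatrix}
    ///     \partial\alpha_1/\partial\gamma & \partial\alpha_1/\partial C \\
    ///     \partial\alpha_2/\partial\gamma & \partial\alpha_2/\partial C \\
    ///     \partial b/\partial\gamma & \partial b/\partial C
    /// \end{pmatrix}. \f]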
    RealMatrix m_d_alphab_d_theta;

public:

    /// Constructor. Only sets up the main pointers and references to the external instances
    /// and data, and performs basic sanity checks.
    /// \param ke pointer to the KernelClassifier whose decision function has to have been trained by the SvmTrainer below
    /// \param trainer pointer to the SvmTrainer with which the above classifier has to have been trained
    CSvmDerivative( KeType* ke, TrainerType* trainer )
    : mep_ke( &ke->decisionFunction() ),
      mep_tr( trainer ),
      mep_k( trainer->kernel() ),
      m_alpha( mep_ke->alpha() ),
      m_basis( mep_ke->basis() ),
      m_db_dParams_from_solver( trainer->get_db_dParams() ),
      m_C( trainer->C() ),
      m_unconstrained( trainer->isUnconstrained() ),
      m_nkp( trainer->kernel()->numberOfParameters() ),
      m_nhp( trainer->kernel()->numberOfParameters()+1 )
    {
        SHARK_RUNTIME_CHECK( mep_ke != NULL, "[CSvmDerivative::CSvmDerivative] KernelExpansion cannot be NULL."); //checked first, before the pointer is used below
        SHARK_RUNTIME_CHECK( mep_ke->kernel() == trainer->kernel(), "[CSvmDerivative::CSvmDerivative] KernelExpansion and SvmTrainer must use the same KernelFunction.");
        SHARK_RUNTIME_CHECK( mep_ke->outputShape().numElements() == 1, "[CSvmDerivative::CSvmDerivative] only defined for binary SVMs.");
        SHARK_RUNTIME_CHECK( mep_ke->hasOffset(), "[CSvmDerivative::CSvmDerivative] only defined for SVMs with offset.");
        SHARK_RUNTIME_CHECK( m_alpha.size2() == 1, "[CSvmDerivative::CSvmDerivative] only defined for binary SVMs.");
        prepareCSvmParameterDerivative(); //main preparation step
    }

    /// \brief From INameable: return the class name.
    std::string name() const
    { return "CSvmDerivative"; }

    inline const KernelExpansion<InputType>* ke() { return mep_ke; } //return type matches mep_ke (the KernelExpansion, not the classifier)
    inline const TrainerType* trainer() { return mep_tr; }

    //! Computes the derivative of the model w.r.t. regularization and kernel parameters.
    //! The preparation step (prepareCSvmParameterDerivative) is carried out by the constructor
    //! after SVM training, so this function can be called on any fully constructed instance.
    //! \param input an example to be scored by the SVM
    //! \param derivative a vector of derivatives of the score. The last entry is the derivative w.r.t. C.
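    //! \par
    //! In formulas (a summary of what the loops below compute): writing the hypothesis as
    //! \f$ f(x) = \sum_i \alpha_i k_\theta(x, x_i) + b \f$, the returned vector holds, per
    //! hyperparameter \f$ \theta_j \f$,
    //! \f[ \frac{\partial f(x)}{\partial \theta_j}
    //!     = \sum_i \frac{\partial \alpha_i}{\partial \theta_j}\, k_\theta(x, x_i)
    //!     + \sum_i \alpha_i\, \frac{\partial k_\theta(x, x_i)}{\partial \theta_j}
    //!     + \frac{\partial b}{\partial \theta_j}. \f]
    //! For free SVs the \f$ \partial\alpha_i/\partial\theta_j \f$ come from m_d_alphab_d_theta;
    //! for bounded SVs they are zero w.r.t. the kernel parameters and equal to the label
    //! \f$ y_i \f$ w.r.t. C, since those alphas are clamped at \f$ \pm C \f$.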
    void modelCSvmParameterDerivative(const InputType& input, RealVector& derivative )
    {
        // create temporary batch helpers
        RealMatrix unit_weights(1,1,1.0);
        RealMatrix bof_results(1,1);
        typename Batch<InputType>::type bof_xi = Batch<InputType>::createBatch(input,1);
        typename Batch<InputType>::type bof_input = Batch<InputType>::createBatch(input,1);
        getBatchElement(bof_input, 0) = input; //fixed over the entire function scope

        // init helpers
        RealVector der( m_nkp );
        boost::shared_ptr<State> state = mep_k->createState(); //state from eval and for derivatives
        derivative.resize( m_nhp );

        // start calculating the derivative
        noalias(derivative) = row(m_d_alphab_d_theta,m_noofFreeSVs); //first, add db/d(\theta) to all derivatives
        // first: go through the free SVs and add their contributions (the actual ones, which use the matrix d_alphab_d_theta)
        for ( std::size_t i=0; i<m_noofFreeSVs; i++ ) {
            getBatchElement(bof_xi, 0) = m_basis.element(m_freeAlphaIndices[i]);
            mep_k->eval( bof_input, bof_xi, bof_results, *state );
            double ker = bof_results(0,0);
            double cur_alpha = m_freeAlphas(i);
            mep_k->weightedParameterDerivative( bof_input, bof_xi, unit_weights, *state, der );
            noalias(derivative) += ker * row(m_d_alphab_d_theta,i); //for C, simply add up the individual contributions
            noalias(subrange(derivative,0,m_nkp)) += cur_alpha*der;
        }
        // second: go through all bounded SVs and add their "trivial" derivative contributions
        for ( std::size_t i=0; i<m_noofBoundedSVs; i++ ) {
            getBatchElement(bof_xi, 0) = m_basis.element(m_boundedAlphaIndices[i]);
            mep_k->eval( bof_input, bof_xi, bof_results, *state );
            double ker = bof_results(0,0);
            double cur_label = m_boundedLabels(i);
            mep_k->weightedParameterDerivative( bof_input, bof_xi, unit_weights, *state, der );
            derivative( m_nkp ) += ker * cur_label; //the deriv of a bounded alpha w.r.t. C is simply the label
            noalias(subrange(derivative,0,m_nkp)) += cur_label * m_C * der;
        }
        if ( m_unconstrained )
            derivative( m_nkp ) *= m_C; //compensate for the log encoding of C via the chain rule
        //(the kernel parameter derivatives are correctly differentiated according to their
        // respective encodings via the kernel's derivative, so we don't need to correct for those.)

        // in some rare cases, there are no free SVs and we have to manually correct the
        // derivatives using a correction term from the SvmTrainer
        if ( m_noofFreeSVs == 0 ) {
            noalias(derivative) += m_db_dParams_from_solver;
        }
    }

    //! Whether there are free SVs in the solution. Useful for monitoring degenerate solutions
    //! with only bounded and no free SVs.
    bool hasFreeSVs() { return ( m_noofFreeSVs != 0 ); }
    //! Whether there are bounded SVs in the solution. Useful for monitoring degenerate solutions
    //! with only bounded and no free SVs.
    bool hasBoundedSVs() { return ( m_noofBoundedSVs != 0 ); }

    /// Access to the matrix of the SVM coefficients' derivatives. The derivative w.r.t. C is in the last column.
    const RealMatrix& get_dalphab_dtheta() {
        return m_d_alphab_d_theta;
    }

    /// From ISerializable, reads a network from an archive.
    virtual void read( InArchive & archive ) {
    }
    /// From ISerializable, writes a network to an archive.
    virtual void write( OutArchive & archive ) const {
    }

private:

    ///////////  DERIVATIVE OF BINARY (C-)SVM  ////////////////////

    //! Fill m_d_alphab_d_theta, the matrix of the derivatives of the free SVs (and b) w.r.t. the
    //! C-SVM hyperparameters, as obtained through the CSvmTrainer (for C) and through the kernel
    //! (for the kernel parameters).
    //! \par
    //! Note: we follow the alpha-encoding conventions of Glasmachers' dissertation, where the
    //! alpha values are re-encoded by multiplying each with the corresponding label.
    //!
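    //! \par
    //! For orientation, a sketch of the relation being differentiated (following the code
    //! below and chapter 9.3 of the dissertation): for the free SVs, the KKT conditions fix
    //! \f$ f(x_i) = y_i \f$, which together with the equality constraint \f$ \sum_i \alpha_i = 0 \f$
    //! gives the linear system
    //! \f[ H \begin{pmatrix} \alpha_f \\ b \end{pmatrix} + R\,\alpha_b = \begin{pmatrix} y_f \\ 0 \end{pmatrix} \f]
    //! with H and R as built below. Differentiating w.r.t. a kernel parameter \f$ \theta \f$ yields
    //! \f[ \frac{\partial (\alpha_f, b)}{\partial \theta}
    //!     = -H^{-1} \left( \frac{\partial H}{\partial \theta} \begin{pmatrix} \alpha_f \\ b \end{pmatrix}
    //!     + \frac{\partial R}{\partial \theta}\, \alpha_b \right), \f]
    //! and differentiating w.r.t. C (where \f$ \alpha_b = C\, y_b \f$) yields \f$ -H^{-1} R\, y_b \f$.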
    void prepareCSvmParameterDerivative() {
        // init convenience size indicator
        std::size_t numberOfAlphas = m_alpha.size1();

        // first round through the alphas: count free and bounded SVs
        for ( std::size_t i=0; i<numberOfAlphas; i++ ) {
            double cur_alpha = m_alpha(i,0); //we assume (and checked) that there is only one class
            if ( cur_alpha != 0.0 ) {
                if ( cur_alpha == m_C || cur_alpha == -m_C ) { //the SVM formulation using reparametrized alphas is assumed
                    m_boundedAlphaIndices.push_back(i);
                } else {
                    m_freeAlphaIndices.push_back(i);
                }
            }
        }
        m_noofFreeSVs = m_freeAlphaIndices.size(); //don't forget to add b to the count where appropriate
        m_noofBoundedSVs = m_boundedAlphaIndices.size();
        // in contrast to the Shark2 implementation, we don't store useless constants here (i.e., 0, 1, -1),
        // but only the derivatives w.r.t. (\alpha_free, b)
        m_d_alphab_d_theta.resize( m_noofFreeSVs+1, m_nhp );
        m_d_alphab_d_theta.clear();
        m_freeAlphaIndices.push_back( numberOfAlphas ); //b is always free (but don't forget to add it to the count manually)

        // second round through the alphas: build up the RealVectors of free and bounded alphas
        // (needed for matrix-vector products later)
        m_freeAlphas.resize( m_noofFreeSVs+1 );
        m_boundedAlphas.resize( m_noofBoundedSVs );
        m_boundedLabels.resize( m_noofBoundedSVs );
        for ( std::size_t i=0; i<m_noofFreeSVs; i++ )
            m_freeAlphas(i) = m_alpha( m_freeAlphaIndices[i], 0 );
        m_freeAlphas( m_noofFreeSVs ) = mep_ke->offset(0);
        for ( std::size_t i=0; i<m_noofBoundedSVs; i++ ) {
            double cur_alpha = m_alpha( m_boundedAlphaIndices[i], 0 );
            m_boundedAlphas(i) = cur_alpha;
            m_boundedLabels(i) = ( (cur_alpha > 0.0) ? 1.0 : -1.0 );
        }

        // if there are no free support vectors, we are done; modelCSvmParameterDerivative
        // then relies on the correction term from the solver.
        if ( m_noofFreeSVs == 0 ) {
            return;
        }

        // set up helper variables.
        // See Tobias Glasmachers' dissertation, chapter 9.3, for a derivation of these
        // quantities and a definition of these variables. It is easy to follow this
        // code with that chapter open. The Keerthi et al. paper "An efficient method
        // for gradient-based adaptation of hyperparameters in SVM models" is also
        // highly recommended for cross-reference.
        RealVector der( m_nkp ); //derivative storage helper
        boost::shared_ptr<State> state = mep_k->createState(); //state object for derivatives

        // create temporary batch helpers; since the degenerate case returned above,
        // there is at least one free SV, and any of its inputs serves as the prototype
        RealMatrix unit_weights(1,1,1.0);
        RealMatrix bof_results(1,1);
        typename Batch<InputType>::type bof_xi = Batch<InputType>::createBatch( m_basis.element(m_freeAlphaIndices[0]), 1 );
        typename Batch<InputType>::type bof_xj = Batch<InputType>::createBatch( m_basis.element(m_freeAlphaIndices[0]), 1 );

        // initialize H and dH
        // H is the matrix
        //     H = ( K  1 )
        //         ( 1  0 )
        // where the ones denote row/column vectors of ones, 0 is a scalar,
        // and K is the kernel matrix spanned by the free support vectors
        RealMatrix H( m_noofFreeSVs + 1, m_noofFreeSVs + 1, 0.0 );
        std::vector< RealMatrix > dH( m_nkp, RealMatrix(m_noofFreeSVs+1, m_noofFreeSVs+1) );
        for ( std::size_t i=0; i<m_noofFreeSVs; i++ ) {
            getBatchElement(bof_xi, 0) = m_basis.element(m_freeAlphaIndices[i]); //fixed over the outer loop
            // fill the off-diagonal entries..
            for ( std::size_t j=0; j<i; j++ ) {
                getBatchElement(bof_xj, 0) = m_basis.element(m_freeAlphaIndices[j]); //get the second sample into a batch
                mep_k->eval( bof_xi, bof_xj, bof_results, *state );
                H( i,j ) = H( j,i ) = bof_results(0,0);
                mep_k->weightedParameterDerivative( bof_xi, bof_xj, unit_weights, *state, der );
                for ( std::size_t k=0; k<m_nkp; k++ ) {
                    dH[k]( i,j ) = dH[k]( j,i ) = der(k);
                }
            }
            // ..then fill the diagonal entries..
            mep_k->eval( bof_xi, bof_xi, bof_results, *state );
            H( i,i ) = bof_results(0,0);
            H( i, m_noofFreeSVs ) = H( m_noofFreeSVs, i ) = 1.0;
            mep_k->weightedParameterDerivative( bof_xi, bof_xi, unit_weights, *state, der );
            for ( std::size_t k=0; k<m_nkp; k++ ) {
                dH[k]( i,i ) = der(k);
            }
            // ..and finally the last row/column (pertaining to the offset parameter b)..
            for ( std::size_t k=0; k<m_nkp; k++ )
                dH[k]( m_noofFreeSVs, i ) = dH[k]( i, m_noofFreeSVs ) = 0.0;
        }
        // ..the lower-right-most entry gets set separately:
        for ( std::size_t k=0; k<m_nkp; k++ ) {
            dH[k]( m_noofFreeSVs, m_noofFreeSVs ) = 0.0;
        }

        // initialize R and dR
        RealMatrix R( m_noofFreeSVs+1, m_noofBoundedSVs );
        std::vector< RealMatrix > dR( m_nkp, RealMatrix(m_noofFreeSVs+1, m_noofBoundedSVs) );
        for ( std::size_t i=0; i<m_noofBoundedSVs; i++ ) {
            getBatchElement(bof_xi, 0) = m_basis.element(m_boundedAlphaIndices[i]); //fixed over the outer loop
            for ( std::size_t j=0; j<m_noofFreeSVs; j++ ) { //this time, we (have to) do it row by row
                getBatchElement(bof_xj, 0) = m_basis.element(m_freeAlphaIndices[j]); //get the second sample into a batch
                mep_k->eval( bof_xi, bof_xj, bof_results, *state );
                R( j,i ) = bof_results(0,0);
                mep_k->weightedParameterDerivative( bof_xi, bof_xj, unit_weights, *state, der );
                for ( std::size_t k=0; k<m_nkp; k++ )
                    dR[k]( j,i ) = der(k);
            }
            R( m_noofFreeSVs, i ) = 1.0; //last row is for b
            for ( std::size_t k=0; k<m_nkp; k++ )
                dR[k]( m_noofFreeSVs, i ) = 0.0;
        }

        //O.K.: A big step in the computation of the derivative m_d_alphab_d_theta is
        // the multiplication with H^{-1} (the other terms form the right-hand sides b_i).
        // However, instead of storing m_d_alphab_d_theta_i = -H^{-1}*b_i directly,
        // we first store the b_i and carry out the multiplication with the inverse
        // afterwards, by solving the system H*x_i = b_i for i = 1,...,m_nkp+1.
        // This is a lot faster and numerically more stable.

        // compute the derivative of (\alpha, b) w.r.t. C
        if ( m_noofBoundedSVs > 0 ) {
            noalias(column(m_d_alphab_d_theta,m_nkp)) = prod( R, m_boundedLabels );
        }
        // compute the derivative of (\alpha, b) w.r.t. the kernel parameters
        for ( std::size_t k=0; k<m_nkp; k++ ) {
            RealVector sum = prod( dH[k], m_freeAlphas ); //sum = dH * \alpha_f
            if ( m_noofBoundedSVs > 0 )
                noalias(sum) += prod( dR[k], m_boundedAlphas ); //sum += dR * \alpha_b, i.e., C*y_g is expressed as alpha_g
            //fill the remaining columns of the derivative matrix (all except the last, which is for C)
            noalias(column(m_d_alphab_d_theta,k)) = sum;
        }

        // lastly, solve the systems H*x_i = b_i.
        // MAJOR STEP: this is the Achilles heel of the current implementation, cf. Keerthi et al., 2007.
        // TODO: mtq: explore ways for speed-up..
        // computed via the Moore-Penrose pseudo-inverse
        noalias(m_d_alphab_d_theta) = -solveH( H, m_d_alphab_d_theta );

        // that's all, folks; we're done.
    }

    RealMatrix solveH(RealMatrix const& H, RealMatrix const& rhs){
        // implemented via the Moore-Penrose pseudo-inverse: solve the normal equations
        // (H^T H) x = H^T rhs; since H is symmetric, H^T * rhs equals H * rhs,
        // and H^T H is symmetric positive semi-definite
        RealMatrix HTH = prod(trans(H), H);
        RealMatrix result = solve(HTH, prod(H, rhs), blas::symm_semi_pos_def(), blas::left());
        return result;
    }
};//class


}//namespace shark
#endif