KernelTargetAlignment.h
/*!
 *
 *
 * \brief Kernel Target Alignment - a measure of alignment of a kernel Gram matrix with labels.
 *
 *
 *
 * \author T. Glasmachers, O. Krause
 * \date 2010-2012
 *
 *
 * \par Copyright 1995-2017 Shark Development Team
 *
 * <BR><HR>
 * This file is part of Shark.
 * <http://shark-ml.org/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
#ifndef SHARK_OBJECTIVEFUNCTIONS_KERNELTARGETALIGNMENT_H
#define SHARK_OBJECTIVEFUNCTIONS_KERNELTARGETALIGNMENT_H

#include <shark/ObjectiveFunctions/AbstractObjectiveFunction.h>
#include <shark/Data/Dataset.h>
#include <shark/Data/Statistics.h>
#include <shark/Models/Kernels/AbstractKernelFunction.h>


namespace shark{

/*!
 * \brief Kernel Target Alignment - a measure of alignment of a kernel Gram matrix with labels.
 *
 * \par
 * The Kernel Target Alignment (KTA) was originally proposed in the paper:<br/>
 * <i>On Kernel-Target Alignment</i>. N. Cristianini, J. Shawe-Taylor,
 * A. Elisseeff, J. Kandola. Innovations in Machine Learning, 2006.<br/>
 * Here we provide a version with centering of the features as proposed
 * in the paper:<br/>
 * <i>Two-Stage Learning Kernel Algorithms</i>. C. Cortes, M. Mohri,
 * A. Rostamizadeh. ICML 2010.<br/>
 *
 * \par
 * The kernel target alignment is defined as
 * \f[ \hat A = \frac{\langle K, y y^T \rangle}{\sqrt{\langle K, K \rangle \cdot \langle y y^T, y y^T \rangle}} \f]
 * where K is the kernel Gram matrix of the data and y is the vector of
 * +1/-1 valued labels. The outer product \f$ y y^T \f$ corresponds to
 * an "ideal" Gram matrix corresponding to a kernel that maps
 * the two classes each to a single point, thus minimizing within-class
 * distance for fixed inter-class distance. The inner products denote the
 * Frobenius product of matrices:
 * http://en.wikipedia.org/wiki/Matrix_multiplication#Frobenius_product
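 *
 * \par
 * For +1/-1 valued labels it holds \f$ \langle y y^T, y y^T \rangle = \ell^2 \f$, where
 * \f$ \ell \f$ is the number of data points, so the alignment simplifies to
 * \f[ \hat A = \frac{\langle K, y y^T \rangle}{\ell \sqrt{\langle K, K \rangle}}. \f]
 * The implementation below uses this normalization by \f$ \ell \f$; since
 * \f$ \langle y y^T, y y^T \rangle \f$ does not depend on the kernel, this changes the
 * objective only by a constant factor and does not affect the optimization.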
 *
 * \par
 * In kernel-based learning, the kernel Gram matrix K is of the form
 * \f[ K_{i,j} = k(x_i, x_j) = \langle \phi(x_i), \phi(x_j) \rangle \f]
 * for a Mercer kernel function k and inputs \f$ x_i, x_j \f$. In this
 * version of the KTA we use centered feature vectors. Let
 * \f[ \psi(x_i) = \phi(x_i) - \frac{1}{\ell} \sum_{j=1}^{\ell} \phi(x_j) \f]
 * denote the centered feature vectors, then the centered Gram matrix
 * \f$ K^c \f$ is given by
 * \f[ K^c_{i,j} = \langle \psi(x_i), \psi(x_j) \rangle = K_{i,j} - \frac{1}{\ell} \sum_{n=1}^{\ell} \left( K_{i,n} + K_{j,n} \right) + \frac{1}{\ell^2} \sum_{m,n=1}^{\ell} K_{n,m} \f]
 * The alignment measure computed by this class is the exact same formula
 * for \f$ \hat A \f$, but with \f$ K^c \f$ substituted for \f$ K \f$.
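 * In matrix notation the centering amounts to
 * \f[ K^c = \left(I - \frac{1}{\ell} u u^T\right) K \left(I - \frac{1}{\ell} u u^T\right), \f]
 * where u denotes the vector of all ones.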
 *
 * \par
 * KTA measures the Frobenius inner product between a kernel Gram matrix
 * and this ideal matrix. The interpretation is that KTA measures how
 * well a given kernel fits a classification problem. The actual measure
 * is invariant under kernel rescaling.
 * In Shark, objective functions are minimized by convention. Therefore
 * the negative alignment \f$ - \hat A \f$ is implemented. The measure is
 * extended for multi-class problems by using prototype vectors instead
 * of scalar labels.
 *
 * \par
 * The following properties of KTA are important from a model selection
 * point of view: it is relatively fast and easy to compute, it is
 * differentiable w.r.t. the kernel function, and it is independent of
 * the actual classifier.
 *
 * \par
 * The following notation is used in several of the methods of the class:
 * \f$ K^c \f$ denotes the centered Gram matrix, y is the vector of labels,
 * Y is the outer product of this vector with itself, k is the row
 * (or column) wise average of the uncentered Gram matrix K, mk and my are
 * the overall means of K and Y, u is the vector of all ones, and
 * \f$ \ell \f$ is the number of data points and thus the size of the Gram matrix.
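 *
 * \par
 * A minimal usage sketch (illustrative only; it assumes a GaussianRbfKernel,
 * the Rprop optimizer IRpropPlus and a classification dataset named data -
 * adapt these to your setup):
 * \code
 * GaussianRbfKernel<> kernel(0.5);                      // initial kernel parameter (gamma)
 * KernelTargetAlignment<RealVector> kta(data, &kernel); // negative alignment as objective
 * IRpropPlus optimizer;
 * optimizer.init(kta);                                  // starts from the current kernel parameters
 * for(std::size_t step = 0; step != 100; ++step)
 *     optimizer.step(kta);                              // gradient-based tuning of the kernel
 * kernel.setParameterVector(optimizer.solution().point);
 * \endcode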
 */
template<class InputType = RealVector, class LabelType = unsigned int>
class KernelTargetAlignment : public SingleObjectiveFunction
{
private:
    typedef typename Batch<LabelType>::type BatchLabelType;
public:
    /// \brief Construction of the Kernel Target Alignment (KTA) from a kernel object.
    KernelTargetAlignment(
        LabeledData<InputType, LabelType> const& dataset,
        AbstractKernelFunction<InputType>* kernel,
        bool centering = true
    ){
        SHARK_RUNTIME_CHECK(kernel != NULL, "[KernelTargetAlignment] kernel must not be NULL");

        mep_kernel = kernel;

        m_features |= HAS_VALUE;
        m_features |= CAN_PROPOSE_STARTING_POINT;

        if(mep_kernel -> hasFirstParameterDerivative())
            m_features |= HAS_FIRST_DERIVATIVE;

        m_data = dataset;
        m_elements = dataset.numberOfElements();
        m_centering = centering;

        setupY(dataset.labels(), centering);
    }

    /// \brief From INameable: return the class name.
    std::string name() const
    { return "KernelTargetAlignment"; }

    /// Return the current kernel parameters as a starting point for an optimization run.
    SearchPointType proposeStartingPoint() const {
        return mep_kernel -> parameterVector();
    }


    std::size_t numberOfVariables()const{
        return mep_kernel->numberOfParameters();
    }

    /// \brief Evaluate the (centered, negative) Kernel Target Alignment (KTA).
    ///
    /// See the class description for more details on this computation.
    double eval(RealVector const& input) const{
        mep_kernel->setParameterVector(input);

        return -evaluateKernelMatrix().error;
    }

    /// \brief Compute the derivative of the KTA as a function of the kernel parameters.
    ///
    /// It holds:
    /// \f[ \langle K^c, K^c \rangle = \langle K,K \rangle - 2 \ell \langle k,k \rangle + mk^2 \ell^2 \\
    ///     (\langle K^c, K^c \rangle)' = 2 \langle K,K' \rangle - 4 \ell \langle k, \frac{1}{\ell} \sum_j K'_{ij} \rangle + 2 \ell^2 mk \sum_{ij} \frac{1}{\ell^2} K'_{ij} \\
    ///     = 2 \langle K,K' \rangle - 4 \langle k, \sum_j K'_{ij} \rangle + 2 mk \sum_{ij} K'_{ij} \\
    ///     = 2 \langle K,K' \rangle - 4 \langle k u^T, K' \rangle + 2 mk \langle u u^T, K' \rangle \\
    ///     = 2 \langle K - 2 k u^T + mk\, u u^T, K' \rangle \\
    ///     \langle Y, K^c \rangle = \langle Y, K \rangle - 2 \ell \langle y, k \rangle + \ell^2 my\, mk \\
    ///     (\langle Y, K^c \rangle)' = \langle Y - 2 y u^T + my\, u u^T, K' \rangle \f]
    /// The derivative is then computed from these values in a second sweep over the data:
    /// \f[ \hat A' = \frac{1}{\langle K^c,K^c \rangle^{3/2}} \left( \langle K^c,K^c \rangle (\langle Y,K^c \rangle)' - \frac{1}{2} \langle Y, K^c \rangle (\langle K^c, K^c \rangle)' \right) \\
    ///     = \frac{1}{\langle K^c,K^c \rangle^{3/2}} \langle \langle K^c,K^c \rangle (Y - 2 y u^T + my\, u u^T) - \langle Y, K^c \rangle (K - 2 k u^T + mk\, u u^T), K' \rangle \\
    ///     = \frac{1}{\langle K^c,K^c \rangle^{3/2}} \langle W, K' \rangle \f]
    /// Reordering results in
    /// \f[ W = \langle K^c,K^c \rangle Y - \langle Y, K^c \rangle K \\
    ///     - 2 (\langle K^c,K^c \rangle y - \langle Y, K^c \rangle k) u^T \\
    ///     + (\langle K^c,K^c \rangle my - \langle Y, K^c \rangle mk) u u^T \f]
    /// where \f$ K' \f$ is the derivative of K with respect to the kernel parameters.
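    ///
    /// Only the lower triangular blocks (j <= i) of the symmetric matrices are evaluated;
    /// generateDerivativeWeightBlock() doubles the weight of the off-diagonal blocks to
    /// account for the mirrored blocks that are never visited.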
    ResultType evalDerivative( const SearchPointType & input, FirstOrderDerivative & derivative ) const {
        mep_kernel->setParameterVector(input);
        // the derivative is calculated in two sweeps over the data. In the first sweep the required statistics
        // \langle K^c,K^c \rangle, mk and k are evaluated exactly as in eval

        KernelMatrixResults results = evaluateKernelMatrix();

        std::size_t parameters = mep_kernel->numberOfParameters();
        derivative.resize(parameters);
        derivative.clear();
        SHARK_PARALLEL_FOR(int i = 0; i < (int)m_data.numberOfBatches(); ++i){
            std::size_t startX = 0;
            for(int j = 0; j != i; ++j){
                startX += batchSize(m_data.batch(j));
            }
            RealVector threadDerivative(parameters,0.0);
            RealVector blockDerivative;
            boost::shared_ptr<State> state = mep_kernel->createState();
            RealMatrix blockK;//block of the kernel matrix
            RealMatrix blockW;//block of the weight matrix
            std::size_t startY = 0;
            for(int j = 0; j <= i; ++j){
                mep_kernel->eval(m_data.batch(i).input,m_data.batch(j).input,blockK,*state);
                mep_kernel->weightedParameterDerivative(
                    m_data.batch(i).input,m_data.batch(j).input,
                    generateDerivativeWeightBlock(i,j,startX,startY,blockK,results),//takes symmetry into account
                    *state,
                    blockDerivative
                );
                noalias(threadDerivative) += blockDerivative;
                startY += batchSize(m_data.batch(j));
            }
            SHARK_CRITICAL_REGION{
                noalias(derivative) += threadDerivative;
            }
        }
        derivative *= -1;
        derivative /= m_elements;
        return -results.error;
    }

private:
    AbstractKernelFunction<InputType>* mep_kernel;  ///< kernel function
    LabeledData<InputType,LabelType> m_data;        ///< data points
    RealVector m_columnMeanY;                       ///< mean label vector
    double m_meanY;                                 ///< mean label element
    std::size_t m_numberOfClasses;                  ///< number of classes
    std::size_t m_elements;                         ///< number of data points
    bool m_centering;                               ///< whether the centered version of the KTA is used

    struct KernelMatrixResults{
        RealVector k;   ///< row/column means of the uncentered Gram matrix K
        double KcKc;    ///< \f$ \langle K^c, K^c \rangle \f$
        double YcKc;    ///< \f$ \langle Y, K^c \rangle \f$
        double error;   ///< the (positive) alignment value
        double meanK;   ///< overall mean of the uncentered Gram matrix
    };

    void setupY(Data<unsigned int>const& labels, bool centering){
        //preprocess Y: calculate the column means and the overall mean
        //the most efficient way to do this is via the class counts
        std::vector<std::size_t> classCount = classSizes(labels);
        m_numberOfClasses = classCount.size();
        RealVector classMean(m_numberOfClasses);
        double dm1 = m_numberOfClasses - 1.0;
        m_meanY = 0;
        for(std::size_t i = 0; i != m_numberOfClasses; ++i){
            classMean(i) = classCount[i] - (m_elements - classCount[i])/dm1;
            m_meanY += classCount[i] * classMean(i);
        }
        classMean /= m_elements;
        m_meanY /= sqr(double(m_elements));
        m_columnMeanY.resize(m_elements);
        for(std::size_t i = 0; i != m_elements; ++i){
            m_columnMeanY(i) = classMean(labels.element(i));
        }
        if(!centering){
            m_meanY = 0;
            m_columnMeanY.clear();
        }
    }

    void setupY(Data<RealVector>const& labels, bool centering){
        RealVector meanLabel = mean(labels);
        m_columnMeanY.resize(m_elements);
        for(std::size_t i = 0; i != m_elements; ++i){
            m_columnMeanY(i) = inner_prod(labels.element(i),meanLabel);
        }
        m_meanY = inner_prod(meanLabel,meanLabel);
        if(!centering){
            m_meanY = 0;
            m_columnMeanY.clear();
        }
    }
    void computeBlockY(UIntVector const& labelsi, UIntVector const& labelsj, RealMatrix& blockY)const{
        std::size_t blockSize1 = labelsi.size();
        std::size_t blockSize2 = labelsj.size();
        double dm1 = m_numberOfClasses - 1.0;
        for(std::size_t k = 0; k != blockSize1; ++k){
            for(std::size_t l = 0; l != blockSize2; ++l){
                if( labelsi(k) == labelsj(l))
                    blockY(k,l) = 1;
                else
                    blockY(k,l) = -1.0/dm1;
            }
        }
    }

    void computeBlockY(RealMatrix const& labelsi, RealMatrix const& labelsj, RealMatrix& blockY)const{
        noalias(blockY) = labelsi % trans(labelsj);
    }

    /// Update a sub-block of the matrix \f$ \langle Y, K \rangle \f$.
    double updateYK(UIntVector const& labelsi, UIntVector const& labelsj, RealMatrix const& block)const{
        std::size_t blockSize1 = labelsi.size();
        std::size_t blockSize2 = labelsj.size();
        //todo optimize the i=j case
        double result = 0;
        double dm1 = m_numberOfClasses - 1.0;
        for(std::size_t k = 0; k != blockSize1; ++k){
            for(std::size_t l = 0; l != blockSize2; ++l){
                if(labelsi(k) == labelsj(l))
                    result += block(k,l);
                else
                    result -= block(k,l)/dm1;
            }
        }
        return result;
    }

    /// Update a sub-block of the matrix \f$ \langle Y, K \rangle \f$.
    double updateYK(RealMatrix const& labelsi, RealMatrix const& labelsj, RealMatrix const& block)const{
        RealMatrix Y(labelsi.size1(), labelsj.size1());
        computeBlockY(labelsi,labelsj,Y);
        return sum(Y * block); // element-wise product followed by sum = Frobenius inner product
    }

    /// Compute a sub-block of the matrix
    /// \f[ W = \langle K^c, K^c \rangle Y - \langle Y, K^c \rangle K - 2 (\langle K^c, K^c \rangle y - \langle Y, K^c \rangle k) u^T + (\langle K^c, K^c \rangle my - \langle Y, K^c \rangle mk) u u^T \f]
    RealMatrix generateDerivativeWeightBlock(
        std::size_t i, std::size_t j,
        std::size_t start1, std::size_t start2,
        RealMatrix const& blockK,
        KernelMatrixResults const& matrixStatistics
    )const{
        std::size_t blockSize1 = batchSize(m_data.batch(i));
        std::size_t blockSize2 = batchSize(m_data.batch(j));
        //double n = m_elements;
        double KcKc = matrixStatistics.KcKc;
        double YcKc = matrixStatistics.YcKc;
        double meanK = matrixStatistics.meanK;
        RealMatrix blockW(blockSize1,blockSize2);

        //first calculate \langle Kc,Kc \rangle Y.
        computeBlockY(m_data.batch(i).label,m_data.batch(j).label,blockW);
        blockW *= KcKc;
        //- \langle Y,K^c \rangle K
        blockW -= YcKc*blockK;
        // -2(\langle Kc,Kc \rangle y - \langle Y, K^c \rangle k) u^T
        // implemented symmetrically as -(\langle Kc,Kc \rangle y - \langle Y, K^c \rangle k) u^T - u (\langle Kc,Kc \rangle y - \langle Y, K^c \rangle k)^T,
        // which yields the same value of \langle W, K' \rangle because K' is symmetric.
        blockW -= repeat(subrange(KcKc*m_columnMeanY - YcKc*matrixStatistics.k,start2,start2+blockSize2),blockSize1);
        blockW -= trans(repeat(subrange(KcKc*m_columnMeanY - YcKc*matrixStatistics.k,start1,start1+blockSize1),blockSize2));
        // + (\langle Kc,Kc \rangle my - \langle Y, Kc \rangle mk) u u^T
        blockW += KcKc*m_meanY - YcKc*meanK;
        blockW /= KcKc*std::sqrt(KcKc);
        //symmetry: off-diagonal blocks are visited only once, so count them twice
        if(i != j)
            blockW *= 2.0;
        return blockW;
    }

    /// \brief Evaluate the centered kernel Gram matrix.
    ///
    /// The computation is as follows. By means of a
    /// number of identities it holds
    /// \f[ \langle K^c, K^c \rangle = \langle K,K \rangle - 2n\langle k,k \rangle + mk^2 n^2 \\
    ///     \langle K^c, Y \rangle = \langle K, Y \rangle - 2n \langle k, y \rangle + n^2 mk\, my \f]
    /// where k is the row mean over K and y the row mean over Y, mk, my the total means of K and Y,
    /// and n the number of elements.
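    /// Both identities follow from expanding \f$ K^c = K - k u^T - u k^T + mk\, u u^T \f$
    /// inside the Frobenius products and using the symmetry of K and Y.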
    KernelMatrixResults evaluateKernelMatrix()const{
        //it holds
        // \langle K^c,K^c \rangle = \langle K,K \rangle - 2n\langle k,k \rangle + mk^2 n^2
        // \langle K^c,Y \rangle = \langle K, Y \rangle - 2n \langle k, y \rangle + n^2 mk my
        // where k is the row mean over K and y the row mean over Y, mk, my the total means of K and Y
        // and n the number of elements

        double KK = 0; //stores \langle K,K \rangle
        double YK = 0; //stores \langle Y,K \rangle (centering is applied afterwards)
        RealVector k(m_elements,0.0);//stores the row/column means of K
        SHARK_PARALLEL_FOR(int i = 0; i < (int)m_data.numberOfBatches(); ++i){
            std::size_t startRow = 0;
            for(int j = 0; j != i; ++j){
                startRow += batchSize(m_data.batch(j));
            }
            std::size_t rowSize = batchSize(m_data.batch(i));
            double threadKK = 0;
            double threadYK = 0;
            RealVector threadk(m_elements,0.0);
            std::size_t startColumn = 0; //starting column of the current block
            for(int j = 0; j <= i; ++j){
                std::size_t columnSize = batchSize(m_data.batch(j));
                RealMatrix blockK = (*mep_kernel)(m_data.batch(i).input,m_data.batch(j).input);
                if(i == j){
                    threadKK += frobenius_prod(blockK,blockK);
                    subrange(threadk,startColumn,startColumn+columnSize) += sum_rows(blockK);//update sum_rows(K)
                    threadYK += updateYK(m_data.batch(i).label,m_data.batch(j).label,blockK);
                }
                else{//use symmetry of K
                    threadKK += 2.0 * frobenius_prod(blockK,blockK);
                    subrange(threadk,startColumn,startColumn+columnSize) += sum_rows(blockK);
                    subrange(threadk,startRow,startRow+rowSize) += sum_columns(blockK);//symmetry: block(j,i)
                    threadYK += 2.0 * updateYK(m_data.batch(i).label,m_data.batch(j).label,blockK);
                }
                startColumn += columnSize;
            }
            SHARK_CRITICAL_REGION{
                KK += threadKK;
                YK += threadYK;
                noalias(k) += threadk;
            }
        }
        //calculate the error
        double n = (double)m_elements;
        k /= n;//means
        double meanK = sum(k)/n;
        if(!m_centering){
            k.clear();
            meanK = 0;
        }
        double n2 = sqr(n);
        double YcKc = YK - 2.0*n*inner_prod(k,m_columnMeanY) + n2*m_meanY*meanK;
        double KcKc = KK - 2.0*n*inner_prod(k,k) + n2*sqr(meanK);

        KernelMatrixResults results;
        results.k = k;
        results.YcKc = YcKc;
        results.KcKc = KcKc;
        results.meanK = meanK;
        results.error = YcKc/std::sqrt(KcKc)/n;
        return results;
    }
};


}
#endif