ExampleModifiedKernelMatrix.h
//===========================================================================
/*!
 *
 * \brief       Kernel matrix which supports kernel evaluations on data with missing features.
 *
 * \author      T. Glasmachers
 * \date        2007-2012
 *
 * \par Copyright 1995-2017 Shark Development Team
 *
 * <BR><HR>
 * This file is part of Shark.
 * <http://shark-ml.org/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
//===========================================================================

#ifndef SHARK_LINALG_EXAMPLEMODIFIEDKERNELMATRIX_H
#define SHARK_LINALG_EXAMPLEMODIFIEDKERNELMATRIX_H

#include <shark/Data/Dataset.h>
#include <shark/LinAlg/Base.h>

#include <vector>
#include <cmath>
#include <algorithm>


namespace shark {

/// Kernel matrix which supports kernel evaluations on data with missing features. At the same time,
/// the entry of the Gram matrix between examples i and j can be multiplied by two scaling factors
/// corresponding to the examples i and j, respectively. To this end, this class holds a vector of
/// as many scaling coefficients as there are examples in the dataset.
/// @note Most of the code in this class is borrowed from KernelMatrix by copy/paste, which is
/// obviously terribly ugly. We could/should refactor the classes in this file as soon as possible.
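///
/// \par Example
/// A minimal usage sketch; GaussianRbfKernel and createDataFromRange come from the wider Shark
/// library and are assumptions of this illustration rather than part of this file:
/// \code
/// std::vector<RealVector> points(3, RealVector(2, 0.0));  // toy data; NaN entries would mark missing features
/// Data<RealVector> data = createDataFromRange(points);
/// GaussianRbfKernel<RealVector> rbf(0.5);
/// ExampleModifiedKernelMatrix<RealVector, double> km(rbf, data);
/// RealVector scaling(km.size(), 1.0);                     // one scaling coefficient per example
/// km.setScalingCoefficients(scaling);
/// double k01 = km.entry(0, 1);                            // K(x_0, x_1) / (s_0 * s_1)
/// \endcode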
template <typename InputType, typename CacheType>
class ExampleModifiedKernelMatrix
{
public:
    typedef CacheType QpFloatType;

    /// Constructor
    /// \param kernelfunction kernel function defining the Gram matrix
    /// \param data           data to evaluate the kernel function
    ExampleModifiedKernelMatrix(
        AbstractKernelFunction<InputType> const& kernelfunction,
        Data<InputType> const& data)
    : kernel(kernelfunction)
    , m_accessCounter( 0 )
    {
        std::size_t elements = data.numberOfElements();
        x.resize(elements);
        std::iota(x.begin(), x.end(), data.elements().begin());
    }

    /// return a single matrix entry
    QpFloatType operator () (std::size_t i, std::size_t j) const
    { return entry(i, j); }

    /// swap two variables
    void flipColumnsAndRows(std::size_t i, std::size_t j)
    { std::swap(x[i], x[j]); }

    /// return the size of the quadratic matrix
    std::size_t size() const
    { return x.size(); }

    /// query the kernel access counter
    unsigned long long getAccessCount() const
    { return m_accessCounter; }

    /// reset the kernel access counter
    void resetAccessCount()
    { m_accessCounter = 0; }

    /// return a single matrix entry
    /// Override the Base::entry(...)
    /// formula: \f$ K\left(x_i, x_j\right)\frac{1}{s_i}\frac{1}{s_j} \f$
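    /// For instance, with an unscaled kernel value \f$ K\left(x_i, x_j\right) = 1 \f$ and scaling
    /// coefficients \f$ s_i = 2 \f$ and \f$ s_j = 4 \f$, the resulting entry is
    /// \f$ 1 \cdot \frac{1}{2} \cdot \frac{1}{4} = 0.125 \f$.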
    QpFloatType entry(std::size_t i, std::size_t j) const
    {
        // typedef typename InputType::value_type InputValueType;
        INCREMENT_KERNEL_COUNTER( m_accessCounter );
        SIZE_CHECK(i < size());
        SIZE_CHECK(j < size());

        return (QpFloatType)evalSkipMissingFeatures(
            kernel,
            *x[i],
            *x[j]) * (1.0 / m_scalingCoefficients[i]) * (1.0 / m_scalingCoefficients[j]);
    }

    /// \brief Computes the i-th row of the kernel matrix.
    ///
    /// The entries start,...,end of the i-th row are computed and stored in storage.
    /// There must be enough room for this operation preallocated.
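    ///
    /// A small sketch of the calling contract, reusing the hypothetical km instance from the class
    /// example above (with CacheType = double):
    /// \code
    /// std::size_t i = 0, start = 0, end = km.size();
    /// std::vector<double> buffer(end - start);  // caller preallocates the storage
    /// km.row(i, start, end, buffer.data());
    /// \endcode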
    void row(std::size_t i, std::size_t start, std::size_t end, QpFloatType* storage) const{
        for(std::size_t j = start; j < end; j++){
            storage[j - start] = entry(i, j);
        }
    }

    /// \brief Computes the kernel-matrix
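    ///
    /// For example, the full matrix of the hypothetical km instance above can be written into a
    /// dense RealMatrix:
    /// \code
    /// RealMatrix full(km.size(), km.size());
    /// km.matrix(full);
    /// \endcode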
    template<class M>
    void matrix(
        blas::matrix_expression<M, blas::cpu_tag>& storage
    ) const{
        for(std::size_t i = 0; i != size(); ++i){
            for(std::size_t j = 0; j != size(); ++j){
                storage()(i, j) = entry(i, j);
            }
        }
    }

    void setScalingCoefficients(const RealVector& scalingCoefficients)
    {
        SIZE_CHECK(scalingCoefficients.size() == size());
        m_scalingCoefficients = scalingCoefficients;
    }

protected:

    /// Kernel function defining the kernel Gram matrix
    const AbstractKernelFunction<InputType>& kernel;

    typedef typename Data<InputType>::const_element_range::iterator PointerType;
    /// Array of data pointers for kernel evaluations
    std::vector<PointerType> x;
    /// counter for the kernel accesses
    mutable unsigned long long m_accessCounter;

private:

    /// The scaling coefficients
    RealVector m_scalingCoefficients;
};

}
#endif