// Normalizer.h
/*!
 *
 *
 * \brief Model for scaling and translation of data vectors.
 *
 *
 *
 * \author T. Glasmachers
 * \date 2013
 *
 *
 * \par Copyright 1995-2017 Shark Development Team
 *
 * <BR><HR>
 * This file is part of Shark.
 * <http://shark-ml.org/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
32 #ifndef SHARK_MODELS_NORMALIZER_H
33 #define SHARK_MODELS_NORMALIZER_H
34 
36 #include <shark/LinAlg/Base.h>
37 
38 
39 namespace shark {
40 
41 
42 ///
43 /// \brief "Diagonal" linear model for data normalization.
44 ///
45 /// \par
46 /// The Normalizer is a restricted and often more efficient variant of
47 /// the LinearModel class. It restricts the linear model in two respects:
48 /// (1) input and output dimension must agree,
49 /// (2) computations are independent for each component.
50 /// This is useful mostly for data normalization (therefore the name).
51 /// The model's operation is of the form \f$ x \mapsto A x + b \f$ where
52 /// A is a diagonal matrix. This reduces memory requirements to linear,
53 /// which is why there is no sparse version of this model (as opposed to
54 /// the more general linear model). Also, the addition of b is optional.
55 ///
56 template <class VectorType = RealVector>
57 class Normalizer : public AbstractModel<VectorType, VectorType>
58 {
59 public:
62 
65 
66  /// \brief Construction from dimension
67  Normalizer(std::size_t dimension = 0, bool hasOffset = false)
68  { setStructure(dimension,hasOffset);}
69 
70  /// \brief Construction from matrix and and optional offset vector
72  { setStructure(diagonal, offset);}
73 
74 
75  /// \brief From INameable: return the class name.
76  std::string name() const
77  { return "Normalizer"; }
78 
79  /// \brief derivative storage object (empty for this model)
80  boost::shared_ptr<State> createState() const{
81  return boost::shared_ptr<State>(new EmptyState());
82  }
83 
84  /// \brief check for the presence of an offset term
85  bool hasOffset() const{
86  return !m_b.empty();
87  }
88 
89  /// \brief obtain the input dimension
90  Shape inputShape() const{
91  return m_A.size();
92  }
93 
94  /// \brief obtain the output dimension
95  Shape outputShape() const{
96  return m_A.size();
97  }
98 
99  /// \brief return the diagonal of the matrix
100  VectorType const& diagonal() const{
101  return m_A;
102  }
103 
104  /// \brief return the offset vector
105  VectorType const& offset() const{
106  return m_b;
107  }
108 
109  /// \brief obtain the parameter vector
111  return m_A | m_b;
112  }
113 
114  /// \brief overwrite the parameter vector
115  void setParameterVector(VectorType const& newParameters){
116  SIZE_CHECK(newParameters.size() == numberOfParameters());
117  std::size_t dim = m_A.size();
118  noalias(m_A) = subrange(newParameters,0,dim);
119  noalias(m_b) = subrange(newParameters, dim, newParameters.size());
120  }
121 
122  /// \brief return the number of parameter
123  std::size_t numberOfParameters() const{
124  return m_A.size() + m_b.size();
125  }
126 
127  /// \brief overwrite structure and parameters
128  void setStructure(VectorType const& diagonal, VectorType const& offset = VectorType()){
129  m_A = diagonal;
130  m_b = offset;
131  }
132 
133  /// \brief overwrite structure and parameters
134  void setStructure(std::size_t dimension, bool hasOffset = false){
135  m_A.resize(dimension);
136  m_b.resize(hasOffset? dimension : 0);
137  }
138 
139  using base_type::eval;
140 
141  /// \brief Evaluate the model: output = matrix * input + offset.
142  void eval(BatchInputType const& input, BatchOutputType& output) const{
143  SIZE_CHECK(input.size2() == m_A.size());
144  output.resize(input.size1(), input.size2());
145  noalias(output) = input * repeat(m_A,input.size1());
146  if (hasOffset()){
147  noalias(output) += repeat(m_b,input.size1());
148  }
149  }
150 
151  /// \brief Evaluate the model: output = matrix * input + offset.
152  void eval(BatchInputType const& input, BatchOutputType& output, State& state) const{
153  eval(input, output);
154  }
155 
156  /// from ISerializable
157  void read(InArchive& archive){
158  archive & m_A;
159  archive & m_b;
160  }
161 
162  /// from ISerializable
163  void write(OutArchive& archive) const{
164  archive & m_A;
165  archive & m_b;
166  }
167 
168 protected:
169  VectorType m_A; ///< matrix A (see class documentation)
170  VectorType m_b; ///< vector b (see class documentation)
171 };
172 
173 
174 }
175 #endif