// NOTE(review): this chunk is a garbled extraction — original file line
// numbers ("36", "37", "65", ...) are fused into the text and many interior
// lines are missing. Comments only are added below; no code is altered.
//
// Include guard and start of the class template: the negative logarithm of
// the marginal likelihood ("evidence") of a Gaussian process regression
// model, used as an objective for kernel hyperparameter optimization.
36 #ifndef SHARK_OBJECTIVEFUNCTIONS_NEGATIVEGAUSSIANPROCESSEVIDENCE_H 37 #define SHARK_OBJECTIVEFUNCTIONS_NEGATIVEGAUSSIANPROCESSEVIDENCE_H 65 template<
// Template parameters — batch input type, kernel output type, and the
// regression label type; all default to RealVector (dense double vectors).
class InputType = RealVector,
class OutputType = RealVector,
class LabelType = RealVector>
// Constructor fragment: takes the training dataset and a flag indicating
// whether the noise parameter is encoded unconstrained (log-space).
// NOTE(review): the class declaration and the rest of the constructor are
// not visible in this extraction — confirm against the full header.
76 DatasetType
const& dataset,
78 bool unconstrained =
false 81 , m_unconstrained(unconstrained)
// name(): human-readable identifier of this objective function.
89 {
return "NegativeGaussianProcessEvidence"; }
// eval(): value of the objective at the given parameter vector
// (kernel parameters followed by the noise parameter).
// NOTE(review): interior lines are missing from this extraction (e.g. the
// construction of the regularized kernel matrix M and the final return);
// comments only, code untouched.
97 double eval(
const RealVector& parameters)
const {
// The last parameter is the noise variance betaInv = beta^-1.
107 double betaInv = parameters.back();
// Unconstrained encoding stores log(betaInv); exponentiate to recover a
// strictly positive noise variance. Presumably guarded by m_unconstrained
// in the missing lines — TODO confirm.
109 betaInv = std::exp(betaInv);
// Copy the scalar regression labels into a dense vector t.
115 RealVector t = column(createBatch<RealVector>(m_dataset.
labels().
elements()),0);
// Cholesky factorization M = L L^T of the (regularized) kernel matrix M;
// M's construction is in the missing lines.
117 blas::cholesky_decomposition<RealMatrix> cholesky(M);
// log det(M) = 2 * sum_i log(L_ii): trace(log(.)) of the triangular
// factor sums the log-diagonal.
121 double logDet = 2* trace(log(cholesky.lower_factor()));
// Solve L z = t, so that norm_sqr(z) = t^T M^-1 t.
127 RealVector z = solve(cholesky.lower_factor(),t,blas::lower(), blas::left());
// Gaussian log likelihood: 0.5*(-log det M - t^T M^-1 t - N log(2*pi)).
// Presumably negated before returning (this is the NEGATIVE evidence) in
// the missing lines — TODO confirm.
131 double e = 0.5 * (-logDet - norm_sqr(z) - N * std::log(2.0 * M_PI));
// evalDerivative() fragment: gradient of the objective with respect to the
// kp kernel parameters plus the single noise parameter.
// NOTE(review): the function signature and several interior lines are
// missing from this extraction — comments only, code untouched.
149 derivative.resize(1 + kp);
// Noise variance, recovered from log-space as in eval().
155 double betaInv = parameters.back();
157 betaInv = std::exp(betaInv);
// Label vector t, built exactly as in eval().
162 RealVector t = column(createBatch<RealVector>(m_dataset.
labels().
elements()),0);
// Cholesky factorization of the regularized kernel matrix M (M built in
// the missing lines).
165 blas::cholesky_decomposition<RealMatrix> cholesky(M);
// Start from the identity and overwrite it with M^-1 via an in-place
// Cholesky solve.
181 RealMatrix W= blas::identity_matrix<double>(N);
182 cholesky.solve(W,blas::left());
// z = M^-1 t.
185 RealVector z = prod(W,t);
// W <- M^-1 + z z^T. NOTE(review): the textbook gradient of the log
// evidence uses (z z^T - M^-1); presumably a sign is absorbed elsewhere
// (e.g. the global negation below) — confirm against the full source.
190 noalias(W) += outer_prod(z,z);
// Derivative w.r.t. betaInv: dM/d(betaInv) is the identity, so the trace
// of W captures it.
199 double betaInvDerivative = 0.5 * trace(W) ;
// Chain rule for the log-space (unconstrained) parameter encoding.
201 betaInvDerivative *= betaInv;
// Negate: this objective is the NEGATIVE evidence. operator| concatenates
// the kernel-parameter gradient with the noise derivative.
204 noalias(derivative) = - (kernelGradient | betaInvDerivative);
// Clamp tiny gradient components to exactly zero, per the user-settable
// per-component thresholds (see m_derivativeThresholds) — helps
// optimizers treat near-flat directions as converged.
207 for(std::size_t i=0; i<derivative.size(); i++)
208 if(std::abs(derivative(i)) < m_derivativeThresholds(i)) derivative(i) = 0;
// Recompute the function value, reusing z = M^-1 t so that
// t^T M^-1 t = inner_prod(t, z).
212 double logDetM = 2* trace(log(cholesky.lower_factor()));
213 double e = 0.5 * (-logDetM - inner_prod(t, z) - N * std::log(2.0 * M_PI));
// Setter fragment (signature not visible in this extraction): install
// per-component derivative clamping thresholds; the supplied vector must
// match the full derivative size (kernel parameters + noise parameter).
224 SHARK_ASSERT(m_derivativeThresholds.size() == c.size());
225 m_derivativeThresholds = c;
// Training data over which the evidence is evaluated.
231 DatasetType m_dataset;
// Per-component thresholds below which derivative entries are zeroed
// (see the clamping loop in the derivative computation).
234 RealVector m_derivativeThresholds;
// Non-owning pointer to the kernel whose hyperparameters are optimized.
// NOTE(review): 'mep_' prefix suggests the project's convention for
// non-owning member pointers — confirm ownership against the full header.
237 KernelType* mep_kernel;
// If true, the noise parameter is encoded unconstrained as log(betaInv).
242 bool m_unconstrained;