Shark machine learning library
About Shark
News!
Contribute
Credits and copyright
Downloads
Getting Started
Installation
Using the docs
Documentation
Tutorials
Quick references
Class list
Global functions
FAQ
Showroom
include
shark
LinAlg
BLAS
kernels
potrf.hpp
Go to the documentation of this file.
1
/*!
2
*
3
*
4
* \brief Dispatches the POTRF algorithm
5
*
6
* \author O. Krause
7
* \date 2012
8
*
9
*
10
* \par Copyright 1995-2014 Shark Development Team
11
*
12
* <BR><HR>
13
* This file is part of Shark.
14
* <http://image.diku.dk/shark/>
15
*
16
* Shark is free software: you can redistribute it and/or modify
17
* it under the terms of the GNU Lesser General Public License as published
18
* by the Free Software Foundation, either version 3 of the License, or
19
* (at your option) any later version.
20
*
21
* Shark is distributed in the hope that it will be useful,
22
* but WITHOUT ANY WARRANTY; without even the implied warranty of
23
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24
* GNU Lesser General Public License for more details.
25
*
26
* You should have received a copy of the GNU Lesser General Public License
27
* along with Shark. If not, see <http://www.gnu.org/licenses/>.
28
*
29
*/
30
31
#ifndef REMORA_KERNELS_POTRF_HPP
32
#define REMORA_KERNELS_POTRF_HPP
33
34
#ifdef REMORA_USE_ATLAS_LAPACK
#include "atlas/potrf.hpp"
#else
// If no LAPACK bindings are compiled in, we must provide the default
// has_optimized_potrf trait ourselves; otherwise the binding header
// (atlas/potrf.hpp) takes care of defining it.
// NOTE(review): the original comment referred to "has_optimized_gemv" —
// almost certainly a copy-paste slip; the trait declared here is
// has_optimized_potrf, which gates the POTRF dispatch below.
namespace remora { namespace bindings {
// Fallback trait: reports that no optimized POTRF backend exists for
// matrix type M, so kernels::potrf dispatches to the default
// implementation from "default/potrf.hpp".
template <class M>
struct has_optimized_potrf
: public std::false_type {};
}}
#endif
45
46
#include "
default/potrf.hpp
"
47
48
namespace
remora
{
namespace
kernels {
49
50
///\brief Implements the POsitive TRiangular matrix Factorisation POTRF.
51
///
52
/// It is better known as the cholesky decomposition for dense matrices.
53
/// The algorithm works in place and does not require additional memory.
54
template
<
class
Triangular,
typename
MatA>
55
std::size_t potrf(
56
matrix_container<MatA, cpu_tag>& A
57
) {
58
REMORA_SIZE_CHECK(A().size1() == A().size2());
59
return
bindings::potrf<Triangular>(A,
typename
bindings::has_optimized_potrf<MatA>::type());
60
}
61
62
}}
63
#endif