// Header guard and includes for Remora's sparse-matrix header.
// NOTE(review): this file is an elided extract — the numbers embedded in each
// line (28, 29, 32, ...) are original line numbers, and many original lines
// are missing between them. Comments below flag the gaps where relevant.
28 #ifndef REMORA_MATRIX_SPARSE_HPP 29 #define REMORA_MATRIX_SPARSE_HPP 32 #include "detail/matrix_proxy_classes.hpp" 35 #include <boost/serialization/vector.hpp> 39 template<
class T,
class I=std::
size_t>
// Row-major compressed (CSR-style) sparse matrix with value type T and index
// type I (defaults to std::size_t); registered as a CPU matrix container.
40 class compressed_matrix:
public matrix_container<compressed_matrix<T, I>, cpu_tag > {
// Shorthand for the concrete matrix type, used throughout the class.
41 typedef compressed_matrix<T, I> self_type;
// Reads hand out elements by const reference.
// (value_type/size_type typedefs are on elided original lines 42-46.)
47 typedef T
const& const_reference;
// Proxy read: fetch element (m_i, m_j) through the const operator(), so a
// structurally absent element is reported as zero without being inserted.
50 const_reference value()
const {
51 return const_cast<self_type const&
>(m_matrix)(m_i,m_j);
// Proxy write access: return a mutable reference to element (m_i, m_j),
// inserting a zero-valued entry first if it is not yet stored.
53 value_type& ref()
const {
// Column-index range of row m_i inside the shared index array.
55 size_type
const *start = m_matrix.m_indices.data() + m_matrix.m_rowStart[m_i];
56 size_type
const *end = m_matrix.m_indices.data() + m_matrix.m_rowEnd[m_i];
// Row indices are kept sorted, so lower_bound finds m_j or its
// insertion point.
58 size_type
const *pos = std::lower_bound(start,end,m_j);
// Element already stored -> return the existing value slot.
60 if (pos != end && *pos == m_j)
61 return m_matrix.m_values[(pos-start)+m_matrix.m_rowStart[m_i]];
// Otherwise build an iterator at the insertion position — the iterator
// construction wrapping these three arguments is on the elided original
// lines 62-64/68-69 — and insert the shared zero via set_element.
65 m_matrix.m_values.data(),
66 m_matrix.m_indices.data(),
67 pos-start + m_matrix.m_rowStart[m_i]
70 return *m_matrix.set_element(posIter, m_j, m_matrix.m_zero);
// Bind the proxy to matrix m at position (i, j). m_matrix is a reference
// member, so it must be bound in the initializer list.
76 reference(compressed_matrix &m, size_type i, size_type j):
77 m_matrix(m), m_i(i), m_j(j) {}
// Assignment and compound assignment through the proxy: each forwards to
// ref(), which materializes the element on first write. The operators are
// const because they mutate the referenced matrix, not the proxy itself.
// (Each body's tail is elided here.)
80 value_type& operator = (value_type d)
const {
// Proxy-to-proxy assignment copies the referenced VALUE, not the binding.
83 value_type& operator=(reference
const & other){
84 return ref() = other.value();
86 value_type& operator += (value_type d)
const {
89 value_type& operator -= (value_type d)
const {
92 value_type& operator *= (value_type d)
const {
95 value_type& operator /= (value_type d)
const {
// Implicit read conversion; presumably delegates to value() — body elided.
99 operator const_reference()
const {
// The matrix this proxy points into; the position members m_i/m_j are on
// the elided original lines 104-105.
103 compressed_matrix& m_matrix;
// Closure/storage typedefs required by the Remora expression framework:
// the matrix is evaluated elementwise as sparse data, stored row-major.
108 typedef matrix_reference<self_type const> const_closure_type;
109 typedef matrix_reference<self_type> closure_type;
110 typedef sparse_matrix_storage<T,I> storage_type;
111 typedef sparse_matrix_storage<value_type const,size_type const> const_storage_type;
112 typedef elementwise<sparse_tag> evaluation_category;
113 typedef row_major orientation;
// Default constructor: empty 0x0 matrix; m_rowStart holds the single
// sentinel entry 0. (The signature is on the elided original line 116.)
117 : m_size1(0), m_size2(0), m_nnz(0)
118 , m_rowStart(1,0), m_indices(0), m_values(0), m_zero(0) {}
// Sized constructor: size1 x size2 matrix with capacity for non_zeros
// entries; all rows start empty. NOTE(review): the m_rowEnd initializer
// (original line 123) is elided here.
120 compressed_matrix(size_type size1, size_type size2, size_type non_zeros = 0)
121 : m_size1(size1), m_size2(size2), m_nnz(0)
122 , m_rowStart(size1 + 1,0)
124 , m_indices(non_zeros), m_values(non_zeros), m_zero(0) {}
// Conversion constructor from an arbitrary cpu matrix expression E; sizes
// are taken from e, and the assignment happens in the (elided) body.
127 compressed_matrix(matrix_expression<E, cpu_tag>
const& e, size_type non_zeros = 0)
128 : m_size1(e().size1()), m_size2(e().size2()), m_nnz(0)
129 , m_rowStart(e().size1() + 1, 0)
130 , m_rowEnd(e().size1(), 0)
131 , m_indices(non_zeros), m_values(non_zeros), m_zero(0) {
// Number of rows (body elided).
136 size_type size1()
const {
// Number of columns (body elided).
139 size_type size2()
const {
// Total number of allocated nonzero slots (not all necessarily used).
143 std::size_t nnz_capacity()
const {
144 return m_values.size();
// Slots reserved for one row: distance between consecutive row starts.
146 std::size_t row_capacity(size_type row)
const {
147 REMORA_RANGE_CHECK(row < size1());
148 return m_rowStart[row+1] - m_rowStart[row];
// Total number of stored nonzeros; body elided — presumably returns m_nnz.
150 std::size_t nnz()
const {
// Nonzeros actually stored in row `row`: [m_rowStart[row], m_rowEnd[row]).
153 std::size_t inner_nnz(size_type row)
const {
154 return m_rowEnd[row] - m_rowStart[row];
// Mutable view of the raw CSR arrays: values, column indices, row starts,
// row ends.
158 storage_type raw_storage(){
159 return {m_values.data(),m_indices.data(), m_rowStart.data(), m_rowEnd.data()};
// Read-only view of the same four arrays.
163 const_storage_type raw_storage()
const{
164 return {m_values.data(),m_indices.data(), m_rowStart.data(), m_rowEnd.data()};
// CPU containers all share the default device queue.
167 typename device_traits<cpu_tag>::queue_type& queue(){
168 return device_traits<cpu_tag>::default_queue();
// Declare the first non_zeros slots as filled after an external bulk write
// through raw_storage (body elided).
171 void set_filled(std::size_t non_zeros) {
// Mark row i as holding non_zeros entries; the caller must have written
// them beforehand and may not exceed the row's capacity.
175 void set_row_filled(size_type i,std::size_t non_zeros) {
176 REMORA_SIZE_CHECK(i < size1());
177 REMORA_SIZE_CHECK(non_zeros <=row_capacity(i));
179 m_rowEnd[i] = m_rowStart[i]+non_zeros;
// Keep the end-of-storage sentinel in sync — the guarding condition
// (presumably i == size1()-1) is on the elided original lines 180-181.
182 m_rowStart[size1()] = m_rowEnd[i];
// Resize to size1 x size2, discarding all stored elements: every row is
// reset to empty. (The m_size1/m_size2 assignments are on elided lines
// 186-189.)
185 void resize(size_type size1, size_type size2) {
190 m_rowStart.resize(m_size1 + 1);
191 m_rowEnd.resize(m_size1);
192 std::fill(m_rowStart.begin(),m_rowStart.end(),0);
193 std::fill(m_rowEnd.begin),m_rowEnd.end(),0);
// Grow the global nonzero capacity to at least non_zeros slots; never
// shrinks (early-out when the request is already satisfied).
195 void reserve(std::size_t non_zeros) {
196 if (non_zeros < nnz_capacity())
return;
198 m_indices.resize(non_zeros);
199 m_values.resize(non_zeros);
// Ensure row `row` can hold non_zeros entries (clamped to the column
// count). Rows live back-to-back in the shared arrays, so growing one row
// shifts every later row's data towards the end.
202 void reserve_row(size_type row, std::size_t non_zeros) {
203 REMORA_RANGE_CHECK(row < size1());
204 non_zeros = std::min(m_size2,non_zeros);
205 if (non_zeros <= row_capacity(row))
return;
206 std::size_t spaceDifference = non_zeros - row_capacity(row);
// Not enough free slots after the last row: grow the arrays, at least
// doubling so repeated growth stays amortized.
209 if (spaceDifference > nnz_capacity()-m_rowStart.back()) {
210 reserve(nnz_capacity()+std::max<std::size_t>(nnz_capacity(),2*spaceDifference));
// Shift every row after `row` backwards by spaceDifference, processing the
// last row first; copy_backward handles the overlapping ranges.
213 for (size_type i = size1()-1; i != row; --i) {
214 value_type* values = m_values.data() + m_rowStart[i];
215 value_type* valueRowEnd = m_values.data() + m_rowEnd[i];
216 size_type* indices = m_indices.data() + m_rowStart[i];
217 size_type* indicesEnd = m_indices.data() + m_rowEnd[i];
218 std::copy_backward(values,valueRowEnd, valueRowEnd+spaceDifference);
219 std::copy_backward(indices,indicesEnd, indicesEnd+spaceDifference);
220 m_rowStart[i]+=spaceDifference;
221 m_rowEnd[i]+=spaceDifference;
// The end sentinel moves with the shifted rows.
223 m_rowStart.back() +=spaceDifference;
224 REMORA_SIZE_CHECK(row_capacity(row) == non_zeros);
// Read element (i, j): binary search for j among row i's sorted column
// indices; returns the stored value on a hit. The miss fallthrough
// (returning the shared zero, original lines 245-246) is elided here.
233 const_reference operator()(size_type i, size_type j)
const {
234 REMORA_SIZE_CHECK(i < size1());
235 REMORA_SIZE_CHECK(j < size2());
237 size_type
const *start = m_indices.data() + m_rowStart[i];
238 size_type
const *end = m_indices.data() + m_rowEnd[i];
240 size_type
const *pos = std::lower_bound(start,end,j);
242 if (pos != end && *pos == j)
243 return m_values[(pos-start)+m_rowStart[i]];
// Write access: hand out a lazy proxy; the element is only inserted into
// the sparse structure when the proxy is actually written through.
248 reference operator()(size_type i, size_type j) {
249 REMORA_SIZE_CHECK(i < size1());
250 REMORA_SIZE_CHECK(j < size2());
251 return reference(*
this,i,j);
// Assignment from another container: resize, then copy the elements
// (the copy and return are on elided original lines 258-259).
256 compressed_matrix &operator = (matrix_container<C, cpu_tag>
const& m) {
257 resize(m().size1(), m().size2());
// Assignment from a general expression: evaluate into a temporary first so
// aliasing with *this is safe; the swap/return is elided (orig 264-265).
262 compressed_matrix &operator = (matrix_expression<E, cpu_tag>
const& e) {
263 self_type temporary(e, nnz_capacity());
// O(1) swap of all bookkeeping vectors; the scalar member swaps
// (sizes, nnz) are on elided original lines 270-272.
269 void swap(compressed_matrix &m) {
273 m_rowStart.swap(m.m_rowStart);
274 m_rowEnd.swap(m.m_rowEnd);
275 m_indices.swap(m.m_indices);
276 m_values.swap(m.m_values);
// ADL-findable free swap; presumably forwards to the member (body elided).
279 friend void swap(compressed_matrix &m1, compressed_matrix &m2) {
// Swap row i of a with row j of b (the two matrices may be distinct but
// must have the same number of columns).
283 friend void swap_rows(compressed_matrix& a, size_type i, compressed_matrix& b, size_type j) {
284 REMORA_SIZE_CHECK(i < a.size1());
285 REMORA_SIZE_CHECK(j < b.size1());
286 REMORA_SIZE_CHECK(a.size2() == b.size2());
// Normalize so a's row has at least as many nonzeros as b's; the
// argument-reversed recursive call is on elided lines 290-292.
289 if(a.inner_nnz(i) < b.inner_nnz(j)){
294 std::size_t nnzi = a.inner_nnz(i);
295 std::size_t nnzj = b.inner_nnz(j);
// After the branch above nnzi >= nnzj, so only b's row may need to grow.
298 b.reserve_row(j,nnzi);
299 REMORA_SIZE_CHECK(b.row_capacity(j) >= nnzi);
300 REMORA_SIZE_CHECK(a.row_capacity(i) >= nnzj);
302 size_type* indicesi = a.m_indices.data() + a.m_rowStart[i];
303 size_type* indicesj = b.m_indices.data() + b.m_rowStart[j];
304 value_type* valuesi = a.m_values.data() + a.m_rowStart[i];
305 value_type* valuesj = b.m_values.data() + b.m_rowStart[j];
// Exchange the first nnzi index/value slots of both rows; the fill counts
// are corrected afterwards via set_row_filled.
308 std::swap_ranges(indicesi,indicesi+nnzi,indicesj);
309 std::swap_ranges(valuesi, valuesi+nnzi,valuesj);
316 a.set_row_filled(i,nnzj);
317 b.set_row_filled(j,nnzi);
// Convenience overload: swap two rows within one matrix (body elided).
320 friend void swap_rows(compressed_matrix& a, size_type i, size_type j) {
// Row iterators walk the compressed (values, indices) arrays; the const
// variants freeze both element types.
325 typedef iterators::compressed_storage_iterator<value_type const, size_type const> const_row_iterator;
326 typedef iterators::compressed_storage_iterator<value_type, size_type const> row_iterator;
// Iterator to the first stored element of row i.
328 const_row_iterator row_begin(size_type i)
const {
329 REMORA_SIZE_CHECK(i < size1());
330 REMORA_RANGE_CHECK(m_rowStart[i] <= m_rowEnd[i]);
331 return const_row_iterator(m_values.data(), m_indices.data(), m_rowStart[i],i);
// Iterator one past the last stored element of row i.
334 const_row_iterator row_end(size_type i)
const {
335 REMORA_SIZE_CHECK(i < size1());
336 REMORA_RANGE_CHECK(m_rowStart[i] <= m_rowEnd[i]);
337 return const_row_iterator(m_values.data(), m_indices.data(), m_rowEnd[i],i);
// Mutable counterparts of the two accessors above.
340 row_iterator row_begin(size_type i) {
341 REMORA_SIZE_CHECK(i < size1());
342 REMORA_RANGE_CHECK(m_rowStart[i] <= m_rowEnd[i]);
343 return row_iterator(m_values.data(), m_indices.data(), m_rowStart[i],i);
346 row_iterator row_end(size_type i) {
347 REMORA_SIZE_CHECK(i < size1());
348 REMORA_RANGE_CHECK(m_rowStart[i] <= m_rowEnd[i]);
349 return row_iterator(m_values.data(), m_indices.data(), m_rowEnd[i],i);
// Column iterator typedefs mirror the row iterator types; any actual
// column traversal support is not visible in this extract.
352 typedef iterators::compressed_storage_iterator<value_type const, size_type const> const_column_iterator;
353 typedef iterators::compressed_storage_iterator<value_type, size_type const> column_iterator;
// Insert (or overwrite) the element with column `index` in the row that
// pos belongs to; pos must be the sorted insertion position. Returns an
// iterator to the written element.
355 row_iterator set_element(row_iterator pos, size_type index, value_type value) {
356 std::size_t row = pos.row();
357 REMORA_RANGE_CHECK(row < size1());
358 REMORA_RANGE_CHECK(size_type(row_end(row) - pos) <= row_capacity(row));
// Element already present: overwrite in place (assignment and return are
// on elided original lines 363-366).
362 if (pos != row_end(row) && pos.index() == index) {
// Absolute offset of the insertion point inside the storage arrays.
368 std::ptrdiff_t arrayPos = (pos - row_begin(row)) + m_rowStart[row];
// Row full: grow it, at least doubling (or to 1 when empty). Only rows
// after `row` move, so arrayPos remains a valid absolute offset.
371 if (row_capacity(row) == inner_nnz(row))
372 reserve_row(row,std::max<std::size_t>(2*row_capacity(row),1));
// Shift the tail of the row one slot right to open a gap — the
// std::copy_backward calls wrapping these argument lists are elided.
376 m_values.begin() + arrayPos, m_values.begin() + m_rowEnd[row],
377 m_values.begin() + m_rowEnd[row] + 1
380 m_indices.begin()+arrayPos, m_indices.begin() + m_rowEnd[row],
381 m_indices.begin() + m_rowEnd[row] + 1
// Write the new element; the m_rowEnd/m_nnz bookkeeping updates are on
// elided original lines 386-389.
384 m_values[arrayPos] = value;
385 m_indices[arrayPos] = index;
390 return row_iterator(m_values.data(), m_indices.data(), arrayPos,row);
// Remove the stored elements in [start, end) of one row by sliding the
// row's tail forward; returns an iterator to the position after the
// cleared range.
394 row_iterator clear_range(row_iterator start, row_iterator end) {
395 std::size_t row = start.row();
396 REMORA_RANGE_CHECK(row == end.row());
// Translate iterators into absolute offsets in the storage arrays.
398 size_type rowEndPos = m_rowEnd[row];
399 size_type rowStartPos = m_rowStart[row];
400 size_type rangeStartPos = start - row_begin(row)+rowStartPos;
401 size_type rangeEndPos = end - row_begin(row)+rowStartPos;
402 std::ptrdiff_t rangeSize = end - start;
// Close the gap — the std::copy calls wrapping these argument lists are
// on elided original lines.
406 m_values.begin()+rangeEndPos,m_values.begin() + rowEndPos, m_values.begin() + rangeStartPos
409 m_indices.begin()+rangeEndPos,m_indices.begin() + rowEndPos , m_indices.begin() + rangeStartPos
// Shrink the row; the matching m_nnz update (orig 412) is elided.
411 m_rowEnd[row] -= rangeSize;
414 return row_iterator(m_values.data(), m_indices.data(), rangeStartPos,row);
// Erase a single element by delegating to clear_range over [elem, next);
// the increment of `next` (orig 420) and the return are elided here.
417 row_iterator clear_element(row_iterator elem) {
418 REMORA_RANGE_CHECK(elem != row_end());
419 row_iterator next = elem;
421 clear_range(elem,next);
// Boost.Serialization support: stores the four CSR arrays under stable
// NVP names. NOTE(review): size/nnz members are not serialized in the
// visible lines — confirm they are handled on the elided lines.
425 template<
class Archive>
426 void serialize(Archive &ar,
const unsigned int ) {
427 ar &boost::serialization::make_nvp(
"outer_indices", m_rowStart);
428 ar &boost::serialization::make_nvp(
"outer_indices_end", m_rowEnd);
429 ar &boost::serialization::make_nvp(
"inner_indices", m_indices);
430 ar &boost::serialization::make_nvp(
"values", m_values);
// CSR bookkeeping: row r's entries occupy array positions
// [m_rowStart[r], m_rowEnd[r]); m_rowStart[size1()] is the end-of-storage
// sentinel. m_indices holds column indices, m_values the matching values.
437 std::vector<size_type> m_rowStart;
438 std::vector<size_type> m_rowEnd;
439 std::vector<size_type> m_indices;
440 std::vector<value_type> m_values;
// Trait specialization: the temporary used to materialize a sparse cpu
// matrix expression with value type T (any orientation O) is a
// compressed_matrix<T>.
444 template<
class T,
class O>
445 struct matrix_temporary_type<T,O,sparse_tag, cpu_tag> {
446 typedef compressed_matrix<T> type;