// Sparse (compressed) vector for the remora linear-algebra library.
// NOTE(review): this chunk is a lossy extraction — the original file's line
// numbers are fused into the text and many lines are missing; it will not
// compile as-is. Comments below annotate the visible fragments only.
28 #ifndef REMORA_VECTOR_SPARSE_HPP 29 #define REMORA_VECTOR_SPARSE_HPP 32 #include "detail/vector_proxy_classes.hpp" 35 #include <boost/serialization/collection_size_type.hpp> 36 #include <boost/serialization/nvp.hpp> 37 #include <boost/serialization/vector.hpp> 59 template<
class T,
// I: index type used for the stored nonzero positions; defaults to size_t.
class I = std::
size_t>
// compressed_vector<T,I>: stores only the nonzero entries of a vector as two
// parallel arrays (sorted indices + values); registers itself as a cpu
// vector_container via CRTP.
60 class compressed_vector:
public vector_container<compressed_vector<T, I>, cpu_tag > {
// Writable reference to an existing stored slot (internal use).
62 typedef T& true_reference;
63 typedef compressed_vector<T, I> self_type;
66 typedef const T& const_reference;
// Proxy read path: delegate to the const operator() of the owning vector, so
// a read of a missing element does NOT insert it.
73 const_reference value()
const {
74 return const_cast<self_type const&
>(m_vector)(m_i);
// Proxy write path: locate index m_i in the sorted index array; if present,
// return the stored value slot, otherwise insert a zero element and return
// the new slot (insert-on-write semantics).
76 value_type& ref()
const {
78 size_type
const* start = m_vector.m_indices.data();
79 size_type
const* end = start + m_vector.nnz();
// Binary search over the sorted nonzero indices.
80 size_type
const *pos = std::lower_bound(start,end,m_i);
82 if (pos != end&& *pos == m_i)
83 return m_vector.m_values[pos-start];
// Not stored yet: build an iterator at the insertion point and let
// set_element create the slot, initialized from the shared zero element.
86 iterator posIter(m_vector.m_values.data(),m_vector.m_indices.data(),pos-start);
87 return *m_vector.set_element(posIter, m_i, m_vector.m_zero);
// Proxy constructor: binds to element m_i of vector m (no insertion yet).
93 reference(self_type& m, size_type i):
94 m_vector(m), m_i(i) {}
// Assignment/compound-assignment operators forward to ref(), i.e. they insert
// the element on first write. Bodies of the operators below were lost in the
// extraction — presumably each is `return ref() <op>= d;`, TODO confirm.
97 value_type& operator = (value_type d)
const {
101 value_type& operator=(reference
const& v ){
102 return ref() = v.value();
105 value_type& operator += (value_type d)
const {
108 value_type& operator -= (value_type d)
const {
111 value_type& operator *= (value_type d)
const {
114 value_type& operator /= (value_type d)
const {
// Implicit read conversion, delegating to value() (no insertion).
126 operator const_reference()
const{
// Closure/storage/evaluation traits consumed by the remora expression system.
134 typedef vector_reference<self_type const> const_closure_type;
135 typedef vector_reference<self_type> closure_type;
// Raw storage is the (values*, indices*, nnz) triplet.
136 typedef sparse_vector_storage<T,I> storage_type;
137 typedef sparse_vector_storage<value_type const,size_type const> const_storage_type;
// Marks this container as elementwise-evaluated with sparse iteration.
138 typedef elementwise<sparse_tag> evaluation_category;
// Default: empty vector; index array gets capacity 1, zero element is 0.
141 compressed_vector():m_size(0), m_nnz(0),m_indices(1,0),m_zero(0){}
// Sized constructor with an optional nonzero-capacity hint.
// NOTE(review): the `value` parameter is accepted but never used in the
// initializer list below — confirm against the full source whether that is
// intentional (all elements start as zero regardless).
142 explicit compressed_vector(size_type size, value_type value = value_type(), size_type non_zeros = 0)
143 :m_size(size), m_nnz(0), m_indices(non_zeros,0), m_values(non_zeros),m_zero(0){}
// Construction from an arbitrary cpu vector expression; the `template<class
// AE>` header and the body (presumably an assign from ae) were lost in the
// extraction — TODO confirm.
145 compressed_vector(vector_expression<AE, cpu_tag>
const& ae, size_type non_zeros = 0)
146 :m_size(ae().size()), m_nnz(0), m_indices(non_zeros,0), m_values(non_zeros),m_zero(0)
// Logical dimension of the vector (body line lost; presumably
// `return m_size;` — TODO confirm).
152 size_type size()
const {
// Capacity: how many nonzeros fit before the arrays must grow.
155 size_type nnz_capacity()
const {
156 return m_indices.size();
// Number of nonzeros currently stored (body line lost; presumably
// `return m_nnz;` — TODO confirm).
158 size_type nnz()
const {
// Declares how many leading array slots hold valid data; caller must have
// filled values/indices (indices sorted) via raw_storage() beforehand.
// The assignment to m_nnz after the check was lost in the extraction.
162 void set_filled(size_type filled) {
163 REMORA_SIZE_CHECK(filled <= nnz_capacity());
168 storage_type raw_storage(){
169 return {m_values.data(), m_indices.data(), m_nnz};
173 const_storage_type raw_storage()
const{
174 return {m_values.data(), m_indices.data(), m_nnz};
177 typename device_traits<cpu_tag>::queue_type& queue(){
178 return device_traits<cpu_tag>::default_queue();
// Change the logical dimension; the body was lost in the extraction
// (presumably sets m_size and resets m_nnz to 0 — TODO confirm).
181 void resize(size_type size) {
185 void reserve(size_type non_zeros) {
186 if(non_zeros <= nnz_capacity())
return;
187 non_zeros = std::min(size(),non_zeros);
188 m_indices.resize(non_zeros);
189 m_values.resize(non_zeros);
// Read access: binary-search the index array; a hit returns the stored
// value, a miss returns the shared zero element.
193 const_reference operator()(size_type i)
const {
194 REMORA_SIZE_CHECK(i < m_size);
195 std::size_t pos = lower_bound(i);
// NOTE(review): the miss-branch return was lost in the extraction —
// presumably `return m_zero;` on the line after the `if`; TODO restore.
196 if (pos == nnz() || m_indices[pos] != i)
198 return m_values [pos];
// Write access returns the insert-on-write proxy, so reading through a
// non-const vector does not allocate until actually written.
200 reference operator()(size_type i) {
201 return reference(*
this,i);
// operator[] mirrors operator() (bodies lost in the extraction).
205 const_reference operator [](size_type i)
const {
208 reference operator [](size_type i) {
// Copy assignment: the copies of m_size/m_nnz/m_zero and the `return *this;`
// were lost in the extraction; only the array copies remain visible.
218 compressed_vector& operator = (compressed_vector
const& v) {
221 m_indices = v.m_indices;
222 m_values = v.m_values;
// Assignment from another cpu container (template<class C> header lost).
// NOTE(review): this calls a two-argument resize(size, bool) while the
// resize visible above takes a single argument — confirm the overload set
// against the full source.
226 compressed_vector& operator = (vector_container<C, cpu_tag>
const& v) {
227 resize(v().size(),
false);
// Assignment from a general expression: evaluate into a temporary first
// (classic copy-and-swap to tolerate aliasing); the swap/return lines were
// lost in the extraction.
232 compressed_vector& operator = (vector_expression<AE, cpu_tag>
const& ae) {
233 self_type temporary(ae, nnz_capacity());
// Member swap: exchanges array storage; the swaps of m_size/m_nnz were lost
// in the extraction — TODO confirm.
239 void swap(compressed_vector& v) {
242 m_indices.swap(v.m_indices);
243 m_values.swap(v.m_values);
// Free-function swap (ADL hook); body lost, presumably `v1.swap(v2);`.
246 friend void swap(compressed_vector& v1, compressed_vector& v2){
// Iterators walk the stored (index,value) pairs only — nnz() steps, not
// size() steps.
251 typedef iterators::compressed_storage_iterator<value_type const, size_type const> const_iterator;
252 typedef iterators::compressed_storage_iterator<value_type, size_type const> iterator;
254 const_iterator begin()
const {
255 return const_iterator(m_values.data(),m_indices.data(),0);
258 const_iterator end()
const {
259 return const_iterator(m_values.data(),m_indices.data(),nnz());
// Non-const begin()/end(): the signature lines were lost in the extraction;
// only the return statements remain.
263 return iterator(m_values.data(),m_indices.data(),0);
267 return iterator(m_values.data(),m_indices.data(),nnz());
// Insert or overwrite the element at `index`, given an insertion hint `pos`
// (as produced by lower_bound). Returns an iterator to the element.
271 iterator set_element(iterator pos, size_type index, value_type value) {
272 REMORA_RANGE_CHECK(size_type(pos - begin()) <=m_size);
// Fast path: pos already refers to `index` -> overwrite in place. The
// overwrite + return lines were lost in the extraction.
274 if(pos != end() && pos.index() == index){
// Remember the array offset before any reallocation invalidates pos.
279 std::ptrdiff_t arrayPos = pos - begin();
// NOTE(review): `m_nnz <= nnz_capacity()` is always true (nnz can never
// exceed capacity), so capacity is doubled on EVERY insertion until
// reserve()'s clamp to size() stops it. `m_nnz == nnz_capacity()` looks
// intended — confirm and fix in the full source.
280 if (m_nnz <= nnz_capacity())
281 reserve(std::max<std::size_t>(2 * nnz_capacity(),1));
// Shift the tail one slot right to open a gap at arrayPos (these are the
// argument lists of two std::copy_backward calls whose call heads were
// lost in the extraction).
285 m_values.begin()+arrayPos,m_values.begin() + m_nnz , m_values.begin() + m_nnz +1
288 m_indices.begin()+arrayPos,m_indices.begin() + m_nnz , m_indices.begin() + m_nnz +1
// Write the new element into the gap; the `++m_nnz;` was lost in the
// extraction.
291 m_values[arrayPos] = value;
292 m_indices[arrayPos] = index;
297 return iterator(m_values.data(),m_indices.data(),arrayPos);
// Remove all stored elements in [start,end); returns an iterator to the
// first element after the erased range.
300 iterator clear_range(iterator start, iterator end) {
302 std::ptrdiff_t startPos = start - begin();
303 std::ptrdiff_t endPos = end - begin();
// Compact the tail over the erased range (argument lists of two std::copy
// calls whose call heads were lost in the extraction).
307 m_values.begin()+endPos,m_values.begin() + m_nnz, m_values.begin() + startPos
310 m_indices.begin()+endPos,m_indices.begin() + m_nnz , m_indices.begin() + startPos
312 m_nnz -= endPos - startPos;
314 return iterator(m_values.data(),m_indices.data(), startPos);
// Remove a single stored element; returns an iterator to its successor.
317 iterator clear_element(iterator pos){
319 std::ptrdiff_t arrayPos = pos - begin();
// Last-element fast path: just drop it (decrement + return lines lost in
// the extraction).
320 if(arrayPos == m_nnz-1){
// Otherwise compact the tail left by one slot (argument lists of two
// std::copy calls whose call heads were lost in the extraction).
326 m_values.begin()+arrayPos+1,m_values.begin() + m_nnz , m_values.begin() + arrayPos
329 m_indices.begin()+arrayPos+1,m_indices.begin() + m_nnz , m_indices.begin() + arrayPos
332 return iterator(m_values.data(),m_indices.data(),arrayPos);
// Boost.Serialization hook (used for both save and load; the unnamed
// unsigned int is the class version).
336 template<
class Archive>
337 void serialize(Archive& ar,
const unsigned int ) {
// Serialize the size through collection_size_type for portability.
338 boost::serialization::collection_size_type s(m_size);
339 ar & boost::serialization::make_nvp(
"size",s);
// On load, restore m_size from s (those lines were lost in the extraction).
340 if (Archive::is_loading::value) {
344 ar & boost::serialization::make_nvp(
"nnz", m_nnz);
345 ar & boost::serialization::make_nvp(
"indices", m_indices);
346 ar & boost::serialization::make_nvp(
"values", m_values);
350 std::size_t lower_bound( size_type t)
const{
351 size_type
const* begin = m_indices.data();
352 size_type
const* end = m_indices.data()+nnz();
353 return std::lower_bound(begin, end, t)-begin;
// Parallel arrays: m_indices[k] is the vector position of m_values[k];
// indices are kept sorted. Other members (m_size, m_nnz, m_zero) were lost
// in the extraction.
358 std::vector<size_type> m_indices;
359 std::vector<value_type> m_values;
// Trait specialization: sparse cpu temporaries of element type T are
// compressed_vector<T> (the `template<class T>` header and closing brace
// were lost in the extraction).
364 struct vector_temporary_type<T,sparse_tag, cpu_tag>{
365 typedef compressed_vector<T> type;