GetFEM  5.4.2
bgeot_tensor.h
Go to the documentation of this file.
1 /* -*- c++ -*- (enables emacs c++ mode) */
2 /*===========================================================================
3 
4  Copyright (C) 2000-2020 Yves Renard
5 
6  This file is a part of GetFEM
7 
8  GetFEM is free software; you can redistribute it and/or modify it
9  under the terms of the GNU Lesser General Public License as published
10  by the Free Software Foundation; either version 3 of the License, or
11  (at your option) any later version along with the GCC Runtime Library
12  Exception either version 3.1 or (at your option) any later version.
13  This program is distributed in the hope that it will be useful, but
14  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
16  License and GCC Runtime Library Exception for more details.
17  You should have received a copy of the GNU Lesser General Public License
18  along with this program; if not, write to the Free Software Foundation,
19  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
20 
21  As a special exception, you may use this file as it is a part of a free
22  software library without restriction. Specifically, if other files
23  instantiate templates or use macros or inline functions from this file,
24  or you compile this file and link it with other files to produce an
25  executable, this file does not by itself cause the resulting executable
26  to be covered by the GNU Lesser General Public License. This exception
27  does not however invalidate any other reasons why the executable file
28  might be covered by the GNU Lesser General Public License.
29 
30 ===========================================================================*/
31 
32 /**@file bgeot_tensor.h
33  @author Yves Renard <Yves.Renard@insa-lyon.fr>
34  @date October 09, 2000.
35  @brief tensor class, used in mat_elem computations.
36 */
37 #ifndef BGEOT_TENSOR_H__
38 #define BGEOT_TENSOR_H__
39 
40 #include "bgeot_small_vector.h"
41 #include "getfem/getfem_omp.h"
42 
43 
44 namespace bgeot {
45 
46  /* ********************************************************************* */
47  /* Class tensor<T>. */
48  /* ********************************************************************* */
49 
  // Common unsigned size type used throughout the library.
  typedef size_t size_type;
  // Common short unsigned integer type used throughout the library.
  typedef gmm::uint16_type short_type;
52 
  /** Multi-index used to address the entries of a tensor: one entry per
      tensor dimension, holding either an extent or an index value. */
  class multi_index : public std::vector<size_type> {
  public :

    /** Odometer-style increment of *this bounded by m: entry 0 varies
        fastest; an entry reaching its bound in m wraps to 0 and the next
        entry is incremented.  The last entry is allowed to run out of
        range, which is what finished(m) detects.  Assumes m has at least
        as many entries as *this. */
    void incrementation(const multi_index &m) {
      iterator it = begin(), ite = end();
      const_iterator itm = m.begin();
      if (it != ite) {
        ++(*it);
        while (*it >= *itm && it != (ite-1)) { *it = 0; ++it; ++itm; ++(*it); }
      } else resize(1); // zero-dim case: grow to size 1 so finished() reports done
    }

    /// Reset every entry to zero.
    void reset() { std::fill(begin(), end(), 0); }

    /** True once an incrementation loop bounded by m is over, i.e. the
        last entry has reached its bound (or, for an empty bound m, once
        the zero-dim sentinel set by incrementation is present). */
    inline bool finished(const multi_index &m) {
      if (m.size() == 0)
        return (size() == 1);
      else
        return ((*this)[size()-1] >= m[size()-1]);
    }

    /// Multi-index with n entries, all zero.
    multi_index(size_t n) : std::vector<size_type>(n)
    { std::fill(begin(), end(), size_type(0)); }
    /// Two-entry multi-index (i, j).
    multi_index(size_type i, size_type j)
      : std::vector<size_type>(2)
    { (*this)[0] = i; (*this)[1] = j; }
    /// Three-entry multi-index (i, j, k).
    multi_index(size_type i, size_type j, size_type k)
      : std::vector<size_type>(3)
    { (*this)[0] = i; (*this)[1] = j; (*this)[2] = k; }
    /// Four-entry multi-index (i, j, k, l).
    multi_index(size_type i, size_type j, size_type k, size_type l)
      : std::vector<size_type>(4)
    { (*this)[0] = i; (*this)[1] = j; (*this)[2] = k; (*this)[3] = l; }

    /// Empty multi-index.
    multi_index() {}

    /// Entry-wise equality; also requires equal numbers of entries.
    bool is_equal(const multi_index &m) const {
      if (this->size() != m.size()) return false;
      for (size_type i = 0; i < m.size(); ++i)
        if (m[i] != (*this)[i]) return false;
      return true;
    }

    /// Product of all entries: element count of a tensor with these extents.
    size_type total_size() const {
      size_type s = 1;
      for (size_type k = 0; k < this->size(); ++k) s *= (*this)[k];
      return s;
    }

    /// Approximate memory footprint in bytes (capacity plus object overhead).
    size_type memsize() const {
      return std::vector<size_type>::capacity()*sizeof(size_type) +
        sizeof(multi_index);
    }
  };
106 
107  inline std::ostream &operator <<(std::ostream &o,
108  const multi_index& mi) { /* a compiler ...*/
109  multi_index::const_iterator it = mi.begin(), ite = mi.end();
110  bool f = true;
111  o << "(";
112  for ( ; it != ite; ++it)
113  { if (!f) o << ", "; o << *it; f = false; }
114  o << ")";
115  return o;
116  }
117 
118  template<class T> class tensor : public std::vector<T> {
119  protected:
120 
121  multi_index sizes_;
122  multi_index coeff_;
123 
124  public:
125 
126  typedef typename std::vector<T>::size_type size_type;
127  typedef typename std::vector<T>::iterator iterator;
128  typedef typename std::vector<T>::const_iterator const_iterator;
129 
130  template<class CONT> inline const T& operator ()(const CONT &c) const {
131  typename CONT::const_iterator it = c.begin();
132  multi_index::const_iterator q = coeff_.begin(), e = coeff_.end();
133 #ifndef NDEBUG
134  multi_index::const_iterator qv = sizes_.begin();
135 #endif
136  size_type d = 0;
137  for ( ; q != e; ++q, ++it) {
138  d += (*q) * (*it);
139  GMM_ASSERT2(*it < *qv++, "Index out of range.");
140  }
141  return *(this->begin() + d);
142  }
143 
144  inline T& operator ()(size_type i, size_type j, size_type k,
145  size_type l) {
146  GMM_ASSERT2(order() == 4, "Bad tensor order.");
147  size_type d = coeff_[0]*i + coeff_[1]*j + coeff_[2]*k + coeff_[3]*l;
148  GMM_ASSERT2(d < size(), "Index out of range.");
149  return *(this->begin() + d);
150  }
151 
152  inline T& operator ()(size_type i, size_type j, size_type k) {
153  GMM_ASSERT2(order() == 3, "Bad tensor order.");
154  size_type d = coeff_[0]*i + coeff_[1]*j + coeff_[2]*k;
155  GMM_ASSERT2(d < size(), "Index out of range.");
156  return *(this->begin() + d);
157  }
158 
159  inline T& operator ()(size_type i, size_type j) {
160  GMM_ASSERT2(order() == 2, "Bad tensor order");
161  size_type d = coeff_[0]*i + coeff_[1]*j;
162  GMM_ASSERT2(d < size(), "Index out of range.");
163  return *(this->begin() + d);
164  }
165 
166  inline const T& operator ()(size_type i, size_type j, size_type k,
167  size_type l) const {
168  GMM_ASSERT2(order() == 4, "Bad tensor order.");
169  size_type d = coeff_[0]*i + coeff_[1]*j + coeff_[2]*k + coeff_[3]*l;
170  GMM_ASSERT2(d < size(), "Index out of range.");
171  return *(this->begin() + d);
172  }
173 
174  inline const T& operator ()(size_type i, size_type j,
175  size_type k) const {
176  GMM_ASSERT2(order() == 3, "Bad tensor order.");
177  size_type d = coeff_[0]*i + coeff_[1]*j + coeff_[2]*k;
178  GMM_ASSERT2(d < size(), "Index out of range.");
179  return *(this->begin() + d);
180  }
181 
182  inline const T& operator ()(size_type i, size_type j) const {
183  GMM_ASSERT2(order() == 2, "Bad tensor order.");
184  size_type d = coeff_[0]*i + coeff_[1]*j;
185  GMM_ASSERT2(d < size(), "Index out of range.");
186  return *(this->begin() + d);
187  }
188 
189  template<class CONT> inline T& operator ()(const CONT &c) {
190  typename CONT::const_iterator it = c.begin();
191  multi_index::iterator q = coeff_.begin(), e = coeff_.end();
192  size_type d = 0;
193  for ( ; q != e; ++q, ++it) d += (*q) * (*it);
194  GMM_ASSERT2(d < size(), "Index out of range.");
195  return *(this->begin() + d);
196  }
197 
198  inline size_type size() const { return std::vector<T>::size(); }
199  inline size_type size(size_type i) const { return sizes_[i]; }
200  inline const multi_index &sizes() const { return sizes_; }
201  inline size_type order() const { return sizes_.size(); }
202 
203  void init(const multi_index &c) {
204  auto it = c.begin();
205  size_type d = 1;
206  sizes_ = c; coeff_.resize(c.size());
207  auto p = coeff_.begin(), pe = coeff_.end();
208  for ( ; p != pe; ++p, ++it) { *p = d; d *= *it; }
209  this->resize(d);
210  }
211 
212  inline void init() { sizes_.resize(0); coeff_.resize(0); this->resize(1); }
213 
214  inline void init(size_type i) {
215  sizes_.resize(1); sizes_[0] = i; coeff_.resize(1); coeff_[0] = 1;
216  this->resize(i);
217  }
218 
219  inline void init(size_type i, size_type j) {
220  sizes_.resize(2); sizes_[0] = i; sizes_[1] = j;
221  coeff_.resize(2); coeff_[0] = 1; coeff_[1] = i;
222  this->resize(i*j);
223  }
224 
225  inline void init(size_type i, size_type j, size_type k) {
226  sizes_.resize(3); sizes_[0] = i; sizes_[1] = j; sizes_[2] = k;
227  coeff_.resize(3); coeff_[0] = 1; coeff_[1] = i; coeff_[2] = i*j;
228  this->resize(i*j*k);
229  }
230 
231  inline void init(size_type i, size_type j, size_type k, size_type l) {
232  sizes_.resize(4);
233  sizes_[0] = i; sizes_[1] = j; sizes_[2] = k; sizes_[3] = k;
234  coeff_.resize(4);
235  coeff_[0] = 1; coeff_[1] = i; coeff_[2] = i*j; coeff_[3] = i*j*k;
236  this->resize(i*j*k*l);
237  }
238 
239  inline void adjust_sizes(const multi_index &mi) { init(mi); }
240  inline void adjust_sizes() { init(); }
241  inline void adjust_sizes(size_type i) { init(i); }
242  inline void adjust_sizes(size_type i, size_type j) { init(i, j); }
243  inline void adjust_sizes(size_type i, size_type j, size_type k)
244  { init(i, j, k); }
245  inline void adjust_sizes(size_type i, size_type j, size_type k, size_type l)
246  { init(i, j, k, l); }
247 
248  inline size_type adjust_sizes_changing_last(const tensor &t, size_type P) {
249  const multi_index &mi = t.sizes_; size_type d = mi.size();
250  sizes_.resize(d); coeff_.resize(d);
251  if (d) {
252  std::copy(mi.begin(), mi.end(), sizes_.begin());
253  std::copy(t.coeff_.begin(), t.coeff_.end(), coeff_.begin());
254  size_type e = coeff_.back();
255  sizes_.back() = P;
256  this->resize(e*P);
257  return e;
258  } else {
259  this->resize(1);
260  return 1;
261  }
262  }
263 
264  inline void remove_unit_dim() {
265  if (sizes_.size()) {
266  size_type i = 0, j = 0;
267  for (; i < sizes_.size(); ++i)
268  if (sizes_[i] != 1) { sizes_[j]=sizes_[i]; coeff_[j]=coeff_[i]; ++j; }
269  if (!j) ++j;
270  sizes_.resize(j);
271  coeff_.resize(j);
272  }
273  }
274 
275  /** reduction of tensor t with respect to index ni with matrix m:
276  * t(...,j,...) <-- t(...,i,..) m(i, j)
277  */
278  void mat_reduction(const tensor &t, const gmm::dense_matrix<T> &m, int ni);
279  void mat_transp_reduction(const tensor &t, const gmm::dense_matrix<T> &m,
280  int ni);
281  /** mm(i,j) = t(i,j,k,l) * m(k,l); For order four tensor. */
282  void mat_mult(const gmm::dense_matrix<T> &m, gmm::dense_matrix<T> &mm);
283 
284  /** tt = t(...) * t2(...) */
285  void product(const tensor &t2, tensor &tt);
286  /** tt = t(...,k) * t2(k,...) */
287  void dot_product(const tensor &t2, tensor &tt);
288  void dot_product(const gmm::dense_matrix<T> &m, tensor &tt);
289  /** tt = t(...,k,l) * t2(k,l,...) */
290  void double_dot_product(const tensor &t2, tensor &tt);
291  void double_dot_product(const gmm::dense_matrix<T> &m, tensor &tt);
292 
293  size_type memsize() const {
294  return sizeof(T) * this->size()
295  + sizeof(*this) + sizes_.memsize() + coeff_.memsize();
296  }
297 
298  std::vector<T> &as_vector() { return *this; }
299  const std::vector<T> &as_vector() const { return *this; }
300 
301 
302  tensor<T>& operator +=(const tensor<T>& w)
303  { gmm::add(w.as_vector(), this->as_vector()); return *this; }
304 
305  tensor<T>& operator -=(const tensor<T>& w) {
306  gmm::add(gmm::scaled(w.as_vector(), T(-1)), this->as_vector());
307  return *this;
308  }
309 
310  tensor<T>& operator *=(const scalar_type w)
311  { gmm::scale(this->as_vector(), w); return *this; }
312 
313  tensor<T>& operator /=(const scalar_type w)
314  { gmm::scale(this->as_vector(), scalar_type(1)/w); return *this; }
315 
316  tensor &operator =(const tensor &t) {
317  if (this->size() != t.size()) this->resize(t.size());
318  std::copy(t.begin(), t.end(), this->begin());
319  if (sizes_.size() != t.sizes_.size()) sizes_.resize(t.sizes_.size());
320  std::copy(t.sizes_.begin(), t.sizes_.end(), sizes_.begin());
321  if (coeff_.size() != t.coeff_.size()) coeff_.resize(t.coeff_.size());
322  std::copy(t.coeff_.begin(), t.coeff_.end(), coeff_.begin());
323  return *this;
324  }
325 
326  tensor(const tensor &t)
327  : std::vector<T>(t), sizes_(t.sizes_), coeff_(t.coeff_) { }
328  tensor(const multi_index &c) { init(c); }
329  tensor(size_type i) = delete; // { init(i); }
330  tensor(size_type i, size_type j) { init(i, j); }
331  tensor(size_type i, size_type j, size_type k) { init(i, j, k); }
332  tensor(size_type i, size_type j, size_type k, size_type l)
333  { init(i, j, k, l); }
334  tensor() {}
335  };
336 
  template<class T> void tensor<T>::mat_transp_reduction
  (const tensor &t, const gmm::dense_matrix<T> &m, int ni) {
    /* contraction of tensor t by its index ni and the transpose of matrix m:
       (*this)(...,j,...) = t(...,i,...) * m(j,i), index ni being contracted. */

    // Scratch buffers reused across calls (one instance per thread).
    THREAD_SAFE_STATIC std::vector<T> tmp;
    THREAD_SAFE_STATIC multi_index mi;

    mi = t.sizes();
    size_type dimt = mi[ni], dim = m.nrows(); // dimt: old extent, dim: new extent

    GMM_ASSERT2(dimt, "Inconsistent dimension.");
    GMM_ASSERT2(dimt == m.ncols(), "Dimensions mismatch.");
    GMM_ASSERT2(&t != this, "Does not work when t and *this are the same.");

    mi[ni] = dim;
    if (tmp.size() < dimt) tmp.resize(dimt);
    adjust_sizes(mi);

    const_iterator pft = t.begin();
    iterator pf = this->begin();
    // dd/ddt: jump skipping the rest of a fiber along index ni (minus the
    // single step the loop header adds); co/cot: stride along index ni.
    size_type dd = coeff_[ni]*( sizes()[ni]-1)-1, co = coeff_[ni];
    size_type ddt = t.coeff_[ni]*(t.sizes()[ni]-1)-1, cot = t.coeff_[ni];
    std::fill(mi.begin(), mi.end(), 0);
    // Walk all entries of the result; each fiber along index ni is
    // processed in one go when mi[ni] == 0, then skipped.
    for (;!mi.finished(sizes()); mi.incrementation(sizes()), ++pf, ++pft) {
      if (mi[ni] != 0) {
        // Fiber already handled: fast-forward mi and both pointers past it.
        for (size_type k = 0; k <= size_type(ni); ++k)
          mi[k] = size_type(sizes()[k] - 1);
        pf += dd; pft += ddt;
      } else {
        // Gather the (strided) fiber of t along index ni into contiguous tmp.
        const_iterator pl = pft; iterator pt = tmp.begin();
        *pt++ = *pl;
        for(size_type k = 1; k < dimt; ++k, ++pt) { pl += cot; *pt = *pl;}

        // For each new index value k accumulate sum_l m(k,l)*tmp[l];
        // stepping pl by dim (= nrows) walks row k across the columns of m
        // (m apparently stored column-major — stride nrows between columns).
        iterator pff = pf;
        for (size_type k = 0; k < dim; ++k) {
          if (k) pff += co;
          *pff = T(0); pt = tmp.begin(); pl = m.begin() + k;
          *pff += (*pl) * (*pt); ++pt;
          for (size_type l = 1; l < dimt; ++l, ++pt) {
            pl += dim;
            *pff += (*pl) * (*pt);
          }
        }
      }
    }
  }
383 
384  template<class T> void tensor<T>::mat_mult(const gmm::dense_matrix<T> &m,
385  gmm::dense_matrix<T> &mm) {
386  GMM_ASSERT2(order() == 4,
387  "This operation is for order four tensors only.");
388  GMM_ASSERT2(sizes_[2] == gmm::mat_nrows(m) &&
389  sizes_[3] == gmm::mat_ncols(m), "Dimensions mismatch.");
390  mm.base_resize(sizes_[0], sizes_[1]);
391  gmm::clear(mm);
392 
393  const_iterator pt = this->begin();
394  const_iterator pm = m.begin();
395  for (size_type l = 0; l < sizes_[3]; ++l)
396  for (size_type k = 0; k < sizes_[2]; ++k) {
397  iterator pmm = mm.begin();
398  for (size_type j = 0; j < sizes_[1]; ++j)
399  for (size_type i = 0; i < sizes_[0]; ++i)
400  *pmm++ += *pt++ * (*pm);
401  ++pm;
402  }
403  }
404 
  template<class T> void tensor<T>::mat_reduction
  (const tensor &t, const gmm::dense_matrix<T> &m, int ni) {
    /* contraction of tensor t by its index ni and the matrix m:
       (*this)(...,j,...) = t(...,i,...) * m(i,j), index ni being contracted. */
    // Scratch buffers reused across calls (one instance per thread).
    THREAD_SAFE_STATIC std::vector<T> tmp;
    THREAD_SAFE_STATIC multi_index mi;

    mi = t.sizes();
    size_type dimt = mi[ni], dim = m.ncols(); // dimt: old extent, dim: new extent
    GMM_ASSERT2(dimt, "Inconsistent dimension.");
    GMM_ASSERT2(dimt == m.nrows(), "Dimensions mismatch.");
    GMM_ASSERT2(&t != this, "Does not work when t and *this are the same.");

    mi[ni] = dim;
    if (tmp.size() < dimt) tmp.resize(dimt);
    adjust_sizes(mi);
    const_iterator pft = t.begin();
    iterator pf = this->begin();
    // dd/ddt: jump skipping the rest of a fiber along index ni (minus the
    // single step the loop header adds); co/cot: stride along index ni.
    size_type dd = coeff_[ni]*( sizes()[ni]-1)-1, co = coeff_[ni];
    size_type ddt = t.coeff_[ni]*(t.sizes()[ni]-1)-1, cot = t.coeff_[ni];
    std::fill(mi.begin(), mi.end(), 0);
    // Walk all entries of the result; each fiber along index ni is
    // processed in one go when mi[ni] == 0, then skipped.
    for (;!mi.finished(sizes()); mi.incrementation(sizes()), ++pf, ++pft) {
      if (mi[ni] != 0) {
        // Fiber already handled: fast-forward mi and both pointers past it.
        for (size_type k = 0; k <= size_type(ni); ++k)
          mi[k] = size_type(sizes()[k] - 1);
        pf += dd; pft += ddt;
      }
      else {
        // Gather the (strided) fiber of t along index ni into contiguous tmp.
        const_iterator pl = pft; iterator pt = tmp.begin();
        *pt++ = *pl;
        for(size_type k = 1; k < dimt; ++k, ++pt) { pl += cot; *pt = *pl; }

        // For each new index value k accumulate sum_l m(l,k)*tmp[l]; pl
        // advances linearly through m, i.e. column k of m is consumed in one
        // contiguous run (m apparently stored column-major).
        iterator pff = pf; pl = m.begin();
        for (size_type k = 0; k < dim; ++k) {
          if (k) pff += co;
          *pff = T(0); pt = tmp.begin();
          for (size_type l = 0; l < dimt; ++l, ++pt, ++pl)
            *pff += (*pl) * (*pt);
        }
      }
    }
  }
446 
447 
448  template<class T> void tensor<T>::product(const tensor<T> &t2,
449  tensor<T> &tt) {
450  size_type res_order = order() + t2.order();
451  multi_index res_size(res_order);
452  for (size_type i = 0 ; i < this->order(); ++i) res_size[i] = this->size(i);
453  for (size_type i = 0 ; i < t2.order(); ++i) res_size[order() + i] = t2.size(i);
454  tt.adjust_sizes(res_size);
455  gmm::clear(tt.as_vector());
456 
457  size_type size1 = this->size();
458  size_type size2 = t2.size();
459  const_iterator pt2 = t2.begin();
460  iterator ptt = tt.begin();
461  for (size_type j = 0; j < size2; ++j, ++pt2) {
462  const_iterator pt1 = this->begin();
463  for (size_type i = 0; i < size1; ++i, ++pt1, ++ptt)
464  *ptt += *pt1 * (*pt2);
465  }
466  }
467 
468 
469  template<class T> void tensor<T>::dot_product(const tensor<T> &t2,
470  tensor<T> &tt) {
471  GMM_ASSERT2(size(order()-1) == t2.size(0),
472  "Dimensions mismatch between last dimension of first tensor "
473  "and first dimension of second tensor.");
474  size_type res_order = order() + t2.order() - 2;
475  multi_index res_size(res_order);
476  for (size_type i = 0 ; i < this->order() - 1; ++i) res_size[i] = this->size(i);
477  for (size_type i = 0 ; i < t2.order() - 1; ++i) res_size[order() - 1 + i] = t2.size(i);
478  tt.adjust_sizes(res_size);
479  gmm::clear(tt.as_vector());
480 
481  size_type size0 = t2.size(0);
482  size_type size1 = this->size()/size0;
483  size_type size2 = t2.size()/size0;
484  const_iterator pt2 = t2.begin();
485  iterator ptt = tt.begin();
486  for (size_type j = 0; j < size2; ++j) {
487  const_iterator pt1 = this->begin();
488  iterator ptt0 = ptt;
489  for (size_type q = 0; q < size0; ++q, ++pt2) {
490  ptt = ptt0;
491  for (size_type i = 0; i < size1; ++i, ++pt1, ++ptt)
492  *ptt += *pt1 * (*pt2);
493  }
494  }
495  }
496 
497  template<class T> void tensor<T>::dot_product(const gmm::dense_matrix<T> &m,
498  tensor<T> &tt) {
499  GMM_ASSERT2(size(order()-1) == gmm::mat_nrows(m),
500  "Dimensions mismatch between last dimensions of tensor "
501  "and rows of the matrix.");
502  tensor<T> t2(multi_index(gmm::mat_nrows(m),gmm::mat_ncols(m)));
503  gmm::copy(m.as_vector(), t2.as_vector());
504  dot_product(t2, tt);
505  }
506 
507 
508  template<class T> void tensor<T>::double_dot_product(const tensor<T> &t2,
509  tensor<T> &tt) {
510  GMM_ASSERT2(order() >= 2 && t2.order() >= 2,
511  "Tensors of wrong size. Tensors of order two or higher are required.");
512  GMM_ASSERT2(size(order()-2) == t2.size(0) && size(order()-1) == t2.size(1),
513  "Dimensions mismatch between last two dimensions of first tensor "
514  "and first two dimensions of second tensor.");
515  size_type res_order = order() + t2.order() - 4;
516  multi_index res_size(res_order);
517  for (size_type i = 0 ; i < this->order() - 2; ++i) res_size[i] = this->size(i);
518  for (size_type i = 0 ; i < t2.order() - 2; ++i) res_size[order() - 2 + i] = t2.size(i);
519  tt.adjust_sizes(res_size);
520  gmm::clear(tt.as_vector());
521 
522  size_type size0 = t2.size(0)*t2.size(1);
523  size_type size1 = this->size()/size0;
524  size_type size2 = t2.size()/size0;
525  const_iterator pt2 = t2.begin();
526  iterator ptt = tt.begin();
527  for (size_type j = 0; j < size2; ++j) {
528  const_iterator pt1 = this->begin();
529  iterator ptt0 = ptt;
530  for (size_type q = 0; q < size0; ++q, ++pt2) {
531  ptt = ptt0;
532  for (size_type i = 0; i < size1; ++i, ++pt1, ++ptt)
533  *ptt += *pt1 * (*pt2);
534  }
535  }
536  }
537 
538  template<class T> void tensor<T>::double_dot_product(const gmm::dense_matrix<T> &m,
539  tensor<T> &tt) {
540  GMM_ASSERT2(order() >= 2,
541  "Tensor of wrong size. Tensor of order two or higher is required.");
542  GMM_ASSERT2(size(order()-2) == gmm::mat_nrows(m) &&
543  size(order()-1) == gmm::mat_ncols(m),
544  "Dimensions mismatch between last two dimensions of tensor "
545  "and dimensions of the matrix.");
546  tensor<T> t2(multi_index(gmm::mat_nrows(m),gmm::mat_ncols(m)));
547  gmm::copy(m.as_vector(), t2.as_vector());
548  double_dot_product(t2, tt);
549  }
550 
551 
552  template<class T> std::ostream &operator <<
553  (std::ostream &o, const tensor<T>& t) {
554  o << "sizes " << t.sizes() << " " << vref(t.as_vector());
555  return o;
556  }
557 
  // Tensor with real (scalar_type) entries.
  typedef tensor<scalar_type> base_tensor;
  // Tensor with complex (complex_type) entries.
  typedef tensor<complex_type> base_complex_tensor;
560 
561 
562 } /* end of namespace bgeot. */
563 
564 
565 #endif /* BGEOT_TENSOR_H */
bgeot::operator<<
std::ostream & operator<<(std::ostream &o, const convex_structure &cv)
Print the details of the convex structure cvs to the output stream o.
Definition: bgeot_convex_structure.cc:71
bgeot::size_type
size_t size_type
used as the common size type in the library
Definition: bgeot_poly.h:49
gmm::clear
void clear(L &l)
clear (fill with zeros) a vector or matrix.
Definition: gmm_blas.h:59
bgeot::short_type
gmm::uint16_type short_type
used as the common short type integer in the library
Definition: bgeot_config.h:72
getfem_omp.h
Tools for multithreaded, OpenMP and Boost based parallelization.
gmm::resize
void resize(V &v, size_type n)
*‍/
Definition: gmm_blas.h:209
bgeot_small_vector.h
Small (dim < 8) vectors.
bgeot
Basic Geometric Tools.
Definition: bgeot_convex_ref.cc:27

Rabisu Mirror Service We provide mirrors to support Open source communities. Our mirror server is located in Istanbul/Turkey region.

Please do not hesitate to contact mirror@rabisu.com for new open source mirror submissions.