Tensor.hh
1 #ifndef TENSOR_HH
2 #define TENSOR_HH
3 
4 #include <iostream>
5 #include <cmath>
6 
7 #include <iomanip> // std::setw, used by operator<<
8 #include <cassert> // assert, used in scalar_prod
9 #include <cstdlib> // exit
10 // NOTE: the definitions below also rely on CASM's Array, Counter, Permutation, SymOp, SymGroup,
11 // SymGroupRep and CASM_math headers (and Eigen), assumed to be pulled in by the CASM build.
12 
13 namespace CASM {
14 
15  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
16  template<class T>
17  class Tensor;
18 
19  template<class T>
20  class ReturnTensor;
21 
22  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 
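24  /// Tensor: a dense, rank-Nrank tensor stored as a flat Array<T>.
25  /// Element (i_0, i_1, ...) lives at linear index sum_k i_k * idim[k]; each dimension
26  /// defaults to extent 3 (Cartesian). Usage sketch (illustrative, not from the original docs):
27  ///   Tensor<double> eps(2);                    // rank-2, 3x3 tensor
28  ///   eps(0, 1) = 0.5;                          // rank-2 element access
29  ///   Array<Index> ind(3, 0);  ind[2] = 2;
30  ///   Tensor<double> chi(3);  chi(ind) = 1.0;   // general access via an index Array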
31  template<class T>
32  class Tensor : public Array<T> {
33  Index Nrank;
34  Array<Index> Ndim, idim, max_ind;
35 
36  public:
37  using Array<T> :: resize;
38  using Array<T> :: size;
39  using Array<T> :: at;
40 
41  //These constructors take two arguments even though only one is strictly needed, since
42  //rank == dim.size() when used correctly.
43  Tensor(Index tNrank, Array<Index> tNdim) : Nrank(tNrank), Ndim(tNdim) {
44  Index N = 1;
45  idim.resize(Nrank);
46  max_ind.resize(Nrank);
47  for(Index i = 0; i < Nrank; i++) {
48  max_ind[i] = Ndim[i] - 1;
49  idim[i] = N;
50  N *= Ndim[i];
51  }
52 
53  resize(N);
54  };
55 
56  Tensor(Index tNrank, Array<Index> tNdim, T init_val) : Nrank(tNrank), Ndim(tNdim) {
57  Index N = 1;
58  idim.resize(Nrank);
59  max_ind.resize(Nrank);
60  for(Index i = 0; i < Nrank; i++) {
61  max_ind[i] = Ndim[i] - 1;
62  idim[i] = N;
63  N *= Ndim[i];
64  }
65 
66  resize(N, init_val);
67  };
68 
69  Tensor(Index tNrank = 0) : Nrank(tNrank), Ndim(tNrank, 3) {
70  Index N = 1;
71  idim.resize(Nrank);
72  max_ind.resize(Nrank);
73  for(Index i = 0; i < Nrank; i++) {
74  max_ind[i] = Ndim[i] - 1;
75  idim[i] = N;
76  N *= Ndim[i];
77  }
78  resize(N);
79  };
80 
81  Tensor(const Eigen::Matrix<T, 3, 3> &mat_init) : Nrank(2), Ndim(2, 3) {
82  Index N = 1;
83  idim.resize(Nrank);
84  max_ind.resize(Nrank);
85  for(Index i = 0; i < Nrank; i++) {
86  max_ind[i] = Ndim[i] - 1;
87  idim[i] = N;
88  N *= Ndim[i];
89  }
90  resize(N);
91  for(int i = 0; i < 3; i++)
92  for(int j = 0; j < 3; j++)
93  (*this)(i, j) = mat_init(i, j);
94  }
95 
96  Tensor(ReturnTensor<T> &init_cont) {
97  swap(init_cont);
98  };
99 
100  Tensor &operator=(ReturnTensor<T> &RHS) {
101  swap(RHS);
102  return *this;
103  };
104 
105  Tensor &operator=(const T &RHS) {
106  for(Index i = 0; i < size(); i++)
107  at(i) = RHS;
108  return *this;
109  }
110 
111  template<int dim1, int dim2, int flag1>
112  Tensor &operator=(const Eigen::Matrix<T, dim1, dim2, flag1> &LHS);
113 
114  static ReturnTensor<T> identity(Index trank, Index tdim);
115 
116  void swap(Tensor &RHS);
117  void redefine(Array<Index> _Ndim);
118  void redefine(Array<Index> _Ndim, T fill_val);
119 
120  Tensor &dim_permute(const Array<Index> &iperm);
121  Tensor &dim_permute(const Permutation &perm);
122 
123  Tensor &dim_ipermute(const Array<Index> &perm);
124  Tensor &dim_ipermute(const Permutation &perm);
125 
126  Tensor &dim_unpermute();
127  bool next_permute();
128  /// Symmetrize tensor with respect to index permutation.
129  void permute_symmetrize();
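130  /// Symmetrize with respect to index permutations only within each group of dimensions listed
131  /// in unique_dim (e.g. unique_dim = {{0,1},{2,3}} symmetrizes dims 0<->1 and 2<->3 independently).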
132  void permute_symmetrize(const Array< Array<Index> > &unique_dim);
133  void reset_idim();
134 
135  Counter<Array<Index> > element_counter() const;
136 
137  Index rank() const;
138  const Array<Index> &ind_max() const;
139  const Array<Index> &dim()const;
140  Index dim(Index i) const;
141 
142  const Array<Index> &mult_array() const;
143 
144  const T &get(const Array<Index> &inds) const;
145  T &at(const Array<Index> &inds);
146  const T &at(const Array<Index> &inds) const;
147  T &operator()(const Array<Index> &inds);
148  const T &operator()(const Array<Index> &inds) const;
149  T &operator()(Index i, Index j);
150  const T &operator()(Index i, Index j) const;
151 
152  Tensor &operator+=(const Tensor &RHS);
153  Tensor &operator-=(const Tensor &RHS);
154  Tensor &operator*=(T RHS);
155  Tensor &operator/=(T RHS);
156 
157  ReturnTensor<T> operator-();
158  ReturnTensor<T> operator+(const Tensor &RHS);
159  ReturnTensor<T> operator-(const Tensor &RHS);
160 
161  //We should find a way to do tensor contraction/multiplication
162 
163  T scalar_prod(const Tensor &RHS) const;
164  ReturnTensor<T> tensor_prod(const Tensor &RHS);
165  Tensor &normalize();
166 
167  Tensor &transform(const Eigen::MatrixXd &op);
168 
169  //rep_IDs specify the representations that transform each dimension of the tensor
170  Tensor &transform(const SymOp &op, Array<SymGroupRepID> rep_IDs);
171 
172  Tensor &apply_sym(const SymOp &op);
173 
174  Tensor &symmetrize_index(const SymOp &op, Array<SymGroupRepID> rep_IDs, Array<Index> inner_ind);
175 
176  Tensor slice(Array<Index> slice_Ndim, Array<Index> slice_ind);
177 
178  void read(std::istream &stream); //Added by Ivy -- TODO: generalize beyond rank-2 tensors
179 
180  //Tensor<T> matrix_multiply(Vector3<T> &tvec);
181 
182  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> convert_to_Eigen() const;
183 
184  };
185 
186 
187  template<class T>
188  Tensor<T> operator*(const T &LHS, const Tensor<T> &RHS);
189 
190  template<class T>
191  Tensor<T> operator*(const SymOp &LHS, const Tensor<T> &RHS);
192 
193  template<class T>
194  std::ostream &operator << (std::ostream &stream, const Tensor<T> &RHS);
195 
196  template<class T>
197  std::istream &operator >> (std::istream &stream, Tensor<T> &RHS);
198 
199  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
200 
201  template<class T>
202  class ReturnTensor : public Tensor<T> {
203 
204  public:
205  using Tensor<T>::swap;
206 
207  ReturnTensor(Tensor<T> &init_tens) {
208  swap(init_tens);
209  }
210 
211  ReturnTensor &operator=(Tensor<T> &RHS) {
212  swap(RHS);
213  return *this;
214  };
215 
216  };
217 
218  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
219 
220  template<class T>
221  class TensorBasis : public Array< Tensor<T> > {
222  public:
223  using Array< Tensor<T> > :: size;
224  using Array< Tensor<T> > :: at;
225  using Array< Tensor<T> > :: clear;
226 
227  Array<double> coeffs;
228 
229  double eci(Index i) const {
230  return coeffs[i];
231  };
232  double &eci(Index i) {
233  return coeffs[i];
234  }; //Added by Ivy 09/25/2012
235 
236  void generate_basis(Index Nrank, const SymGroup &sym_group);
237  void generate_basis(Index Nrank, const SymGroup &sym_group, Index Rep_ID);
238  void generate_basis(Index Nrank, const SymGroup &sym_group, const SymGroupRep &perm_group);
239 
240  void make_orthogonal_to(const TensorBasis &ortho_basis);
241  TensorBasis &apply_sym(const SymOp &op);
242  void normalize();
243  void idealize();
244  bool read(std::istream &stream); //Added by Ivy
245  };
246 
247  //************************************************************
248 
249  template<class T>
250  void Tensor<T>::swap(Tensor &RHS) { // exchange rank, dimensions, and data with RHS
251  Index trank(Nrank);
252  Nrank = RHS.Nrank;
253  RHS.Nrank = trank;
254  Ndim.swap(RHS.Ndim);
255  idim.swap(RHS.idim);
256  max_ind.swap(RHS.max_ind);
257  Array<T>::swap(RHS);
258  return;
259  }
260 
261  //************************************************************
262  template< class T >
263  template<int dim1, int dim2, int flag1>
264  Tensor<T> &Tensor<T>::operator=(const Eigen::Matrix<T, dim1, dim2, flag1> &LHS) {
265  Array<Index> tdim(2);
266  if(rank() != 2 || dim(0) != LHS.rows() || dim(1) != LHS.cols()) {
267  tdim[0] = LHS.rows();
268  tdim[1] = LHS.cols();
269  redefine(tdim);
270  }
271  for(int i = 0; i < LHS.rows(); i++) {
272  tdim[0] = i;
273  for(int j = 0; j < LHS.cols(); j++) {
274  tdim[1] = j;
275  at(tdim) = LHS(i, j);
276  }
277  }
278  return (*this);
279  }
280 
281 
282  //************************************************************
283 
284  template<class T>
285  void Tensor<T>::redefine(Array<Index> _Ndim) {
286  Nrank = _Ndim.size();
287  Ndim = _Ndim;
288  Index N = 1;
289  idim.resize(Nrank);
290  max_ind.resize(Nrank);
291  for(Index i = 0; i < Nrank; i++) {
292  max_ind[i] = Ndim[i] - 1;
293  idim[i] = N;
294  N *= Ndim[i];
295  }
296 
297  resize(N);
298  }
299 
300  //************************************************************
301 
302  template<class T>
303  void Tensor<T>::redefine(Array<Index> _Ndim, T fill_val) {
304  Nrank = _Ndim.size();
305  Ndim = _Ndim;
306  Index N = 1;
307  idim.resize(Nrank);
308  max_ind.resize(Nrank);
309  for(Index i = 0; i < Nrank; i++) {
310  max_ind[i] = Ndim[i] - 1;
311  idim[i] = N;
312  N *= Ndim[i];
313  }
314 
315  resize(N, fill_val);
316  }
317 
318  //************************************************************
319 
320  template<class T>
321  Tensor<T> &Tensor<T>::dim_permute(const Array<Index> &iperm) {
322  if(iperm.size() != Nrank) {
323  // temporarily changed this to cout
324  std::cout << "WARNING: In Tensor<T>::dim_permute, permutation array is incompatible with tensor rank! Continuing without permuting indices...\n";
325  return *this;
326  }
327 
328  idim.permute(iperm);
329  Ndim.permute(iperm);
330  max_ind.permute(iperm);
331 
332  return *this;
333  }
334 
335  //************************************************************
336 
337  template<class T>
338  Tensor<T> &Tensor<T>::dim_permute(const Permutation &perm) {
339  return dim_permute(perm.perm_array());
340  }
341  //************************************************************
342 
343  template<class T>
344  Tensor<T> &Tensor<T>::dim_ipermute(const Array<Index> &iperm) {
345  if(iperm.size() != Nrank) {
346  std::cerr << "WARNING: In Tensor<T>::dim_ipermute, permutation array is incompatible with tensor rank! Continuing without permuting indices...\n";
347  return *this;
348  }
349 
350  idim.ipermute(iperm);
351  Ndim.ipermute(iperm);
352  max_ind.ipermute(iperm);
353 
354  return *this;
355  }
356 
357  //************************************************************
358 
359  template<class T>
360  Tensor<T> &Tensor<T>::dim_ipermute(const Permutation &perm) {
361  return dim_ipermute(perm.perm_array());
362  }
363  //************************************************************
364 
365  template<class T>
366  Tensor<T> &Tensor<T>::dim_unpermute() { // restore the original (ascending idim) dimension order
367  Array<Index> tperm;
368 
369  idim.sort(tperm);
370  Ndim.permute(tperm);
371  max_ind.permute(tperm);
372 
373  return *this;
374  }
375 
376  //************************************************************
377 
378  template<class T>
379  bool Tensor<T>::next_permute() { // advance to the next lexicographic dimension ordering; false after the last
380  Index i = Nrank - 2;
381  bool is_valid = true;
382  Array<Index> iperm(Nrank, 0);
383  for(Index k = 0; k < Nrank; k++)
384  iperm[k] = k;
385 
386  while(valid_index(i) && idim[i + 1] <= idim[i])
387  i--;
388 
389 
390  Index j = Nrank - 1;
391 
392  if(valid_index(i)) {
393  while(idim[j] <= idim[i])
394  j--;
395 
396  idim.swap_elem(i, j);
397  Ndim.swap_elem(i, j);
398  max_ind.swap_elem(i, j);
399 
400  i++;
401  j = Nrank - 1;
402  }
403 
404  else {
405  is_valid = false;
406  i = 0;
407  }
408 
409  while(i < j && valid_index(j)) {
410  idim.swap_elem(i, j);
411  Ndim.swap_elem(i, j);
412  max_ind.swap_elem(i, j);
413 
414  i++;
415  j--;
416  }
417 
418  return is_valid;
419  }
420 
421  //************************************************************
422 
423  template<class T>
424  void Tensor<T>::reset_idim() { // copy elements so that idim is ascending again
425  if(idim.is_ascending())
426  return;
427 
428  Tensor ttens(Nrank, Ndim);
429  Counter<Array<Index> > icount(ttens.element_counter());
430 
431  do {
432  ttens(icount()) = at(icount());
433  }
434  while((++icount).valid());
435 
436  swap(ttens);
437 
438  return;
439  }
440 
441  //************************************************************
442 
443  template<class T>
444  void Tensor<T>::permute_symmetrize() {
445  reset_idim();
446  Tensor<T> ttens(*this);
447  Index nperm = 1;
448  while(ttens.next_permute()) {
449  (*this) += ttens;
450  nperm++;
451  }
452  (*this) /= nperm;
453  return;
454  }
455 
456  //************************************************************
457 
458  template<class T>
459  void Tensor<T>::permute_symmetrize(const Array< Array<Index> > &unique_dim) {
460  reset_idim();
461  Array<Array<Index> > tunique(unique_dim);
462  for(Index i = 0; i < tunique.size(); i++)
463  tunique[i].sort();
464  Array<Index> tperm = Ndim;
465  Tensor<T> ttens(*this);
466  Index iset(0), i(0), j(0), nperm(1);
467  (*this) = 0;
468  while(iset < tunique.size()) {
469  for(i = 0; i < tunique.size(); i++)
470  for(j = 0; j < tunique[i].size(); j++)
471  tperm[unique_dim[i][j]] = tunique[i][j];
472  (*this) += ttens.dim_permute(tperm);
473  ttens.dim_unpermute();
474  nperm++;
475  iset = 0;
476  while(iset < tunique.size() && !tunique[iset].next_permute())
477  iset++;
478  }
479  (*this) /= double(nperm);
480 
481  }
482 
483  //************************************************************
484 
485  template<class T>
486  Counter<Array<Index> > Tensor<T>::element_counter() const { // Counter over all element indices, (0,...,0) to max_ind
487  return Counter<Array<Index> >(Array<Index>(Nrank, 0), max_ind, Array<Index>(Nrank, 1));
488  }
489 
490  //************************************************************
491 
492  template<class T>
493  const Array<Index> &Tensor<T>::ind_max() const {
494  return max_ind;
495  }
496 
497  //************************************************************
498 
499  template<class T>
500  const Array<Index> &Tensor<T>::dim()const {
501  return Ndim;
502  }
503 
504  //************************************************************
505 
506  template<class T>
507  Index Tensor<T>::dim(Index i) const {
508  return Ndim[i];
509  }
510 
511  //************************************************************
512 
513  template<class T>
514  const Array<Index> &Tensor<T>::mult_array() const {
515  return idim;
516  }
517 
518  //************************************************************
519 
520  template<class T>
521  Index Tensor<T>::rank() const {
522  return Nrank;
523  }
524 
525  //************************************************************
526 
527  template<class T>
528  const T &Tensor<T>::get(const Array<Index> &inds) const {
529  Index lin_index = 0;
530  for(Index i = 0; i < Nrank; i++)
531  lin_index += inds[i] * idim[i];
532 
533  return at(lin_index);
534  };
535 
536  //************************************************************
537 
538  template<class T>
539  T &Tensor<T>::at(const Array<Index> &inds) {
540  Index lin_index = 0;
541  for(Index i = 0; i < Nrank; i++)
542  lin_index += inds[i] * idim[i];
543 
544  return at(lin_index);
545  };
546 
547  //************************************************************
548 
549  template<class T>
550  const T &Tensor<T>::at(const Array<Index> &inds) const {
551  Index lin_index = 0;
552  for(Index i = 0; i < Nrank; i++)
553  lin_index += inds[i] * idim[i];
554 
555  return at(lin_index);
556  };
557 
558  //************************************************************
559 
560  template<class T>
561  T &Tensor<T>::operator()(const Array<Index> &inds) {
562  Index lin_index = 0;
563  for(Index i = 0; i < Nrank; i++)
564  lin_index += inds[i] * idim[i];
565 
566  return at(lin_index);
567  };
568 
569  //************************************************************
570 
571  template<class T>
572  const T &Tensor<T>::operator()(const Array<Index> &inds) const {
573  Index lin_index = 0;
574  for(Index i = 0; i < Nrank; i++)
575  lin_index += inds[i] * idim[i];
576 
577  return at(lin_index);
578  };
579  //************************************************************
580 
581  template<class T>
582  T &Tensor<T>::operator()(Index i, Index j) {
583  return at(i * idim[0] + j * idim[1]);
584  };
585 
586  //************************************************************
587 
588  template<class T>
589  const T &Tensor<T>::operator()(Index i, Index j) const {
590  return at(i * idim[0] + j * idim[1]);
591  };
592 
593  //************************************************************
594 
595  template<class T>
596  Tensor<T> &Tensor<T>::operator+=(const Tensor &RHS) {
597  if(Ndim != RHS.Ndim) {
598  std::cerr << "Attempting to add two incompatible tensors!\n";
599  exit(1);
600  }
601 
602  if(idim == RHS.idim) {
603  for(Index i = 0; i < size(); i++)
604  at(i) += RHS[i];
605 
606  return *this;
607  }
608 
609 
610  Counter<Array<Index> > icount(Array<Index>(Nrank, 0), max_ind, Array<Index>(Nrank, 1));
611  do {
612  at(icount()) += RHS(icount());
613 
614  }
615  while((++icount).valid());
616 
617  return *this;
618  }
619 
620  //************************************************************
621 
622  template<class T>
623  Tensor<T> &Tensor<T>::operator-=(const Tensor &RHS) {
624  if(Ndim != RHS.Ndim) {
625  std::cerr << "Attempting to subtract two incompatible tensors!\n";
626  exit(1);
627  }
628 
629  if(idim == RHS.idim) {
630  for(Index i = 0; i < size(); i++)
631  at(i) -= RHS[i];
632 
633  return *this;
634  }
635 
636  Counter<Array<Index> > icount(Array<Index>(Nrank, 0), max_ind, Array<Index>(Nrank, 1));
637  do {
638  at(icount()) -= RHS(icount());
639  }
640  while((++icount).valid());
641 
642 
643  return *this;
644  }
645 
646  //************************************************************
647 
648  template<class T>
649  Tensor<T> &Tensor<T>::operator*=(T RHS) {
650  for(Index i = 0; i < size(); i++)
651  at(i) *= RHS;
652 
653  return *this;
654 
655  }
656 
657  //************************************************************
658 
659  template<class T>
660  Tensor<T> &Tensor<T>::operator/=(T RHS) {
661  for(Index i = 0; i < size(); i++)
662  at(i) /= RHS;
663 
664  return *this;
665 
666  }
667 
668  //************************************************************
669 
670  template<class T>
671  ReturnTensor<T> Tensor<T>::operator-() {
672  Tensor<T> ttens(*this);
673  for(Index i = 0; i < ttens.size(); i++)
674  ttens[i] = -at(i);
675  return ReturnTensor<T>(ttens);
676  }
677 
678  //************************************************************
679 
680  template<class T>
681  ReturnTensor<T> Tensor<T>::operator+(const Tensor &RHS) {
682  return ReturnTensor<T>(Tensor<T>(*this) += RHS);
683  }
684 
685  //************************************************************
686 
687  template<class T>
688  ReturnTensor<T> Tensor<T>::operator-(const Tensor &RHS) {
689  return ReturnTensor<T>(Tensor<T>(*this) -= RHS);
690  }
691 
692  //************************************************************
693 
694  template<class T>
695  T Tensor<T>::scalar_prod(const Tensor &RHS) const {
696  if(Ndim != RHS.Ndim) {
697  std::cerr << "Attempting to take scalar product of two incompatible tensors!\n";
698  assert(0);
699  //exit(1);
700  }
701 
702  T tprod = 0;
703 
704  if(idim == RHS.idim) {
705  for(Index i = 0; i < size(); i++)
706  tprod += at(i) * RHS[i];
707  }
708 
709  else {
710  Counter<Array<Index> > icount(Array<Index>(Nrank, 0), max_ind, Array<Index>(Nrank, 1));
711  do {
712  tprod += at(icount()) * RHS(icount());
713  }
714  while((++icount).valid());
715  }
716 
717  return tprod;
718 
719  }
720 
721  //************************************************************
722 
723  template<class T>
724  ReturnTensor<T> Tensor<T>::tensor_prod(const Tensor &RHS) { // outer (direct) product
725  Tensor<T> ttens(rank() + RHS.rank(), array_cat(dim(), RHS.dim()), 0);
726 
727  Counter<Array<Index> > icount(element_counter()), jcount(RHS.element_counter());
728 
729  Index i, j, tot_ind;
730  do {
731  jcount.reset();
732  do {
733  tot_ind = 0;
734  for(i = 0; i < icount().size(); i++)
735  tot_ind += icount[i] * ttens.idim[i];
736 
737  for(j = 0; j < jcount().size(); j++)
738  tot_ind += jcount[j] * ttens.idim[i + j];
739 
740  ttens[tot_ind] = at(icount()) * RHS(jcount());
741 
742  }
743  while((++jcount).valid());
744  }
745  while((++icount).valid());
746 
747  return ReturnTensor<T>(ttens);
748  }
749 
750 
751  //************************************************************
752 
753  template<class T>
754  Tensor<T> &Tensor<T>::normalize() {
755 
756  return (*this) /= sqrt(scalar_prod(*this));
757  }
758 
759  //************************************************************
760 
761  template<class T>
762  ReturnTensor<T> Tensor<T>::identity(Index trank, Index tdim) { // 1 where all indices are equal, 0 elsewhere
763  Tensor<T> ttens(trank, Array<Index>(trank, tdim), 0);
764  for(Index i = 0; i < tdim; i++)
765  ttens(Array<Index>(trank, i)) = 1;
766 
767  return ReturnTensor<T>(ttens);
768  }
769 
770  //************************************************************
771  template<class T>
772  Tensor<T> &Tensor<T>::transform(const SymOp &op, Array<SymGroupRepID> rep_IDs) {
773  if(!rank()) return *this;
774  Array<Eigen::MatrixXd const *> rep_mats(op.get_matrix_reps(rep_IDs));
775 
776  Tensor<T> ttens(Nrank, Ndim);
777  Index nd;
778 
779  T tval;
780  Counter<Array<Index> > icount(element_counter());
781  Counter<Array<Index> > jcount(icount);
782  do {
783  ttens(icount()) = 0;
784  jcount.reset();
785  do {
786  tval = at(jcount());
787  for(nd = 0; nd < Nrank; nd++)
788  tval *= (*rep_mats[nd])(icount[nd], jcount[nd]);
789  ttens(icount()) += tval;
790 
791  }
792  while((++jcount).valid());
793 
794  }
795  while((++icount).valid());
796  swap(ttens);
797  return *this;
798  }
799 
800  //************************************************************
801  template<class T>
802  Tensor<T> &Tensor<T>::symmetrize_index(const SymOp &op, Array<SymGroupRepID> rep_IDs, Array<Index> inner_ind) {
803  if(!rank()) return *this;
804  Array<Eigen::MatrixXd const *> rep_mats(op.get_matrix_reps(rep_IDs));
805 
806  Index nd;
807 
808  T tval;
809  Counter<Array<Index> > icount(element_counter());
810  do {
811  tval = 1.0;
812  for(nd = 0; nd < Nrank; nd++)
813  tval *= (*rep_mats[nd])(icount[nd], inner_ind[nd]);
814  at(icount()) += tval;
815 
816  }
817  while((++icount).valid());
818  return *this;
819  }
820 
821  //************************************************************
822  template<class T>
823  Tensor<T> &Tensor<T>::apply_sym(const SymOp &op) {
824  if(!rank()) return *this;
825 
826  Tensor<T> ttens(Nrank, Ndim);
827  Index nd;
828 
829  T tval;
830  Counter<Array<Index> > icount(element_counter());
831  Counter<Array<Index> > jcount(icount);
832  do {
833  ttens(icount()) = 0;
834  jcount.reset();
835  do {
836  tval = at(jcount());
837  for(nd = 0; nd < Nrank; nd++)
838  tval *= op.matrix()(icount[nd], jcount[nd]);
839  ttens(icount()) += tval;
840 
841  }
842  while((++jcount).valid());
843 
844  }
845  while((++icount).valid());
846  swap(ttens);
847  return *this;
848  }
849 
850  //************************************************************
851  //************************************************************
852  template<class T>
853  void Tensor<T>::read(std::istream &stream) {
854 
855  double val;
856  Array<Index> min(rank(), 0), max(ind_max()), inc(rank(), 1);
857  Counter<Array<Index> > icount(min, max, inc);
858 
859  do {
860  stream >> val;
861  at(icount()) = val;
862  }
863  while((++icount).valid());
864 
865  return;
866  };
867 
868  //************************************************************
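869  // Usage sketch (illustrative): apply_sym transforms every index of the tensor by the op's
870  // Cartesian matrix R, i.e. T'_{ij...} = sum_{kl...} R_ik * R_jl * ... * T_{kl...} :
871  //   Tensor<double> t(2);          // rank-2 Cartesian tensor
872  //   t(0, 0) = 1.0;
873  //   t.apply_sym(my_op);           // my_op: a hypothetical CASM::SymOp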
874  //************************************************************
875  /*template<class T>
876  Tensor<T> Tensor<T>::matrix_multiply(Vector3<T> &tvec) {
877 
878  Index j = 0;
879  Array<Index> tensordim(2, 0);
880  tensordim[0] = 3;
881  tensordim[1] = 1;
882  Tensor<T> result(2, tensordim, 0);
883 
884  for(Index i = 0; i < size(); i++) {
885  if(i % 3 == 0) {
886  j++;
887  }
888  result.at(j - 1) += at(i) * tvec.at(i % 3);
889  }
890 
891  return result;
892 
893  };
894  */
895  //************************************************************
896 
897  template<class T>
898  Tensor<T> &Tensor<T>::transform(const Eigen::MatrixXd &mat) {
899  if(!rank()) return *this;
900 
901  Tensor<T> ttens(Nrank, Ndim);
902  Index nd;
903 
904  T tval;
905  Counter<Array<Index> > icount(element_counter());
906  Counter<Array<Index> > jcount(icount);
907  do {
908  ttens(icount()) = 0;
909  jcount.reset();
910  do {
911  tval = at(jcount());
912  for(nd = 0; nd < Nrank; nd++)
913  tval *= mat(icount[nd], jcount[nd]);
914  ttens(icount()) += tval;
915 
916  }
917  while((++jcount).valid());
918 
919  }
920  while((++icount).valid());
921  swap(ttens);
922  return *this;
923  }
924 
925  //************************************************************
926 
927  template<class T>
928  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> Tensor<T>::convert_to_Eigen() const {
929  if(rank() != 2) {
930  std::cerr << "WARNING: Attempting to assign Eigen::Matrix object using a Tensor of rank " << rank()
931  << "!\nTensor must have rank=2 for assignment to be valid. Exiting...\n";
932  exit(1);
933  }
934  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> tmat(dim(0), dim(1));
935  tmat.setZero();
936  for(Index i = 0; i < tmat.rows(); i++) {
937  for(Index j = 0; j < tmat.cols(); j++) {
938  tmat(i, j) = (*this)(i, j);
939  }
940  }
941  return tmat;
942  }
943 
944  //************************************************************
945 
946  template<class T>
947  Tensor<T> operator*(const T &LHS, const Tensor<T> &RHS) {
948  return Tensor<T>(RHS) *= LHS;
949  }
950 
951  //************************************************************
952 
953  template<class T>
954  Tensor<T> operator*(const SymOp &LHS, const Tensor<T> &RHS) {
955  return Tensor<T>(RHS).apply_sym(LHS);
956  }
957 
958  //************************************************************
959 
960  template<class T>
961  T dot(const Tensor<T> &LHS, const Tensor<T> &RHS) {
962  return LHS.scalar_prod(RHS);
963  }
964 
965  //************************************************************
966 
967  template<class T>
968  T norm(const Tensor<T> &ttens) {
969  return sqrt(ttens.scalar_prod(ttens));
970  }
971 
972 
973  //************************************************************
974 
975  template<class T>
976  std::ostream &operator<<(std::ostream &stream, const Tensor<T> &RHS) {
977  std::ios::fmtflags old_flags = stream.flags();
978  stream.flags(std::ios::showpoint | std::ios::fixed | std::ios::right);
979  int twide = stream.width();
980 
981  if(!RHS.rank()) {
982  stream << RHS[0] << '\n';
983  stream.flags(old_flags);
984  return stream;
985  }
986 
987  Array<Index> min(RHS.rank(), 0), max(RHS.ind_max()), inc(RHS.rank(), 1);
988  Counter<Array<Index> > icount(min, max, inc);
989  do {
990  if(RHS.rank() > 2 && icount[0] == 0 && icount[1] == 0) {
991  stream << "Block (:,:," << icount[2];
992  for(Index j = 3; j < RHS.rank(); j++)
993  stream << "," << icount[j];
994  stream << ") :\n";
995  }
996 
997  stream << std::setw(0) << " " << std::setw(twide) << RHS(icount());
998  if(icount[0] == RHS.ind_max()[0]) {
999  stream << '\n';
1000 
1001  // if(RHS.rank() > 1 && icount[1] == RHS.ind_max()[1])
1002  // stream << "\n"; //commented out by Ivy 07/01/13 so that an extra new line isn't printed.
1003  }
1004 
1005  }
1006  while((++icount).valid());
1007  stream.flags(old_flags);
1008  return stream;
1009  }
1010 
1011 
1012 
1013  //************************************************************
1014 
1015  template<class T>
1016  std::istream &operator >> (std::istream &stream, Tensor<T> &RHS) {
1017  std::cerr << "WARNING: Input stream operator has not been implemented for Tensor class! Exiting...\n";
1018  exit(1);
1019  }
1020 
1021 
1022 
1023  //************************************************************
1024 
1025  template<class T>
1026  void TensorBasis<T>::generate_basis(Index Nrank, const SymGroup &sym_group) {
1027  Index i, ns;
1028  clear();
1029 
1030  Tensor<T> ttens(Nrank, Array<Index>(Nrank, 3), 0);
1031 
1032  for(i = 0; i < ttens.size(); i++) {
1033  Tensor<T> avtens(Nrank, Array<Index>(Nrank, 3), 0);
1034  ttens[i] = 1;
1035  for(ns = 0; ns < sym_group.size(); ns++)
1036  avtens += sym_group[ns] * ttens;
1037  if(!this->contains(avtens))
1038  this->push_back(avtens);
1039  ttens[i] = 0;
1040  }
1041  idealize();
1042 
1043  coeffs.resize(size(), NAN); //Changed by Ivy 09/25/2012
1044 
1045  }
1046 
1047  //************************************************************
1048 
1049  template<class T>
1050  void TensorBasis<T>::generate_basis(Index Nrank, const SymGroup &sym_group, Index Rep_ID) {
1051  if(!sym_group.size()) {
1052  std::cerr << "WARNING: Attempting to generate tensor basis from an invalid symmetry group. Exiting...";
1053  exit(1);
1054  }
1055  Index i, ns;
1056  clear();
1057 
1058  Tensor<T> ttens(Nrank, Array<Index>(Nrank, 3), 0);
1059 
1060  for(i = 0; i < ttens.size(); i++) {
1061  Tensor<T> avtens(Nrank, Array<Index>(Nrank, 3), 0);
1062  ttens[i] = 1;
1063  for(ns = 0; ns < sym_group.size(); ns++)
1064  avtens += sym_group[ns] * ttens;
1065  if(!this->contains(avtens))
1066  this->push_back(avtens);
1067  ttens[i] = 0;
1068  }
1069  idealize();
1070 
1071  coeffs.resize(size(), NAN); //Changed by Ivy 09/25/2012
1072 
1073  }
1074 
1075 
1076  //************************************************************
1077 
1078  template<class T>
1079  void TensorBasis<T>::generate_basis(Index Nrank, const SymGroup &sym_group, const SymGroupRep &perm_group) {
1080  Index i, ns;
1081  clear();
1082 
1083  if(sym_group.size() != perm_group.size()) {
1084  std::cerr << "WARNING: Attempting to generate tensor basis, but symmetry group and permutation group are not compatible!\n";
1085  }
1086 
1087  Tensor<T> ttens(Nrank, Array<Index>(Nrank, 3), 0);
1088 
1089  for(i = 0; i < ttens.size(); i++) {
1090  Tensor<T> avtens(Nrank, Array<Index>(Nrank, 3), 0);
1091  ttens[i] = 1;
1092  for(ns = 0; ns < sym_group.size(); ns++)
1093  avtens += (sym_group[ns] * ttens).dim_permute(*(perm_group[ns]->get_permutation()));
1094  if(!this->contains(avtens))
1095  this->push_back(avtens);
1096  ttens[i] = 0;
1097  }
1098  idealize();
1099 
1100  coeffs.resize(size(), NAN); //Changed by Ivy 09/25/2012
1101  return;
1102  }
1103 
1104  //************************************************************
1105 
1106  template<class T>
1107  void TensorBasis<T>::make_orthogonal_to(const TensorBasis &ortho_basis) {
1108  Index i, j;
1109  for(i = 0; i < size(); i++) {
1110  for(j = 0; j < ortho_basis.size(); j++) {
1111  at(i) -= (at(i).scalar_prod(ortho_basis[j]) / norm(ortho_basis[j])) * ortho_basis[j];
1112  }
1113  }
1114  return;
1115  }
1116 
1117  //************************************************************
1118 
1119  template<class T>
1120  TensorBasis<T> &TensorBasis<T>::apply_sym(const SymOp &op) {
1121  for(Index i = 0; i < size(); i++) {
1122  at(i).apply_sym(op);
1123  }
1124  return *this;
1125  }
1126 
1127  //************************************************************
1128 
1129  template<class T>
1130  void TensorBasis<T>::normalize() {
1131  for(Index i = 0; i < size(); i++)
1132  at(i).normalize();
1133  return;
1134  }
1135 
1136  //************************************************************
1137 
1138  template<class T>
1139  void TensorBasis<T>::idealize() {
1140  //GaussJordan<Tensor<T>, T>::eliminate(*this);
1141 
1142  // for(Index i=0; i<size(); i++)
1143  //std::cout << "Basis Tensor " << i << ": \n" << at(i);
1144 
1145  GramSchmidt<Tensor<T>, T>::orthogonalize(*this);
1146  return;
1147  }
1148 
1149  //************************************************************
1150  template<class T>
1151  bool TensorBasis<T>::read(std::istream &stream) {
1152 
1153  char ch;
1154  Index num_elem, rank;
1155  Array<Index> dim;
1156  double tcoeff;
1157 
1158  ch = stream.peek();
1159  while(ch < '0' || ch > '9') {
1160  stream.ignore(256, '\n');
1161  ch = stream.peek();
1162  }
1163  stream >> num_elem;
1164  if(!num_elem) {
1165  stream.ignore(256, '\n');
1166  return false;
1167  }
1168  stream >> rank;
1169 
1170  dim.resize(rank);
1171 
1172  for(Index i = 0; i < rank; i++) {
1173  stream >> dim[i];
1174  }
1175 
1176 #ifdef DEBUG
1177  std::cout << "dim is " << dim << "\n";
1178 #endif //DEBUG
1179 
1180  stream.ignore(1000, '\n');
1181 
1182  coeffs.resize(num_elem, NAN);
1183 
1184  Tensor<T> ttensor(rank, dim);
1185 
1186  for(Index j = 0; j < num_elem; j++) {
1187  ttensor.redefine(dim);
1188 
1189  //Check for "<" to find "<ECI>"
1190  ch = stream.peek();
1191  while(ch != char(60)) {
1192  if(((ch < '0') || (ch > '9')) && (ch != '-')) {
1193  stream.ignore(1000, '\n');
1194  }
1195  else {
1196  break;
1197  }
1198  ch = stream.peek();
1199  }
1200 
1201  //Reading in coefficient
1202  if(((ch != char(60)) && ((ch >= '0') && (ch <= '9'))) || (ch == '-')) {
1203  stream >> tcoeff;
1204 #ifdef DEBUG
1205  std::cout << "Coefficient is " << tcoeff << "\n";
1206 #endif //DEBUG
1207  eci(j) = tcoeff;
1208  }
1209 
1210  stream.ignore(1000, '\n');
1211 
1212  ttensor.read(stream);
1213  this->push_back(ttensor);
1214  }
1215 
1216 
1217  return true;
1218  }
1219 
1221 };
1222 
1223 #endif
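// Usage sketch (illustrative addendum): building a symmetry-adapted tensor basis with the
// classes above; point_group is a hypothetical CASM::SymGroup constructed elsewhere.
//   CASM::TensorBasis<double> basis;
//   basis.generate_basis(2, point_group);
//   for(CASM::Index i = 0; i < basis.size(); i++)
//     std::cout << "Basis tensor " << i << ":\n" << basis[i];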