for (int i = 0; i < 3; i++)
  for (int j = 0; j < 3; j++) (*this)(i, j) = mat_init(i, j);
template <int dim1, int dim2, int flag1>

void read(std::istream &stream);

bool read(std::istream &stream);
template <int dim1, int dim2, int flag1>
Tensor<T> &Tensor<T>::operator=(const Eigen::Matrix<T, dim1, dim2, flag1> &LHS) {
  if (rank() != 2 || dim(0) != LHS.rows() || dim(1) != LHS.cols()) {
    tdim[0] = LHS.rows();
    tdim[1] = LHS.cols();

  for (int i = 0; i < LHS.rows(); i++) {
    for (int j = 0; j < LHS.cols(); j++) {
      at(tdim) = LHS(i, j);
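For orientation, a minimal standalone sketch of the same copy pattern, an Eigen matrix written into flat, stride-indexed storage. The helper name to_flat is illustrative only and not part of CASM.

#include <Eigen/Dense>
#include <vector>

// Copy an Eigen matrix into row-major flat storage, the same element-by-element
// pattern Tensor<T>::operator=(const Eigen::Matrix<...>&) follows above.
std::vector<double> to_flat(const Eigen::MatrixXd &m) {
  std::vector<double> flat(m.rows() * m.cols());
  for (int i = 0; i < m.rows(); i++)
    for (int j = 0; j < m.cols(); j++)
      flat[i * m.cols() + j] = m(i, j);  // linear offset from (i, j)
  return flat;
}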
  Nrank = _Ndim.size();
  max_ind.resize(Nrank);
  for (Index i = 0; i < Nrank; i++) {
    max_ind[i] = Ndim[i] - 1;

  Nrank = _Ndim.size();
  max_ind.resize(Nrank);
  for (Index i = 0; i < Nrank; i++) {
    max_ind[i] = Ndim[i] - 1;
  if (iperm.size() != Nrank) {
    throw std::runtime_error(
        "Tensor<T>::dim_permute, permutation array is incompatible with tensor "

  max_ind.permute(iperm);

  if (iperm.size() != Nrank) {
    std::cerr << "WARNING: In Tensor<T>::dim_permute, permutation array is "
                 "incompatible with tensor rank! Continuing without permuting "

  max_ind.permute(tperm);
  bool is_valid = true;
  for (Index k = 0; k < Nrank; k++) iperm[k] = k;

  while (valid_index(i) && idim[i + 1] <= idim[i]) i--;
  while (idim[j] <= idim[i]) j--;

  Ndim.swap_elem(i, j);
  max_ind.swap_elem(i, j);

  idim.swap_elem(i, j);
  Ndim.swap_elem(i, j);
  max_ind.swap_elem(i, j);
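The scan-and-swap logic above is the classic "next lexicographic permutation" step used to enumerate index orderings. A self-contained illustration with the standard library, not the CASM implementation:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> dims{0, 1, 2};  // index labels to permute
  do {
    std::printf("%d %d %d\n", dims[0], dims[1], dims[2]);
  } while (std::next_permutation(dims.begin(), dims.end()));
}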
  if (idim.is_ascending()) return;

  Tensor ttens(Nrank, Ndim);

    ttens(icount()) = at(icount());
  } while ((++icount).valid());

  for (Index i = 0; i < tunique.size(); i++) tunique[i].sort();

  Index iset(0), i(0), j(0), nperm(1);

  while (iset < tunique.size()) {
    for (i = 0; i < tunique.size(); i++)
      for (j = 0; j < tunique[i].size(); j++)
        tperm[unique_dim[i][j]] = tunique[i][j];

  (*this) /= double(nperm);
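permute_symmetrize averages the tensor over permutations of equivalent indices and divides by the permutation count (nperm). For rank 2 this reduces to (A + Aᵀ)/2; a hypothetical Eigen sketch of that special case:

#include <Eigen/Dense>

// Rank-2 special case of permutation-symmetrization: average over both
// index orderings and divide by the count (nperm = 2).
Eigen::Matrix3d permute_symmetrize_2(const Eigen::Matrix3d &A) {
  return (A + A.transpose()) / 2.0;
}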
  for (Index i = 0; i < Nrank; i++) lin_index += inds[i] * idim[i];
  return at(lin_index);

  for (Index i = 0; i < Nrank; i++) lin_index += inds[i] * idim[i];
  return at(lin_index);

  for (Index i = 0; i < Nrank; i++) lin_index += inds[i] * idim[i];
  return at(lin_index);

  for (Index i = 0; i < Nrank; i++) lin_index += inds[i] * idim[i];
  return at(lin_index);

  for (Index i = 0; i < Nrank; i++) lin_index += inds[i] * idim[i];
  return at(lin_index);

  return at(i * idim[0] + j * idim[1]);

  return at(i * idim[0] + j * idim[1]);
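Each accessor above maps a multi-index to one linear offset by summing index times stride (the idim array). A self-contained sketch of that mapping, assuming row-major strides; the helper names are illustrative:

#include <cstddef>
#include <vector>

// Row-major strides for a given shape.
std::vector<std::size_t> strides(const std::vector<std::size_t> &shape) {
  std::vector<std::size_t> s(shape.size(), 1);
  for (std::size_t d = shape.size(); d-- > 1;) s[d - 1] = s[d] * shape[d];
  return s;
}

// Flat offset of a multi-index given its per-dimension strides.
std::size_t flat_index(const std::vector<std::size_t> &inds,
                       const std::vector<std::size_t> &s) {
  std::size_t lin = 0;
  for (std::size_t d = 0; d < inds.size(); d++) lin += inds[d] * s[d];
  return lin;
}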
  if (Ndim != RHS.Ndim) {
    std::cerr << "Attempting to add two incompatible tensors!\n";

  if (idim == RHS.idim) {
    for (Index i = 0; i < size(); i++) at(i) += RHS[i];

    at(icount()) += RHS(icount());
  } while ((++icount).valid());

  if (Ndim != RHS.Ndim) {
    std::cerr << "Attempting to subtract two incompatible tensors!\n";

  if (idim == RHS.idim) {
    for (Index i = 0; i < size(); i++) at(i) -= RHS[i];

    at(icount()) -= RHS(icount());
  } while ((++icount).valid());
  for (Index i = 0; i < size(); i++) at(i) *= RHS;

  for (Index i = 0; i < size(); i++) at(i) /= RHS;

  for (Index i = 0; i < ttens.size(); i++) ttens[i] = -at(i);
  if (Ndim != RHS.Ndim) {
        << "Attempting to take scalar product of two incompatible tensors!\n";

  if (idim == RHS.idim) {
    for (Index i = 0; i < size(); i++) tprod += at(i) * RHS[i];

    tprod += at(icount()) * RHS(icount());
  } while ((++icount).valid());
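When the two tensors share the same stride layout, the scalar product is simply an elementwise sum of products over the flat storage. A minimal sketch of that fast path (illustrative helper; assumes equal-length stores):

#include <numeric>
#include <vector>

// Elementwise scalar product of two flat tensor stores with identical layout.
double scalar_prod(const std::vector<double> &a, const std::vector<double> &b) {
  return std::inner_product(a.begin(), a.end(), b.begin(), 0.0);
}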
      for (i = 0; i < icount().size(); i++)
        tot_ind += icount[i] * ttens.idim[i];

      for (j = 0; j < jcount().size(); j++)
        tot_ind += jcount[j] * ttens.idim[i + j];

      ttens[tot_ind] = at(icount()) * RHS(jcount());
    } while ((++jcount).valid());
  } while ((++icount).valid());
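tensor_prod forms the outer product: each result element is the product of one element from each factor, with the two multi-indices concatenated into one. The rank-1 by rank-1 case in Eigen, as an illustration only:

#include <Eigen/Dense>

// Outer product of two vectors: result(i, j) = a(i) * b(j),
// the rank-1 x rank-1 case of Tensor::tensor_prod.
Eigen::MatrixXd outer(const Eigen::VectorXd &a, const Eigen::VectorXd &b) {
  return a * b.transpose();
}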
  return (*this) /= sqrt(scalar_prod(*this));

  for (Index i = 0; i < tdim; i++) ttens(Array<Index>(trank, i)) = 1;
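identity fills a rank-trank tensor with 1 wherever all indices coincide, i.e. a generalized Kronecker delta. The rank-2 case is just the identity matrix; a trivial Eigen sketch with an illustrative helper name:

#include <Eigen/Dense>

// Rank-2 case of Tensor::identity: the Kronecker delta / identity matrix.
Eigen::MatrixXd identity_rank2(int tdim) {
  return Eigen::MatrixXd::Identity(tdim, tdim);
}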
  if (!rank()) return *this;

      for (nd = 0; nd < Nrank; nd++)
        tval *= (*rep_mats[nd])(icount[nd], jcount[nd]);
      ttens(icount()) += tval;
    } while ((++jcount).valid());
  } while ((++icount).valid());

  if (!rank()) return *this;

    for (nd = 0; nd < Nrank; nd++)
      tval *= (*rep_mats[nd])(icount[nd], inner_ind[nd]);
    at(icount()) += tval;
  } while ((++icount).valid());

  if (!rank()) return *this;

      for (nd = 0; nd < Nrank; nd++)
        tval *= op.matrix()(icount[nd], jcount[nd]);
      ttens(icount()) += tval;
    } while ((++jcount).valid());
  } while ((++icount).valid());

  } while ((++icount).valid());

  if (!rank()) return *this;

      for (nd = 0; nd < Nrank; nd++) tval *= mat(icount[nd], jcount[nd]);
      ttens(icount()) += tval;
    } while ((++jcount).valid());
  } while ((++icount).valid());
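transform, apply_sym, and symmetrize_index all apply one matrix per tensor index (a mode-by-mode multilinear transform). For rank 2 this collapses to the congruence T' = M T Mᵀ; an Eigen sketch of that special case, not CASM's general loop:

#include <Eigen/Dense>

// Rank-2 special case of Tensor::transform / apply_sym:
// apply the same 3x3 matrix to both tensor indices.
Eigen::Matrix3d transform_rank2(const Eigen::Matrix3d &T,
                                const Eigen::Matrix3d &M) {
  return M * T * M.transpose();
}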
  std::cerr << "WARNING: Attempting to assign Eigen::Matrix object using a "
            << "!\nTensor must have rank=2 for assignment to be valid. "

  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> tmat(dim(0), dim(1));
  for (Index i = 0; i < tmat.rows(); i++) {
    for (Index j = 0; j < tmat.cols(); j++) {
      tmat(i, j) = (*this)(i, j);
  std::ios::fmtflags old_flags = stream.flags();
  stream.flags(std::ios::showpoint | std::ios::fixed | std::ios::right);
  int twide = stream.width();

    stream << RHS[0] << '\n';
    stream.flags(old_flags);

    if (RHS.rank() > 2 && icount[0] == 0 && icount[1] == 0) {
      stream << "Block (:,:," << icount[2];
      for (Index j = 3; j < RHS.rank(); j++) stream << "," << icount[j];

    stream << std::setw(0) << " " << std::setw(twide) << RHS(icount());
    if (icount[0] == RHS.ind_max()[0]) {

  } while ((++icount).valid());
  stream.flags(old_flags);
  std::cerr << "WARNING: Input stream operator has not been implemented for "
               "Tensor class! Exiting...\n";
  for (i = 0; i < ttens.size(); i++) {

    for (ns = 0; ns < sym_group.size(); ns++) avtens += sym_group[ns] * ttens;
    if (!this->contains(avtens)) this->push_back(avtens);

  coeffs.resize(size(), NAN);

  if (!sym_group.size()) {
    std::cerr << "WARNING: Attempting to generate tensor basis from an invalid "
                 "symmetry group. Exiting...";

  for (i = 0; i < ttens.size(); i++) {

    for (ns = 0; ns < sym_group.size(); ns++) avtens += sym_group[ns] * ttens;
    if (!contains(avtens)) push_back(avtens);

  coeffs.resize(size(), NAN);

  if (sym_group.size() != perm_group.size()) {
    std::cerr << "WARNING: Attempting to generate tensor basis, but symmetry "
                 "group and permutation group are not compatible!\n";

  for (i = 0; i < ttens.size(); i++) {

    for (ns = 0; ns < sym_group.size(); ns++)
      avtens += (sym_group[ns] * ttens)
                    .dim_permute(*(perm_group[ns]->get_permutation()));
    if (!this->contains(avtens)) this->push_back(avtens);

  coeffs.resize(size(), NAN);
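Each generate_basis overload averages a candidate tensor over the whole symmetry group (a Reynolds-operator projection onto the invariant subspace) and keeps only the distinct, non-zero results as basis tensors. A matrix-level sketch of that averaging, with a hypothetical helper name:

#include <Eigen/Dense>
#include <vector>

// Project a rank-2 tensor onto the subspace invariant under a point group:
// average M * T * M^T over all group operations M (Reynolds operator).
Eigen::Matrix3d group_average(const Eigen::Matrix3d &T,
                              const std::vector<Eigen::Matrix3d> &ops) {
  Eigen::Matrix3d avg = Eigen::Matrix3d::Zero();
  for (const auto &M : ops) avg += M * T * M.transpose();
  return avg / double(ops.size());
}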
  for (i = 0; i < size(); i++) {
    for (j = 0; j < ortho_basis.size(); j++) {
      at(i) -= (at(i).scalar_prod(ortho_basis[j]) / norm(ortho_basis[j])) *
  for (Index i = 0; i < size(); i++) {

  for (Index i = 0; i < size(); i++) at(i).normalize();
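make_orthogonal_to is the projection step of Gram-Schmidt: each member of the basis has its component along every tensor of ortho_basis subtracted, and normalize() then rescales the survivors. A vector-space sketch with Eigen (hypothetical helper; assumes the basis vectors are non-zero):

#include <Eigen/Dense>
#include <vector>

// Remove from v its component along each vector in basis, then renormalize.
// Mirrors TensorBasis::make_orthogonal_to followed by normalize().
Eigen::VectorXd orthogonalize(Eigen::VectorXd v,
                              const std::vector<Eigen::VectorXd> &basis) {
  for (const auto &u : basis) v -= (v.dot(u) / u.dot(u)) * u;
  return v.normalized();
}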
  Index num_elem, rank;

  while (ch < '0' || ch > '9') {
    stream.ignore(256, '\n');

  stream.ignore(256, '\n');

  for (Index i = 0; i < rank; i++) {

  stream.ignore(1000, '\n');

  coeffs.resize(num_elem, NAN);

  for (Index j = 0; j < num_elem; j++) {

    while (ch != char(60)) {
      if (((ch < '0') || (ch > '9')) && (ch != '-')) {
        stream.ignore(1000, '\n');
    if (((ch != char(60)) && ((ch >= '0') && (ch <= '9'))) || (ch == '-')) {

      stream.ignore(1000, '\n');

      ttensor.read(stream);
      this->push_back(ttensor);
Basic std::vector-like container (deprecated)
Tensor< T > & at(Index ind)
void swap_elem(Index i, Index j)
A Counter allows looping over many incrementing variables in one loop.
void reset()
Reset the current value of the Counter to the initial value.
const std::vector< Index > & perm_array() const
ReturnTensor(Tensor< T > &init_tens)
ReturnTensor & operator=(Tensor< T > &RHS)
SymGroup is a collection of symmetry operations that satisfy the group property. The symmetry operatio...
SymGroupRep is an alternative representation of a SymGroup for something other than real space....
SymOp is the Coordinate representation of a symmetry operation; it keeps fractional (FRAC) and Cartesian...
const matrix_type & matrix() const
Const access of entire cartesian symmetry matrix.
Array< Eigen::MatrixXd const * > get_matrix_reps(Array< SymGroupRepID > _rep_IDs) const
double eci(Index i) const
Tensor(const Eigen::Matrix< T, 3, 3 > &mat_init)
Tensor slice(Array< Index > slice_Ndim, Array< Index > slice_ind)
Tensor & operator=(ReturnTensor< T > &RHS)
Tensor(Index tNrank, Array< Index > tNdim, T init_val)
Tensor(Index tNrank, Array< Index > tNdim)
Tensor & operator=(const T &RHS)
Tensor & operator=(const Eigen::Matrix< T, dim1, dim2, flag1 > &LHS)
Tensor(ReturnTensor< T > &init_cont)
void swap(Array< T > &RHS)
Array< T > array_cat(const Array< T > &A1, const Array< T > &A2)
Array & ipermute(const Array< Index > &perm_array)
void sort(const CompareType &comp)
void generate_basis(Index Nrank, const SymGroup &sym_group)
void redefine(Array< Index > _Ndim)
Tensor & symmetrize_index(const SymOp &op, Array< SymGroupRepID > rep_IDs, Array< Index > inner_ind)
const Array< Index > & dim() const
T scalar_prod(const Tensor &RHS) const
ReturnTensor< T > operator+(const Tensor &RHS)
Tensor & operator+=(const Tensor &RHS)
T & operator()(const Array< Index > &inds)
Tensor & operator/=(T RHS)
Tensor & transform(const Eigen::MatrixXd &op)
Counter< Array< Index > > element_counter() const
T dot(const Tensor< T > &LHS, const Tensor< T > &RHS)
static ReturnTensor< T > identity(Index trank, Index tdim)
Eigen::Matrix< T, Eigen::Dynamic, Eigen::Dynamic > convert_to_Eigen() const
const Array< Index > & ind_max() const
ReturnTensor< T > tensor_prod(const Tensor &RHS)
const Array< Index > & mult_array() const
void make_orthogonal_to(const TensorBasis &ortho_basis)
void read(std::istream &stream)
Tensor & apply_sym(const SymOp &op)
Tensor & operator*=(T RHS)
void permute_symmetrize()
Symmetrize tensor with respect to index permutation.
Tensor & operator-=(const Tensor &RHS)
ReturnTensor< T > operator-()
T & at(const Array< Index > &inds)
Tensor & dim_ipermute(const Array< Index > &perm)
Tensor & dim_permute(const Array< Index > &iperm)
bool read(std::istream &stream)
const T & get(const Array< Index > &inds) const
TensorBasis & apply_sym(const SymOp &op)
T norm(const Tensor< T > &ttens)
std::ostream & operator<<(std::ostream &_stream, const FormattedPrintable &_formatted)
std::istream & operator>>(std::istream &_in, std::vector< T > &vec)
void swap(ConfigDoF &A, ConfigDoF &B)
BasisSet operator*(const SymOp &LHS, const BasisSet &RHS)
T min(const T &A, const T &B)
bool contains(const Container &container, const T &value)
Equivalent to container.end() != std::find(container.begin(), container.end(), value)
bool valid_index(Index i)
INDEX_TYPE Index
For long integer indexing:
T max(const T &A, const T &B)