#define TENSOR_DIM (m_input_tensor.dimensions())
#define TENSOR_NUMEL (m_input_tensor.numel())

virtual MAT update(const int mode) = 0;
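// update(mode) is the algorithm-specific step: each concrete NTF solver derives
// from AUNTF and returns the new factor for the given mode, computed from the
// gram_without_one and ncp_mttkrp_t[mode] workspaces prepared by the solve loop.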
const int m_low_rank_k;
bool m_enable_dim_tree;
std::vector<bool> m_stale_mttkrp;
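// update_factor_mode() installs a newly computed factor for one mode and keeps
// the cached quantities consistent: once a factor changes, the MTTKRPs of the
// other modes no longer match the current factors.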
void update_factor_mode(const int &current_mode, const MAT &factor) {
  m_ncp_factors.set(current_mode, factor);
  if (m_enable_dim_tree) {
    MAT temp = m_ncp_factors.factor(current_mode).t();
    // push the transposed factor into the dimension tree (via set_factor)
  }
  int num_modes = this->m_input_tensor.modes();
  for (int mode = 0; mode < num_modes; mode++) {
    // every other mode's cached MTTKRP is now out of date
    if (mode != current_mode) this->m_stale_mttkrp[mode] = true;
  }
}
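// accelerate() is an optional per-iteration hook invoked when m_accelerated is
// set; the base class implementation is a no-op and subclasses may override it.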
virtual void accelerate() {}
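// The constructor sizes the per-mode workspaces (one KRP and one MTTKRP result
// per mode) and marks every mode's MTTKRP as stale so that the first iteration
// computes them from scratch.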
AUNTF(const planc::Tensor &i_tensor, const int i_k, algotype i_algo)
    : m_ncp_factors(i_tensor.dimensions(), i_k, false),
      m_input_tensor(i_tensor),
      m_low_rank_k(i_k) {
  gram_without_one.zeros(i_k, i_k);
  ncp_krp = new MAT[i_tensor.modes()];
  ncp_mttkrp_t = new MAT[i_tensor.modes()];
  for (int i = 0; i < i_tensor.modes(); i++) {
    // rows of the mode-i KRP: the product of all tensor dimensions except mode i
    UWORD current_size = TENSOR_NUMEL / TENSOR_DIM[i];
    ncp_krp[i].zeros(current_size, i_k);
    this->m_stale_mttkrp.push_back(true);
  }
  m_compute_error = false;
  m_normA = i_tensor.norm();
  this->m_enable_dim_tree = false;
}
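// The destructor releases the per-mode workspaces and the low-rank
// reconstruction tensor used for error computation.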
~AUNTF() {
  for (int i = 0; i < m_input_tensor.modes(); i++) {
    ncp_krp[i].clear();
    ncp_mttkrp_t[i].clear();
  }
  delete[] ncp_krp;
  delete[] ncp_mttkrp_t;
  if (this->m_enable_dim_tree) {
    // free the dimension-tree structure
  }
  delete lowranktensor;
}
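// dim_tree() toggles the dimension-tree MTTKRP path, which reuses partial
// tensor-times-vector results across modes instead of forming the full KRP.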
void dim_tree(bool i_dim_tree) {
  this->m_enable_dim_tree = i_dim_tree;
  // when enabled, a dimension tree is constructed with the modes split at
  // m_input_tensor.modes() / 2
}
void num_it(const int i_n) { this->m_num_it = i_n; }
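// Main ALS loop: for every outer iteration and every mode j, form the
// leave-one-out gram and MTTKRP, ask the subclass for the new factor via
// update(j), and install it with update_factor_mode().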
void computeNTF() {
  for (m_current_it = 0; m_current_it < m_num_it; m_current_it++) {
    INFO << "iter::" << this->m_current_it << std::endl;
    for (int j = 0; j < this->m_input_tensor.modes(); j++) {
      // Hadamard product of the factor grams, leaving out mode j.
      m_ncp_factors.gram_leave_out_one(j, &gram_without_one);
      INFO << "gram_without_" << j << "::" << arma::cond(gram_without_one)
           << gram_without_one << std::endl;
      if (this->m_stale_mttkrp[j]) {
        // Khatri-Rao product of all factors except mode j.
        ncp_krp[j] = m_ncp_factors.krp_leave_out_one(j);
        INFO << "krp_leave_out_" << j << std::endl << ncp_krp[j] << std::endl;
        if (this->m_enable_dim_tree) {
          double multittv_time = 0;
          double mttkrp_time = 0;
          // MTTKRP computed through the dimension tree via
          // in_order_reuse_MTTKRP(j, ..., multittv_time, mttkrp_time);
        } else {
          m_input_tensor.mttkrp(j, ncp_krp[j], &ncp_mttkrp_t[j]);
        }
        this->m_stale_mttkrp[j] = false;
        INFO << "mttkrp for factor" << j << std::endl
             << ncp_mttkrp_t[j] << std::endl;
      }
      MAT factor = update(j);
      INFO << "iter::" << this->m_current_it << "::factor:: " << j << std::endl
           << factor << std::endl;
      update_factor_mode(j, factor.t());
    }
    if (m_compute_error) {
      double temp_err = computeObjectiveError();
      this->m_rel_error = temp_err;
      INFO << "relative_error at it::" << this->m_current_it
           << "::" << temp_err << std::endl;
    }
    if (this->m_accelerated) accelerate();
  }
  INFO << "ncp factors" << std::endl;
  m_ncp_factors.print();
}
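// Configuration helpers. Note that enabling acceleration also turns on error
// computation, presumably so accelerate() can monitor the objective.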
void accelerated(const bool &set_acceleration) {
  this->m_accelerated = set_acceleration;
  this->m_compute_error = true;
}
bool is_stale_mttkrp(const int &current_mode) const {
  return this->m_stale_mttkrp[current_mode];
}
void reset(const NCPFactors &new_factors, bool trans = false) {
  int m_modes = this->m_input_tensor.modes();
  if (!trans) {
    for (int i = 0; i < m_modes; i++) {
      update_factor_mode(i, new_factors.factor(i));
    }
  } else {
    for (int i = 0; i < m_modes; i++) {
      update_factor_mode(i, new_factors.factor(i).t());
    }
  }
}
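// Error computation: the rank-k reconstruction is formed explicitly from the
// mode-0 factor (rescaled by lambda) and the mode-0 KRP, and compared against
// the input tensor.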
double computeObjectiveError() {
  int m = m_ncp_factors.factor(0).n_rows;
  int n = ncp_krp[0].n_rows;
  int k = m_ncp_factors.factor(0).n_cols;
  // dgemm arguments: C = alpha * A * B^T + beta * C, with A = unnorm_fac (m x k)
  // and B = ncp_krp[0] (n x k), both column major
  char nt = 'N', t = 'T';
  double alpha = 1.0, beta = 0.0;
  int lda = m, ldb = n, ldc = m;
  MAT unnorm_fac =
      m_ncp_factors.factor(0) * arma::diagmat(m_ncp_factors.lambda());
  dgemm_(&nt, &t, &m, &n, &k, &alpha, unnorm_fac.memptr(), &lda,
         ncp_krp[0].memptr(), &ldb, &beta, &lowranktensor->m_data[0], &ldc);
  double err = m_input_tensor.err(*lowranktensor);
  err = std::sqrt(err / this->m_normA);
  return err;
}
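// Overload used when the caller supplies factors in transposed layout: install
// them first, then reuse the plain error computation.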
double computeObjectiveError(const NCPFactors &new_factors_t) {
  reset(new_factors_t, true);
  return computeObjectiveError();
}
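// A minimal usage sketch, kept as a comment since the class body continues past
// this fragment. It assumes a concrete subclass -- hypothetically named
// MyNTFUpdate here -- that implements update(mode), and that the solve loop
// above is exposed as computeNTF().
//
//   // given a planc::Tensor my_tensor and an algotype value algo:
//   MyNTFUpdate solver(my_tensor, /*i_k=*/10, algo);
//   solver.num_it(30);             // number of outer ALS iterations
//   solver.compute_error(true);    // track the relative error per iteration
//   solver.dim_tree(true);         // optional: dimension-tree MTTKRP
//   solver.computeNTF();
//   planc::NCPFactors &result = solver.ncp_factors();
//   result.print();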
#endif  // NTF_AUNTF_HPP_

Referenced members and their brief descriptions:

double current_error() const
NCPFactors & ncp_factors()
    Data is stored such that the unfolding is column major.
void print()
    Prints the entire NCPFactors, including the factor matrices.
void accelerated(const bool &set_acceleration)
void gram_leave_out_one(const unsigned int i_n, MAT *o_UtU)
    Returns the Hadamard product of the factor grams, except i_n.
double computeObjectiveError(const NCPFactors &new_factors_t)
double computeObjectiveError()
void set_factor(const double *arma_factor_ptr, const long int mode)
void normalize()
    Only during initialization. Resets all lambda.
double err(const Tensor &b) const
    Computes the squared error with the input tensor.
std::vector< double > m_data
void num_it(const int i_n)
void mttkrp(const int i_n, const MAT &i_krp, MAT *o_mttkrp) const
    The size of the KRP must be (product of all dimensions leaving out n) x k.
MAT krp_leave_out_one(const unsigned int i_n)
    KRP leaving out the mode i_n.
void dim_tree(bool i_dim_tree)
VEC lambda() const
    Returns the lambda vector.
void in_order_reuse_MTTKRP(long int n, double *out, bool colmajor, double &multittv_time, double &mttkrp_time)
void set(const int i_n, const MAT &i_factor)
    Sets the mode i_n with the given factor matrix.
void set_lambda(const VEC &new_lambda)
    Sets the lambda vector.
int modes() const
    Returns the number of modes; it is a scalar value.
AUNTF(const planc::Tensor &i_tensor, const int i_k, algotype i_algo)
UVEC dimensions() const
    Returns a vector of dimensions on every mode.
double norm() const
    Returns the Frobenius norm of the tensor.
void reset(const NCPFactors &new_factors, bool trans=false)
    ncp_factors contains the factors of the NCP; every ith factor is of size n_i * k; number of factors is ...
void compute_error(bool i_error)
MAT & factor(const int i_n) const
    Factor matrix of a mode i_n.
bool is_stale_mttkrp(const int &current_mode) const