KratosMultiphysics
KRATOS Multiphysics (Kratos) is a framework for building parallel, multi-disciplinary simulation software, aiming at modularity, extensibility, and high performance. Kratos is written in C++ and provides an extensive Python interface.
mvqn_convergence_accelerator.hpp
1 // | / |
2 // ' / __| _` | __| _ \ __|
3 // . \ | ( | | ( |\__ `
4 // _|\_\_| \__,_|\__|\___/ ____/
5 // Multi-Physics
6 //
7 // License: BSD License
8 // Kratos default license: kratos/license.txt
9 //
10 // Main author: Ruben Zorrilla
11 //
12 
13 #if !defined(KRATOS_MVQN_CONVERGENCE_ACCELERATOR)
14 #define KRATOS_MVQN_CONVERGENCE_ACCELERATOR
15 
16 // System includes
17 
18 // External includes
19 
20 // Project includes
23 #include "utilities/svd_utils.h"
25 
26 // Application includes
29 
30 namespace Kratos
31 {
34 
38 
42 
46 
50 
56 template<class TSparseSpace, class TDenseSpace>
57 class MVQNFullJacobianConvergenceAccelerator: public ConvergenceAccelerator<TSparseSpace, TDenseSpace>
58 {
59 public:
60 
64 
65  typedef ConvergenceAccelerator<TSparseSpace, TDenseSpace> BaseType;
66  typedef typename BaseType::Pointer BaseTypePointer;
67 
68  typedef typename BaseType::DenseVectorType VectorType;
69  typedef typename BaseType::DenseVectorPointerType VectorPointerType;
70 
71  typedef typename BaseType::DenseMatrixType MatrixType;
72  typedef typename BaseType::DenseMatrixPointerType MatrixPointerType;
73 
77 
83 
90  explicit MVQNFullJacobianConvergenceAccelerator(Parameters rConvAcceleratorParameters)
91  {
92  rConvAcceleratorParameters.ValidateAndAssignDefaults(GetDefaultParameters());
93  mOmega_0 = rConvAcceleratorParameters["w_0"].GetDouble();
94  mAbsCutOff = rConvAcceleratorParameters["abs_cut_off_tol"].GetDouble();
95  mUsedInBlockNewtonEquations = rConvAcceleratorParameters["interface_block_newton"].GetBool();
96  }
97 
103  explicit MVQNFullJacobianConvergenceAccelerator(const double AbsCutOff)
104  : mOmega_0(0.0)
105  , mAbsCutOff(AbsCutOff)
106  , mUsedInBlockNewtonEquations(true)
107  {
108  }
109 
114  // Required to build the IBQN-MVQN
115  MVQNFullJacobianConvergenceAccelerator(const MVQNFullJacobianConvergenceAccelerator& rOther);
116 
120  virtual ~MVQNFullJacobianConvergenceAccelerator(){}
121 
125 
129 
134  void InitializeSolutionStep() override
135  {
136  KRATOS_TRY;
137 
138  mConvergenceAcceleratorIteration = 0;
139 
140  KRATOS_CATCH( "" );
141  }
142 
150  void UpdateSolution(
151  const VectorType& rResidualVector,
152  VectorType& rIterationGuess) override
153  {
154  KRATOS_TRY;
155 
156  // Update observation matrices and inverse Jacobian approximation
157  UpdateInverseJacobianApproximation(rResidualVector, rIterationGuess);
158 
159  // Update the iteration guess values
160  UpdateIterationGuess(rIterationGuess);
161 
162  KRATOS_CATCH( "" );
163  }
164 
169  void FinalizeNonLinearIteration() override
170  {
171  KRATOS_TRY;
172 
173  //TODO: I THINK THAT IN HERE WE CAN CLEAR THE MATRIX M
174 
175  // Variables update
176  mpIterationValue_0 = mpIterationValue_1;
177  mpResidualVector_0 = mpResidualVector_1;
178  mConvergenceAcceleratorIteration += 1;
179 
180  KRATOS_CATCH( "" );
181  }
182 
187  void FinalizeSolutionStep() override
188  {
189  KRATOS_TRY;
190 
191  // Update previous time step Jacobian as the last iteration Jacobian.
192  // Note that it is required to check if the last iteration Jacobian exists. There is a corner case (if the
193  // very first time step only requires the preliminary fixed point relaxation iteration to converge, the
194  // previous iteration Jacobian is not needed) that might leave a nullptr as previous step Jacobian.
195  if (mpJac_k1) {
196  mpJac_n = mpJac_k1;
197  }
198 
199  // Clear the observation matrices for the next step
200  mpObsMatrixV = nullptr;
201  mpObsMatrixW = nullptr;
202 
203  KRATOS_CATCH( "" );
204  }
205 
206  virtual Parameters GetDefaultParameters() const
207  {
208  Parameters mvqn_default_parameters(R"({
209  "solver_type" : "MVQN",
210  "w_0" : 0.825,
211  "abs_cut_off_tol" : 1e-8,
212  "interface_block_newton" : false
213  })");
214 
215  return mvqn_default_parameters;
216  }
217 
221 
225 
229 
233 
234  friend class IBQNMVQNConvergenceAccelerator<TSparseSpace, TDenseSpace>;
235  friend class IBQNMVQNRandomizedSVDConvergenceAccelerator<TSparseSpace, TDenseSpace>;
236 
238 protected:
241 
248  virtual void UpdateInverseJacobianApproximation(
249  const VectorType& rResidualVector,
250  const VectorType& rIterationGuess)
251  {
252  // Append and check the singularity of the current iteration information
253  AppendCurrentIterationInformation(rResidualVector, rIterationGuess);
254 
255  // Update the inverse Jacobian approximation
256  CalculateInverseJacobianApproximation();
257  }
258 
264  void UpdateIterationGuess(VectorType& rIterationGuess)
265  {
266  if (mConvergenceAcceleratorFirstCorrectionPerformed == false) {
267  // The very first correction of the problem is done with a fixed point iteration
268  TSparseSpace::UnaliasedAdd(rIterationGuess, mOmega_0, *mpResidualVector_1);
269  mConvergenceAcceleratorFirstCorrectionPerformed = true;
270  } else {
271  // Perform the correction
272  // In the first iterations, the correction is performed with previous step Jacobian (stored in mpJac_k1)
273  // In the subsequent iterations, the previous step Jacobian has been updated with the observation matrices
274  VectorType AuxVec(mProblemSize);
275  CalculateCorrectionWithJacobian(AuxVec);
276  TSparseSpace::UnaliasedAdd(rIterationGuess, -1.0, AuxVec);
277  }
278  }
279 
289  void AppendCurrentIterationInformation(
290  const VectorType& rResidualVector,
291  const VectorType& rIterationGuess)
292  {
293  // Initialize the problem size with the first residual
294  if (mProblemSize == 0 ) {
295  mProblemSize = TSparseSpace::Size(rResidualVector);
296  }
297 
298  // Update the current iteration and residual vector pointers
299  VectorPointerType pAuxResidualVector(new VectorType(rResidualVector));
300  VectorPointerType pAuxIterationGuess(new VectorType(rIterationGuess));
301  std::swap(mpResidualVector_1, pAuxResidualVector);
302  std::swap(mpIterationValue_1, pAuxIterationGuess);
303 
304  if (mConvergenceAcceleratorIteration != 0) {
305  // Store current observation information
306  StoreDataColumns();
307 
308  // Checks if the latest information is relevant. If not, the new columns are dropped
309  // This also computes the trans(V)*V inverse matrix required for the Jacobian approximation
310  CheckCurrentIterationInformationSingularity();
311  }
312  }
313 
319  void StoreDataColumns()
320  {
321  if (mConvergenceAcceleratorIteration == 1) {
322  // Resize and initialize the observation matrices
323  InitializeDataColumns();
324  } else {
325  // Reshape the existing observation matrices
326  const std::size_t n_old_cols = TDenseSpace::Size2(*mpObsMatrixV);
327  if (n_old_cols < mProblemSize) {
328  AppendDataColumns();
329  } else {
330  DropAndAppendDataColumns();
331  }
332  }
333  }
334 
341  void CheckCurrentIterationInformationSingularity()
342  {
343  // Compute the matrix trans(V)*V
344  const auto& r_V = *mpObsMatrixV;
345  std::size_t data_cols = TDenseSpace::Size2(r_V);
346  MatrixType transV_V(data_cols, data_cols);
347  noalias(transV_V) = prod(trans(r_V), r_V);
348 
349  // Perform the auxiliary matrix trans(V)*V SVD decomposition
350  MatrixType u_svd; // Orthogonal matrix (m x m)
351  MatrixType w_svd; // Rectangular diagonal matrix (m x n)
352  MatrixType v_svd; // Orthogonal matrix (n x n)
353  std::string svd_type = "Jacobi"; // SVD decomposition type
354  const double svd_rel_tol = 1.0e-6; // Relative tolerance of the SVD decomposition (it will be multiplied by the input matrix norm)
355  const std::size_t max_iter = 500; // Maximum number of iterations of the Jacobi SVD decomposition
356  SVDUtils<double>::SingularValueDecomposition(transV_V, u_svd, w_svd, v_svd, svd_type, svd_rel_tol, max_iter);
357 
358  // Get the maximum and minimum singular values of V
359  // Note that the eigenvalues of trans(V)*V are the squares of the singular values of V
360  double max_eig_V = 0.0;
361  double min_eig_V = std::numeric_limits<double>::max();
362  for (std::size_t i_col = 0; i_col < data_cols; ++i_col){
363  const double i_eigval = std::sqrt(w_svd(i_col,i_col));
364  if (max_eig_V < i_eigval) {
365  max_eig_V = i_eigval;
366  } else if (min_eig_V > i_eigval) {
367  min_eig_V = i_eigval;
368  }
369  }
370 
371  // Check if the current information is relevant
372  if (min_eig_V < mAbsCutOff * max_eig_V) {
373  KRATOS_WARNING("MVQNFullJacobianConvergenceAccelerator")
374  << "Dropping new observation columns information. Residual observation matrix min. eigval.: " << min_eig_V << " (tolerance " << mAbsCutOff * max_eig_V << ")" << std::endl;
375  // Drop the observation matrices last column
376  this->DropLastDataColumn();
377  // Update the number of columns
378  --data_cols;
379  // Recompute trans(V)*V
380  transV_V.resize(data_cols, data_cols, false);
381  const auto& r_new_V = *(pGetResidualObservationMatrix());
382  noalias(transV_V) = prod(trans(r_new_V),r_new_V);
383  // Recompute the SVD for the matrix pseudo-inverse calculation
384  SVDUtils<double>::SingularValueDecomposition(transV_V, u_svd, w_svd, v_svd, svd_type, svd_rel_tol);
385  }
386 
387  // Calculate and save the residual observation matrices pseudo-inverse
388  // This will be used later on for the inverse Jacobian approximation
389  // Note that we take advantage of the fact that the matrix is always squared
390  // MatrixType aux2_inv = ZeroMatrix(data_cols, data_cols);
391  MatrixPointerType p_aux_inv(new MatrixType(data_cols, data_cols, 0.0));
392  auto& r_aux_inv = *p_aux_inv;
393  for (std::size_t i = 0; i < data_cols; ++i) {
394  for (std::size_t j = 0; j < data_cols; ++j) {
395  const double aux = v_svd(j,i) / w_svd(j,j);
396  for (std::size_t k = 0; k < data_cols; ++k) {
397  r_aux_inv(i,k) += aux * u_svd(k,j);
398  }
399  }
400  }
401  std::swap(mpVtransVPseudoInv, p_aux_inv);
402  }
403 
409  void CalculateInverseJacobianApproximation()
410  {
411  if (!mJacobiansAreInitialized) {
412  // Initialize the Jacobian matrices
413  // This method already considers if the current MVQN accelerator is applied in a block Newton iteration
414  InitializeJacobianMatrices();
415  mJacobiansAreInitialized = true;
416  } else {
417  // Update the current Jacobian matrix if there are already observations (from iteration 1)
418  // If there are no observations, the previous step Jacobian will be used to calculate the update
419  UpdateCurrentJacobianMatrix();
420  }
421  }
422 
428  virtual void CalculateCorrectionWithJacobian(VectorType& rCorrection)
429  {
430  TDenseSpace::Mult(*mpJac_k1, *mpResidualVector_1, rCorrection);
431  }
432 
437  virtual void UpdateCurrentJacobianMatrix()
438  {
439  // Compute the current inverse Jacobian approximation
440  if (mpObsMatrixV != nullptr && mpObsMatrixW != nullptr) {
441  // If there are already observations use these in the Jacobian update formula
442  const auto& r_V = *mpObsMatrixV;
443  const auto& r_W = *mpObsMatrixW;
444  const std::size_t n_dofs = GetProblemSize();
445  const std::size_t data_cols = TDenseSpace::Size2(r_V);
446  MatrixType aux1(n_dofs, data_cols);
447  MatrixType aux2(n_dofs, data_cols);
448  noalias(aux1) = r_W - prod(*mpJac_n, r_V);
449  noalias(aux2) = prod(aux1, *mpVtransVPseudoInv);
450  MatrixPointerType p_aux_jac_k1 = MatrixPointerType(new MatrixType(*mpJac_n + prod(aux2,trans(r_V))));
451  std::swap(mpJac_k1, p_aux_jac_k1);
452  } else {
453  // If there are no observations yet (iteration 0) use the previous step Jacobian as the current one
454  mpJac_k1 = mpJac_n;
455  }
456  }
457 
461 
467  std::size_t GetProblemSize() const override
468  {
469  return mProblemSize;
470  }
471 
472  std::size_t GetConvergenceAcceleratorIteration() const
473  {
474  return mConvergenceAcceleratorIteration;
475  }
476 
477  std::size_t GetNumberOfObservations() const
478  {
479  return TDenseSpace::Size2(*mpObsMatrixV);
480  }
481 
482  bool IsFirstCorrectionPerformed() const
483  {
484  return mConvergenceAcceleratorFirstCorrectionPerformed;
485  }
486 
493  bool IsUsedInBlockNewtonEquations() const
494  {
495  return mUsedInBlockNewtonEquations;
496  }
497 
503  VectorPointerType pGetCurrentIterationResidualVector()
504  {
505  return mpResidualVector_1;
506  }
507 
513  MatrixPointerType pGetInverseJacobianApproximation() override
514  {
515  return mpJac_k1;
516  }
517 
523  MatrixPointerType pGetResidualObservationMatrix()
524  {
525  return mpObsMatrixV;
526  }
527 
533  MatrixPointerType pGetSolutionObservationMatrix()
534  {
535  return mpObsMatrixW;
536  }
537 
543  void SetInitialRelaxationOmega(const double Omega)
544  {
545  mOmega_0 = Omega;
546  }
547 
553  void SetCutOffTolerance(const double CutOffTolerance)
554  {
555  mAbsCutOff = CutOffTolerance;
556  }
557 
558  virtual MatrixPointerType pGetJacobianDecompositionMatrixQU()
559  {
560  KRATOS_ERROR << "Jacobian decomposition not available for this convergence accelerator." << std::endl;
561  }
562 
563  virtual MatrixPointerType pGetJacobianDecompositionMatrixSigmaV()
564  {
565  KRATOS_ERROR << "Jacobian decomposition not available for this convergence accelerator." << std::endl;
566  }
567 
568  virtual MatrixPointerType pGetOldJacobianDecompositionMatrixQU()
569  {
570  KRATOS_ERROR << "Jacobian decomposition not available for this convergence accelerator." << std::endl;
571  }
572 
573  virtual MatrixPointerType pGetOldJacobianDecompositionMatrixSigmaV()
574  {
575  KRATOS_ERROR << "Jacobian decomposition not available for this convergence accelerator." << std::endl;
576  }
577 
579 private:
582 
583 
587 
588  double mOmega_0 = 0.825; // Relaxation factor for the initial fixed point iteration
589  //TODO: THIS IS NO LONGER ABSOLUTE --> CHANGE THE NAME
590  double mAbsCutOff = 1.0e-8; // Tolerance for the absolute cut-off criterion
591  bool mUsedInBlockNewtonEquations = false; // Indicates if the current MVQN is to be used in the interface block Newton equations
592  unsigned int mProblemSize = 0; // Residual to minimize size
593  unsigned int mConvergenceAcceleratorIteration = 0; // Convergence accelerator iteration counter
594  bool mJacobiansAreInitialized = false; // Indicates that the Jacobian matrices have been already initialized
595  bool mConvergenceAcceleratorFirstCorrectionPerformed = false; // Indicates that the initial fixed point iteration has been already performed
596 
597  VectorPointerType mpResidualVector_0; // Previous iteration residual vector
598  VectorPointerType mpResidualVector_1; // Current iteration residual vector
599  VectorPointerType mpIterationValue_0; // Previous iteration guess
600  VectorPointerType mpIterationValue_1; // Current iteration guess
601 
602  MatrixPointerType mpJac_n; // Previous step Jacobian approximation
603  MatrixPointerType mpJac_k1; // Current iteration Jacobian approximation
604  MatrixPointerType mpObsMatrixV; // Residual increment observation matrix
605  MatrixPointerType mpObsMatrixW; // Solution increment observation matrix
606  MatrixPointerType mpVtransVPseudoInv; // Auxiliary matrix trans(V)*V pseudo-inverse
607 
608 
612 
613  void InitializeJacobianMatrices()
614  {
615  if (mUsedInBlockNewtonEquations) {
616  // Initialize the previous step Jacobian to zero
617  MatrixPointerType p_new_jac_n = Kratos::make_shared<MatrixType>(mProblemSize,mProblemSize);
618  (*p_new_jac_n) = ZeroMatrix(mProblemSize,mProblemSize);
619  std::swap(p_new_jac_n,mpJac_n);
620 
621  // Initialize the current Jacobian approximation to a zero matrix
622  // Note that this is only required for the Interface Block Newton algorithm
623  MatrixPointerType p_new_jac_k1 = Kratos::make_shared<MatrixType>(mProblemSize,mProblemSize);
624  (*p_new_jac_k1) = ZeroMatrix(mProblemSize,mProblemSize);
625  std::swap(p_new_jac_k1, mpJac_k1);
626  } else {
627  // Initialize the previous step Jacobian approximation to minus the identity matrix
628  MatrixPointerType p_new_jac_n = Kratos::make_shared<MatrixType>(mProblemSize,mProblemSize);
629  (*p_new_jac_n) = -1.0 * IdentityMatrix(mProblemSize,mProblemSize);
630  std::swap(p_new_jac_n,mpJac_n);
631  }
632  }
633 
634  void InitializeDataColumns()
635  {
636  // Resize the observation matrices in accordance to the problem size
637  MatrixPointerType p_aux_V = MatrixPointerType(new MatrixType(mProblemSize, 1));
638  MatrixPointerType p_aux_W = MatrixPointerType(new MatrixType(mProblemSize, 1));
639  std::swap(mpObsMatrixV, p_aux_V);
640  std::swap(mpObsMatrixW, p_aux_W);
641 
642  // First observation matrices fill
643  IndexPartition<unsigned int>(mProblemSize).for_each([&](unsigned int I){
644  (*mpObsMatrixV)(I,0) = (*mpResidualVector_1)(I) - (*mpResidualVector_0)(I);
645  (*mpObsMatrixW)(I,0) = (*mpIterationValue_1)(I) - (*mpIterationValue_0)(I);
646  });
647  }
648 
653  void AppendDataColumns()
654  {
655  // Create two auxiliary observation matrices to reshape the existing ones
656  const std::size_t n_old_cols = mpObsMatrixV->size2();
657  MatrixPointerType p_new_V = Kratos::make_shared<MatrixType>(mProblemSize, n_old_cols + 1);
658  MatrixPointerType p_new_W = Kratos::make_shared<MatrixType>(mProblemSize, n_old_cols + 1);
659 
660  // Recover the previous iterations information
661  IndexPartition<unsigned int>(mProblemSize).for_each([&](unsigned int I){
662  for (unsigned int j = 0; j < n_old_cols; j++){
663  (*p_new_V)(I,j) = (*mpObsMatrixV)(I,j);
664  (*p_new_W)(I,j) = (*mpObsMatrixW)(I,j);
665  }
666  });
667 
668  // Fill the attached column with the current iteration information
669  IndexPartition<unsigned int>(mProblemSize).for_each([&](unsigned int I){
670  (*p_new_V)(I, n_old_cols) = (*mpResidualVector_1)(I) - (*mpResidualVector_0)(I);
671  (*p_new_W)(I, n_old_cols) = (*mpIterationValue_1)(I) - (*mpIterationValue_0)(I);
672  });
673 
674  std::swap(mpObsMatrixV,p_new_V);
675  std::swap(mpObsMatrixW,p_new_W);
676  }
677 
682  void DropAndAppendDataColumns()
683  {
684  // Observation matrices size is equal to the interface DOFs number. Oldest columns are to be dropped.
685  MatrixPointerType p_new_V = Kratos::make_shared<MatrixType>(mProblemSize, mProblemSize);
686  MatrixPointerType p_new_W = Kratos::make_shared<MatrixType>(mProblemSize, mProblemSize);
687 
688  // Drop the oldest column and reorder data
689  IndexPartition<unsigned int>(mProblemSize).for_each([&](unsigned int I){
690  for (unsigned int j = 0; j < (mProblemSize-1); j++){
691  (*p_new_V)(I,j) = (*mpObsMatrixV)(I,j+1);
692  (*p_new_W)(I,j) = (*mpObsMatrixW)(I,j+1);
693  }
694  });
695 
696  // Fill the last observation matrices column
697  IndexPartition<unsigned int>(mProblemSize).for_each([&](unsigned int I){
698  (*p_new_V)(I, mProblemSize-1) = (*mpResidualVector_1)(I) - (*mpResidualVector_0)(I);
699  (*p_new_W)(I, mProblemSize-1) = (*mpIterationValue_1)(I) - (*mpIterationValue_0)(I);
700  });
701 
702  std::swap(mpObsMatrixV,p_new_V);
703  std::swap(mpObsMatrixW,p_new_W);
704  }
705 
710  void DropLastDataColumn()
711  {
712  // Set two auxiliary observation matrices
713  const auto n_cols = mpObsMatrixV->size2() - 1;
714  MatrixPointerType p_aux_V = Kratos::make_shared<MatrixType>(mProblemSize, n_cols);
715  MatrixPointerType p_aux_W = Kratos::make_shared<MatrixType>(mProblemSize, n_cols);
716 
717  // Drop the last column
718  IndexPartition<std::size_t>(mProblemSize).for_each([&](unsigned int IRow){
719  for (std::size_t i_col = 0; i_col < n_cols; ++i_col){
720  (*p_aux_V)(IRow, i_col) = (*mpObsMatrixV)(IRow, i_col);
721  (*p_aux_W)(IRow, i_col) = (*mpObsMatrixW)(IRow, i_col);
722  }
723  });
724 
725  // Set the member observation matrices pointers
726  std::swap(mpObsMatrixV,p_aux_V);
727  std::swap(mpObsMatrixW,p_aux_W);
728  }
729 
733 
737 
741 
745 
749 
751 
752 }; /* Class MVQNFullJacobianConvergenceAccelerator */
753 
757 
761 
763 
764 } /* namespace Kratos.*/
765 
766 #endif /* KRATOS_MVQN_CONVERGENCE_ACCELERATOR defined */
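Editorial note (not part of the header): the update coded in UpdateCurrentJacobianMatrix, UpdateIterationGuess and CheckCurrentIterationInformationSingularity above can be summarized in the following way. Let V (mpObsMatrixV) collect the residual increments observed during the current step, W (mpObsMatrixW) the corresponding solution increments, J_n (mpJac_n) the previous step inverse Jacobian approximation and (V^T V)^+ the pseudo-inverse stored in mpVtransVPseudoInv. Then

\[ J_{k+1} = J_n + \left(W - J_n V\right)\left(V^{\mathrm{T}} V\right)^{+} V^{\mathrm{T}} \]
\[ x_{k+1} = x_k - J_{k+1}\, r_k , \qquad x_1 = x_0 + \omega_0\, r_0 \;\; \text{(initial fixed-point relaxation)} \]
\[ \text{drop the newest columns of } V, W \text{ if } \;\; \sigma_{\min}(V) < \varepsilon_{\mathrm{cut}}\, \sigma_{\max}(V), \qquad \sigma_i(V) = \sqrt{\lambda_i\!\left(V^{\mathrm{T}} V\right)} \]

where r_k is the current interface residual, omega_0 is the "w_0" relaxation factor and epsilon_cut is the "abs_cut_off_tol" setting (mAbsCutOff).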
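A minimal usage sketch of the call sequence this accelerator expects from a partitioned coupling loop is given below. This is an editorial addition, not part of the header: the include paths, the UblasSpace pair, the ComputeInterfaceResidual helper and the convergence tolerance are illustrative assumptions to be adapted to the actual application and build.

// Editorial usage sketch only; adjust includes and space types to your build.
#include "spaces/ublas_space.h"
#include "custom_utilities/mvqn_convergence_accelerator.hpp"

using SparseSpaceType = Kratos::UblasSpace<double, Kratos::CompressedMatrix, Kratos::Vector>;
using DenseSpaceType  = Kratos::UblasSpace<double, Kratos::Matrix, Kratos::Vector>;
using MVQNType        = Kratos::MVQNFullJacobianConvergenceAccelerator<SparseSpaceType, DenseSpaceType>;

// Placeholder: interface residual evaluation provided by the coupled solvers
Kratos::Vector ComputeInterfaceResidual(const Kratos::Vector& rGuess);

void SolveCoupledStep(Kratos::Vector& rInterfaceGuess)
{
    // Settings mirroring GetDefaultParameters() in the header above
    Kratos::Parameters mvqn_settings(R"({
        "solver_type"            : "MVQN",
        "w_0"                    : 0.825,
        "abs_cut_off_tol"        : 1e-8,
        "interface_block_newton" : false
    })");
    MVQNType mvqn(mvqn_settings);

    mvqn.InitializeSolutionStep();
    for (unsigned int it = 0; it < 25; ++it) {
        Kratos::Vector residual = ComputeInterfaceResidual(rInterfaceGuess);
        if (SparseSpaceType::TwoNorm(residual) < 1.0e-7) {
            break; // interface residual converged (illustrative tolerance)
        }
        mvqn.UpdateSolution(residual, rInterfaceGuess); // corrects the guess in place
        mvqn.FinalizeNonLinearIteration();
    }
    mvqn.FinalizeSolutionStep();
}

When "interface_block_newton" is set to true, the accelerator is instead intended to be driven by the IBQN classes declared as friends in the header.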