KratosMultiphysics
KRATOS Multiphysics (Kratos) is a framework for building parallel, multi-disciplinary simulation software, aiming at modularity, extensibility, and high performance. Kratos is written in C++, and includes an extensive Python interface.
mixedulm_linear_solver.h
Go to the documentation of this file.
1 // KRATOS ______ __ __ _____ __ __ __
2 // / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
3 // / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
4 // / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
5 // \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
6 //
7 // License: BSD License
8 // license: ContactStructuralMechanicsApplication/license.txt
9 //
10 // Main authors: Vicente Mataix Ferrandiz
11 //
12 
13 #pragma once
14 
15 // System includes
16 #include <string>
17 #include <iostream>
18 #include <sstream>
19 #include <cstddef>
20 
21 // External includes
22 
23 // Project includes
24 #include "includes/define.h"
25 #include "includes/model_part.h"
32 
33 namespace Kratos
34 {
37 
41 
45 
49 
53 
62 template<class TSparseSpaceType, class TDenseSpaceType,
63  class TPreconditionerType = Preconditioner<TSparseSpaceType, TDenseSpaceType>,
64  class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
66  public IterativeSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>
67 {
68 public:
72 
/// Identifies which block of the condensed contact system a DoF belongs to.
/// NOTE: SLAVE_INACTIVE is required — it is assigned throughout the index
/// classification code (e.g. mWhichBlockType[...] = BlockType::SLAVE_INACTIVE).
enum class BlockType {
    OTHER,          ///< DoF not related to the contact interface
    MASTER,         ///< Displacement DoF on a master interface node
    SLAVE_INACTIVE, ///< Displacement DoF on an inactive slave node
    SLAVE_ACTIVE,   ///< Displacement DoF on an active slave node
    LM_INACTIVE,    ///< Lagrange multiplier DoF on an inactive node
    LM_ACTIVE       ///< Lagrange multiplier DoF on an active node
};
84 
87 
89  KRATOS_DEFINE_LOCAL_FLAG( BLOCKS_ARE_ALLOCATED );
90 
92  KRATOS_DEFINE_LOCAL_FLAG( IS_INITIALIZED );
93 
96 
99 
102 
104  using LinearSolverPointerType = typename LinearSolverType::Pointer;
105 
108 
111 
114 
117 
119  using DofType = typename ModelPart::DofType;
120 
123 
126 
129 
131  using SizeType = std::size_t;
132 
134  using IndexType = std::size_t;
135 
138 
141 
143  static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
144 
148 
156  LinearSolverPointerType pSolverDispBlock,
157  const double MaxTolerance,
158  const std::size_t MaxIterationNumber
159  ) : BaseType (MaxTolerance, MaxIterationNumber),
160  mpSolverDispBlock(pSolverDispBlock)
161  {
162  // Initializing the remaining variables
163  mOptions.Set(BLOCKS_ARE_ALLOCATED, false);
164  mOptions.Set(IS_INITIALIZED, false);
165  }
166 
174  LinearSolverPointerType pSolverDispBlock,
175  Parameters ThisParameters = Parameters(R"({})")
176  ): BaseType (),
177  mpSolverDispBlock(pSolverDispBlock)
178 
179  {
180  KRATOS_TRY
181 
 182  // Now validate against defaults -- this also ensures no type mismatch
183  Parameters default_parameters = GetDefaultParameters();
184  ThisParameters.ValidateAndAssignDefaults(default_parameters);
185 
186  // Initializing the remaining variables
187  this->SetTolerance( ThisParameters["tolerance"].GetDouble() );
188  this->SetMaxIterationsNumber( ThisParameters["max_iteration_number"].GetInt() );
189  mEchoLevel = ThisParameters["echo_level"].GetInt();
190  mOptions.Set(BLOCKS_ARE_ALLOCATED, false);
191  mOptions.Set(IS_INITIALIZED, false);
192 
193  KRATOS_CATCH("")
194  }
195 
196 
199  : BaseType(rOther),
200  mpSolverDispBlock(rOther.mpSolverDispBlock),
201  mOptions(rOther.mOptions),
202  mDisplacementDofs(rOther.mDisplacementDofs),
203  mMasterIndices(rOther.mMasterIndices),
204  mSlaveInactiveIndices(rOther.mSlaveInactiveIndices),
205  mSlaveActiveIndices(rOther.mSlaveActiveIndices),
206  mLMInactiveIndices(rOther.mLMInactiveIndices),
207  mLMActiveIndices(rOther.mLMActiveIndices),
208  mOtherIndices(rOther.mOtherIndices),
209  mGlobalToLocalIndexing(rOther.mGlobalToLocalIndexing),
210  mWhichBlockType(rOther.mWhichBlockType),
211  mKDispModified(rOther.mKDispModified),
212  mKLMAModified(rOther.mKLMAModified),
213  mKLMIModified(rOther.mKLMIModified),
214  mKSAN(rOther.mKSAN),
215  mKSAM(rOther.mKSAM),
216  mKSASI(rOther.mKSASI),
217  mKSASA(rOther.mKSASA),
218  mPOperator(rOther.mPOperator),
219  mCOperator(rOther.mCOperator),
220  mResidualLMActive(rOther.mResidualLMActive),
221  mResidualLMInactive(rOther.mResidualLMInactive),
222  mResidualDisp(rOther.mResidualDisp),
223  mLMActive(rOther.mLMActive),
224  mLMInactive(rOther.mLMInactive),
225  mDisp(rOther.mDisp),
226  mEchoLevel(rOther.mEchoLevel),
227  mFileCreated(rOther.mFileCreated)
228  {
229  }
230 
232  ~MixedULMLinearSolver() override {}
233 
237 
240  {
241  return *this;
242  }
243 
247 
257  void Initialize (
258  SparseMatrixType& rA,
259  VectorType& rX,
260  VectorType& rB
261  ) override
262  {
263  if (mOptions.Is(BLOCKS_ARE_ALLOCATED)) {
264  mpSolverDispBlock->Initialize(mKDispModified, mDisp, mResidualDisp);
265  mOptions.Set(IS_INITIALIZED, true);
266  } else
267  KRATOS_DETAIL("MixedULM Initialize") << "Linear solver intialization is deferred to the moment at which blocks are available" << std::endl;
268  }
269 
280  SparseMatrixType& rA,
281  VectorType& rX,
282  VectorType& rB
283  ) override
284  {
285  // Copy to local matrices
286  if (mOptions.IsNot(BLOCKS_ARE_ALLOCATED)) {
287  FillBlockMatrices (true, rA, rX, rB);
288  mOptions.Set(BLOCKS_ARE_ALLOCATED, true);
289  } else {
290  FillBlockMatrices (false, rA, rX, rB);
291  mOptions.Set(BLOCKS_ARE_ALLOCATED, true);
292  }
293 
294  if(mOptions.IsNot(IS_INITIALIZED))
295  this->Initialize(rA,rX,rB);
296 
297  mpSolverDispBlock->InitializeSolutionStep(mKDispModified, mDisp, mResidualDisp);
298  }
299 
308  SparseMatrixType& rA,
309  VectorType& rX,
310  VectorType& rB
311  ) override
312  {
313  // Auxiliary size
314  const SizeType lm_active_size = mLMActiveIndices.size();
315  const SizeType lm_inactive_size = mLMInactiveIndices.size();
316  const SizeType total_disp_size = mOtherIndices.size() + mMasterIndices.size() + mSlaveInactiveIndices.size() + mSlaveActiveIndices.size();
317 
318  // Get the u and lm residuals
319  GetUPart (rB, mResidualDisp);
320 
321  // Solve u block
322  if (mDisp.size() != total_disp_size)
323  mDisp.resize(total_disp_size, false);
324  mpSolverDispBlock->Solve (mKDispModified, mDisp, mResidualDisp);
325 
326  // Write back solution
327  SetUPart(rX, mDisp);
328 
329  // Solve LM
330  if (lm_active_size > 0) {
331  // Now we compute the residual of the LM
332  GetLMAPart (rB, mResidualLMActive);
333 
334  // LM = D⁻1*rLM
335  if (mLMActive.size() != lm_active_size)
336  mLMActive.resize(lm_active_size, false);
337  TSparseSpaceType::Mult (mKLMAModified, mResidualLMActive, mLMActive);
338 
339  // Write back solution
340  SetLMAPart(rX, mLMActive);
341  }
342 
343  if (lm_inactive_size > 0) {
344  // Now we compute the residual of the LM
345  GetLMIPart (rB, mResidualLMInactive);
346 
347  // LM = D⁻1*rLM
348  if (mLMInactive.size() != lm_inactive_size)
349  mLMInactive.resize(lm_inactive_size, false);
350  TSparseSpaceType::Mult (mKLMIModified, mResidualLMInactive, mLMInactive);
351 
352  // Write back solution
353  SetLMIPart(rX, mLMInactive);
354  }
355  }
356 
365  SparseMatrixType& rA,
366  VectorType& rX,
367  VectorType& rB
368  ) override
369  {
370  mpSolverDispBlock->FinalizeSolutionStep(mKDispModified, mDisp, mResidualDisp);
371  }
372 
377  void Clear() override
378  {
379  mOptions.Set(BLOCKS_ARE_ALLOCATED, false);
380  mpSolverDispBlock->Clear();
381 
382  // Clear displacement DoFs
383  auto& r_data_dofs = mDisplacementDofs.GetContainer();
384  for (IndexType i=0; i<r_data_dofs.size(); ++i) {
385  delete r_data_dofs[i];
386  }
387  r_data_dofs.clear();
388 
389  // We clear the matrixes and vectors
390  mKDispModified.clear();
391  mKLMAModified.clear();
392  mKLMIModified.clear();
393 
394  mKSAN.clear();
395  mKSAM.clear();
396  mKSASI.clear();
397  mKSASA.clear();
398 
399  mPOperator.clear();
400  mCOperator.clear();
401 
402  mResidualLMActive.clear();
403  mResidualLMInactive.clear();
404  mResidualDisp.clear();
405 
406  mLMActive.clear();
407  mLMInactive.clear();
408  mDisp.clear();
409 
410  mOptions.Set(IS_INITIALIZED, false);
411  }
412 
/**
 * @brief Solves the mixed displacement / Lagrange-multiplier system.
 * @details Depending on the echo level, prints (2: RHS, 3: LHS+RHS) or dumps to
 * matrix-market files (>=4) the system before condensation; then runs the full
 * Initialize / InitializeSolutionStep / PerformSolutionStep / FinalizeSolutionStep
 * sequence; finally prints or dumps the condensed system the same way.
 * @param rA System matrix
 * @param rX Solution vector
 * @param rB Right hand side vector
 * @return Always false. NOTE(review): the inner solver's success flag is not
 * propagated here — confirm callers do not rely on this return value.
 */
bool Solve(
    SparseMatrixType& rA,
    VectorType& rX,
    VectorType& rB
    ) override
{
    // We print the system before condensate (if needed)
    if (mEchoLevel == 2) { //if it is needed to print the debug info
        KRATOS_INFO("RHS BEFORE CONDENSATION") << "RHS = " << rB << std::endl;
    } else if (mEchoLevel == 3) { //if it is needed to print the debug info
        KRATOS_INFO("LHS BEFORE CONDENSATION") << "SystemMatrix = " << rA << std::endl;
        KRATOS_INFO("RHS BEFORE CONDENSATION") << "RHS = " << rB << std::endl;
    } else if (mEchoLevel >= 4) { //print to matrix market file
        const std::string matrix_market_name = "before_condensation_A_" + std::to_string(mFileCreated) + ".mm";
        TSparseSpaceType::WriteMatrixMarketMatrix(matrix_market_name.c_str(), rA, false);

        const std::string matrix_market_vectname = "before_condensation_b_" + std::to_string(mFileCreated) + ".mm.rhs";
        TSparseSpaceType::WriteMatrixMarketVector(matrix_market_vectname.c_str(), rB);
    }

    // Deferred initialization (see Initialize): done here if not yet performed
    if (mOptions.IsNot(IS_INITIALIZED))
        this->Initialize (rA,rX,rB);

    this->InitializeSolutionStep (rA,rX,rB);

    this->PerformSolutionStep (rA,rX,rB);

    this->FinalizeSolutionStep (rA,rX,rB);

    // We print the resulting system (if needed)
    if (mEchoLevel == 2) { //if it is needed to print the debug info
        KRATOS_INFO("Dx") << "Solution obtained = " << mDisp << std::endl;
        KRATOS_INFO("RHS") << "RHS  = " << mResidualDisp << std::endl;
    } else if (mEchoLevel == 3) { //if it is needed to print the debug info
        KRATOS_INFO("LHS") << "SystemMatrix = " << mKDispModified << std::endl;
        KRATOS_INFO("Dx") << "Solution obtained = " << mDisp << std::endl;
        KRATOS_INFO("RHS") << "RHS  = " << mResidualDisp << std::endl;
    } else if (mEchoLevel >= 4) { //print to matrix market file
        const std::string matrix_market_name = "A_" + std::to_string(mFileCreated) + ".mm";
        TSparseSpaceType::WriteMatrixMarketMatrix(matrix_market_name.c_str(), mKDispModified, false);

        const std::string matrix_market_vectname = "b_" + std::to_string(mFileCreated) + ".mm.rhs";
        TSparseSpaceType::WriteMatrixMarketVector(matrix_market_vectname.c_str(), mResidualDisp);
        // Only incremented on the file-dump path, so file names stay in sync
        mFileCreated++;
    }

    return false;
}
468 
/**
 * @brief Multi-RHS solve (one column per right hand side).
 * @details Not implemented for this solver: the body performs no work.
 * @param rA System matrix
 * @param rX Solution matrix
 * @param rB Right hand side matrix
 * @return Always false (multi-RHS solve is not supported)
 */
bool Solve (
    SparseMatrixType& rA,
    DenseMatrixType& rX,
    DenseMatrixType& rB
    ) override
{
    return false;
}
484 
493  {
494  return true;
495  }
496 
507  SparseMatrixType& rA,
508  VectorType& rX,
509  VectorType& rB,
510  DofsArrayType& rDofSet,
511  ModelPart& rModelPart
512  ) override
513  {
514  // Allocating auxiliary parameters
516 
517  // Count LM dofs
518  SizeType n_lm_inactive_dofs = 0, n_lm_active_dofs = 0;
519  SizeType n_master_dofs = 0;
520  SizeType n_slave_inactive_dofs = 0, n_slave_active_dofs = 0;
521  SizeType tot_active_dofs = 0;
522 
523  // We separate if we consider a block builder and solver or an elimination builder and solver
524  if (rModelPart.IsNot(TO_SPLIT)) {
525  // In case of block builder and solver
526  for (auto& i_dof : rDofSet) {
527  node_id = i_dof.Id();
528  const Node& node = rModelPart.GetNode(node_id);
529  if (i_dof.EquationId() < rA.size1()) {
530  tot_active_dofs++;
531  if (IsLMDof(i_dof)) {
532  if (node.Is(ACTIVE))
533  ++n_lm_active_dofs;
534  else
535  ++n_lm_inactive_dofs;
536  } else if (node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
537  if (node.Is(MASTER)) {
538  ++n_master_dofs;
539  } else if (node.Is(SLAVE)) {
540  if (node.Is(ACTIVE))
541  ++n_slave_active_dofs;
542  else
543  ++n_slave_inactive_dofs;
544  }
545  }
546  }
547  }
548  } else {
549  // In case of elimination builder and solver
550  for (auto& i_dof : rDofSet) {
551  node_id = i_dof.Id();
552  const Node& node = rModelPart.GetNode(node_id);
553  tot_active_dofs++;
554  if (IsLMDof(i_dof)) {
555  if (node.Is(ACTIVE))
556  ++n_lm_active_dofs;
557  else
558  ++n_lm_inactive_dofs;
559  } else if (node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
560  if (node.Is(MASTER)) {
561  ++n_master_dofs;
562  } else if (node.Is(SLAVE)) {
563  if (node.Is(ACTIVE))
564  ++n_slave_active_dofs;
565  else
566  ++n_slave_inactive_dofs;
567  }
568  }
569  }
570  }
571 
572  KRATOS_ERROR_IF(tot_active_dofs != rA.size1()) << "Total system size does not coincide with the free dof map: " << tot_active_dofs << " vs " << rA.size1() << std::endl;
573 
574  // Resize arrays as needed
575  if (mMasterIndices.size() != n_master_dofs)
576  mMasterIndices.resize (n_master_dofs,false);
577  if (mSlaveInactiveIndices.size() != n_slave_inactive_dofs)
578  mSlaveInactiveIndices.resize (n_slave_inactive_dofs,false);
579  if (mSlaveActiveIndices.size() != n_slave_active_dofs)
580  mSlaveActiveIndices.resize (n_slave_active_dofs,false);
581  if (mLMInactiveIndices.size() != n_lm_inactive_dofs)
582  mLMInactiveIndices.resize (n_lm_inactive_dofs,false);
583  if (mLMActiveIndices.size() != n_lm_active_dofs)
584  mLMActiveIndices.resize (n_lm_active_dofs,false);
585 
586  const SizeType n_other_dofs = tot_active_dofs - n_lm_inactive_dofs - n_lm_active_dofs - n_master_dofs - n_slave_inactive_dofs - n_slave_active_dofs;
587  if (mOtherIndices.size() != n_other_dofs)
588  mOtherIndices.resize (n_other_dofs, false);
589  if (mGlobalToLocalIndexing.size() != tot_active_dofs)
590  mGlobalToLocalIndexing.resize (tot_active_dofs,false);
591  if (mWhichBlockType.size() != tot_active_dofs)
592  mWhichBlockType.resize(tot_active_dofs, false);
593 
594  // Size check
595  KRATOS_ERROR_IF_NOT(n_lm_active_dofs == n_slave_active_dofs) << "The number of active LM dofs: " << n_lm_active_dofs << " and active slave nodes dofs: " << n_slave_active_dofs << " does not coincide" << std::endl;
596 
603  SizeType lm_inactive_counter = 0, lm_active_counter = 0;
604  SizeType master_counter = 0;
605  SizeType slave_inactive_counter = 0, slave_active_counter = 0;
606  SizeType other_counter = 0;
607  IndexType global_pos = 0;
608 
609  // We separate if we consider a block builder and solver or an elimination builder and solver
610  if (rModelPart.IsNot(TO_SPLIT)) {
611  // In case of block builder and solver
612  for (auto& i_dof : rDofSet) {
613  node_id = i_dof.Id();
614  const Node& r_node = rModelPart.GetNode(node_id);
615  if (i_dof.EquationId() < rA.size1()) {
616  if (IsLMDof(i_dof)) {
617  if (r_node.Is(ACTIVE)) {
618  mLMActiveIndices[lm_active_counter] = global_pos;
619  mGlobalToLocalIndexing[global_pos] = lm_active_counter;
620  mWhichBlockType[global_pos] = BlockType::LM_ACTIVE;
621  ++lm_active_counter;
622  } else {
623  mLMInactiveIndices[lm_inactive_counter] = global_pos;
624  mGlobalToLocalIndexing[global_pos] = lm_inactive_counter;
625  mWhichBlockType[global_pos] = BlockType::LM_INACTIVE;
626  ++lm_inactive_counter;
627  }
628  } else if ( r_node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
629  if (r_node.Is(MASTER)) {
630  mMasterIndices[master_counter] = global_pos;
631  mGlobalToLocalIndexing[global_pos] = master_counter;
632  mWhichBlockType[global_pos] = BlockType::MASTER;
633  ++master_counter;
634  } else if (r_node.Is(SLAVE)) {
635  if (r_node.Is(ACTIVE)) {
636  mSlaveActiveIndices[slave_active_counter] = global_pos;
637  mGlobalToLocalIndexing[global_pos] = slave_active_counter;
638  mWhichBlockType[global_pos] = BlockType::SLAVE_ACTIVE;
639  ++slave_active_counter;
640  } else {
641  mSlaveInactiveIndices[slave_inactive_counter] = global_pos;
642  mGlobalToLocalIndexing[global_pos] = slave_inactive_counter;
643  mWhichBlockType[global_pos] = BlockType::SLAVE_INACTIVE;
644  ++slave_inactive_counter;
645  }
646  } else { // We need to consider always an else to ensure that the system size is consistent
647  mOtherIndices[other_counter] = global_pos;
648  mGlobalToLocalIndexing[global_pos] = other_counter;
649  mWhichBlockType[global_pos] = BlockType::OTHER;
650  ++other_counter;
651  }
652  } else {
653  mOtherIndices[other_counter] = global_pos;
654  mGlobalToLocalIndexing[global_pos] = other_counter;
655  mWhichBlockType[global_pos] = BlockType::OTHER;
656  ++other_counter;
657  }
658  ++global_pos;
659  }
660  }
661  } else {
662  // In case of elimination builder and solver
663  for (auto& i_dof : rDofSet) {
664  node_id = i_dof.Id();
665  const Node& r_node = rModelPart.GetNode(node_id);
666  if (IsLMDof(i_dof)) {
667  if (r_node.Is(ACTIVE)) {
668  mLMActiveIndices[lm_active_counter] = global_pos;
669  mGlobalToLocalIndexing[global_pos] = lm_active_counter;
670  mWhichBlockType[global_pos] = BlockType::LM_ACTIVE;
671  ++lm_active_counter;
672  } else {
673  mLMInactiveIndices[lm_inactive_counter] = global_pos;
674  mGlobalToLocalIndexing[global_pos] = lm_inactive_counter;
675  mWhichBlockType[global_pos] = BlockType::LM_INACTIVE;
676  ++lm_inactive_counter;
677  }
678  } else if ( r_node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
679  if (r_node.Is(MASTER)) {
680  mMasterIndices[master_counter] = global_pos;
681  mGlobalToLocalIndexing[global_pos] = master_counter;
682  mWhichBlockType[global_pos] = BlockType::MASTER;
683  ++master_counter;
684  } else if (r_node.Is(SLAVE)) {
685  if (r_node.Is(ACTIVE)) {
686  mSlaveActiveIndices[slave_active_counter] = global_pos;
687  mGlobalToLocalIndexing[global_pos] = slave_active_counter;
688  mWhichBlockType[global_pos] = BlockType::SLAVE_ACTIVE;
689  ++slave_active_counter;
690  } else {
691  mSlaveInactiveIndices[slave_inactive_counter] = global_pos;
692  mGlobalToLocalIndexing[global_pos] = slave_inactive_counter;
693  mWhichBlockType[global_pos] = BlockType::SLAVE_INACTIVE;
694  ++slave_inactive_counter;
695  }
696  } else { // We need to consider always an else to ensure that the system size is consistent
697  mOtherIndices[other_counter] = global_pos;
698  mGlobalToLocalIndexing[global_pos] = other_counter;
699  mWhichBlockType[global_pos] = BlockType::OTHER;
700  ++other_counter;
701  }
702  } else {
703  mOtherIndices[other_counter] = global_pos;
704  mGlobalToLocalIndexing[global_pos] = other_counter;
705  mWhichBlockType[global_pos] = BlockType::OTHER;
706  ++other_counter;
707  }
708  ++global_pos;
709  }
710  }
711 
712  KRATOS_DEBUG_ERROR_IF(master_counter != n_master_dofs) << "The number of active slave dofs counter : " << master_counter << "is higher than the expected: " << n_master_dofs << std::endl;
713  KRATOS_DEBUG_ERROR_IF(slave_active_counter != n_slave_active_dofs) << "The number of active slave dofs counter : " << slave_active_counter << "is higher than the expected: " << n_slave_active_dofs << std::endl;
714  KRATOS_DEBUG_ERROR_IF(slave_inactive_counter != n_slave_inactive_dofs) << "The number of inactive slave dofs counter : " << slave_inactive_counter << "is higher than the expected: " << n_slave_inactive_dofs << std::endl;
715  KRATOS_DEBUG_ERROR_IF(lm_active_counter != n_lm_active_dofs) << "The number of active LM dofs counter : " << lm_active_counter << "is higher than the expected: " << n_lm_active_dofs << std::endl;
716  KRATOS_DEBUG_ERROR_IF(lm_inactive_counter != n_lm_inactive_dofs) << "The number of inactive LM dofs counter : " << lm_inactive_counter << "is higher than the expected: " << n_lm_inactive_dofs << std::endl;
717  KRATOS_DEBUG_ERROR_IF(other_counter != n_other_dofs) << "The number of other dofs counter : " << other_counter << "is higher than the expected: " << n_other_dofs << std::endl;
718 
719  // Refactor mDisplacementDofs with the new indices
720  // Ordering of the dofs is important
721  const auto it_dof_begin = rDofSet.begin();
722  mDisplacementDofs.reserve(mOtherIndices.size() + mMasterIndices.size() + mSlaveActiveIndices.size() + mSlaveInactiveIndices.size() + mLMInactiveIndices.size() + mLMActiveIndices.size());
723 
724  // Copy dofs
725  std::size_t counter = 0;
726  for (auto& r_index : mOtherIndices) {
727  auto it_dof = it_dof_begin + r_index;
728  auto* p_dof = new DofType(*it_dof);
729  p_dof->SetEquationId(counter);
730  mDisplacementDofs.push_back(p_dof);
731  ++counter;
732  }
733  for (auto& r_index : mMasterIndices) {
734  auto it_dof = it_dof_begin + r_index;
735  auto* p_dof = new DofType(*it_dof);
736  p_dof->SetEquationId(counter);
737  mDisplacementDofs.push_back(p_dof);
738  ++counter;
739  }
740  for (auto& r_index : mSlaveInactiveIndices) {
741  auto it_dof = it_dof_begin + r_index;
742  auto* p_dof = new DofType(*it_dof);
743  p_dof->SetEquationId(counter);
744  mDisplacementDofs.push_back(p_dof);
745  ++counter;
746  }
747  for (auto& r_index : mSlaveActiveIndices) {
748  auto it_dof = it_dof_begin + r_index;
749  auto* p_dof = new DofType(*it_dof);
750  p_dof->SetEquationId(counter);
751  mDisplacementDofs.push_back(p_dof);
752  ++counter;
753  }
754 
755  // Provide physical data as needed in the displacement solver
756  if(mpSolverDispBlock->AdditionalPhysicalDataIsNeeded() ) {
757  mpSolverDispBlock->ProvideAdditionalData(rA, rX, rB, mDisplacementDofs, rModelPart);
758  }
759  }
760 
764 
770  {
771  return mDisplacementDofs;
772  }
773 
779  {
780  return mDisplacementDofs;
781  }
782 
786 
790 
/// Turn back information as a string.
std::string Info() const override
{
    return "Mixed displacement LM linear solver";
}
796 
/// Print information about this object to the given stream.
void PrintInfo (std::ostream& rOStream) const override
{
    rOStream << "Mixed displacement LM linear solver";
}
802 
/// Print object's data. Intentionally empty: there is no extra data to report.
void PrintData (std::ostream& rOStream) const override
{
}
807 
811 
813 protected:
816 
820 
824 
828 
849  const bool NeedAllocation,
850  SparseMatrixType& rA,
851  VectorType& rX,
852  VectorType& rB
853  )
854  {
855  KRATOS_TRY
856 
857  // Auxiliary sizes
858  const SizeType other_dof_size = mOtherIndices.size();
859  const SizeType master_size = mMasterIndices.size();
860  const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
861  const SizeType slave_active_size = mSlaveActiveIndices.size();
862  const SizeType lm_active_size = mLMActiveIndices.size();
863  const SizeType lm_inactive_size = mLMInactiveIndices.size();
864 
865  if (NeedAllocation)
866  AllocateBlocks();
867 
868  // Get access to A data
869  const IndexType* index1 = rA.index1_data().begin();
870  const IndexType* index2 = rA.index2_data().begin();
871  const double* values = rA.value_data().begin();
872 
873  // Allocate the auxiliary blocks by push_back
874  SparseMatrixType KMLMA(master_size, lm_active_size);
875  SparseMatrixType KLMALMA(lm_active_size, lm_active_size);
876  SparseMatrixType KSALMA(slave_active_size, lm_active_size);
877  SparseMatrixType KLMILMI(lm_inactive_size, lm_inactive_size);
878 
879  IndexType* KMLMA_ptr = new IndexType[master_size + 1];
880  IndexType* mKSAN_ptr = new IndexType[slave_active_size + 1];
881  IndexType* mKSAM_ptr = new IndexType[slave_active_size + 1];
882  IndexType* mKSASI_ptr = new IndexType[slave_active_size + 1];
883  IndexType* mKSASA_ptr = new IndexType[slave_active_size + 1];
884  IndexType* KSALMA_ptr = new IndexType[slave_active_size + 1];
885  IndexType* KLMILMI_ptr = new IndexType[lm_inactive_size + 1];
886  IndexType* KLMALMA_ptr = new IndexType[lm_active_size + 1];
887 
888  IndexPartition<std::size_t>(master_size +1).for_each([&](std::size_t i) {
889  KMLMA_ptr[i] = 0;
890  });
891  IndexPartition<std::size_t>(slave_active_size +1).for_each([&](std::size_t i) {
892  mKSAN_ptr[i] = 0;
893  mKSAM_ptr[i] = 0;
894  mKSASI_ptr[i] = 0;
895  mKSASA_ptr[i] = 0;
896  KSALMA_ptr[i] = 0;
897  });
898  IndexPartition<std::size_t>(lm_inactive_size +1).for_each([&](std::size_t i) {
899  KLMILMI_ptr[i] = 0;
900  });
901  IndexPartition<std::size_t>(lm_active_size +1).for_each([&](std::size_t i) {
902  KLMALMA_ptr[i] = 0;
903  });
904 
905  // We iterate over original matrix
906  IndexPartition<std::size_t>(rA.size1()).for_each([&](std::size_t i) {
907  const IndexType row_begin = index1[i];
908  const IndexType row_end = index1[i+1];
909  const IndexType local_row_id = mGlobalToLocalIndexing[i];
910 
911  IndexType KMLMA_cols = 0;
912  IndexType mKSAN_cols = 0;
913  IndexType mKSAM_cols = 0;
914  IndexType mKSASI_cols = 0;
915  IndexType mKSASA_cols = 0;
916  IndexType KSALMA_cols = 0;
917  IndexType KLMILMI_cols = 0;
918  IndexType KLMALMA_cols = 0;
919 
920  if ( mWhichBlockType[i] == BlockType::MASTER) { // KMLMA
921  for (IndexType j=row_begin; j<row_end; j++) {
922  const IndexType col_index = index2[j];
923  if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KMLMA block
924  ++KMLMA_cols;
925  }
926  }
927  KRATOS_DEBUG_ERROR_IF(local_row_id > master_size) << "MASTER:: Local row ID: " << local_row_id <<" is greater than the number of rows " << master_size << std::endl;
928  KMLMA_ptr[local_row_id + 1] = KMLMA_cols;
929  } else if ( mWhichBlockType[i] == BlockType::SLAVE_ACTIVE) { //either KSAN or KSAM or KSASA or KSASA or KSALM
930  for (IndexType j=row_begin; j<row_end; j++) {
931  const IndexType col_index = index2[j];
932  if (mWhichBlockType[col_index] == BlockType::OTHER) { // KSAN block
933  ++mKSAN_cols;
934  } else if (mWhichBlockType[col_index] == BlockType::MASTER) { // KSAM block
935  ++mKSAM_cols;
936  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { // KSASI block
937  ++mKSASI_cols;
938  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { // KSASA block
939  ++mKSASA_cols;
940  } else if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KSALMA block (diagonal)
941  ++KSALMA_cols;
942  }
943  }
944  KRATOS_DEBUG_ERROR_IF(local_row_id > slave_active_size) << "SLAVE_ACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << slave_active_size << std::endl;
945  mKSAN_ptr[local_row_id + 1] = mKSAN_cols;
946  mKSAM_ptr[local_row_id + 1] = mKSAM_cols;
947  mKSASI_ptr[local_row_id + 1] = mKSASI_cols;
948  mKSASA_ptr[local_row_id + 1] = mKSASA_cols;
949  KSALMA_ptr[local_row_id + 1] = KSALMA_cols;
950  } else if ( mWhichBlockType[i] == BlockType::LM_INACTIVE) { // KLMILMI
951  for (IndexType j=row_begin; j<row_end; j++) {
952  const IndexType col_index = index2[j];
953  if (mWhichBlockType[col_index] == BlockType::LM_INACTIVE) { // KLMILMI block (diagonal)
954  ++KLMILMI_cols;
955  }
956  }
957  KRATOS_DEBUG_ERROR_IF(local_row_id > lm_inactive_size) << "LM_INACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << lm_inactive_size << std::endl;
958  KLMILMI_ptr[local_row_id + 1] = KLMILMI_cols;
959  } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { // KLMALMA
960  for (IndexType j=row_begin; j<row_end; j++) {
961  const IndexType col_index = index2[j];
962  if (mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KLMALMA block
963  ++KLMALMA_cols;
964  }
965  }
966  KRATOS_DEBUG_ERROR_IF(local_row_id > lm_active_size) << "LM_ACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << lm_active_size << std::endl;
967  KLMALMA_ptr[local_row_id + 1] = KLMALMA_cols;
968  }
969  });
970 
971  // We initialize the blocks sparse matrix
972  std::partial_sum(KMLMA_ptr, KMLMA_ptr + master_size + 1, KMLMA_ptr);
973  const std::size_t KMLMA_nonzero_values = KMLMA_ptr[master_size];
974  IndexType* aux_index2_KMLMA= new IndexType[KMLMA_nonzero_values];
975  double* aux_val_KMLMA= new double[KMLMA_nonzero_values];
976 
977  std::partial_sum(mKSAN_ptr, mKSAN_ptr + slave_active_size + 1, mKSAN_ptr);
978  const std::size_t mKSAN_nonzero_values = mKSAN_ptr[slave_active_size];
979  IndexType* aux_index2_mKSAN= new IndexType[mKSAN_nonzero_values];
980  double* aux_val_mKSAN= new double[mKSAN_nonzero_values];
981 
982  std::partial_sum(mKSAM_ptr, mKSAM_ptr + slave_active_size + 1, mKSAM_ptr);
983  const std::size_t mKSAM_nonzero_values = mKSAM_ptr[slave_active_size];
984  IndexType* aux_index2_mKSAM= new IndexType[mKSAM_nonzero_values];
985  double* aux_val_mKSAM= new double[mKSAM_nonzero_values];
986 
987  std::partial_sum(mKSASI_ptr, mKSASI_ptr + slave_active_size + 1, mKSASI_ptr);
988  const std::size_t mKSASI_nonzero_values = mKSASI_ptr[slave_active_size];
989  IndexType* aux_index2_mKSASI= new IndexType[mKSASI_nonzero_values];
990  double* aux_val_mKSASI= new double[mKSASI_nonzero_values];
991 
992  std::partial_sum(mKSASA_ptr, mKSASA_ptr + slave_active_size + 1, mKSASA_ptr);
993  const std::size_t mKSASA_nonzero_values = mKSASA_ptr[slave_active_size];
994  IndexType* aux_index2_mKSASA= new IndexType[mKSASA_nonzero_values];
995  double* aux_val_mKSASA = new double[mKSASA_nonzero_values];
996 
997  std::partial_sum(KSALMA_ptr, KSALMA_ptr + slave_active_size + 1, KSALMA_ptr);
998  const std::size_t KSALMA_nonzero_values = KSALMA_ptr[slave_active_size];
999  IndexType* aux_index2_KSALMA= new IndexType[KSALMA_nonzero_values];
1000  double* aux_val_KSALMA = new double[KSALMA_nonzero_values];
1001 
1002  std::partial_sum(KLMILMI_ptr, KLMILMI_ptr + lm_inactive_size + 1, KLMILMI_ptr);
1003  const std::size_t KLMILMI_nonzero_values = KLMILMI_ptr[lm_inactive_size];
1004  IndexType* aux_index2_KLMILMI= new IndexType[KLMILMI_nonzero_values];
1005  double* aux_val_KLMILMI = new double[KLMILMI_nonzero_values];
1006 
1007  std::partial_sum(KLMALMA_ptr, KLMALMA_ptr + lm_active_size + 1, KLMALMA_ptr);
1008  const std::size_t KLMALMA_nonzero_values = KLMALMA_ptr[lm_active_size];
1009  IndexType* aux_index2_KLMALMA = new IndexType[KLMALMA_nonzero_values];
1010  double* aux_val_KLMALMA = new double[KLMALMA_nonzero_values];
1011 
1012  // We iterate over original matrix
1013  IndexPartition<std::size_t>(rA.size1()).for_each([&](std::size_t i) {
1014  const IndexType row_begin = index1[i];
1015  const IndexType row_end = index1[i+1];
1016  const IndexType local_row_id = mGlobalToLocalIndexing[i];
1017 
1018  if ( mWhichBlockType[i] == BlockType::MASTER) { // KMLMA
1019  IndexType KMLMA_row_beg = KMLMA_ptr[local_row_id];
1020  IndexType KMLMA_row_end = KMLMA_row_beg;
1021  for (IndexType j=row_begin; j<row_end; j++) {
1022  const IndexType col_index = index2[j];
1023  if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KMLMA block
1024  const double value = values[j];
1025  const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
1026  aux_index2_KMLMA[KMLMA_row_end] = local_col_id;
1027  aux_val_KMLMA[KMLMA_row_end] = value;
1028  ++KMLMA_row_end;
1029  }
1030  }
1031  } else if ( mWhichBlockType[i] == BlockType::SLAVE_ACTIVE) { //either KSAN or KSAM or KSASA or KSASA or KSALM
1032  IndexType mKSAN_row_beg = mKSAN_ptr[local_row_id];
1033  IndexType mKSAN_row_end = mKSAN_row_beg;
1034  IndexType mKSAM_row_beg = mKSAM_ptr[local_row_id];
1035  IndexType mKSAM_row_end = mKSAM_row_beg;
1036  IndexType mKSASI_row_beg = mKSASI_ptr[local_row_id];
1037  IndexType mKSASI_row_end = mKSASI_row_beg;
1038  IndexType mKSASA_row_beg = mKSASA_ptr[local_row_id];
1039  IndexType mKSASA_row_end = mKSASA_row_beg;
1040  IndexType KSALMA_row_beg = KSALMA_ptr[local_row_id];
1041  IndexType KSALMA_row_end = KSALMA_row_beg;
1042  for (IndexType j=row_begin; j<row_end; j++) {
1043  const IndexType col_index = index2[j];
1044  const double value = values[j];
1045  const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
1046  if (mWhichBlockType[col_index] == BlockType::OTHER) { // KSAN block
1047  aux_index2_mKSAN[mKSAN_row_end] = local_col_id;
1048  aux_val_mKSAN[mKSAN_row_end] = value;
1049  ++mKSAN_row_end;
1050  } else if (mWhichBlockType[col_index] == BlockType::MASTER) { // KSAM block
1051  aux_index2_mKSAM[mKSAM_row_end] = local_col_id;
1052  aux_val_mKSAM[mKSAM_row_end] = value;
1053  ++mKSAM_row_end;
1054  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { // KSASI block
1055  aux_index2_mKSASI[mKSASI_row_end] = local_col_id;
1056  aux_val_mKSASI[mKSASI_row_end] = value;
1057  ++mKSASI_row_end;
1058  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { // KSASA block
1059  aux_index2_mKSASA[mKSASA_row_end] = local_col_id;
1060  aux_val_mKSASA[mKSASA_row_end] = value;
1061  ++mKSASA_row_end;
1062  } else if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KSALMA block (diagonal)
1063  aux_index2_KSALMA[KSALMA_row_end] = local_col_id;
1064  aux_val_KSALMA[KSALMA_row_end] = value;
1065  ++KSALMA_row_end;
1066  }
1067  }
1068  } else if ( mWhichBlockType[i] == BlockType::LM_INACTIVE) { // KLMILMI
1069  IndexType KLMILMI_row_beg = KLMILMI_ptr[local_row_id];
1070  IndexType KLMILMI_row_end = KLMILMI_row_beg;
1071  for (IndexType j=row_begin; j<row_end; j++) {
1072  const IndexType col_index = index2[j];
1073  if (mWhichBlockType[col_index] == BlockType::LM_INACTIVE) { // KLMILMI block (diagonal)
1074  const double value = values[j];
1075  const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
1076  aux_index2_KLMILMI[KLMILMI_row_end] = local_col_id;
1077  aux_val_KLMILMI[KLMILMI_row_end] = value;
1078  ++KLMILMI_row_end;
1079  }
1080  }
1081  } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { // KLMALMA
1082  IndexType KLMALMA_row_beg = KLMALMA_ptr[local_row_id];
1083  IndexType KLMALMA_row_end = KLMALMA_row_beg;
1084  for (IndexType j=row_begin; j<row_end; j++) {
1085  const IndexType col_index = index2[j];
1086  if (mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KLMALMA block
1087  const double value = values[j];
1088  const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
1089  aux_index2_KLMALMA[KLMALMA_row_end] = local_col_id;
1090  aux_val_KLMALMA[KLMALMA_row_end] = value;
1091  ++KLMALMA_row_end;
1092  }
1093  }
1094  }
1095  });
1096 
1097  CreateMatrix(KMLMA, master_size, lm_active_size, KMLMA_ptr, aux_index2_KMLMA, aux_val_KMLMA);
1098  CreateMatrix(mKSAN, slave_active_size, other_dof_size, mKSAN_ptr, aux_index2_mKSAN, aux_val_mKSAN);
1099  CreateMatrix(mKSAM, slave_active_size, master_size, mKSAM_ptr, aux_index2_mKSAM, aux_val_mKSAM);
1100  CreateMatrix(mKSASI, slave_active_size, slave_inactive_size, mKSASI_ptr, aux_index2_mKSASI, aux_val_mKSASI);
1101  CreateMatrix(mKSASA, slave_active_size, slave_active_size, mKSASA_ptr, aux_index2_mKSASA, aux_val_mKSASA);
1102  CreateMatrix(KSALMA, slave_active_size, lm_active_size, KSALMA_ptr, aux_index2_KSALMA, aux_val_KSALMA);
1103  CreateMatrix(KLMILMI, lm_inactive_size, lm_inactive_size, KLMILMI_ptr, aux_index2_KLMILMI, aux_val_KLMILMI);
1104  CreateMatrix(KLMALMA, lm_active_size, lm_active_size, KLMALMA_ptr, aux_index2_KLMALMA, aux_val_KLMALMA);
1105 
1106  // We compute directly the inverse of the KSALMA matrix
1107  // KSALMA it is supposed to be a diagonal matrix (in fact it is the key point of this formulation)
1108  // (NOTE: technically it is not a stiffness matrix, we give that name)
1109  if (lm_active_size > 0) {
1110  ComputeDiagonalByLumping(KSALMA, mKLMAModified, ZeroTolerance);
1111  }
1112 
1113  // We compute directly the inverse of the KLMILMI matrix
1114  // KLMILMI it is supposed to be a diagonal matrix (in fact it is the key point of this formulation)
1115  // (NOTE: technically it is not a stiffness matrix, we give that name)
1116  if (lm_inactive_size > 0) {
1117  ComputeDiagonalByLumping(KLMILMI, mKLMIModified, ZeroTolerance);
1118  }
1119 
1120  // Compute the P and C operators
1121  if (slave_active_size > 0) {
1122  SparseMatrixMultiplicationUtility::MatrixMultiplication(KMLMA, mKLMAModified, mPOperator);
1123  SparseMatrixMultiplicationUtility::MatrixMultiplication(KLMALMA, mKLMAModified, mCOperator);
1124  }
1125 
1126  // We proceed with the auxiliary products for the master blocks
1127  SparseMatrixType master_auxKSAN(master_size, other_dof_size);
1128  SparseMatrixType master_auxKSAM(master_size, master_size);
1129  SparseMatrixType master_auxKSASI(master_size, slave_inactive_size);
1130  SparseMatrixType master_auxKSASA(master_size, slave_active_size);
1131 
1132  if (slave_active_size > 0) {
1133  SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSAN, master_auxKSAN);
1134  SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSAM, master_auxKSAM);
1135  if (slave_inactive_size > 0)
1136  SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSASI, master_auxKSASI);
1137  SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSASA, master_auxKSASA);
1138  }
1139 
1140  // We proceed with the auxiliary products for the active slave blocks
1141  SparseMatrixType aslave_auxKSAN(slave_active_size, other_dof_size);
1142  SparseMatrixType aslave_auxKSAM(slave_active_size, master_size);
1143  SparseMatrixType aslave_auxKSASI(slave_active_size, slave_inactive_size);
1144  SparseMatrixType aslave_auxKSASA(slave_active_size, slave_active_size);
1145 
1146  if (slave_active_size > 0) {
1147  SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSAN, aslave_auxKSAN);
1148  SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSAM, aslave_auxKSAM);
1149  if (slave_inactive_size > 0)
1150  SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSASI, aslave_auxKSASI);
1151  SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSASA, aslave_auxKSASA);
1152  }
1153 
1154  // Auxiliary indexes
1155  const SizeType other_dof_initial_index = 0;
1156  const SizeType master_dof_initial_index = other_dof_size;
1157  const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
1158  const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;
1159 
1160  // The auxiliary index structure
1161  const SizeType nrows = mKDispModified.size1();
1162  const SizeType ncols = mKDispModified.size2();
1163  IndexType* K_disp_modified_ptr_aux1 = new IndexType[nrows + 1];
1164  K_disp_modified_ptr_aux1[0] = 0;
1165 
1166  IndexPartition<std::size_t>(rA.size1()).for_each([&](std::size_t i) {
1167  if ( mWhichBlockType[i] == BlockType::OTHER) { //either KNN or KNM or KNSI or KNSA
1168  ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, other_dof_initial_index, K_disp_modified_ptr_aux1);
1169  } else if ( mWhichBlockType[i] == BlockType::MASTER) { //either KMN or KMM or KMSI or KMLM
1170  ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, master_dof_initial_index, K_disp_modified_ptr_aux1);
1171  } else if ( mWhichBlockType[i] == BlockType::SLAVE_INACTIVE) { //either KSIN or KSIM or KSISI or KSISA
1172  ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, slave_inactive_dof_initial_index, K_disp_modified_ptr_aux1);
1173  } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { //either KLMAM or KLMASI or KLMASA
1174  ComputeNonZeroColumnsPartialDispDoFs( index1, index2, values, i, assembling_slave_dof_initial_index, K_disp_modified_ptr_aux1);
1175  }
1176  });
1177 
1178  // We initialize the final sparse matrix
1179  std::partial_sum(K_disp_modified_ptr_aux1, K_disp_modified_ptr_aux1 + nrows + 1, K_disp_modified_ptr_aux1);
1180  const SizeType nonzero_values_aux1 = K_disp_modified_ptr_aux1[nrows];
1181  IndexType* aux_index2_K_disp_modified_aux1 = new IndexType[nonzero_values_aux1];
1182  double* aux_val_K_disp_modified_aux1 = new double[nonzero_values_aux1];
1183 
1184  IndexPartition<std::size_t>(rA.size1()).for_each([&](std::size_t i) {
1185  if ( mWhichBlockType[i] == BlockType::OTHER) { //either KNN or KNM or KNSI or KNSA
1186  ComputeAuxiliaryValuesDispDoFs( index1, index2, values, i, other_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
1187  } else if ( mWhichBlockType[i] == BlockType::MASTER) { //either KMN or KMM or KMSI or KMLM
1188  ComputeAuxiliaryValuesDispDoFs( index1, index2, values, i, master_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
1189  } else if ( mWhichBlockType[i] == BlockType::SLAVE_INACTIVE) { //either KSIN or KSIM or KSISI or KSISA
1190  ComputeAuxiliaryValuesDispDoFs( index1, index2, values, i, slave_inactive_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
1191  } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { //either KLMAM or KLMASI or KLMASA
1192  ComputeAuxiliaryValuesPartialDispDoFs( index1, index2, values, i, assembling_slave_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
1193  }
1194  });
1195 
1196  // Create the first auxiliary matrix
1197  CreateMatrix(mKDispModified, nrows, ncols, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
1198 
1199  // Now we create the second matrix block to sum
1200  IndexType* K_disp_modified_ptr_aux2 = new IndexType[nrows + 1];
1201  IndexPartition<std::size_t>(nrows + 1).for_each([&](std::size_t i) {
1202  K_disp_modified_ptr_aux2[i] = 0;
1203  });
1204 
1205  IndexPartition<std::size_t>(master_size).for_each([&](std::size_t i) {
1206  IndexType K_disp_modified_cols_aux2 = 0;
1207  // Get access to master_auxKSAN data
1208  if (master_auxKSAN.nnz() > 0 && other_dof_size > 0) {
1209  SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSAN, i, K_disp_modified_cols_aux2);
1210  }
1211  // Get access to master_auxKSAM data
1212  if (master_auxKSAM.nnz() > 0) {
1213  SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSAM, i, K_disp_modified_cols_aux2);
1214  }
1215  // Get access to master_auxKSASI data
1216  if (master_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
1217  SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSASI, i, K_disp_modified_cols_aux2);
1218  }
1219  // Get access to master_auxKSASA data
1220  if (master_auxKSASA.nnz() > 0 && slave_active_size > 0) {
1221  SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSASA, i, K_disp_modified_cols_aux2);
1222  }
1223  K_disp_modified_ptr_aux2[master_dof_initial_index + i + 1] = K_disp_modified_cols_aux2;
1224  });
1225 
1226  IndexPartition<std::size_t>(slave_active_size).for_each([&](std::size_t i) {
1227  IndexType K_disp_modified_cols_aux2 = 0;
1228  // Get access to aslave_auxKSAN data
1229  if (aslave_auxKSAN.nnz() > 0 && other_dof_size > 0) {
1230  SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSAN, i, K_disp_modified_cols_aux2);
1231  }
1232  // Get access to aslave_auxKSAM data
1233  if (aslave_auxKSAM.nnz() > 0 && master_size > 0) {
1234  SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSAM, i, K_disp_modified_cols_aux2);
1235  }
1236  // Get access to aslave_auxKSASI data
1237  if (aslave_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
1238  SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSASI, i, K_disp_modified_cols_aux2);
1239  }
1240  // Get access to aslave_auxKSASA data
1241  if (aslave_auxKSASA.nnz() > 0) {
1242  SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSASA, i, K_disp_modified_cols_aux2);
1243  }
1244  K_disp_modified_ptr_aux2[assembling_slave_dof_initial_index + i + 1] = K_disp_modified_cols_aux2;
1245  });
1246 
1247  // We initialize the final sparse matrix
1248  std::partial_sum(K_disp_modified_ptr_aux2, K_disp_modified_ptr_aux2 + nrows + 1, K_disp_modified_ptr_aux2);
1249  const SizeType nonzero_values_aux2 = K_disp_modified_ptr_aux2[nrows];
1250  IndexType* aux_index2_K_disp_modified_aux2 = new IndexType[nonzero_values_aux2];
1251  double* aux_val_K_disp_modified_aux2 = new double[nonzero_values_aux2];
1252 
1253  IndexPartition<std::size_t>(master_size).for_each([&](std::size_t i) {
1254  const IndexType row_beg = K_disp_modified_ptr_aux2[master_dof_initial_index + i];
1255  IndexType row_end = row_beg;
1256  // Get access to master_auxKSAN data
1257  if (master_auxKSAN.nnz() > 0 && other_dof_size > 0) {
1258  SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSAN, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, other_dof_initial_index);
1259  }
1260  // Get access to master_auxKSAM data
1261  if (master_auxKSAM.nnz() > 0) {
1262  SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSAM, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, master_dof_initial_index);
1263  }
1264  // Get access to master_auxKSASI data
1265  if (master_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
1266  SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSASI, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, slave_inactive_dof_initial_index);
1267  }
1268  // Get access to master_auxKSASA data
1269  if (master_auxKSASA.nnz() > 0 && slave_active_size > 0) {
1270  SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSASA, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, assembling_slave_dof_initial_index);
1271  }
1272  });
1273 
1274  IndexPartition<std::size_t>(slave_active_size).for_each([&](std::size_t i) {
1275  const IndexType row_beg = K_disp_modified_ptr_aux2[assembling_slave_dof_initial_index + i];
1276  IndexType row_end = row_beg;
1277  // Get access to aslave_auxKSAN data
1278  if (aslave_auxKSAN.nnz() > 0 && other_dof_size > 0) {
1279  SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSAN, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, other_dof_initial_index);
1280  }
1281  // Get access to aslave_auxKSAM data
1282  if (aslave_auxKSAM.nnz() > 0 && master_size > 0) {
1283  SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSAM, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, master_dof_initial_index);
1284  }
1285  // Get access to aslave_auxKSASI data
1286  if (aslave_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
1287  SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSASI, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, slave_inactive_dof_initial_index);
1288  }
1289  // Get access to aslave_auxKSASA data
1290  if (aslave_auxKSASA.nnz() > 0) {
1291  SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSASA, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, assembling_slave_dof_initial_index);
1292  }
1293  });
1294 
1295  // Create the second auxiliary matrix
1296  SparseMatrixType K_disp_modified_aux2(nrows, ncols);
1297  CreateMatrix(K_disp_modified_aux2, nrows, ncols, K_disp_modified_ptr_aux2, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2);
1298 
1299  // We sum the auxiliary matrices
1300  SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(mKDispModified, K_disp_modified_aux2, - 1.0);
1301 
1302  // Finally we ensure that the matrix is structurally symmetric
1303  EnsureStructuralSymmetryMatrix(mKDispModified);
1304 
1305  #ifdef KRATOS_DEBUG
1306  CheckMatrix(mKDispModified);
1307  #endif
1308 
1309 // // DEBUG
1310 // LOG_MATRIX_PRETTY(rA)
1311 // LOG_MATRIX_PRETTY(mKDispModified)
1312 
1313  KRATOS_CATCH ("")
1314  }
1315 
1326 private:
1332 
    LinearSolverPointerType mpSolverDispBlock; // Linear solver used for the (condensed) displacement block

    Flags mOptions; // Local solver flags (e.g. BLOCKS_ARE_ALLOCATED, IS_INITIALIZED)

    DofsArrayType mDisplacementDofs; // Displacement DoFs storage (owns raw pointers; freed in AllocateBlocks)

    IndexVectorType mMasterIndices;         // Global ids of the master displacement DoFs
    IndexVectorType mSlaveInactiveIndices;  // Global ids of the inactive slave displacement DoFs
    IndexVectorType mSlaveActiveIndices;    // Global ids of the active slave displacement DoFs
    IndexVectorType mLMInactiveIndices;     // Global ids of the inactive Lagrange multiplier DoFs
    IndexVectorType mLMActiveIndices;       // Global ids of the active Lagrange multiplier DoFs
    IndexVectorType mOtherIndices;          // Global ids of all remaining (non-contact) DoFs
    IndexVectorType mGlobalToLocalIndexing; // Maps a global DoF id to its position inside its own block
    BlockTypeVectorType mWhichBlockType;    // Block classification (BlockType) of every global DoF

    SparseMatrixType mKDispModified; // Assembled/condensed displacement system matrix
    SparseMatrixType mKLMAModified;  // Lumped-diagonal inverse of the KSALMA block (active LM)
    SparseMatrixType mKLMIModified;  // Lumped-diagonal inverse of the KLMILMI block (inactive LM)

    SparseMatrixType mKSAN;  // Coupling block: active slave rows vs. other columns
    SparseMatrixType mKSAM;  // Coupling block: active slave rows vs. master columns
    SparseMatrixType mKSASI; // Coupling block: active slave rows vs. inactive slave columns
    SparseMatrixType mKSASA; // Coupling block: active slave rows vs. active slave columns

    SparseMatrixType mPOperator; // P operator (KMLMA * mKLMAModified), used to condense the master rows
    SparseMatrixType mCOperator; // C operator (KLMALMA * mKLMAModified), used to condense the active LM rows

    VectorType mResidualLMActive;   // Residual restricted to the active LM DoFs
    VectorType mResidualLMInactive; // Residual restricted to the inactive LM DoFs
    VectorType mResidualDisp;       // Residual restricted to the displacement DoFs

    VectorType mLMActive;   // Solution values of the active LM DoFs
    VectorType mLMInactive; // Solution values of the inactive LM DoFs
    VectorType mDisp;       // Solution values of the displacement DoFs

    IndexType mEchoLevel = 0;   // Verbosity level of the solver
    IndexType mFileCreated = 0; // Counter of output (matrix) files created — presumably for debugging dumps; TODO confirm
1370 
1374 
1378 
1388  inline void ComputeNonZeroColumnsDispDoFs(
1389  const IndexType* Index1,
1390  const IndexType* Index2,
1391  const double* Values,
1392  const int CurrentRow,
1393  const IndexType InitialIndex,
1394  IndexType* Ptr
1395  )
1396  {
1397  const IndexType row_begin = Index1[CurrentRow];
1398  const IndexType row_end = Index1[CurrentRow + 1];
1399 
1400  IndexType cols = 0;
1401 
1402  const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
1403  for (IndexType j=row_begin; j<row_end; j++) {
1404  const IndexType col_index = Index2[j];
1405  if (mWhichBlockType[col_index] == BlockType::OTHER) {
1406  ++cols;
1407  } else if (mWhichBlockType[col_index] == BlockType::MASTER) {
1408  ++cols;
1409  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
1410  ++cols;
1411  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
1412  ++cols;
1413  }
1414  }
1415  Ptr[local_row_id + 1] = cols;
1416  }
1417 
1428  inline void ComputeNonZeroColumnsPartialDispDoFs(
1429  const IndexType* Index1,
1430  const IndexType* Index2,
1431  const double* Values,
1432  const int CurrentRow,
1433  const IndexType InitialIndex,
1434  IndexType* Ptr
1435  )
1436  {
1437  const IndexType row_begin = Index1[CurrentRow];
1438  const IndexType row_end = Index1[CurrentRow + 1];
1439 
1440  IndexType cols = 0;
1441 
1442  const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
1443  for (IndexType j=row_begin; j<row_end; j++) {
1444  const IndexType col_index = Index2[j];
1445  if (mWhichBlockType[col_index] == BlockType::MASTER) {
1446  ++cols;
1447  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
1448  ++cols;
1449  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
1450  ++cols;
1451  }
1452  }
1453  Ptr[local_row_id + 1] = cols;
1454  }
1455 
1467  inline void ComputeAuxiliaryValuesDispDoFs(
1468  const IndexType* Index1,
1469  const IndexType* Index2,
1470  const double* Values,
1471  const int CurrentRow,
1472  const IndexType InitialIndex,
1473  IndexType* Ptr,
1474  IndexType* AuxIndex2,
1475  double* AuxVals
1476  )
1477  {
1478  // Auxiliary sizes
1479  const SizeType other_dof_size = mOtherIndices.size();
1480  const SizeType master_size = mMasterIndices.size();
1481  const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
1482 
1483  // Auxiliary indexes
1484  const SizeType other_dof_initial_index = 0;
1485  const SizeType master_dof_initial_index = other_dof_size;
1486  const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
1487  const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;
1488 
1489  // Some indexes
1490  const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
1491 
1492  const IndexType row_begin_A = Index1[CurrentRow];
1493  const IndexType row_end_A = Index1[CurrentRow + 1];
1494 
1495  const IndexType row_beg = Ptr[local_row_id];
1496  IndexType row_end = row_beg;
1497 
1498  for (IndexType j=row_begin_A; j<row_end_A; j++) {
1499  const IndexType col_index = Index2[j];
1500  const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
1501  const double value = Values[j];
1502  if (mWhichBlockType[col_index] == BlockType::OTHER) {
1503  AuxIndex2[row_end] = local_col_id + other_dof_initial_index;
1504  AuxVals[row_end] = value;
1505  ++row_end;
1506  } else if (mWhichBlockType[col_index] == BlockType::MASTER) {
1507  AuxIndex2[row_end] = local_col_id + master_dof_initial_index;
1508  AuxVals[row_end] = value;
1509  ++row_end;
1510  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
1511  AuxIndex2[row_end] = local_col_id + slave_inactive_dof_initial_index;
1512  AuxVals[row_end] = value;
1513  ++row_end;
1514  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
1515  AuxIndex2[row_end] = local_col_id + assembling_slave_dof_initial_index;
1516  AuxVals[row_end] = value;
1517  ++row_end;
1518  }
1519  }
1520  }
1521 
1534  inline void ComputeAuxiliaryValuesPartialDispDoFs(
1535  const IndexType* Index1,
1536  const IndexType* Index2,
1537  const double* Values,
1538  const int CurrentRow,
1539  const IndexType InitialIndex,
1540  IndexType* Ptr,
1541  IndexType* AuxIndex2,
1542  double* AuxVals
1543  )
1544  {
1545  // Auxiliary sizes
1546  const SizeType other_dof_size = mOtherIndices.size();
1547  const SizeType master_size = mMasterIndices.size();
1548  const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
1549 
1550  // Auxiliary indexes
1551  const SizeType master_dof_initial_index = other_dof_size;
1552  const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
1553  const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;
1554 
1555  // Some indexes
1556  const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
1557 
1558  const IndexType row_begin_A = Index1[CurrentRow];
1559  const IndexType row_end_A = Index1[CurrentRow + 1];
1560 
1561  const IndexType row_beg = Ptr[local_row_id];
1562  IndexType row_end = row_beg;
1563 
1564  for (IndexType j=row_begin_A; j<row_end_A; j++) {
1565  const IndexType col_index = Index2[j];
1566  const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
1567  const double value = Values[j];
1568  if (mWhichBlockType[col_index] == BlockType::MASTER) {
1569  AuxIndex2[row_end] = local_col_id + master_dof_initial_index;
1570  AuxVals[row_end] = value;
1571  ++row_end;
1572  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
1573  AuxIndex2[row_end] = local_col_id + slave_inactive_dof_initial_index;
1574  AuxVals[row_end] = value;
1575  ++row_end;
1576  } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
1577  AuxIndex2[row_end] = local_col_id + assembling_slave_dof_initial_index;
1578  AuxVals[row_end] = value;
1579  ++row_end;
1580  }
1581  }
1582  }
1583 
1587  inline void AllocateBlocks()
1588  {
1589  // Clear displacement DoFs
1590  auto& r_data_dofs = mDisplacementDofs.GetContainer();
1591  for (IndexType i=0; i<r_data_dofs.size(); ++i) {
1592  delete r_data_dofs[i];
1593  }
1594  r_data_dofs.clear();
1595 
1596  // We clear the matrices
1597  mKDispModified.clear();
1598  mKLMAModified.clear();
1599  mKLMIModified.clear();
1600 
1601  mKSAN.clear();
1602  mKSAM.clear();
1603  mKSASI.clear();
1604  mKSASA.clear();
1605 
1606  mPOperator.clear();
1607  mCOperator.clear();
1608 
1609  mResidualLMActive.clear();
1610  mResidualLMInactive.clear();
1611  mResidualDisp.clear();
1612 
1613  mLMActive.clear();
1614  mLMInactive.clear();
1615  mDisp.clear();
1616 
1617  // Auxiliary sizes
1618  const SizeType other_dof_size = mOtherIndices.size();
1619  const SizeType master_size = mMasterIndices.size();
1620  const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
1621  const SizeType slave_active_size = mSlaveActiveIndices.size();
1622  const SizeType lm_active_size = mLMActiveIndices.size();
1623  const SizeType lm_inactive_size = mLMInactiveIndices.size();
1624  const SizeType total_size = other_dof_size + master_size + slave_inactive_size + slave_active_size;
1625 
1626  // We do the allocation
1627  mKDispModified.resize(total_size, total_size, false);
1628  mKLMAModified.resize(lm_active_size, lm_active_size, false);
1629  mKLMAModified.reserve(lm_active_size);
1630  mKLMIModified.resize(lm_inactive_size, lm_inactive_size, false);
1631  mKLMIModified.reserve(lm_inactive_size);
1632 
1633  mKSAN.resize(slave_active_size, other_dof_size, false);
1634  mKSAM.resize(slave_active_size, master_size, false);
1635  mKSASI.resize(slave_active_size, slave_inactive_size, false);
1636  mKSASA.resize(slave_active_size, slave_active_size, false);
1637 
1638  mPOperator.resize(master_size, slave_active_size, false);
1639  mCOperator.resize(lm_active_size, slave_active_size, false);
1640 
1641  mResidualLMActive.resize(lm_active_size, false );
1642  mResidualLMInactive.resize(lm_inactive_size, false );
1643  mResidualDisp.resize(total_size );
1644 
1645  mLMActive.resize(lm_active_size, false);
1646  mLMInactive.resize(lm_inactive_size, false);
1647  mDisp.resize(total_size, false);
1648  }
1649 
    /**
     * @brief Gathers the (statically condensed) displacement part of the residual
     * @details The output is ordered as [other | master | inactive slave | active slave]. When there
     * are active slave DoFs, the master rows are corrected with the P operator and the last block is
     * built from the active LM residual corrected with the C operator
     * @param rTotalResidual The total residual of the system
     * @param ResidualU The output displacement residual
     */
    inline void GetUPart (
        const VectorType& rTotalResidual,
        VectorType& ResidualU
        )
    {
        // Auxiliary sizes
        const SizeType other_dof_size = mOtherIndices.size();
        const SizeType master_size = mMasterIndices.size();
        const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
        const SizeType slave_active_size = mSlaveActiveIndices.size();
        const SizeType lm_active_size = mLMActiveIndices.size();
        const SizeType total_size = other_dof_size + master_size + slave_inactive_size + slave_active_size;

        // Resize in case the size is not correct
        // NOTE(review): the last block is filled with lm_active_size entries, so this layout
        // assumes lm_active_size == slave_active_size — TODO confirm
        if (ResidualU.size() != total_size )
            ResidualU.resize (total_size, false);

        // Block 1: residual of the "other" (non-contact) DoFs, copied verbatim
        IndexPartition<std::size_t>(other_dof_size).for_each([&](std::size_t i) {
            ResidualU[i] = rTotalResidual[mOtherIndices[i]];
        });

        // The corresponding residual for the active slave DoF's
        VectorType aux_res_active_slave(slave_active_size);
        IndexPartition<std::size_t>(slave_active_size).for_each([&](std::size_t i) {
            aux_res_active_slave[i] = rTotalResidual[mSlaveActiveIndices[i]];
        });

        // Block 2: master DoFs, condensed with the P operator when active slaves exist
        if (slave_active_size > 0) {
            // We compute the complementary residual for the master dofs
            VectorType aux_complement_master_residual(master_size);
            TSparseSpaceType::Mult(mPOperator, aux_res_active_slave, aux_complement_master_residual);

            IndexPartition<std::size_t>(master_size).for_each([&](std::size_t i) {
                ResidualU[other_dof_size + i] = rTotalResidual[mMasterIndices[i]] - aux_complement_master_residual[i];
            });
        } else {
            IndexPartition<std::size_t>(master_size).for_each([&](std::size_t i) {
                ResidualU[other_dof_size + i] = rTotalResidual[mMasterIndices[i]];
            });
        }

        // Block 3: inactive slave DoFs, copied verbatim
        IndexPartition<std::size_t>(slave_inactive_size).for_each([&](std::size_t i) {
            ResidualU[other_dof_size + master_size + i] = rTotalResidual[mSlaveInactiveIndices[i]];
        });

        // Block 4: built from the active LM residual, condensed with the C operator when active slaves exist
        if (slave_active_size > 0) {
            // We compute the complementary residual for the master dofs
            VectorType aux_complement_active_lm_residual(lm_active_size);
            TSparseSpaceType::Mult(mCOperator, aux_res_active_slave, aux_complement_active_lm_residual);

            IndexPartition<std::size_t>(lm_active_size).for_each([&](std::size_t i) {
                ResidualU[other_dof_size + master_size + slave_inactive_size + i] = rTotalResidual[mLMActiveIndices[i]] - aux_complement_active_lm_residual[i];
            });
        } else {
            IndexPartition<std::size_t>(lm_active_size).for_each([&](std::size_t i) {
                ResidualU[other_dof_size + master_size + slave_inactive_size + i] = rTotalResidual[mLMActiveIndices[i]];
            });
        }
    }
1714 
    /**
     * @brief Gathers the active Lagrange multiplier part of the residual
     * @details Computes rResidualLMA = r_SA - KSAN*u_N - KSAM*u_M - KSASI*u_SI - KSASA*u_SA, where
     * r_SA is the residual at the active slave DoFs and u_* are the displacement components already
     * stored in mDisp. Does nothing when there are no active slave DoFs
     * @param rTotalResidual The total residual of the system
     * @param rResidualLMA The output active LM residual
     */
    inline void GetLMAPart(
        const VectorType& rTotalResidual,
        VectorType& rResidualLMA
        )
    {
        // Auxiliary sizes
        const SizeType other_dof_size = mOtherIndices.size();
        const SizeType master_size = mMasterIndices.size();
        const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
        const SizeType slave_active_size = mSlaveActiveIndices.size();

        // We add the other
        if (slave_active_size > 0) {

            // We get the displacement residual of the active slave nodes
            if (rResidualLMA.size() != slave_active_size )
                rResidualLMA.resize (slave_active_size, false);

            IndexPartition<std::size_t>(rResidualLMA.size()).for_each([&](std::size_t i) {
                rResidualLMA[i] = rTotalResidual[mSlaveActiveIndices[i]];
            });

            // From the computed displacements we get the components of the displacements for each block
            // (mDisp is laid out as [other | master | inactive slave | active slave])
            VectorType disp_N(other_dof_size);
            VectorType disp_M(master_size);
            VectorType disp_SI(slave_inactive_size);
            VectorType disp_SA(slave_active_size);

            IndexPartition<std::size_t>(other_dof_size).for_each([&](std::size_t i) {
                disp_N[i] = mDisp[i];
            });

            IndexPartition<std::size_t>(master_size).for_each([&](std::size_t i) {
                disp_M[i] = mDisp[other_dof_size + i];
            });

            IndexPartition<std::size_t>(slave_inactive_size).for_each([&](std::size_t i) {
                disp_SI[i] = mDisp[other_dof_size + master_size + i];
            });

            IndexPartition<std::size_t>(slave_active_size).for_each([&](std::size_t i) {
                disp_SA[i] = mDisp[other_dof_size + master_size + slave_inactive_size + i];
            });

            // Subtract every coupling contribution; aux_mult is a scratch buffer overwritten by each Mult
            VectorType aux_mult(slave_active_size);
            TSparseSpaceType::Mult(mKSAN, disp_N, aux_mult);
            TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
            TSparseSpaceType::Mult(mKSAM, disp_M, aux_mult);
            TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
            if (slave_inactive_size > 0) {
                TSparseSpaceType::Mult(mKSASI, disp_SI, aux_mult);
                TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
            }
            TSparseSpaceType::Mult(mKSASA, disp_SA, aux_mult);
            TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
        }
    }
1777 
1783  inline void GetLMIPart (
1784  const VectorType& rTotalResidual,
1785  VectorType& rResidualLMI
1786  )
1787  {
1788  // Auxiliary size
1789  const SizeType lm_inactive_size = mLMInactiveIndices.size();
1790 
1791  // We get the displacement residual of the active slave nodes
1792  if (rResidualLMI.size() != lm_inactive_size )
1793  rResidualLMI.resize (lm_inactive_size, false);
1794 
1795  IndexPartition<std::size_t>(lm_inactive_size).for_each([&](std::size_t i) {
1796  rResidualLMI[i] = rTotalResidual[mLMInactiveIndices[i]];
1797  });
1798  }
1799 
1805  inline void SetUPart (
1806  VectorType& rTotalResidual,
1807  const VectorType& ResidualU
1808  )
1809  {
1810  const SizeType other_indexes_size = mOtherIndices.size();
1811  const SizeType master_indexes_size = mMasterIndices.size();
1812  const SizeType slave_inactive_indexes_size = mSlaveInactiveIndices.size();
1813  const SizeType slave_active_indexes_size = mSlaveActiveIndices.size();
1814 
1815  IndexPartition<std::size_t>(other_indexes_size).for_each([&](std::size_t i) {
1816  rTotalResidual[mOtherIndices[i]] = ResidualU[i];
1817  });
1818  IndexPartition<std::size_t>(master_indexes_size).for_each([&](std::size_t i) {
1819  rTotalResidual[mMasterIndices[i]] = ResidualU[other_indexes_size + i];
1820  });
1821  IndexPartition<std::size_t>(slave_inactive_indexes_size).for_each([&](std::size_t i) {
1822  rTotalResidual[mSlaveInactiveIndices[i]] = ResidualU[other_indexes_size + master_indexes_size + i];
1823  });
1824  IndexPartition<std::size_t>(slave_active_indexes_size).for_each([&](std::size_t i) {
1825  rTotalResidual[mSlaveActiveIndices[i]] = ResidualU[other_indexes_size + master_indexes_size + slave_inactive_indexes_size + i];
1826  });
1827  }
1828 
1834  inline void SetLMAPart (
1835  VectorType& rTotalResidual,
1836  const VectorType& ResidualLMA
1837  )
1838  {
1839  IndexPartition<std::size_t>(ResidualLMA.size()).for_each([&](std::size_t i) {
1840  rTotalResidual[mLMActiveIndices[i]] = ResidualLMA[i];
1841  });
1842  }
1843 
1849  inline void SetLMIPart (
1850  VectorType& rTotalResidual,
1851  const VectorType& ResidualLMI
1852  )
1853  {
1854  IndexPartition<std::size_t>(ResidualLMI.size()).for_each([&](std::size_t i) {
1855  rTotalResidual[mLMInactiveIndices[i]] = ResidualLMI[i];
1856  });
1857  }
1858 
1863  void EnsureStructuralSymmetryMatrix (SparseMatrixType& rA)
1864  {
1865  // We compute the transposed matrix
1866  const SizeType size_system_1 = rA.size1();
1867  const SizeType size_system_2 = rA.size2();
1868  SparseMatrixType transpose(size_system_2, size_system_1);
1869 
1870  SparseMatrixMultiplicationUtility::TransposeMatrix<SparseMatrixType, SparseMatrixType>(transpose, rA, 0.0);
1871 
1872  // Finally we sum the auxiliary matrices
1873  SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(rA, transpose, 1.0);
1874  }
1875 
1880  double CheckMatrix (const SparseMatrixType& rA)
1881  {
1882  // Get access to A data
1883  const std::size_t* index1 = rA.index1_data().begin();
1884  const std::size_t* index2 = rA.index2_data().begin();
1885  const double* values = rA.value_data().begin();
1886  double norm = 0.0;
1887  for (std::size_t i=0; i<rA.size1(); ++i) {
1888  std::size_t row_begin = index1[i];
1889  std::size_t row_end = index1[i+1];
1890  if (row_end - row_begin == 0)
1891  KRATOS_WARNING("Checking sparse matrix") << "Line " << i << " has no elements" << std::endl;
1892 
1893  for (std::size_t j=row_begin; j<row_end; j++) {
1894  KRATOS_ERROR_IF( index2[j] > rA.size2() ) << "Array above size of A" << std::endl;
1895  norm += values[j]*values[j];
1896  }
1897  }
1898 
1899  return std::sqrt (norm);
1900  }
1901 
    /**
     * @brief Assembles the final sparse matrix from raw CSR arrays and releases them
     * @details The entries of each row are first sorted by column index, then the
     * compressed matrix is built from the three arrays.
     * NOTE(review): this method takes ownership of Ptr, AuxIndex2 and AuxVal (expected
     * to be allocated with new[] by the caller) and frees them with delete[] — the
     * caller's pointers are dangling after this call
     * @param AuxK The matrix to be built
     * @param NRows The number of rows of the matrix
     * @param NCols The number of columns of the matrix
     * @param Ptr The CSR row-pointer array (consumed)
     * @param AuxIndex2 The CSR column-index array (consumed)
     * @param AuxVal The CSR values array (consumed)
     */
    void CreateMatrix(
        SparseMatrixType& AuxK,
        const SizeType NRows,
        const SizeType NCols,
        IndexType* Ptr,
        IndexType* AuxIndex2,
        double* AuxVal
        )
    {
        // We reorder the rows
        SparseMatrixMultiplicationUtility::SortRows(Ptr, NRows, NCols, AuxIndex2, AuxVal);

        // Finally we build the final matrix
        SparseMatrixMultiplicationUtility::CreateSolutionMatrix(AuxK, NRows, NCols, Ptr, AuxIndex2, AuxVal);

        // Release memory
        delete[] Ptr;
        delete[] AuxIndex2;
        delete[] AuxVal;
    }
1932 
1940  void ComputeDiagonalByLumping (
1941  const SparseMatrixType& rA,
1942  SparseMatrixType& rdiagA,
1943  const double Tolerance = ZeroTolerance
1944  )
1945  {
1946  // Aux values
1947  const std::size_t size_A = rA.size1();
1948 // VectorType diagA_vector(size_A);
1949 //
1950 // // In case of not pure lumped matrix
1951 // if (rA.nnz() > size_A) {
1952 // // Get access to A data
1953 // const std::size_t* index1 = rA.index1_data().begin();
1954 // const double* values = rA.value_data().begin();
1955 //
1956 // IndexPartition<std::size_t>(size_A).for_each([&](std::size_t i) {
1957 // const std::size_t row_begin = index1[i];
1958 // const std::size_t row_end = index1[i+1];
1959 // double temp = 0.0;
1960 // for (std::size_t j=row_begin; j<row_end; j++)
1961 // temp += values[j]*values[j];
1962 //
1963 // diagA_vector[i] = std::sqrt(temp);
1964 // });
1965 // } else { // Otherwise
1966 // IndexPartition<std::size_t>(size_A).for_each([&](std::size_t i) {
1967 // diagA_vector[i] = rA(i, i);
1968 // });
1969 // }
1970 
1971  IndexType* ptr = new IndexType[size_A + 1];
1972  ptr[0] = 0;
1973  IndexType* aux_index2 = new IndexType[size_A];
1974  double* aux_val = new double[size_A];
1975 
1976  IndexPartition<std::size_t>(size_A).for_each([&](std::size_t i) {
1977  ptr[i+1] = i+1;
1978  aux_index2[i] = i;
1979  const double value = rA(i, i);
1980 // const double value = diagA_vector[i];
1981  if (std::abs(value) > Tolerance)
1982  aux_val[i] = 1.0/value;
1983  else // Auxiliary value
1984  aux_val[i] = 1.0;
1985  });
1986 
1987  SparseMatrixMultiplicationUtility::CreateSolutionMatrix(rdiagA, size_A, size_A, ptr, aux_index2, aux_val);
1988 
1989  delete[] ptr;
1990  delete[] aux_index2;
1991  delete[] aux_val;
1992  }
1993 
1999  static inline bool IsDisplacementDof(const DofType& rDoF)
2000  {
2001  const auto& r_variable = rDoF.GetVariable();
2002  if (r_variable == DISPLACEMENT_X ||
2003  r_variable == DISPLACEMENT_Y ||
2004  r_variable == DISPLACEMENT_Z) {
2005  return true;
2006  }
2007 
2008  return false;
2009  }
2010 
2016  static inline bool IsLMDof(const DofType& rDoF)
2017  {
2018  const auto& r_variable = rDoF.GetVariable();
2019  if (r_variable == VECTOR_LAGRANGE_MULTIPLIER_X ||
2020  r_variable == VECTOR_LAGRANGE_MULTIPLIER_Y ||
2021  r_variable == VECTOR_LAGRANGE_MULTIPLIER_Z) {
2022  return true;
2023  }
2024 
2025  return false;
2026  }
2027 
2033  Parameters GetDefaultParameters()
2034  {
2035  Parameters default_parameters( R"(
2036  {
2037  "solver_type" : "mixed_ulm_linear_solver",
2038  "tolerance" : 1.0e-6,
2039  "max_iteration_number" : 200,
2040  "echo_level" : 0
2041  } )" );
2042 
2043  return default_parameters;
2044  }
2045 
2056 }; // Class MixedULMLinearSolver
2060 
// Here one should use the KRATOS_CREATE_LOCAL_FLAG, but it does not play nice with template parameters
/// Flag (bit position 0): the sub-block matrices have been allocated
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
const Kratos::Flags MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>::BLOCKS_ARE_ALLOCATED(Kratos::Flags::Create(0));
/// Flag (bit position 1): the solver has been initialized
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
const Kratos::Flags MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>::IS_INITIALIZED(Kratos::Flags::Create(1));
2066 
2071 template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
2072 inline std::istream& operator >> (std::istream& IStream,
2074 {
2075  return IStream;
2076 }
2078 template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
2079 inline std::ostream& operator << (std::ostream& rOStream,
2081 {
2082  rThis.PrintInfo (rOStream);
2083  rOStream << std::endl;
2084  rThis.PrintData (rOStream);
2085  return rOStream;
2086 }
2088 } // namespace Kratos.
Definition: flags.h:58
void Set(const Flags ThisFlag)
Definition: flags.cpp:33
bool Is(Flags const &rOther) const
Definition: flags.h:274
static Flags Create(IndexType ThisPosition, bool Value=true)
Definition: flags.h:138
bool IsNot(Flags const &rOther) const
Definition: flags.h:291
This class is useful for index iteration over containers.
Definition: parallel_utilities.h:451
void for_each(TUnaryFunction &&f)
Definition: parallel_utilities.h:514
Definition: amatrix_interface.h:41
void resize(std::size_t NewSize1, std::size_t NewSize2, bool preserve=0)
Definition: amatrix_interface.h:224
Base class for all the iterative solvers in Kratos.
Definition: iterative_solver.h:68
Base class for all the linear solvers in Kratos.
Definition: linear_solver.h:65
This solver is designed for the solution of mixed U-LM problems (this solver in particular is optimiz...
Definition: mixedulm_linear_solver.h:67
MixedULMLinearSolver(LinearSolverPointerType pSolverDispBlock, Parameters ThisParameters=Parameters(R"({})"))
Second constructor, it uses a Kratos parameters as input instead of direct input.
Definition: mixedulm_linear_solver.h:173
KRATOS_DEFINE_LOCAL_FLAG(IS_INITIALIZED)
The flag that indicates if the solution is initialized.
std::size_t SizeType
The size type.
Definition: mixedulm_linear_solver.h:131
~MixedULMLinearSolver() override
Destructor.
Definition: mixedulm_linear_solver.h:232
typename ModelPart::ConditionsContainerType ConditionsArrayType
An array of conditions.
Definition: mixedulm_linear_solver.h:125
static constexpr double ZeroTolerance
The zero tolerance considered.
Definition: mixedulm_linear_solver.h:143
typename TSparseSpaceType::MatrixType SparseMatrixType
The sparse matrix type.
Definition: mixedulm_linear_solver.h:107
typename ModelPart::DofsArrayType DofsArrayType
The array containing the dofs.
Definition: mixedulm_linear_solver.h:122
MixedULMLinearSolver(LinearSolverPointerType pSolverDispBlock, const double MaxTolerance, const std::size_t MaxIterationNumber)
Default constructor.
Definition: mixedulm_linear_solver.h:155
void PrintInfo(std::ostream &rOStream) const override
Print information about this object.
Definition: mixedulm_linear_solver.h:798
typename ModelPart::NodesContainerType NodesArrayType
An array of nodes.
Definition: mixedulm_linear_solver.h:128
void PerformSolutionStep(SparseMatrixType &rA, VectorType &rX, VectorType &rB) override
This function actually performs the solution work, eventually taking advantage of what was done befor...
Definition: mixedulm_linear_solver.h:307
typename TSparseSpaceType::VectorType VectorType
The vector type.
Definition: mixedulm_linear_solver.h:110
typename TDenseSpaceType::VectorType DenseVectorType
The dense vector type.
Definition: mixedulm_linear_solver.h:116
void Clear() override
This function is designed to clean up all internal data in the solver.
Definition: mixedulm_linear_solver.h:377
BlockType
An enumeration of the different types of blocks used in the mixed Uzawa-LM linear solver.
Definition: mixedulm_linear_solver.h:76
@ OTHER
A block of another type.
@ LM_ACTIVE
An active Lagrange multiplier block.
@ LM_INACTIVE
An inactive Lagrange multiplier block.
@ SLAVE_INACTIVE
An inactive slave block.
@ SLAVE_ACTIVE
An active slave block.
const DofsArrayType & GetDisplacementDofs() const
This method retrieves the displacement DoFs of the system ordered according to the resolution order.
Definition: mixedulm_linear_solver.h:778
bool Solve(SparseMatrixType &rA, VectorType &rX, VectorType &rB) override
Normal solve method.
Definition: mixedulm_linear_solver.h:420
bool AdditionalPhysicalDataIsNeeded() override
Some solvers may require a minimum degree of knowledge of the structure of the matrix....
Definition: mixedulm_linear_solver.h:492
KRATOS_CLASS_POINTER_DEFINITION(MixedULMLinearSolver)
Pointer definition of MixedULMLinearSolver.
void ProvideAdditionalData(SparseMatrixType &rA, VectorType &rX, VectorType &rB, DofsArrayType &rDofSet, ModelPart &rModelPart) override
Some solvers may require a minimum degree of knowledge of the structure of the matrix.
Definition: mixedulm_linear_solver.h:506
DenseVector< BlockType > BlockTypeVectorType
A vector of types.
Definition: mixedulm_linear_solver.h:140
MixedULMLinearSolver & operator=(const MixedULMLinearSolver &Other)
Assignment operator.
Definition: mixedulm_linear_solver.h:239
void PrintData(std::ostream &rOStream) const override
Print object's data.
Definition: mixedulm_linear_solver.h:804
KRATOS_DEFINE_LOCAL_FLAG(BLOCKS_ARE_ALLOCATED)
The flag that indicates if the blocks are allocated.
void Initialize(SparseMatrixType &rA, VectorType &rX, VectorType &rB) override
This function is designed to be called as few times as possible. It creates the data structures that ...
Definition: mixedulm_linear_solver.h:257
void FillBlockMatrices(const bool NeedAllocation, SparseMatrixType &rA, VectorType &rX, VectorType &rB)
This function generates the subblocks of matrix A.
Definition: mixedulm_linear_solver.h:848
DofsArrayType & GetDisplacementDofs()
This method retrieves the displacement DoFs of the system ordered according to the resolution order.
Definition: mixedulm_linear_solver.h:769
typename TDenseSpaceType::MatrixType DenseMatrixType
The dense matrix type.
Definition: mixedulm_linear_solver.h:113
void InitializeSolutionStep(SparseMatrixType &rA, VectorType &rX, VectorType &rB) override
This function is designed to be called every time the coefficients change in the system that is,...
Definition: mixedulm_linear_solver.h:279
bool Solve(SparseMatrixType &rA, DenseMatrixType &rX, DenseMatrixType &rB) override
Multi solve method for solving a set of linear systems with same coefficient matrix.
Definition: mixedulm_linear_solver.h:476
std::size_t IndexType
The index type.
Definition: mixedulm_linear_solver.h:134
MixedULMLinearSolver(const MixedULMLinearSolver &rOther)
Copy constructor.
Definition: mixedulm_linear_solver.h:198
std::string Info() const override
Turn back information as a string.
Definition: mixedulm_linear_solver.h:792
typename LinearSolverType::Pointer LinearSolverPointerType
The pointer to a linear solver.
Definition: mixedulm_linear_solver.h:104
typename ModelPart::DofType DofType
The definition of the dof type.
Definition: mixedulm_linear_solver.h:119
void FinalizeSolutionStep(SparseMatrixType &rA, VectorType &rX, VectorType &rB) override
This function is designed to be called at the end of the solve step.
Definition: mixedulm_linear_solver.h:364
This class aims to manage meshes for multi-physics simulations.
Definition: model_part.h:77
MeshType::ConditionsContainerType ConditionsContainerType
Condintions container. A vector set of Conditions with their Id's as key.
Definition: model_part.h:183
Dof< double > DofType
Definition: model_part.h:109
PointerVectorSet< DofType > DofsArrayType
Definition: model_part.h:115
MeshType::NodesContainerType NodesContainerType
Nodes container. Which is a vector set of nodes with their Id's as key.
Definition: model_part.h:128
NodeType & GetNode(IndexType NodeId, IndexType ThisIndex=0)
Definition: model_part.h:433
This class defines the node.
Definition: node.h:65
This class provides to Kratos a data structure for I/O based on the standard of JSON.
Definition: kratos_parameters.h:59
static void CreateSolutionMatrix(CMatrix &C, const TSize NRows, const TSize NCols, const Ptr *CPtr, const IndexType *AuxIndex2C, const ValueType *AuxValC)
This method is designed to create the final solution sparse matrix from the auxiliar values.
Definition: sparse_matrix_multiplication_utility.h:610
static void MatrixMultiplication(const AMatrix &rA, const BMatrix &rB, CMatrix &rC)
Matrix-matrix product C = A·B @detail This method uses a template for each matrix.
Definition: sparse_matrix_multiplication_utility.h:117
static void SortRows(const TIndexType *CPtr, const TSize NRows, const TSize NCols, Col *Columns, ValueType *Values)
This method is designed to reorder the rows by columns.
Definition: sparse_matrix_multiplication_utility.h:654
BOOST_UBLAS_INLINE void clear()
Definition: array_1d.h:325
BOOST_UBLAS_INLINE void resize(size_type array_size, bool preserve=true)
Definition: array_1d.h:242
#define KRATOS_CATCH(MoreInfo)
Definition: define.h:110
#define KRATOS_TRY
Definition: define.h:109
std::size_t IndexType
The definition of the index type.
Definition: key_hash.h:35
#define KRATOS_ERROR_IF_NOT(conditional)
Definition: exception.h:163
#define KRATOS_ERROR_IF(conditional)
Definition: exception.h:162
#define KRATOS_DEBUG_ERROR_IF(conditional)
Definition: exception.h:171
#define KRATOS_DETAIL(label)
Definition: logger.h:280
#define KRATOS_INFO(label)
Definition: logger.h:250
#define KRATOS_WARNING(label)
Definition: logger.h:265
ncols
Definition: GenerateCN.py:97
Vector VectorType
Definition: geometrical_transformation_utilities.h:56
Matrix MatrixType
Definition: geometrical_transformation_utilities.h:55
void Mult(TSpaceType &dummy, typename TSpaceType::MatrixType &rA, typename TSpaceType::VectorType &rX, typename TSpaceType::VectorType &rY)
Definition: add_strategies_to_python.cpp:98
void UnaliasedAdd(TSpaceType &dummy, typename TSpaceType::VectorType &x, const double A, const typename TSpaceType::VectorType &rY)
Definition: add_strategies_to_python.cpp:170
REF: G. R. Cowper, GAUSSIAN QUADRATURE FORMULAS FOR TRIANGLES.
Definition: mesh_condition.cpp:21
bool WriteMatrixMarketVector(const char *FileName, VectorType &V)
Definition: matrix_market_interface.h:539
bool WriteMatrixMarketMatrix(const char *FileName, CompressedMatrixType &M, bool Symmetric)
Definition: matrix_market_interface.h:308
std::istream & operator>>(std::istream &rIStream, LinearMasterSlaveConstraint &rThis)
input stream function
std::vector< IndexType > IndexVectorType
Index vector.
Definition: mmg_io.h:63
std::ostream & operator<<(std::ostream &rOStream, const LinearMasterSlaveConstraint &rThis)
output stream function
Definition: linear_master_slave_constraint.h:432
list values
Definition: bombardelli_test.py:42
int j
Definition: quadrature.py:648
int node_id
Definition: read_stl.py:12
int counter
Definition: script_THERMAL_CORRECT.py:218
integer i
Definition: TensorModule.f:17
Definition: mesh_converter.cpp:38