KratosMultiphysics
KRATOS Multiphysics (Kratos) is a framework for building parallel, multi-disciplinary simulation software, aiming at modularity, extensibility, and high performance. Kratos is written in C++ and provides an extensive Python interface.
edge_data.h
1 //    |  /           |
2 //    ' /   __| _` | __|  _ \   __|
3 //    . \  |   (   | |   (   |\__ \
4 //    _|\_\_|  \__,_|\__|\___/ ____/
5 //                   Multi-Physics
6 //
7 // License: BSD License
8 // Kratos default license: kratos/license.txt
9 //
10 // Main authors: Antonia Larese
11 //
12 
13 #if !defined(KRATOS_EDGE_DATA_H_INCLUDED)
14 #define KRATOS_EDGE_DATA_H_INCLUDED
15 
16 // we suggest defining the following macro
17 #define USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
18 
19 // we suggest defining the following macro
20 #define USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
21 
22 // System includes
23 #include <string>
24 #include <iostream>
25 #include <algorithm>
26 
27 // External includes
28 
29 // Project includes
30 #include "includes/define.h"
31 #include "includes/model_part.h"
33 #include "includes/node.h"
34 #include "utilities/geometry_utilities.h"
36 #include "utilities/openmp_utils.h"
37 
38 namespace Kratos
39 {
40  template <unsigned int TDim>
41  class EdgesStructureType
42  {
43  public:
44  // component ij of the consistent mass matrix (M = Ni * Nj * dOmega)
45  double Mass = 0.0;
46  // components kl of the laplacian matrix of edge ij (L = dNi/dxk * dNj/dxl * dOmega)
47  // double Laplacian;
48  boost::numeric::ublas::bounded_matrix<double, TDim, TDim> LaplacianIJ;
49  // components k of the gradient matrix of edge ij (G = Ni * dNj/dxl * dOmega)
50  array_1d<double, TDim> Ni_DNj;
51  // components k of the transposed gradient matrix of edge ij (GT = dNi/dxl * Nj * dOmega)
52  // TRANSPOSED GRADIENT
53  array_1d<double, TDim> DNi_Nj;
54 
55  //*************************************************************************************
56  //*************************************************************************************
57  // gradient integrated by parts
58  // RHSi += DNi_Nj pj + Aboundary * pext ==> RHS += Ni_DNj p_j - DNi_Nj p_i
59  // ATTENTION: + Aboundary * pext is NOT included!! it should be included "manually"
60 
61  inline void Add_Gp(array_1d<double, TDim> &destination, const double &p_i, const double &p_j)
62  {
63  for (unsigned int comp = 0; comp < TDim; comp++)
64  destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
65  }
66 
67  inline void Sub_Gp(array_1d<double, TDim> &destination, const double &p_i, const double &p_j)
68  {
69  for (unsigned int comp = 0; comp < TDim; comp++)
70  destination[comp] += Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
71  }
72 
73  //*************************************************************************************
74  //*************************************************************************************
75  // gradient
76  // RHSi += Ni_DNj[k]*v[k]
77 
78  inline void Add_D_v(double &destination,
79  const array_1d<double, TDim> &v_i,
80  const array_1d<double, TDim> &v_j)
81  {
82  for (unsigned int comp = 0; comp < TDim; comp++)
83  destination += Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
84  }
85 
86  inline void Sub_D_v(double &destination,
87  const array_1d<double, TDim> &v_i,
88  const array_1d<double, TDim> &v_j)
89  {
90  for (unsigned int comp = 0; comp < TDim; comp++)
91  destination -= Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
92  }
93 
94  //*************************************************************************************
95  //*************************************************************************************
96  // gradient
97  // RHSi += Ni_DNj pj
98 
99  inline void Add_grad_p(array_1d<double, TDim> &destination, const double &p_i, const double &p_j)
100  {
101  for (unsigned int comp = 0; comp < TDim; comp++)
102  destination[comp] += Ni_DNj[comp] * (p_j - p_i);
103  }
104 
105  inline void Sub_grad_p(array_1d<double, TDim> &destination, const double &p_i, const double &p_j)
106  {
107  for (unsigned int comp = 0; comp < TDim; comp++)
108  destination[comp] -= Ni_DNj[comp] * (p_j - p_i);
109  }
110 
111  //*************************************************************************************
112  //*************************************************************************************
113  // gradient
114  // RHSi += DNi_Nj[k]*v[k]
115 
116  inline void Add_div_v(double &destination,
117  const array_1d<double, TDim> &v_i,
118  const array_1d<double, TDim> &v_j)
119  {
120  for (unsigned int comp = 0; comp < TDim; comp++)
121  destination -= Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
122  }
123 
124  inline void Sub_div_v(double &destination,
125  const array_1d<double, TDim> &v_i,
126  const array_1d<double, TDim> &v_j)
127  {
128  for (unsigned int comp = 0; comp < TDim; comp++)
129  destination += Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
130  }
131 
132  //*************************************************************************************
133  //*************************************************************************************
134  // gets the trace of the laplacian matrix
135 
136  inline void CalculateScalarLaplacian(double &l_ij)
137  {
138  l_ij = LaplacianIJ(0, 0);
139  for (unsigned int comp = 1; comp < TDim; comp++)
140  l_ij += LaplacianIJ(comp, comp);
141  }
142 
143  inline void Add_ConvectiveContribution(array_1d<double, TDim> &destination,
144  const array_1d<double, TDim> &a_i, const array_1d<double, TDim> &U_i,
145  const array_1d<double, TDim> &a_j, const array_1d<double, TDim> &U_j)
146  {
147 
148 #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
149  double temp = a_i[0] * Ni_DNj[0];
150  for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
151  temp += a_i[k_comp] * Ni_DNj[k_comp];
152  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
153  destination[l_comp] += temp * (U_j[l_comp] - U_i[l_comp]);
154 #else
155  double aux_i = a_i[0] * Ni_DNj[0];
156  double aux_j = a_j[0] * Ni_DNj[0];
157  for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
158  {
159  aux_i += a_i[k_comp] * Ni_DNj[k_comp];
160  aux_j += a_j[k_comp] * Ni_DNj[k_comp];
161  }
162  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
163  destination[l_comp] += aux_j * U_j[l_comp] - aux_i * U_i[l_comp];
164 #endif
165  }
166 
167  inline void Sub_ConvectiveContribution(array_1d<double, TDim> &destination,
168  const array_1d<double, TDim> &a_i, const array_1d<double, TDim> &U_i,
169  const array_1d<double, TDim> &a_j, const array_1d<double, TDim> &U_j)
170  {
171 
172 #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
173  double temp = a_i[0] * Ni_DNj[0];
174  for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
175  temp += a_i[k_comp] * Ni_DNj[k_comp];
176  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
177  destination[l_comp] -= temp * (U_j[l_comp] - U_i[l_comp]);
178 #else
179  double aux_i = a_i[0] * Ni_DNj[0];
180  double aux_j = a_j[0] * Ni_DNj[0];
181  for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
182  {
183  aux_i += a_i[k_comp] * Ni_DNj[k_comp];
184  aux_j += a_j[k_comp] * Ni_DNj[k_comp];
185  }
186  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
187  destination[l_comp] -= aux_j * U_j[l_comp] - aux_i * U_i[l_comp];
188 #endif
189  }
190 
191  inline void Sub_ConvectiveContribution(double &destination,
192  const array_1d<double, TDim> &a_i, const double &phi_i,
193  const array_1d<double, TDim> &a_j, const double &phi_j)
194  {
195 
196 #ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
197  double temp = a_i[0] * Ni_DNj[0];
198  for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
199  temp += a_i[k_comp] * Ni_DNj[k_comp];
200 
201  destination -= temp * (phi_j - phi_i);
202 #else
203  double aux_i = a_i[0] * Ni_DNj[0];
204  double aux_j = a_j[0] * Ni_DNj[0];
205  for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
206  {
207  aux_i += a_i[k_comp] * Ni_DNj[k_comp];
208  aux_j += a_j[k_comp] * Ni_DNj[k_comp];
209  }
210  destination -= aux_j * phi_j - aux_i * phi_i;
211 #endif
212  }
213 
214  inline void Add_ConvectiveContribution(double &destination,
215  const array_1d<double, TDim> &a_i, const double &phi_i,
216  const array_1d<double, TDim> &a_j, const double &phi_j)
217  {
218 
219 #ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
220  double temp = a_i[0] * Ni_DNj[0];
221  for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
222  temp += a_i[k_comp] * Ni_DNj[k_comp];
223 
224  destination += temp * (phi_j - phi_i);
225 #else
226  double aux_i = a_i[0] * Ni_DNj[0];
227  double aux_j = a_j[0] * Ni_DNj[0];
228  for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
229  {
230  aux_i += a_i[k_comp] * Ni_DNj[k_comp];
231  aux_j += a_j[k_comp] * Ni_DNj[k_comp];
232  }
233  destination += aux_j * phi_j - aux_i * phi_i;
234 #endif
235  }
236 
237  //*************************************************************************************
238  //*************************************************************************************
239 
240  inline void CalculateConvectionStabilization_LOW(array_1d<double, TDim> &stab_low,
241  const array_1d<double, TDim> &a_i, const array_1d<double, TDim> &U_i,
242  const array_1d<double, TDim> &a_j, const array_1d<double, TDim> &U_j)
243  {
244  double conv_stab = 0.0;
245  for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
246  for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
247  conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
248  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
249  stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]);
250  }
251 
252  inline void CalculateConvectionStabilization_LOW(double &stab_low,
253  const array_1d<double, TDim> &a_i, const double &phi_i,
254  const array_1d<double, TDim> &a_j, const double &phi_j)
255  {
256  double conv_stab = 0.0;
257  for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
258  for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
259  conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
260  stab_low = conv_stab * (phi_j - phi_i);
261  }
262  //*************************************************************************************
263  //*************************************************************************************
264 
265  inline void CalculateConvectionStabilization_HIGH(array_1d<double, TDim> &stab_high,
266  const array_1d<double, TDim> &a_i, const array_1d<double, TDim> &pi_i,
267  const array_1d<double, TDim> &a_j, const array_1d<double, TDim> &pi_j)
268  {
269 
270 #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
271  double temp = 0.0;
272  for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
273  temp += a_i[k_comp] * Ni_DNj[k_comp];
274  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
275  stab_high[l_comp] = -temp * (pi_j[l_comp] - pi_i[l_comp]); // check if the minus sign is correct
276 #else
277  double aux_i = a_i[0] * Ni_DNj[0];
278  double aux_j = a_j[0] * Ni_DNj[0];
279  for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
280  {
281  aux_i += a_i[k_comp] * Ni_DNj[k_comp];
282  aux_j += a_j[k_comp] * Ni_DNj[k_comp];
283  }
284  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
285  stab_high[l_comp] = -(aux_j * pi_j[l_comp] - aux_i * pi_i[l_comp]);
286 #endif
287  }
288 
289  inline void CalculateConvectionStabilization_HIGH(double &stab_high,
290  const array_1d<double, TDim> &a_i, const double &pi_i,
291  const array_1d<double, TDim> &a_j, const double &pi_j)
292  {
293 
294 #ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
295  double temp = 0.0;
296  for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
297  temp += a_i[k_comp] * Ni_DNj[k_comp];
298 
299  stab_high = -temp * (pi_j - pi_i); // check if the minus sign is correct
300 #else
301  double aux_i = a_i[0] * Ni_DNj[0];
302  double aux_j = a_j[0] * Ni_DNj[0];
303  for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
304  {
305  aux_i += a_i[k_comp] * Ni_DNj[k_comp];
306  aux_j += a_j[k_comp] * Ni_DNj[k_comp];
307  }
308 
309  stab_high = -(aux_j * pi_j - aux_i * pi_i);
310 #endif
311  }
312  //*************************************************************************************
313  //*************************************************************************************
314 
315  inline void Add_StabContribution(array_1d<double, TDim> &destination,
316  const double tau, const double beta,
317  const array_1d<double, TDim> &stab_low, const array_1d<double, TDim> &stab_high)
318  {
319  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
320  destination[l_comp] += tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
321  }
322 
323  inline void Add_StabContribution(double &destination,
324  const double tau, const double beta,
325  const double &stab_low, const double &stab_high)
326  {
327  destination += tau * (stab_low - beta * stab_high);
328  }
329 
330  inline void Sub_StabContribution(array_1d<double, TDim> &destination,
331  const double tau, const double beta,
332  const array_1d<double, TDim> &stab_low, const array_1d<double, TDim> &stab_high)
333  {
334  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
335  destination[l_comp] -= tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
336  }
337 
338  inline void Sub_StabContribution(double &destination,
339  const double tau, const double beta,
340  const double &stab_low, const double &stab_high)
341  {
342  destination -= tau * (stab_low - beta * stab_high);
343  }
344 
345  //*************************************************************************************
346  //*************************************************************************************
347 
348  inline void Add_ViscousContribution(array_1d<double, TDim> &destination,
349  const array_1d<double, TDim> &U_i, const double &nu_i,
350  const array_1d<double, TDim> &U_j, const double &nu_j)
351  {
352  // calculate scalar laplacian
353  double L = 0.0;
354  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
355  L += LaplacianIJ(l_comp, l_comp);
356 
357  // double nu_avg = 0.5*(nu_i+nu_j);
358  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
359  destination[l_comp] += nu_i * L * (U_j[l_comp] - U_i[l_comp]);
360  }
361 
362  inline void Sub_ViscousContribution(array_1d<double, TDim> &destination,
363  const array_1d<double, TDim> &U_i, const double &nu_i,
364  const array_1d<double, TDim> &U_j, const double &nu_j)
365  {
366  // calculate scalar laplacian
367  double L = 0.0;
368  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
369  L += LaplacianIJ(l_comp, l_comp);
370 
371  // double nu_avg = 0.5*(nu_i+nu_j);
372  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
373  destination[l_comp] -= nu_i * L * (U_j[l_comp] - U_i[l_comp]);
374  }
375  };
376 
377  // class definition of matrices using CSR format
378 
379  template <unsigned int TDim, class TSparseSpace>
380  class MatrixContainer
381  {
382  public:
383  // name for the self defined structure
384  typedef EdgesStructureType<TDim> CSR_Tuple;
385  typedef vector<CSR_Tuple> EdgesVectorType;
386  // name for row start and column index vectors
387  typedef vector<unsigned int> IndicesVectorType;
388  // names for separately stored node based values
389  typedef vector<double> ValuesVectorType;
390  typedef vector<array_1d<double, TDim>> CalcVectorType;
391 
392  // constructor and destructor
393 
394  MatrixContainer(){};
395 
396  ~MatrixContainer(){};
397 
398  // functions to return private values
399 
400  inline unsigned int GetNumberEdges()
401  {
402  return mNumberEdges;
403  }
404 
405  inline EdgesVectorType &GetEdgeValues()
406  {
407  return mNonzeroEdgeValues;
408  }
409 
410  inline IndicesVectorType &GetColumnIndex()
411  {
412  return mColumnIndex;
413  }
414 
415  inline IndicesVectorType &GetRowStartIndex()
416  {
417  return mRowStartIndex;
418  }
419 
420  inline ValuesVectorType &GetLumpedMass()
421  {
422  return mLumpedMassMatrix;
423  }
424 
425  inline ValuesVectorType &GetInvertedMass()
426  {
427  return mInvertedMassMatrix;
428  }
429 
430  inline CalcVectorType &GetDiagGradient()
431  {
432  return mDiagGradientMatrix;
433  }
434 
435  inline ValuesVectorType &GetHmin()
436  {
437  return mHmin;
438  }
439 
440  //********************************************************
441  // function to size and initialize the vector of CSR tuples
442 
443  void ConstructCSRVector(ModelPart &model_part)
444  {
445  KRATOS_TRY
446 
447  // SIZE OF CSR VECTOR
448 
449  // defining the number of nodes and edges
450  int n_nodes = model_part.Nodes().size();
451  // remark: no colouring algorithm is used here (symmetry is neglected)
452  // i.e. edge ij is considered distinct from edge ji
453  mNumberEdges = 0;
454  // counter to assign and get global nodal index
455  int i_node = 0;
456 
457  // counting the edges connecting the nodes
458  for (typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin(); node_it != model_part.NodesEnd(); node_it++)
459  {
460  // counting neighbours of each node
461  mNumberEdges += (node_it->GetValue(NEIGHBOUR_NODES)).size();
462  // DIAGONAL TERMS
463  // mNumberEdges++;
464 
465  // assigning global index to each node
466  node_it->FastGetSolutionStepValue(AUX_INDEX) = static_cast<double>(i_node++);
467  }
468  // error message in case number of nodes does not coincide with number of indices
469  if (i_node != n_nodes)
470  KRATOS_WATCH("ERROR - Highest nodal index doesn't coincide with number of nodes!");
471 
472  // allocating memory for block of CSR data - setting to zero for first-touch OpenMP allocation
473  mNonzeroEdgeValues.resize(mNumberEdges);
474  SetToZero(mNonzeroEdgeValues);
475  mColumnIndex.resize(mNumberEdges);
476  SetToZero(mColumnIndex);
477  mRowStartIndex.resize(n_nodes + 1);
478  SetToZero(mRowStartIndex);
479  mLumpedMassMatrix.resize(n_nodes);
480  SetToZero(mLumpedMassMatrix);
481  mInvertedMassMatrix.resize(n_nodes);
482  SetToZero(mInvertedMassMatrix);
483  mDiagGradientMatrix.resize(n_nodes);
484  SetToZero(mDiagGradientMatrix);
485  mHmin.resize(n_nodes);
486  SetToZero(mHmin);
487 
488  // INITIALIZING OF THE CSR VECTOR
489 
490  // temporary variable as the row start index of a node depends on the number of neighbours of the previous one
491  unsigned int row_start_temp = 0;
492 
493  int number_of_threads = ParallelUtilities::GetNumThreads();
494  std::vector<int> row_partition(number_of_threads);
495  OpenMPUtils::DivideInPartitions(model_part.Nodes().size(), number_of_threads, row_partition);
496 
497  for (int k = 0; k < number_of_threads; k++)
498  {
499 #pragma omp parallel
500  if (OpenMPUtils::ThisThread() == k)
501  {
502  // main loop over all nodes
503  for (unsigned int aux_i = static_cast<unsigned int>(row_partition[k]); aux_i < static_cast<unsigned int>(row_partition[k + 1]); aux_i++)
504  {
505  typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin() + aux_i;
506 
507  // getting the global index of the node
508  i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
509 
510  // determining its neighbours
511  GlobalPointersVector<Node> &neighb_nodes = node_it->GetValue(NEIGHBOUR_NODES);
512 
513  // number of neighbours of node i determines row start index for the following node
514  unsigned int n_neighbours = neighb_nodes.size();
515 
516  // reserving memory for work array
517  std::vector<unsigned int> work_array;
518  work_array.reserve(n_neighbours);
519  // DIAGONAL TERMS
520 
521  // nested loop over the neighbouring nodes
522  for (GlobalPointersVector<Node>::iterator neighb_it = neighb_nodes.begin(); neighb_it != neighb_nodes.end(); neighb_it++)
523  {
524  // getting global index of the neighbouring node
525  work_array.push_back(static_cast<unsigned int>(neighb_it->FastGetSolutionStepValue(AUX_INDEX)));
526  }
527  // reordering neighbours following their global indices
528  std::sort(work_array.begin(), work_array.end());
529 
530  // setting current row start index
531  mRowStartIndex[i_node] = row_start_temp;
532  // nested loop over the by now ordered neighbours
533  for (unsigned int counter = 0; counter < n_neighbours; counter++)
534  {
535  // getting global index of the neighbouring node
536  unsigned int j_neighbour = work_array[counter];
537  // calculating CSR index
538  unsigned int csr_index = mRowStartIndex[i_node] + counter;
539 
540  // saving column index j of the original matrix
541  mColumnIndex[csr_index] = j_neighbour;
542  }
543  // preparing row start index for next node
544  row_start_temp += n_neighbours;
545  }
546  }
547  }
548  // adding last entry (necessary for abort criterion of loops)
549  mRowStartIndex[n_nodes] = mNumberEdges;
550 
551  // INITIALIZING NODE BASED VALUES
552  // set the heights to a huge number
553  IndexPartition<unsigned int>(n_nodes).for_each([&](unsigned int i_node){
554  mHmin[i_node] = 1e10;
555  });
556 
557  KRATOS_CATCH("")
558  }
559 
560  //*********************************
561  // function to precalculate CSR data
562 
563  void BuildCSRData(ModelPart &model_part)
564  {
565  KRATOS_TRY
566 
567  // PRECALCULATING CSR DATA
568 
569  // defining temporary local variables for elementwise addition
570  // shape functions
571  array_1d<double, TDim + 1> N;
572  // shape function derivatives
573  boost::numeric::ublas::bounded_matrix<double, TDim + 1, TDim> dN_dx;
574  // volume
575  double volume;
576  // weighting factor
577  double weighting_factor = 1.0 / static_cast<double>(TDim + 1);
578  // elemental matrices
579  boost::numeric::ublas::bounded_matrix<double, TDim + 1, TDim + 1> mass_consistent;
580  // boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> laplacian;
581  array_1d<double, TDim + 1> mass_lumped;
582  // global indices of elemental nodes
583  array_1d<unsigned int, TDim + 1> nodal_indices;
584 
585  array_1d<double, TDim + 1> heights;
586 
587  // loop over all elements
588  for (typename ModelPart::ElementsContainerType::iterator elem_it = model_part.ElementsBegin(); elem_it != model_part.ElementsEnd(); elem_it++)
589  {
590  // LOCAL ELEMENTWISE CALCULATIONS
591 
592  // getting geometry data of the element
593  GeometryUtils::CalculateGeometryData(elem_it->GetGeometry(), dN_dx, N, volume);
594 
595  // calculate the length of the heights of the element
596  for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
597  {
598  heights[ie_node] = dN_dx(ie_node, 0) * dN_dx(ie_node, 0);
599  for (unsigned int comp = 1; comp < TDim; comp++)
600  {
601  heights[ie_node] += dN_dx(ie_node, comp) * dN_dx(ie_node, comp);
602  }
603  heights[ie_node] = 1.0 / sqrt(heights[ie_node]);
604  // KRATOS_WATCH(heights);
605  }
606 
607  // setting up elemental mass matrices
608  CalculateMassMatrix(mass_consistent, volume);
609  noalias(mass_lumped) = ZeroVector(TDim + 1);
610  for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
611  {
612  for (unsigned int je_node = 0; je_node <= TDim; je_node++)
613  {
614  // mass_consistent(ie_node,je_node) = N(ie_node) * N(je_node) * volume;
615  mass_lumped[ie_node] += mass_consistent(ie_node, je_node);
616  }
617  // mass_lumped[ie_node] = volume * N[ie_node];
618  }
619 
620  double weighted_volume = volume * weighting_factor;
621 
622  // ASSEMBLING GLOBAL DATA STRUCTURE
623 
624  // loop over the nodes of the element to determine their global indices
625  for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
626  nodal_indices[ie_node] = static_cast<unsigned int>(elem_it->GetGeometry()[ie_node].FastGetSolutionStepValue(AUX_INDEX));
627 
628  // assembling global "edge matrices" by adding local contributions
629  for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
630  {
631  // check the heights and update the stored value if a new minimum is found
632  if (mHmin[nodal_indices[ie_node]] > heights[ie_node])
633  mHmin[nodal_indices[ie_node]] = heights[ie_node];
634 
635  for (unsigned int je_node = 0; je_node <= TDim; je_node++)
636  {
637  // remark: there is no edge linking node i with itself!
638  // DIAGONAL TERMS
639  if (ie_node != je_node)
640  {
641  // calculating CSR index from global index
642  unsigned int csr_index = GetCSRIndex(nodal_indices[ie_node], nodal_indices[je_node]);
643 
644  // assigning precalculated element data to the referring edges
645  // contribution to edge mass
646  mNonzeroEdgeValues[csr_index].Mass += mass_consistent(ie_node, je_node);
647 
648  // contribution to edge laplacian
649  boost::numeric::ublas::bounded_matrix<double, TDim, TDim> &laplacian = mNonzeroEdgeValues[csr_index].LaplacianIJ;
650  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
651  for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
652  laplacian(l_comp, k_comp) += dN_dx(ie_node, l_comp) * dN_dx(je_node, k_comp) * volume;
653 
654  // contribution to edge gradient
655  array_1d<double, TDim> &gradient = mNonzeroEdgeValues[csr_index].Ni_DNj;
656  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
657  // gradient[l_comp] += dN_dx(je_node,l_comp);
658  gradient[l_comp] += dN_dx(je_node, l_comp) * weighted_volume;
659  // TRANSPOSED GRADIENT
660  // contribution to transposed edge gradient
661  array_1d<double, TDim> &transp_gradient = mNonzeroEdgeValues[csr_index].DNi_Nj;
662  for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
663  transp_gradient[l_comp] += dN_dx(ie_node, l_comp) * weighted_volume;
664  }
665  }
666  }
667 
668  // assembling node based vectors
669  for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
670  // diagonal of the global lumped mass matrix
671  mLumpedMassMatrix[nodal_indices[ie_node]] += mass_lumped[ie_node];
672  for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
673  {
674  // diagonal of the global gradient matrix
675  array_1d<double, TDim> &gradient = mDiagGradientMatrix[nodal_indices[ie_node]];
676  for (unsigned int component = 0; component < TDim; component++)
677  // gradient[component] += dN_dx(ie_node,component);
678  gradient[component] += dN_dx(ie_node, component) * weighted_volume;
679  }
680  }
681 
682  // copy mass matrix to inverted mass matrix
683  for (unsigned int inode = 0; inode < mLumpedMassMatrix.size(); inode++)
684  {
685  mInvertedMassMatrix[inode] = mLumpedMassMatrix[inode];
686  }
687 
688  // calculating the inverted mass matrix (this requires synchronization for MPI parallelism)
689  for (unsigned int inode = 0; inode < mInvertedMassMatrix.size(); inode++)
690  {
691  mInvertedMassMatrix[inode] = 1.0 / mInvertedMassMatrix[inode];
692  }
693 
694  KRATOS_CATCH("")
695  }
696 
697  //******************************************
698  // function to calculate CSR index of edge ij
699 
700  unsigned int GetCSRIndex(unsigned int NodeI, unsigned int NeighbourJ)
701  {
702  KRATOS_TRY
703 
704  // index indicating data position of edge ij
705  unsigned int csr_index;
706  // searching for coincidence of stored column index and neighbour index j
707  for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
708  if (mColumnIndex[csr_index] == NeighbourJ)
709  break;
710 
711  // returning CSR index of edge ij
712  return csr_index;
713 
714  KRATOS_CATCH("")
715  }
716 
717  //***********************************************
718  // function to get pointer to CSR tuple of edge ij
719 
720  CSR_Tuple *GetTuplePointer(unsigned int NodeI, unsigned int NeighbourJ)
721  {
722  KRATOS_TRY
723 
724  // index indicating data position of edge ij
725  unsigned int csr_index;
726  // searching for coincidence of stored column index and neighbour index j
727  for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
728  if (mColumnIndex[csr_index] == NeighbourJ)
729  break;
730 
731  // returning pointer to CSR tuple of edge ij
732  return &mNonzeroEdgeValues[csr_index];
733 
734  KRATOS_CATCH("")
735  }
736 
737  //*******************************
738  // function to free dynamic memory
739 
740  void Clear()
741  {
742  KRATOS_TRY
743 
744  mNonzeroEdgeValues.clear();
745  mColumnIndex.clear();
746  mRowStartIndex.clear();
747  mInvertedMassMatrix.clear();
748  mLumpedMassMatrix.clear();
749  mDiagGradientMatrix.clear();
750  mHmin.clear();
751 
752  KRATOS_CATCH("")
753  }
754  //****************************
755  // functions to access database
756  //(note that this is already designed with parallel execution in mind;
757  // for a single processor this could be done in a faster way)
758 
759  void FillCoordinatesFromDatabase(CalcVectorType &rDestination, ModelPart::NodesContainerType &rNodes)
760  {
761 
762  KRATOS_TRY
763 
764  // loop over all nodes
765  int n_nodes = rNodes.size();
766  ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
767 
768 #pragma omp parallel for firstprivate(n_nodes, it_begin)
769  for (int i = 0; i < n_nodes; i++)
770  {
771  ModelPart::NodesContainerType::iterator node_it = it_begin + i;
772 
773  // get the global index of node i
774  // // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
775  unsigned int i_node = i;
776 
777  // save value in the destination vector
778  for (unsigned int component = 0; component < TDim; component++)
779  (rDestination[i_node])[component] = (*node_it)[component];
780  }
781 
782  KRATOS_CATCH("");
783  }
784 
785  //****************************
786  // functions to access database
787  //(note that this is already designed with parallel execution in mind;
788  // for a single processor this could be done in a faster way)
789 
790  void FillVectorFromDatabase(Variable<array_1d<double, 3>> &rVariable, CalcVectorType &rDestination, ModelPart::NodesContainerType &rNodes)
791  {
792 
793  KRATOS_TRY
794 
795  // loop over all nodes
796 
797  int n_nodes = rNodes.size();
798 
799  ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
800 
801  unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
802 
803 #pragma omp parallel for firstprivate(n_nodes, it_begin, var_pos)
804  for (int i = 0; i < n_nodes; i++)
805  {
806  ModelPart::NodesContainerType::iterator node_it = it_begin + i;
807 
808  // get the global index of node i
809  unsigned int i_node = i;
810 
811  // get the requested value in vector form
812  array_1d<double, 3> &vector = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
813  // save value in the destination vector
814  for (unsigned int component = 0; component < TDim; component++)
815  (rDestination[i_node])[component] = vector[component];
816  }
817 
818  KRATOS_CATCH("");
819  }
820 
821  void FillOldVectorFromDatabase(Variable<array_1d<double, 3>> &rVariable, CalcVectorType &rDestination, ModelPart::NodesContainerType &rNodes)
822  {
823 
824  KRATOS_TRY
825 
826  // loop over all nodes
827  int n_nodes = rNodes.size();
828 
829  ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
830 
831  unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
832 
833 #pragma omp parallel for firstprivate(n_nodes, it_begin, var_pos)
834  for (int i = 0; i < n_nodes; i++)
835  {
836  ModelPart::NodesContainerType::iterator node_it = it_begin + i;
837 
838  // get the global index of node i
839  unsigned int i_node = i;
840 
841  // get the requested value in vector form
842  array_1d<double, 3> &vector = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
843  // save value in the destination vector
844  for (unsigned int component = 0; component < TDim; component++)
845  (rDestination[i_node])[component] = vector[component];
846  }
847 
848  KRATOS_CATCH("");
849  }
850 
851  void FillScalarFromDatabase(Variable<double> &rVariable, ValuesVectorType &rDestination, ModelPart::NodesContainerType &rNodes)
852  {
853  KRATOS_TRY
854 
855  // loop over all nodes
856  int n_nodes = rNodes.size();
857 
858  ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
859 
860  unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
861 
862 #pragma omp parallel for firstprivate(n_nodes, it_begin, var_pos)
863  for (int i = 0; i < n_nodes; i++)
864  {
865  ModelPart::NodesContainerType::iterator node_it = it_begin + i;
866 
867  // get the global index of node i
868  unsigned int i_node = i;
869 
870  // get the requested scalar value
871  double &scalar = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
872  // save value in the destination vector
873  rDestination[i_node] = scalar;
874  }
875 
876  KRATOS_CATCH("");
877  }
878 
879  void FillOldScalarFromDatabase(Variable<double> &rVariable, ValuesVectorType &rDestination, ModelPart::NodesContainerType &rNodes)
880  {
881  KRATOS_TRY
882 
883  int n_nodes = rNodes.size();
884  ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
885 
886  unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
887 
888 #pragma omp parallel for firstprivate(n_nodes, it_begin, var_pos)
889  for (int i = 0; i < n_nodes; i++)
890  {
891  ModelPart::NodesContainerType::iterator node_it = it_begin + i;
892 
893  // get the global index of node i
894  unsigned int i_node = i;
895 
896  // get the requested scalar value
897  double &scalar = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
898  // save value in the destination vector
899  rDestination[i_node] = scalar;
900  }
901 
902  KRATOS_CATCH("");
903  }
904 
905  void WriteVectorToDatabase(Variable<array_1d<double, 3>> &rVariable, CalcVectorType &rOrigin, ModelPart::NodesContainerType &rNodes)
906  {
907  KRATOS_TRY
908 
909  // loop over all nodes
910  int n_nodes = rNodes.size();
911  ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
912 
913  unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
914 
915 #pragma omp parallel for firstprivate(n_nodes, it_begin, var_pos)
916  for (int i = 0; i < n_nodes; i++)
917  {
918  ModelPart::NodesContainerType::iterator node_it = it_begin + i;
919 
920  // get the global index of node i
921  unsigned int i_node = i;
922 
923  // get reference of destination
924  array_1d<double, 3> &vector = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
925  // save vector in database
926  for (unsigned int component = 0; component < TDim; component++)
927  vector[component] = (rOrigin[i_node])[component];
928  }
929 
930  KRATOS_CATCH("");
931  }
932 
933  void WriteScalarToDatabase(Variable<double> &rVariable, ValuesVectorType &rOrigin, ModelPart::NodesContainerType &rNodes)
934  {
935  KRATOS_TRY
936 
937  // loop over all nodes
938  int n_nodes = rNodes.size();
939  ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
940 
941  unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
942 
943 #pragma omp parallel for firstprivate(n_nodes, it_begin, var_pos)
944  for (int i = 0; i < n_nodes; i++)
945  {
946  ModelPart::NodesContainerType::iterator node_it = it_begin + i;
947 
948  // get the global index of node i
949  int i_node = i;
950 
951  // get reference of destination
952  double &scalar = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
953  // save scalar in database
954  scalar = rOrigin[i_node];
955  }
956 
957  KRATOS_CATCH("");
958  }
959 
960  //*********************************************************************
961  // destination = origin1 + value * Minv*origin
962 
963  inline void Add_Minv_value(
964  CalcVectorType &destination,
965  const CalcVectorType &origin1,
966  const double value,
967  const ValuesVectorType &Minv_vec,
968  const CalcVectorType &origin)
969  {
970  KRATOS_TRY
971 
972  int loop_size = destination.size();
973 
974  IndexPartition<unsigned int>(loop_size).for_each([&](unsigned int i_node){
975  array_1d<double, TDim> &dest = destination[i_node];
976  const double m_inv = Minv_vec[i_node];
977  const array_1d<double, TDim> &origin_vec1 = origin1[i_node];
978  const array_1d<double, TDim> &origin_value = origin[i_node];
979 
980  double temp = value * m_inv;
981  for (unsigned int comp = 0; comp < TDim; comp++)
982  dest[comp] = origin_vec1[comp] + temp * origin_value[comp];
983  });
984 
985  KRATOS_CATCH("")
986  }
987 
988  inline void Add_Minv_value(
989  ValuesVectorType &destination,
990  const ValuesVectorType &origin1,
991  const double value,
992  const ValuesVectorType &Minv_vec,
993  const ValuesVectorType &origin)
994  {
995  KRATOS_TRY
996 
997  int loop_size = destination.size();
998 
999  IndexPartition<unsigned int>(loop_size).for_each([&](unsigned int i_node){
1000  double &dest = destination[i_node];
1001  const double m_inv = Minv_vec[i_node];
1002  const double &origin_vec1 = origin1[i_node];
1003  const double &origin_value = origin[i_node];
1004 
1005  double temp = value * m_inv;
1006  dest = origin_vec1 + temp * origin_value;
1007  });
1008 
1009  KRATOS_CATCH("")
1010  }
1011 
1012  //**********************************************************************
1013 
1014  void AllocateAndSetToZero(CalcVectorType &data_vector, int size)
1015  {
1016  data_vector.resize(size);
1017  int loop_size = size;
1018 
1019  IndexPartition<unsigned int>(loop_size).for_each([&](unsigned int i_node){
1020  array_1d<double, TDim> &aaa = data_vector[i_node];
1021  for (unsigned int comp = 0; comp < TDim; comp++)
1022  aaa[comp] = 0.0;
1023  });
1024  }
1025 
1026  void AllocateAndSetToZero(ValuesVectorType &data_vector, int size)
1027  {
1028  data_vector.resize(size);
1029  int loop_size = size;
1030 
1031  IndexPartition<unsigned int>(loop_size).for_each([&](unsigned int i_node){
1032  data_vector[i_node] = 0.0;
1033  });
1034  }
1035 
1036  //**********************************************************************
1037 
1038  void SetToZero(EdgesVectorType &data_vector)
1039  {
1040  int loop_size = data_vector.size();
1041 
1042  IndexPartition<unsigned int>(loop_size).for_each([&](unsigned int i_node){
1043  // initializing the CSR vector entries with zero
1044  data_vector[i_node].Mass = 0.0;
1045  noalias(data_vector[i_node].LaplacianIJ) = ZeroMatrix(TDim, TDim);
1046  noalias(data_vector[i_node].Ni_DNj) = ZeroVector(TDim);
1047  noalias(data_vector[i_node].DNi_Nj) = ZeroVector(TDim);
1048  });
1049  }
1050 
1051  void SetToZero(IndicesVectorType &data_vector)
1052  {
1053  int loop_size = data_vector.size();
1054 
1055  IndexPartition<unsigned int>(loop_size).for_each([&](unsigned int i_node){
1056  data_vector[i_node] = 0;
1057  });
1058  }
1059 
1060  void SetToZero(CalcVectorType &data_vector)
1061  {
1062  int loop_size = data_vector.size();
1063 
1064  IndexPartition<unsigned int>(loop_size).for_each([&](unsigned int i_node){
1065  array_1d<double, TDim> &aaa = data_vector[i_node];
1066  for (unsigned int comp = 0; comp < TDim; comp++)
1067  aaa[comp] = 0.0;
1068  });
1069  }
1070 
1071  void SetToZero(ValuesVectorType &data_vector)
1072  {
1073  int loop_size = data_vector.size();
1074 
1075  IndexPartition<unsigned int>(loop_size).for_each([&](unsigned int i_node){
1076  data_vector[i_node] = 0.0;
1077  });
1078  }
1079 
1080  //**********************************************************************
1081 
1082  inline void AssignVectorToVector(const CalcVectorType &origin,
1083  CalcVectorType &destination)
1084  {
1085  int loop_size = origin.size();
1086 
1087  IndexPartition<unsigned int>(loop_size).for_each([&](unsigned int i_node){
1088  const array_1d<double, TDim> &orig = origin[i_node];
1089  array_1d<double, TDim> &dest = destination[i_node];
1090  for (unsigned int comp = 0; comp < TDim; comp++)
1091  dest[comp] = orig[comp];
1092  });
1093  }
1094 
1095  inline void AssignVectorToVector(const ValuesVectorType &origin,
1096  ValuesVectorType &destination)
1097  {
1098  int loop_size = origin.size();
1099 
1100  IndexPartition<unsigned int>(loop_size).for_each([&](unsigned int i_node){
1101  destination[i_node] = origin[i_node];
1102  });
1103  }
1104 
1105  private:
1106  // number of edges
1107  unsigned int mNumberEdges;
1108 
1109  // CSR data vector for storage of the G, L and consistent M components of edge ij
1110  EdgesVectorType mNonzeroEdgeValues;
1111 
1112  // vector to store column indices of nonzero matrix elements for each row
1113  IndicesVectorType mColumnIndex;
1114 
1115  // index vector to access the start of matrix row i in the column vector
1116  IndicesVectorType mRowStartIndex;
1117 
1118  // inverse of the mass matrix ... for parallel calculation each subdomain should contain this correctly calculated (including contributions of the neighbours)
1119  ValuesVectorType mInvertedMassMatrix;
1120 
1121  // minimum height around one node
1122  ValuesVectorType mHmin;
1123 
1124  // lumped mass matrix (separately stored due to lack of diagonal elements of the consistent mass matrix)
1125  ValuesVectorType mLumpedMassMatrix;
1126  // diagonal of the gradient matrix (separately stored due to special calculations)
1127  CalcVectorType mDiagGradientMatrix;
1128 
1129  //*******************************************
1130  // functions to set up elemental mass matrices
1131 
1132  void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 3, 3> &mass_consistent, double volume)
1133  {
1134  for (unsigned int i_node = 0; i_node <= TDim; i_node++)
1135  {
1136  // diagonal terms
1137  mass_consistent(i_node, i_node) = 0.16666666666666666667 * volume; // 1/6
1138  // non-diagonal terms
1139  double temp = 0.08333333333333333333 * volume; // 1/12
1140  for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
1141  {
1142  // taking advantage of symmetry
1143  mass_consistent(i_node, j_neighbour) = temp;
1144  mass_consistent(j_neighbour, i_node) = temp;
1145  }
1146  }
1147  }
1148 
1149  void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 4, 4> &mass_consistent, double volume)
1150  {
1151  for (unsigned int i_node = 0; i_node <= TDim; i_node++)
1152  {
1153  // diagonal terms
1154  mass_consistent(i_node, i_node) = 0.1 * volume;
1155  // non-diagonal terms
1156  double temp = 0.05 * volume;
1157  for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
1158  {
1159  // taking advantage of symmetry
1160  mass_consistent(i_node, j_neighbour) = temp;
1161  mass_consistent(j_neighbour, i_node) = temp;
1162  }
1163  }
1164  }
1165  };
1166 
1167 } // namespace Kratos
1168 
1169 #endif // KRATOS_EDGE_DATA_H_INCLUDED defined
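The listing above defines the data structures only; the sketch below illustrates how they are typically driven from application code: build the CSR edge structure once, gather nodal data into the edge-based numbering, then loop over the CSR rows edge by edge. This is a minimal, hypothetical example and not part of edge_data.h: the driver function name, the choice of the PRESSURE variable, and the include path for edge_data.h are assumptions, and it presumes that the nodal NEIGHBOUR_NODES lists and AUX_INDEX values required by ConstructCSRVector are already available (e.g. from a neighbour-search process).

// Hypothetical driver (not part of edge_data.h): accumulates an edge-based
// pressure-gradient contribution using MatrixContainer and EdgesStructureType.
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"          // for PRESSURE
#include "custom_utilities/edge_data.h"  // include path assumed, adjust to the application layout

template <class TSparseSpace>
void AssembleEdgeBasedPressureGradient(Kratos::ModelPart &model_part)
{
    using namespace Kratos;
    constexpr unsigned int TDim = 3; // 3D mesh assumed

    // build the CSR edge structure once per mesh
    // (requires NEIGHBOUR_NODES and AUX_INDEX to be set on the nodes)
    MatrixContainer<TDim, TSparseSpace> matrix_container;
    matrix_container.ConstructCSRVector(model_part);
    matrix_container.BuildCSRData(model_part);

    const unsigned int n_nodes = model_part.Nodes().size();

    // nodal work arrays in the edge-based (AUX_INDEX) numbering
    typename MatrixContainer<TDim, TSparseSpace>::CalcVectorType rhs;
    typename MatrixContainer<TDim, TSparseSpace>::ValuesVectorType pressure;
    matrix_container.AllocateAndSetToZero(rhs, n_nodes);
    matrix_container.AllocateAndSetToZero(pressure, n_nodes);

    // gather nodal pressures from the model part database
    matrix_container.FillScalarFromDatabase(PRESSURE, pressure, model_part.Nodes());

    // edge loop: visit the CSR row of every node i and accumulate the
    // gradient contribution of each edge ij into the nodal RHS
    const auto &row_start = matrix_container.GetRowStartIndex();
    const auto &col_index = matrix_container.GetColumnIndex();
    auto &edge_values = matrix_container.GetEdgeValues();

    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        const double p_i = pressure[i_node];
        for (unsigned int csr_index = row_start[i_node]; csr_index != row_start[i_node + 1]; csr_index++)
        {
            const unsigned int j_node = col_index[csr_index];
            edge_values[csr_index].Add_grad_p(rhs[i_node], p_i, pressure[j_node]);
        }
    }
}

The same row-start / column-index traversal is the intended usage pattern for the other edge operators of EdgesStructureType (Add_ConvectiveContribution, Add_StabContribution, Add_ViscousContribution, etc.), and the resulting right-hand side can be scaled by the lumped inverse mass matrix with Add_Minv_value.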
Definition: edge_data.h:42
void Sub_D_v(double &destination, const array_1d< double, TDim > &v_i, const array_1d< double, TDim > &v_j)
Definition: edge_data.h:86
array_1d< double, TDim > Ni_DNj
Definition: edge_data.h:50
void Sub_ViscousContribution(array_1d< double, TDim > &destination, const array_1d< double, TDim > &U_i, const double &nu_i, const array_1d< double, TDim > &U_j, const double &nu_j)
Definition: edge_data.h:362
void Add_StabContribution(array_1d< double, TDim > &destination, const double tau, const double beta, const array_1d< double, TDim > &stab_low, const array_1d< double, TDim > &stab_high)
Definition: edge_data.h:315
void Add_StabContribution(double &destination, const double tau, const double beta, const double &stab_low, const double &stab_high)
Definition: edge_data.h:323
void Add_Gp(array_1d< double, TDim > &destination, const double &p_i, const double &p_j)
Definition: edge_data.h:61
void Sub_Gp(array_1d< double, TDim > &destination, const double &p_i, const double &p_j)
Definition: edge_data.h:67
void CalculateConvectionStabilization_LOW(double &stab_low, const array_1d< double, TDim > &a_i, const double &phi_i, const array_1d< double, TDim > &a_j, const double &phi_j)
Definition: edge_data.h:252
boost::numeric::ublas::bounded_matrix< double, TDim, TDim > LaplacianIJ
Definition: edge_data.h:48
double Mass
Definition: edge_data.h:45
void CalculateScalarLaplacian(double &l_ij)
Definition: edge_data.h:136
void Add_ConvectiveContribution(double &destination, const array_1d< double, TDim > &a_i, const double &phi_i, const array_1d< double, TDim > &a_j, const double &phi_j)
Definition: edge_data.h:214
void Sub_ConvectiveContribution(array_1d< double, TDim > &destination, const array_1d< double, TDim > &a_i, const array_1d< double, TDim > &U_i, const array_1d< double, TDim > &a_j, const array_1d< double, TDim > &U_j)
Definition: edge_data.h:167
void Sub_StabContribution(double &destination, const double tau, const double beta, const double &stab_low, const double &stab_high)
Definition: edge_data.h:338
void Add_grad_p(array_1d< double, TDim > &destination, const double &p_i, const double &p_j)
Definition: edge_data.h:99
void Sub_StabContribution(array_1d< double, TDim > &destination, const double tau, const double beta, const array_1d< double, TDim > &stab_low, const array_1d< double, TDim > &stab_high)
Definition: edge_data.h:330
void Sub_div_v(double &destination, const array_1d< double, TDim > &v_i, const array_1d< double, TDim > &v_j)
Definition: edge_data.h:124
array_1d< double, TDim > DNi_Nj
Definition: edge_data.h:53
void Add_ConvectiveContribution(array_1d< double, TDim > &destination, const array_1d< double, TDim > &a_i, const array_1d< double, TDim > &U_i, const array_1d< double, TDim > &a_j, const array_1d< double, TDim > &U_j)
Definition: edge_data.h:143
void CalculateConvectionStabilization_HIGH(array_1d< double, TDim > &stab_high, const array_1d< double, TDim > &a_i, const array_1d< double, TDim > &pi_i, const array_1d< double, TDim > &a_j, const array_1d< double, TDim > &pi_j)
Definition: edge_data.h:265
void CalculateConvectionStabilization_HIGH(double &stab_high, const array_1d< double, TDim > &a_i, const double &pi_i, const array_1d< double, TDim > &a_j, const double &pi_j)
Definition: edge_data.h:289
void Add_div_v(double &destination, const array_1d< double, TDim > &v_i, const array_1d< double, TDim > &v_j)
Definition: edge_data.h:116
void Sub_ConvectiveContribution(double &destination, const array_1d< double, TDim > &a_i, const double &phi_i, const array_1d< double, TDim > &a_j, const double &phi_j)
Definition: edge_data.h:191
void Add_D_v(double &destination, const array_1d< double, TDim > &v_i, const array_1d< double, TDim > &v_j)
Definition: edge_data.h:78
void Add_ViscousContribution(array_1d< double, TDim > &destination, const array_1d< double, TDim > &U_i, const double &nu_i, const array_1d< double, TDim > &U_j, const double &nu_j)
Definition: edge_data.h:348
void Sub_grad_p(array_1d< double, TDim > &destination, const double &p_i, const double &p_j)
Definition: edge_data.h:105
void CalculateConvectionStabilization_LOW(array_1d< double, TDim > &stab_low, const array_1d< double, TDim > &a_i, const array_1d< double, TDim > &U_i, const array_1d< double, TDim > &a_j, const array_1d< double, TDim > &U_j)
Definition: edge_data.h:240
static void CalculateGeometryData(const GeometryType &rGeometry, BoundedMatrix< double, 4, 3 > &rDN_DX, array_1d< double, 4 > &rN, double &rVolume)
This function is designed to compute the shape function derivatives, shape functions and volume in 3D...
Definition: geometry_utilities.h:176
This class is a vector which stores global pointers.
Definition: global_pointers_vector.h:61
boost::indirect_iterator< typename TContainerType::iterator > iterator
Definition: global_pointers_vector.h:79
iterator begin()
Definition: global_pointers_vector.h:221
size_type size() const
Definition: global_pointers_vector.h:307
iterator end()
Definition: global_pointers_vector.h:229
This class is useful for index iteration over containers.
Definition: parallel_utilities.h:451
void for_each(TUnaryFunction &&f)
Definition: parallel_utilities.h:514
Definition: edge_data.h:381
EdgesStructureType< TDim > CSR_Tuple
Definition: edge_data.h:384
void WriteVectorToDatabase(Variable< array_1d< double, 3 >> &rVariable, CalcVectorType &rOrigin, ModelPart::NodesContainerType &rNodes)
Definition: edge_data.h:905
void AssignVectorToVector(const CalcVectorType &origin, CalcVectorType &destination)
Definition: edge_data.h:1082
ValuesVectorType & GetLumpedMass()
Definition: edge_data.h:420
void FillOldScalarFromDatabase(Variable< double > &rVariable, ValuesVectorType &rDestination, ModelPart::NodesContainerType &rNodes)
Definition: edge_data.h:879
void SetToZero(EdgesVectorType &data_vector)
Definition: edge_data.h:1038
~MatrixContainer()
Definition: edge_data.h:396
void BuildCSRData(ModelPart &model_part)
Definition: edge_data.h:563
CalcVectorType & GetDiagGradient()
Definition: edge_data.h:430
void ConstructCSRVector(ModelPart &model_part)
Definition: edge_data.h:443
void AssignVectorToVector(const ValuesVectorType &origin, ValuesVectorType &destination)
Definition: edge_data.h:1095
MatrixContainer()
Definition: edge_data.h:394
vector< double > ValuesVectorType
Definition: edge_data.h:389
IndicesVectorType & GetColumnIndex()
Definition: edge_data.h:410
vector< unsigned int > IndicesVectorType
Definition: edge_data.h:387
unsigned int GetCSRIndex(unsigned int NodeI, unsigned int NeighbourJ)
Definition: edge_data.h:700
void Add_Minv_value(ValuesVectorType &destination, const ValuesVectorType &origin1, const double value, const ValuesVectorType &Minv_vec, const ValuesVectorType &origin)
Definition: edge_data.h:988
ValuesVectorType & GetInvertedMass()
Definition: edge_data.h:425
void Clear()
Definition: edge_data.h:740
IndicesVectorType & GetRowStartIndex()
Definition: edge_data.h:415
void FillVectorFromDatabase(Variable< array_1d< double, 3 >> &rVariable, CalcVectorType &rDestination, ModelPart::NodesContainerType &rNodes)
Definition: edge_data.h:790
void FillScalarFromDatabase(Variable< double > &rVariable, ValuesVectorType &rDestination, ModelPart::NodesContainerType &rNodes)
Definition: edge_data.h:851
vector< CSR_Tuple > EdgesVectorType
Definition: edge_data.h:385
void SetToZero(CalcVectorType &data_vector)
Definition: edge_data.h:1060
void WriteScalarToDatabase(Variable< double > &rVariable, ValuesVectorType &rOrigin, ModelPart::NodesContainerType &rNodes)
Definition: edge_data.h:933
vector< array_1d< double, TDim > > CalcVectorType
Definition: edge_data.h:390
void Add_Minv_value(CalcVectorType &destination, const CalcVectorType &origin1, const double value, const ValuesVectorType &Minv_vec, const CalcVectorType &origin)
Definition: edge_data.h:963
void FillOldVectorFromDatabase(Variable< array_1d< double, 3 >> &rVariable, CalcVectorType &rDestination, ModelPart::NodesContainerType &rNodes)
Definition: edge_data.h:821
void AllocateAndSetToZero(ValuesVectorType &data_vector, int size)
Definition: edge_data.h:1026
void AllocateAndSetToZero(CalcVectorType &data_vector, int size)
Definition: edge_data.h:1014
void SetToZero(IndicesVectorType &data_vector)
Definition: edge_data.h:1051
void FillCoordinatesFromDatabase(CalcVectorType &rDestination, ModelPart::NodesContainerType &rNodes)
Definition: edge_data.h:759
CSR_Tuple * GetTuplePointer(unsigned int NodeI, unsigned int NeighbourJ)
Definition: edge_data.h:720
ValuesVectorType & GetHmin()
Definition: edge_data.h:435
unsigned int GetNumberEdges()
Definition: edge_data.h:400
EdgesVectorType & GetEdgeValues()
Definition: edge_data.h:405
void SetToZero(ValuesVectorType &data_vector)
Definition: edge_data.h:1071
This class aims to manage meshes for multi-physics simulations.
Definition: model_part.h:77
MeshType::NodesContainerType NodesContainerType
Nodes container. Which is a vector set of nodes with their Id's as key.
Definition: model_part.h:128
static void DivideInPartitions(const int NumTerms, const int NumThreads, PartitionVector &Partitions)
Divide an array of length NumTerms between NumThreads threads.
Definition: openmp_utils.h:158
static int ThisThread()
Wrapper for omp_get_thread_num().
Definition: openmp_utils.h:108
static int GetNumThreads()
Returns the current number of threads.
Definition: parallel_utilities.cpp:34
Variable class contains all information needed to store and retrive data from a data container.
Definition: variable.h:63
#define KRATOS_CATCH(MoreInfo)
Definition: define.h:110
#define KRATOS_WATCH(variable)
Definition: define.h:806
#define KRATOS_TRY
Definition: define.h:109
KratosZeroVector< double > ZeroVector
Definition: amatrix_interface.h:561
KratosZeroMatrix< double > ZeroMatrix
Definition: amatrix_interface.h:559
T & noalias(T &TheMatrix)
Definition: amatrix_interface.h:484