KratosMultiphysics
KRATOS Multiphysics (Kratos) is a framework for building parallel, multi-disciplinary simulation software, aiming at modularity, extensibility, and high performance. Kratos is written in C++ and provides an extensive Python interface.
global_pointer_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//

#pragma once

// System includes
#include <string>
#include <iostream>
#include <sstream>
#include <vector>
#include <unordered_map>
#include <algorithm>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/node.h"
#include "includes/geometrical_object.h"
#include "includes/variables.h"
#include "includes/data_communicator.h"
#include "includes/global_pointer.h"
#include "containers/global_pointers_vector.h"

namespace Kratos
{

/**
 * @class GlobalPointerUtilities
 * @brief This class is used to manage global pointers.
 * @details It provides utilities to retrieve GlobalPointer objects for the entities of a container,
 * both in serial and in distributed (MPI) runs.
 * @author Riccardo Rossi
 */
class GlobalPointerUtilities
{
public:
    /// Pointer definition of GlobalPointerUtilities
    KRATOS_CLASS_POINTER_DEFINITION(GlobalPointerUtilities);

    /// Default constructor.
    GlobalPointerUtilities()
    {}

    /// Destructor.
    virtual ~GlobalPointerUtilities() = default;

    /**
     * @brief Retrieves a map of global pointers corresponding to the given entity ids, where the
     * global pointers point to the rank on which each entity is local.
     * @param rContainer the container holding the local entities
     * @param rIdList the list of entity ids to retrieve
     * @param rDataCommunicator the data communicator used for the exchange
     * @return an unordered_map from id to the GlobalPointer of the corresponding entity
     */
    template< class TContainerType >
    static std::unordered_map< int, GlobalPointer<typename TContainerType::value_type> > RetrieveGlobalIndexedPointersMap(
        const TContainerType& rContainer,
        const std::vector<int>& rIdList,
        const DataCommunicator& rDataCommunicator
        )
    {
        using GPType = GlobalPointer<typename TContainerType::value_type>;

        std::unordered_map< int, GPType > global_pointers_list;
        const int current_rank = rDataCommunicator.Rank();
        const int world_size = rDataCommunicator.Size();

        std::vector<int> remote_ids;

        if(rDataCommunicator.IsDistributed()) {
            // If the execution is distributed, for every entity id check whether it is in the container
            // and, if it is, whether it is local to our partition.
            for(const int id : rIdList ) {
                const auto it = rContainer.find(id);

                if( it != rContainer.end()) {
                    if(ObjectIsLocal(*it, current_rank)) {
                        // Found locally
                        global_pointers_list.emplace(id,GPType(&*it, current_rank));
                    } else {
                        // Remote, but this is a lucky case since for those we know to which rank they belong
                        // TODO: optimize according to the comment just above
                        remote_ids.push_back(id);
                    }
                } else {
                    // Id not found and we have no clue of which rank owns it
                    remote_ids.push_back(id);
                }
            }
        } else {
            // If the execution is not distributed, only check if the id is in the container.
            for(const int id : rIdList ) {
                const auto it = rContainer.find(id);
                if( it != rContainer.end()) {
                    // Found locally
                    global_pointers_list.emplace(id,GPType(&*it, current_rank));
                }
            }
        }

        // Gather everything onto the master_rank processor
        int master_rank = 0;

        std::vector<int> all_remote_ids;
        std::vector< std::vector<int> > collected_remote_ids(world_size);
        std::unordered_map< int, GPType > all_non_local_gp_map;

        // STEP1 - here we send the ids we need to the master_rank
        // NOTE: here we DO NOT use a collective since we need to keep the ids distinguished per rank
        for(int i=0; i<world_size; ++i) {
            if(i != master_rank) {
                if(current_rank == master_rank) { // only master executes
                    rDataCommunicator.Recv(collected_remote_ids[i],i);
                } else if(current_rank == i) { // only processor i executes
                    rDataCommunicator.Send(remote_ids,master_rank);
                }
            } else { // no communication needed
                if(current_rank == master_rank) // only master executes
                    collected_remote_ids[i] = remote_ids;
            }

            if(current_rank == master_rank) {
                for(const int id : collected_remote_ids[i])
                    all_remote_ids.push_back( id );
            }
        }

        // very useful for debugging. do not remove for now
        // if(current_rank == master_rank)
        // {
        //     std::cout << "collected ids " << std::endl;
        //     for(unsigned int rank=0; rank<collected_remote_ids.size(); ++rank)
        //     {
        //         std::cout << " r = " << rank << " - ";
        //         for(int id : collected_remote_ids[rank])
        //             std::cout << id << " " ;
        //         std::cout << std::endl;
        //     }

        //     std::cout << "all remote ids " << std::endl;
        //     for(int id : all_remote_ids)
        //         std::cout << id << " ";
        //     std::cout << std::endl;
        // }

        if(current_rank == master_rank) {
            std::sort(all_remote_ids.begin(), all_remote_ids.end());
            auto last = std::unique(all_remote_ids.begin(), all_remote_ids.end());
            all_remote_ids.erase(last, all_remote_ids.end());
        }

        // communicate the size of all_remote_ids and resize the vector accordingly
        int number_of_all_remote_ids = all_remote_ids.size();
        rDataCommunicator.Broadcast(number_of_all_remote_ids,master_rank);

        if(current_rank != master_rank)
            all_remote_ids.resize(number_of_all_remote_ids);

        // STEP2 - here we give to every processor the ids that are needed by someone
        rDataCommunicator.Broadcast(all_remote_ids,master_rank);

        // STEP3 - here we obtain the list of gps we own and we send it back to the master_rank
        // gather results on master_rank
        for(int i=0; i<world_size; ++i) {
            if(i != master_rank) {
                if(current_rank == master_rank) {
                    std::unordered_map< int, GPType > recv_gps;
                    rDataCommunicator.Recv(recv_gps, i);

                    for(auto& it : recv_gps)
                        all_non_local_gp_map.emplace(it.first, it.second);
                } else if(current_rank == i) {
                    auto non_local_gp_map = ComputeGpMap(rContainer, all_remote_ids, rDataCommunicator);
                    rDataCommunicator.Send(non_local_gp_map,master_rank);
                }
            } else {
                auto recv_gps = ComputeGpMap(rContainer, all_remote_ids, rDataCommunicator);

                for(auto& it : recv_gps)
                    all_non_local_gp_map.emplace(it.first, it.second);
            }
        }

        // STEP4 - here we obtain from the master_rank the list of gps we need
        // extract data and send to everyone
        for(int i=0; i<world_size; ++i) {
            if(i != master_rank) {
                if(current_rank == master_rank) { // only master executes
                    auto gp_list = ExtractById(all_non_local_gp_map,collected_remote_ids[i]);

                    // TODO: here we could use separately send and recv
                    rDataCommunicator.Send(gp_list,i);
                } else if(current_rank == i) { // only processor i executes
                    std::unordered_map< int, GPType > gp_list;
                    rDataCommunicator.Recv(gp_list, master_rank);

                    for(auto& it : gp_list)
                        global_pointers_list.emplace(it.first, it.second);
                }
            } else {
                auto gp_list = ExtractById(all_non_local_gp_map,collected_remote_ids[i]);

                for(auto& it : gp_list)
                    global_pointers_list.emplace(it.first, it.second);
            }
        }

        return global_pointers_list;
    }
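    // For illustration: with three ranks, if rank 2 asks for ids {5, 9} that it does not own,
    // STEP1 collects {5, 9} on the master rank, STEP2 broadcasts the sorted, de-duplicated id list
    // to every rank, STEP3 has the owning ranks answer with their GlobalPointers, and STEP4 sends
    // back to rank 2 exactly the entries it originally requested.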

    /**
     * @brief Retrieve global pointers for entities in container, given a data communicator. Only local entities are retrieved.
     * @param rContainer the container holding the local entities
     * @param rDataCommunicator the data communicator used for the exchange
     * @return a GlobalPointersVector with the global pointers of the local entities
     */
    template< class TContainerType >
    static GlobalPointersVector< typename TContainerType::value_type > LocalRetrieveGlobalPointers(
        const TContainerType& rContainer,
        const DataCommunicator& rDataCommunicator
        )
    {
        // Retrieve the ids
        std::vector<int> local_id_list;
        local_id_list.reserve(rContainer.size());
        for (const auto& r_entity : rContainer) {
            local_id_list.push_back(r_entity.Id());
        }

        // Retrieve the global pointers
        return RetrieveGlobalIndexedPointers(rContainer, local_id_list, rDataCommunicator);
    }

    /**
     * @brief Retrieve global pointers for entities in container, given a data communicator. All entities are retrieved.
     * @param rContainer the container holding the local entities
     * @param rDataCommunicator the data communicator used for the exchange
     * @return a GlobalPointersVector with the global pointers of the entities of all partitions
     */
    template< class TContainerType >
    static GlobalPointersVector< typename TContainerType::value_type > GlobalRetrieveGlobalPointers(
        const TContainerType& rContainer,
        const DataCommunicator& rDataCommunicator
        )
    {
        // Getting world size
        const int world_size = rDataCommunicator.Size();

        // Getting number of entities
        const std::size_t number_of_entities = rContainer.size();

        // Getting global number of points
        std::vector<int> number_of_entities_per_partition(world_size);
        std::vector<int> send_number_of_entities_per_partition(1, number_of_entities);
        rDataCommunicator.AllGather(send_number_of_entities_per_partition, number_of_entities_per_partition);

        // Retrieve the ids
        std::vector<int> global_id_list, local_id_list;
        local_id_list.reserve(number_of_entities);
        for (const auto& r_entity : rContainer) {
            local_id_list.push_back(r_entity.Id());
        }

        // Generate vectors with sizes for AllGatherv
        std::vector<int> recv_sizes(number_of_entities_per_partition);
        int message_size = 0;
        std::vector<int> recv_offsets(world_size, 0);
        for (int i_rank = 0; i_rank < world_size; i_rank++) {
            recv_offsets[i_rank] = message_size;
            message_size += recv_sizes[i_rank];
        }
        global_id_list.resize(message_size);

        // Invoke AllGatherv
        rDataCommunicator.AllGatherv(local_id_list, global_id_list, recv_sizes, recv_offsets);

        // Retrieve the global pointers
        return RetrieveGlobalIndexedPointers(rContainer, global_id_list, rDataCommunicator);
    }
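    // For illustration: if the gathered per-rank entity counts are {3, 1, 2}, the loop above yields
    // recv_offsets = {0, 3, 4} and message_size = 6, so global_id_list receives the ids of every
    // partition laid out as one contiguous block per rank.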

    /**
     * @brief Retrieve global indexed pointers from container and data communicator.
     * @param rContainer the container holding the local entities
     * @param rIdList the list of entity ids to retrieve
     * @param rDataCommunicator the data communicator used for the exchange
     * @return a GlobalPointersVector with the global pointers corresponding to rIdList, in the same order
     */
    template< class TContainerType >
    static GlobalPointersVector< typename TContainerType::value_type > RetrieveGlobalIndexedPointers(
        const TContainerType& rContainer,
        const std::vector<int>& rIdList,
        const DataCommunicator& rDataCommunicator
        )
    {
        auto global_pointers_list = RetrieveGlobalIndexedPointersMap(rContainer, rIdList, rDataCommunicator);

        const int current_rank = rDataCommunicator.Rank();

        // Compute final array
        GlobalPointersVector< typename TContainerType::value_type > result;
        result.reserve(rIdList.size());
        for(unsigned int i=0; i<rIdList.size(); ++i) {
            auto it = global_pointers_list.find(rIdList[i]);
            if(it != global_pointers_list.end())
                result.push_back( it->second );
            else
                KRATOS_ERROR << "The id " << rIdList[i] << " was not found for processor " << current_rank << std::endl;
        }

        return result;
    }

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        std::stringstream buffer;
        buffer << "GlobalPointerUtilities" ;
        return buffer.str();
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const
    {
        rOStream << "GlobalPointerUtilities";
    }

    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const {}

protected:

private:

    /**
     * @brief Checks if a geometrical object is local to the current rank.
     * @note If the iterator over the container found the object, it is considered local.
     */
    static bool ObjectIsLocal(const GeometricalObject& rGeometricalObject, const int CurrentRank)
    {
        return true; // if the iterator was found, then it is local!
    }

    /**
     * @brief Checks if a node is local to the current rank by comparing its PARTITION_INDEX.
     */
    static bool ObjectIsLocal(const Node& rNode, const int CurrentRank)
    {
        return rNode.FastGetSolutionStepValue(PARTITION_INDEX) == CurrentRank;
    }

    /**
     * @brief Extracts from rGPList the global pointers whose ids are listed in rIds.
     */
    template< class GPType >
    static std::unordered_map< int, GPType > ExtractById(
        std::unordered_map< int, GPType >& rGPList,
        const std::vector<int>& rIds)
    {
        std::unordered_map< int, GPType > extracted_list;
        for(auto id : rIds){
            auto gp = rGPList[id];
            extracted_list[id] = gp;
        }
        return extracted_list;
    }

    /**
     * @brief Computes, for the given ids, the map of global pointers to the entities of rContainer
     * that are owned by the current rank.
     */
    template< class TContainerType >
    static std::unordered_map< int, GlobalPointer<typename TContainerType::value_type> > ComputeGpMap(
        const TContainerType& rContainer,
        const std::vector<int>& rIds,
        const DataCommunicator& rDataCommunicator)
    {
        const int current_rank = rDataCommunicator.Rank();
        std::unordered_map< int, GlobalPointer<typename TContainerType::value_type> > extracted_list;

        if(rDataCommunicator.IsDistributed()) {
            // If the execution is distributed, for every entity id check whether it is in the container
            // and, if it is, whether it is local to our partition.
            for(auto id : rIds) {
                const auto it = rContainer.find(id);

                if( it != rContainer.end()) {
                    // Found locally
                    if(ObjectIsLocal(*it, current_rank)){
                        extracted_list.emplace(id, GlobalPointer<typename TContainerType::value_type>(&*it, current_rank));
                    }
                }
            }
        } else {
            // If the execution is not distributed, only check if the id is in the container.
            for(auto id : rIds) {
                const auto it = rContainer.find(id);

                if( it != rContainer.end()) {
                    // Found locally
                    extracted_list.emplace(id, GlobalPointer<typename TContainerType::value_type>(&*it, current_rank));
                }
            }
        }
        return extracted_list;
    }

    /// Assignment operator (deleted).
    GlobalPointerUtilities& operator=(GlobalPointerUtilities const& rOther) = delete;

    /// Copy constructor (deleted).
    GlobalPointerUtilities(GlobalPointerUtilities const& rOther) = delete;

}; // Class GlobalPointerUtilities

/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
                                  GlobalPointerUtilities& rThis)
{
    return rIStream;
}

/// output stream function
inline std::ostream& operator << (std::ostream& rOStream,
                                  const GlobalPointerUtilities& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}

} // namespace Kratos.
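A minimal usage sketch, not taken from the header itself: it assumes the usual Kratos ModelPart/Communicator API and uses placeholder names (rModelPart, the node ids, the free function) purely for illustration.

// Sketch only: rModelPart and the node ids are hypothetical; include paths follow the Kratos core layout.
#include <vector>
#include "includes/model_part.h"
#include "utilities/global_pointer_utilities.h"

void RetrieveSomeGlobalPointers(Kratos::ModelPart& rModelPart)
{
    // DataCommunicator describing the current run (serial or MPI).
    const Kratos::DataCommunicator& r_comm = rModelPart.GetCommunicator().GetDataCommunicator();

    // Global pointers to the locally owned nodes only.
    auto local_gps = Kratos::GlobalPointerUtilities::LocalRetrieveGlobalPointers(
        rModelPart.Nodes(), r_comm);

    // Global pointers for a given list of node ids, wherever those nodes are owned
    // (an error is thrown if an id is not found on any rank).
    const std::vector<int> ids{1, 2, 3};
    auto indexed_gps = Kratos::GlobalPointerUtilities::RetrieveGlobalIndexedPointers(
        rModelPart.Nodes(), ids, r_comm);
}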