problemcomm.C
1 /*
2  *
3  * ##### ##### ###### ###### ### ###
4  * ## ## ## ## ## ## ## ### ##
5  * ## ## ## ## #### #### ## # ##
6  * ## ## ## ## ## ## ## ##
7  * ## ## ## ## ## ## ## ##
8  * ##### ##### ## ###### ## ##
9  *
10  *
11  * OOFEM : Object Oriented Finite Element Code
12  *
13  * Copyright (C) 1993 - 2013 Borek Patzak
14  *
15  *
16  *
17  * Czech Technical University, Faculty of Civil Engineering,
18  * Department of Structural Mechanics, 166 29 Prague, Czech Republic
19  *
20  * This library is free software; you can redistribute it and/or
21  * modify it under the terms of the GNU Lesser General Public
22  * License as published by the Free Software Foundation; either
23  * version 2.1 of the License, or (at your option) any later version.
24  *
25  * This program is distributed in the hope that it will be useful,
26  * but WITHOUT ANY WARRANTY; without even the implied warranty of
27  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28  * Lesser General Public License for more details.
29  *
30  * You should have received a copy of the GNU Lesser General Public
31  * License along with this library; if not, write to the Free Software
32  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
33  */
34 
35 #include "problemcomm.h"
36 #include "intarray.h"
37 #include "error.h"
38 #include "engngm.h"
39 #include "element.h"
40 #include "dofmanager.h"
41 
42 #ifdef __USE_MPI
43  #include <mpi.h>
44 #endif
45 
46 #define __VERBOSE_PARALLEL
47 
48 namespace oofem {
49 ProblemCommunicator :: ProblemCommunicator(EngngModel *emodel, CommunicatorBuff *b, int rank, int size) :
50  Communicator(emodel, b, rank, size)
51 {
52  this->initialized = false;
53 }
54 
55 
56 ProblemCommunicator :: ~ProblemCommunicator()
57 { }
58 
59 
60 NodeCommunicator :: NodeCommunicator(EngngModel *emodel, CommunicatorBuff *b, int rank, int size) :
61  ProblemCommunicator(emodel, b, rank, size)
62 { }
63 
64 ElementCommunicator :: ElementCommunicator(EngngModel *emodel, CommunicatorBuff *b, int rank, int size) :
65  ProblemCommunicator(emodel, b, rank, size)
66 { }
67 
68 void
69 NodeCommunicator :: setUpCommunicationMaps(EngngModel *pm, bool excludeSelfCommFlag, bool forceReinit)
70 {
71 #ifdef __VERBOSE_PARALLEL
72  VERBOSEPARALLEL_PRINT("NodeCommunicator :: setUpCommunicationMaps", "Setting up communication maps", rank);
73 #endif
74 
75  if ( !forceReinit && initialized ) {
76  return;
77  }
78 
79  Domain *domain = pm->giveDomain(1);
80  int nnodes = domain->giveNumberOfDofManagers();
81 
82  //
83  // receive and send maps are the same and are assembled locally
84  // using DofManager's partition lists.
85  //
86 
87  IntArray domainNodeSendCount(size);
88 
89  for ( int i = 1; i <= nnodes; i++ ) {
90  DofManager *dman = domain->giveDofManager(i);
91  const IntArray *partitionList = dman->givePartitionList();
92  if ( dman->giveParallelMode() == DofManager_shared ) {
93  for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
94  if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
95  domainNodeSendCount.at(partitionList->at(j) + 1)++;
96  }
97  }
98  }
99  }
100 
101  // build maps simultaneously
102  IntArray pos(size);
103  std :: vector< IntArray >maps( size );
104  for ( int i = 0; i < size; i++ ) {
105  maps [ i ].resize( domainNodeSendCount.at ( i + 1 ) );
106  }
107 
108 
109  for ( int i = 1; i <= nnodes; i++ ) {
110  DofManager *dman = domain->giveDofManager(i);
111  // if a combination of node and element cuts can occur, test for shared DofMan mode
112  const IntArray *partitionList = dman->givePartitionList();
113  if ( dman->giveParallelMode() == DofManager_shared ) {
114  for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
115  int partition = partitionList->at(j);
116  if ( !( excludeSelfCommFlag && ( this->rank == partition ) ) ) {
117  maps [ partition ].at( ++pos.at(partition + 1) ) = i;
118  }
119  }
120  }
121  }
122 
123  // set up domain communicators maps
124  for ( int i = 0; i < size; i++ ) {
125  this->setProcessCommunicatorToSendArry(this->giveProcessCommunicator(i), maps [ i ]);
126  this->setProcessCommunicatorToRecvArry(this->giveProcessCommunicator(i), maps [ i ]);
127  //this->giveDomainCommunicator(i)->setToSendArry (this->engngModel, maps[i]);
128  //this->giveDomainCommunicator(i)->setToRecvArry (this->engngModel, maps[i]);
129  }
130 
131  initialized = true;
132 }
133 
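
The routine above uses a two-pass pattern: a first pass over the shared dof managers counts how many entries each partition will receive, so that every per-partition map can be sized exactly, and a second pass fills the maps through a running position counter. Below is a minimal sketch of the same pattern in plain standard containers; the function and parameter names are illustrative only and are not part of OOFEM.

#include <vector>

// itemPartitions[i] lists the partitions that item i+1 is shared with;
// 'size' is the number of partitions, 'rank' the local one.
std::vector< std::vector<int> > buildMaps(const std::vector< std::vector<int> > &itemPartitions,
                                          int size, int rank, bool excludeSelf)
{
    std::vector<int> count(size, 0);
    for ( const auto &parts : itemPartitions ) {          // pass 1: count entries per partition
        for ( int p : parts ) {
            if ( !( excludeSelf && p == rank ) ) {
                count [ p ]++;
            }
        }
    }

    std::vector< std::vector<int> > maps(size);
    for ( int p = 0; p < size; p++ ) {
        maps [ p ].reserve(count [ p ]);                  // reserve exact capacity per partition
    }

    int localNumber = 0;
    for ( const auto &parts : itemPartitions ) {          // pass 2: fill the maps
        ++localNumber;                                    // 1-based local number, as in the loops above
        for ( int p : parts ) {
            if ( !( excludeSelf && p == rank ) ) {
                maps [ p ].push_back(localNumber);
            }
        }
    }
    return maps;
}
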
134 
135 void
136 ElementCommunicator :: setUpCommunicationMaps(EngngModel *pm, bool excludeSelfCommFlag, bool forceReinit)
137 {
138 #ifdef __VERBOSE_PARALLEL
139  VERBOSEPARALLEL_PRINT("ElementCommunicator :: setUpCommunicationMaps", "Setting up communication maps", rank);
140 #endif
141 
142  if ( !forceReinit && initialized ) {
143  return;
144  }
145 
146  OOFEM_LOG_RELEVANT("[%d] ElementCommunicator :: Setting up communication maps\n", rank);
147 
148  Domain *domain = pm->giveDomain(1);
149 
150  /*
151  * Initially, each partition knows for which nodes a receive
152  * is needed (and can therefore easily compute the recv map),
153  * but does not know for which nodes it should send data to which
154  * partition. Hence, the communication setup is performed by
155  * broadcasting "send request" lists of nodes for which
156  * a partition expects to receive data (i.e., of those nodes
157  * which the partition uses, but does not own) to all
158  * collaborating processes. The "send request" lists are
159  * converted into send maps.
160  */
161 
162  // receive maps can be built locally,
163  // but send maps should be assembled from broadcasted lists (containing
164  // expected receive nodes) of remote partitions.
165 
166  // first build local receive map
167  IntArray domainNodeRecvCount(size);
168  int domainRecvListSize = 0, domainRecvListPos = 0;
169  int nelems;
170  int result = 1;
171 
172  nelems = domain->giveNumberOfElements();
173  for ( int i = 1; i <= nelems; i++ ) {
174  Element *element = domain->giveElement(i);
175  const IntArray *partitionList = element->givePartitionList();
176  if ( element->giveParallelMode() == Element_remote ) {
177  // size of partitionList should be 1 <== only one master
178  for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
179  if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
180  domainRecvListSize++;
181  domainNodeRecvCount.at(partitionList->at(j) + 1)++;
182  }
183  }
184  }
185  }
186 
187  // build maps simultaneously
188  IntArray pos(size);
189  std :: vector< IntArray >maps( size );
190  for ( int i = 0; i < size; i++ ) {
191  maps [ i ].resize( domainNodeRecvCount.at ( i + 1 ) );
192  }
193 
194  // allocate also domain receive list to be broadcasted
195  IntArray domainRecvList(domainRecvListSize);
196 
197  if ( domainRecvListSize ) {
198  for ( int i = 1; i <= nelems; i++ ) {
199  // test if the element is a remote one
200  Element *element = domain->giveElement(i);
201  if ( element->giveParallelMode() == Element_remote ) {
202  domainRecvList.at(++domainRecvListPos) = element->giveGlobalNumber();
203 
204  const IntArray *partitionList = element->givePartitionList();
205  // size of partitionList should be 1 <== only one master
206  for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
207  if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
208  int partition = partitionList->at(j);
209  maps [ partition ].at( ++pos.at(partition + 1) ) = i;
210  }
211  }
212  }
213  }
214  }
215 
216  // set up domains recv communicator maps
217  for ( int i = 0; i < size; i++ ) {
218  this->setProcessCommunicatorToRecvArry(this->giveProcessCommunicator(i), maps [ i ]);
219  //this->giveDomainCommunicator(i)->setToRecvArry(this->engngModel, maps [ i ]);
220  }
221 
222 
223 #ifdef __VERBOSE_PARALLEL
224  for (int i=0; i<size; i++) {
225  fprintf (stderr, "domain %d-%d: domainCommRecvsize is %d\n",rank,i,this->giveProcessCommunicator(i)->giveToRecvMap()->giveSize() );
226  printf ("domain %d-%d: recv map:",rank,i);
227  this->giveProcessCommunicator(i)->giveToRecvMap()->printYourself();
228  }
229 #endif
230 
231 
232  // to assemble send maps, we must analyze broadcasted remote domain send lists
233  // and we must also broadcast our send list.
234 
235 #ifdef __VERBOSE_PARALLEL
236  VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Remote Element-cut broadcasting started", rank);
237 #endif
238 
239 
240  StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
241  IntArray remoteDomainRecvList;
242  IntArray toSendMap;
243  int localExpectedSize, globalRecvSize;
244  int sendMapPos, sendMapSize, globalDofManNum;
245 
246  // determine the size of receive buffer using AllReduce operation
247 #ifndef IBM_MPI_IMPLEMENTATION
248  localExpectedSize = domainRecvList.givePackSize(commBuff);
249 #else
250  localExpectedSize = domainRecvList.givePackSize(commBuff) + 1;
251 #endif
252 
253 
254 #ifdef __USE_MPI
255  result = MPI_Allreduce(& localExpectedSize, & globalRecvSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
256  if ( result != MPI_SUCCESS ) {
257  OOFEM_ERROR("MPI_Allreduce failed");
258  }
259 
260 #else
261 WARNING: NOT SUPPORTED MESSAGE PASSING LIBRARY
262 #endif
263 
264 #ifdef __VERBOSE_PARALLEL
265  VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Finished reducing receiveBufferSize", rank);
266 #endif
267 
268 
269  // resize to fit largest received message
270  commBuff.resize(globalRecvSize);
271 
272  // resize toSend map to max possible size
273  toSendMap.resize(globalRecvSize);
274 
275  for ( int i = 0; i < size; i++ ) { // loop over domains
276  commBuff.init();
277  if ( i == rank ) {
278  //current domain has to send its receive list to all domains
279  // broadcast domainRecvList
280 
281 #ifdef __VERBOSE_PARALLEL
282  VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list", rank);
283 #endif
284 
285  domainRecvList.storeYourself(commBuff);
286  result = commBuff.bcast(i);
287  if ( result != MPI_SUCCESS ) {
288  OOFEM_ERROR("commBuff broadcast failed");
289  }
290 
291 #ifdef __VERBOSE_PARALLEL
292  VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list finished", rank);
293 #endif
294  } else {
295 #ifdef __VERBOSE_PARALLEL
296  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d\n",
297  rank, "ProblemCommunicator :: unpackAllData", i);
298 #endif
299  // receive broadcasted lists
300  result = commBuff.bcast(i);
301  if ( result != MPI_SUCCESS ) {
302  OOFEM_ERROR("commBuff broadcast failed");
303  }
304 
305 #ifdef __VERBOSE_PARALLEL
306  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d finished\n",
307  rank, "ProblemCommunicator :: unpackAllData", i);
308 #endif
309 
310 
311  // unpack remote receive list
312  if ( remoteDomainRecvList.restoreYourself(commBuff) != CIO_OK ) {
313  OOFEM_ERROR("unpack remote receive list failed");
314  }
315 
316  // find if remote elements are in local partition
317  // if yes, add them to the send map for the corresponding i-th partition
318  sendMapPos = 0;
319  sendMapSize = 0;
320  // determine sendMap size
321  for ( int j = 1; j <= nelems; j++ ) { // loop over local elements
322  Element *element = domain->giveElement(j);
323  if ( element->giveParallelMode() == Element_local ) {
324  globalDofManNum = element->giveGlobalNumber();
325  // test if globalDofManNum is in remoteDomainRecvList
326  if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
327  sendMapSize++;
328  }
329  }
330  }
331 
332  toSendMap.resize(sendMapSize);
333 
334  for ( int j = 1; j <= nelems; j++ ) { // loop over local elements
335  Element *element = domain->giveElement(j);
336  if ( element->giveParallelMode() == Element_local ) {
337  globalDofManNum = element->giveGlobalNumber();
338  // test if globalDofManNum is in remoteDomainRecvList
339  if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
340  // add this local element number to the send map for the active partition
341  toSendMap.at(++sendMapPos) = j;
342  }
343  }
344  } // end loop over local DofManagers
345 
346  // set send map to i-th process communicator
347  this->setProcessCommunicatorToSendArry(this->giveProcessCommunicator(i), toSendMap);
348 
349 #ifdef __VERBOSE_PARALLEL
350  fprintf (stderr, "domain %d-%d: domainCommSendsize is %d\n",rank,i,this->giveProcessCommunicator(i)->giveToSendMap()->giveSize() );
351  printf ("domain %d-%d: send map:",rank,i);
352  this->giveProcessCommunicator(i)->giveToSendMap()->printYourself();
353 
354 #endif
355 
356 
357  //this->giveDomainCommunicator(i)->setToSendArry (this->engngModel, toSendMap);
358  } // end receiving broadcasted lists
359 
360 #ifdef __VERBOSE_PARALLEL
361  VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Receiving broadcasted send maps finished", rank);
362 #endif
363  } // end loop over domains
364 
365  initialized = true;
366 }
367 
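
The element-cut setup above follows the protocol described in the comment block at the start of ElementCommunicator :: setUpCommunicationMaps: each partition broadcasts the list of global ids it expects to receive, and every other partition intersects that list with what it owns locally to obtain its send map, after an MPI_Allreduce with MPI_MAX has levelled the broadcast buffer size. Below is a condensed sketch of the same negotiation in plain MPI; the function and container names are hypothetical, and OOFEM's communication-buffer packing is replaced by a raw integer buffer.

#include <mpi.h>
#include <vector>
#include <algorithm>

// recvRequests: global ids this rank expects to receive (its "send request" list);
// ownedIds: global ids of entities owned locally by this rank.
std::vector< std::vector<int> > negotiateSendMaps(const std::vector<int> &recvRequests,
                                                  const std::vector<int> &ownedIds,
                                                  MPI_Comm comm)
{
    int rank, size;
    MPI_Comm_rank(comm, & rank);
    MPI_Comm_size(comm, & size);

    // agree on a common buffer size: largest request list plus one slot for its length
    int localSize = (int) recvRequests.size() + 1, globalSize = 0;
    MPI_Allreduce(& localSize, & globalSize, 1, MPI_INT, MPI_MAX, comm);

    std::vector<int> buffer(globalSize);
    std::vector< std::vector<int> > sendMaps(size);

    for ( int root = 0; root < size; root++ ) {      // loop over partitions, as above
        if ( root == rank ) {                        // pack and broadcast own request list
            buffer [ 0 ] = (int) recvRequests.size();
            std::copy( recvRequests.begin(), recvRequests.end(), buffer.begin() + 1 );
        }
        MPI_Bcast(buffer.data(), globalSize, MPI_INT, root, comm);
        if ( root == rank ) {
            continue;                                // no self-communication in this sketch
        }
        // every id requested by 'root' that is owned here enters the send map for 'root'
        for ( int k = 1; k <= buffer [ 0 ]; k++ ) {
            if ( std::find( ownedIds.begin(), ownedIds.end(), buffer [ k ] ) != ownedIds.end() ) {
                sendMaps [ root ].push_back( buffer [ k ] );
            }
        }
    }
    return sendMaps;
}
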
368 int
369 NodeCommunicator :: setProcessCommunicatorToSendArry(ProcessCommunicator *processComm, IntArray &map)
370 {
371  sortCommMap(map, & ProblemCommunicator :: DofManCmp);
372  processComm->setToSendArry(engngModel, map, 0);
373  return 1;
374 }
375 
376 int
377 NodeCommunicator :: setProcessCommunicatorToRecvArry(ProcessCommunicator *processComm, IntArray &map)
378 {
379  sortCommMap(map, & ProblemCommunicator :: DofManCmp);
380  processComm->setToRecvArry(engngModel, map, 0);
381  return 1;
382 }
383 
384 int
385 ElementCommunicator :: setProcessCommunicatorToSendArry(ProcessCommunicator *processComm, IntArray &map)
386 {
387  sortCommMap(map, & ProblemCommunicator :: ElemCmp);
388  processComm->setToSendArry(engngModel, map, 1);
389  return 1;
390 }
391 
392 int
393 ElementCommunicator :: setProcessCommunicatorToRecvArry(ProcessCommunicator *processComm, IntArray &map)
394 {
395  sortCommMap(map, & ProblemCommunicator :: ElemCmp);
396  processComm->setToRecvArry(engngModel, map, 1);
397  return 1;
398 }
399 
400 
401 void
402 ProblemCommunicator :: sortCommMap( IntArray &map, int ( ProblemCommunicator :: *cmp )( int, int ) )
403 {
404  this->quickSortCommMap(map, 1, map.giveSize(), cmp);
405 }
406 
407 
408 void
409 ProblemCommunicator :: quickSortCommMap( IntArray &map, int l, int r, int ( ProblemCommunicator :: *cmp )( int, int ) )
410 {
411  if ( r <= l ) {
412  return;
413  }
414 
415  int i = quickSortPartition(map, l, r, cmp);
416  quickSortCommMap(map, l, i - 1, cmp);
417  quickSortCommMap(map, i + 1, r, cmp);
418 }
419 
420 
421 int
422 ProblemCommunicator :: quickSortPartition( IntArray &map, int l, int r, int ( ProblemCommunicator :: *cmp )( int, int ) )
423 {
424  int i = l - 1, j = r;
425  int v = map.at(r);
426  int swap;
427 
428  for ( ; ; ) {
429  while ( ( ( this->*cmp )(map.at(++i), v) ) < 0 ) {
430  ;
431  }
432 
433  while ( ( ( this->*cmp )( v, map.at(--j) ) ) < 0 ) {
434  if ( j == l ) {
435  break;
436  }
437  }
438 
439  if ( i >= j ) {
440  break;
441  }
442 
443  swap = map.at(i);
444  map.at(i) = map.at(j);
445  map.at(j) = swap;
446  }
447 
448  swap = map.at(i);
449  map.at(i) = map.at(r);
450  map.at(r) = swap;
451  return i;
452 }
453 
454 
455 int
456 ProblemCommunicator :: DofManCmp(int i, int j)
457 {
458  return ( engngModel->giveDomain(1)->giveDofManager(i)->giveGlobalNumber() -
459  engngModel->giveDomain(1)->giveDofManager(j)->giveGlobalNumber() );
460 }
461 int
462 ProblemCommunicator :: ElemCmp(int i, int j)
463 {
464  return ( engngModel->giveDomain(1)->giveElement(i)->giveGlobalNumber() -
465  engngModel->giveDomain(1)->giveElement(j)->giveGlobalNumber() );
466 }
467 } // end namespace oofem
468 
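The comparators DofManCmp and ElemCmp are member functions because they need engngModel to look up global numbers, so sortCommMap and quickSortCommMap take a pointer-to-member-function and invoke it through ( this->*cmp )(i, j). Below is a self-contained sketch of that mechanism; the Sorter class and its members are illustrative only.

#include <cstdio>

class Sorter
{
    int order [ 4 ] = { 30, 10, 40, 20 };   // stands in for the global numbers looked up via engngModel

public:
    // same shape as ProblemCommunicator :: DofManCmp / ElemCmp: negative, zero or positive
    int cmp(int i, int j) { return order [ i ] - order [ j ]; }

    // a pointer-to-member-function parameter, invoked through this->*
    int callCmp( int ( Sorter :: *c )( int, int ), int i, int j ) { return ( this->*c )(i, j); }
};

int main()
{
    Sorter s;
    // pass the comparator exactly as sortCommMap receives it
    std::printf( "%d\n", s.callCmp(& Sorter :: cmp, 0, 1) );   // prints 20 (30 - 10)
    return 0;
}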