OOFEM  2.4
OOFEM.org - Object Oriented Finite Element Solver
feticommunicator.C
Go to the documentation of this file.
1 /*
2  *
3  * ##### ##### ###### ###### ### ###
4  * ## ## ## ## ## ## ## ### ##
5  * ## ## ## ## #### #### ## # ##
6  * ## ## ## ## ## ## ## ##
7  * ## ## ## ## ## ## ## ##
8  * ##### ##### ## ###### ## ##
9  *
10  *
11  * OOFEM : Object Oriented Finite Element Code
12  *
13  * Copyright (C) 1993 - 2013 Borek Patzak
14  *
15  *
16  *
17  * Czech Technical University, Faculty of Civil Engineering,
18  * Department of Structural Mechanics, 166 29 Prague, Czech Republic
19  *
20  * This library is free software; you can redistribute it and/or
21  * modify it under the terms of the GNU Lesser General Public
22  * License as published by the Free Software Foundation; either
23  * version 2.1 of the License, or (at your option) any later version.
24  *
25  * This program is distributed in the hope that it will be useful,
26  * but WITHOUT ANY WARRANTY; without even the implied warranty of
27  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28  * Lesser General Public License for more details.
29  *
30  * You should have received a copy of the GNU Lesser General Public
31  * License along with this library; if not, write to the Free Software
32  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
33  */
34 
35 #include "../sm/FETISolver/feticommunicator.h"
36 #include "engngm.h"
37 #include "intarray.h"
38 #include "dofmanager.h"
39 #include "unknownnumberingscheme.h"
40 #include "domain.h"
41 
42 #ifdef __USE_MPI
43  #include <mpi.h>
44 #endif
45 
46 namespace oofem {
48  Communicator(emodel, b, rank, size)
49 {
50  if ( rank != 0 ) {
51  OOFEM_ERROR("bad rank number, expected rank 0 for master");
52  }
53 }
54 
55 
57 { }
58 
59 
60 void
62 {
63  int i, j, l, maxRec;
64  int globaldofmannum, localNumber, ndofs;
65  int numberOfBoundaryDofMans;
66  int source, tag;
67  IntArray numberOfPartitionBoundaryDofMans(size);
68  StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
70  // FETIBoundaryDofManager *dofmanrec;
71  // Map containing boundary dof managers records, the key is corresponding global number
72  // value is corresponding local master dof manager number
73  map< int, int, less< int > >BoundaryDofManagerMap;
74  // communication maps of slaves
75  IntArray **commMaps = new IntArray * [ size ];
76  // location array
77  IntArray locNum;
78  Domain *domain = pm->giveDomain(1);
79 
80  // check if receiver is master
81  if ( this->rank != 0 ) {
82  OOFEM_ERROR("rank 0 (master) expected as receiver");
83  }
84 
85  // resize receive buffer
86  commBuff.resize( commBuff.givePackSizeOfInt(1) );
87 
88  //
89  // receive data
90  //
91  for ( i = 1; i < size; i++ ) {
93  while ( !commBuff.testCompletion(source, tag) ) {
94  ;
95  }
96 
97  // unpack data
98  commBuff.read(j);
99 #ifdef __VERBOSE_PARALLEL
100  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received data from partition %3d (received %d)\n",
101  rank, "FETICommunicator :: setUpCommunicationMaps : received number of boundary dofMans", source, j);
102 #endif
103  numberOfPartitionBoundaryDofMans.at(source + 1) = j;
104  commBuff.init();
105  }
106 
107  MPI_Barrier(MPI_COMM_WORLD);
108 
109 
110  // determine the total number of boundary dof managers at master
111  int nnodes = domain->giveNumberOfDofManagers();
112  j = 0;
113  for ( i = 1; i <= nnodes; i++ ) {
114  if ( domain->giveDofManager(i)->giveParallelMode() == DofManager_shared ) {
115  j++;
116  }
117  }
118 
119  numberOfPartitionBoundaryDofMans.at(1) = j;
120 
121  //
122  // receive list of bounadry dof managers with corresponding number of dofs from each partition
123  //
124 
125  // resize the receive buffer to fit all messages
126  maxRec = 0;
127  for ( i = 0; i < size; i++ ) {
128  if ( numberOfPartitionBoundaryDofMans.at(i + 1) > maxRec ) {
129  maxRec = numberOfPartitionBoundaryDofMans.at(i + 1);
130  }
131  }
132 
133  commBuff.resize( 2 * maxRec * commBuff.givePackSizeOfInt(1) );
134  // resize communication maps acordingly
135  for ( i = 0; i < size; i++ ) {
136  j = numberOfPartitionBoundaryDofMans.at(i + 1);
137  commMaps [ i ] = new IntArray(j);
138  }
139 
140 
141  // add local master contribution first
142  // loop over all dofmanager data received
143  i = 0;
144  for ( j = 1; j <= numberOfPartitionBoundaryDofMans.at(1); j++ ) {
145  // fing next shared dofman
146  while ( !( domain->giveDofManager(++i)->giveParallelMode() == DofManager_shared ) ) {
147  ;
148  }
149 
150  globaldofmannum = domain->giveDofManager(i)->giveGlobalNumber();
151  domain->giveDofManager(i)->giveCompleteLocationArray(locNum, dn);
152  ndofs = 0;
153  for ( l = 1; l <= locNum.giveSize(); l++ ) {
154  if ( locNum.at(l) ) {
155  ndofs++;
156  }
157  }
158 
159  // add corresponding entry to master map of boundary dof managers
160  if ( ( localNumber = BoundaryDofManagerMap [ globaldofmannum ] ) == 0 ) { // no local counterpart exist
161  // create it
162  boundaryDofManList.push_back( FETIBoundaryDofManager(globaldofmannum, 0, ndofs) );
163  // remember the local number; actual position in vector is localNumber-1
164  localNumber = BoundaryDofManagerMap [ globaldofmannum ] = ( boundaryDofManList.size() );
165  boundaryDofManList.back().addPartition(0);
166  } else { // update the corresponding record
167  boundaryDofManList [ localNumber - 1 ].addPartition(0);
168  if ( boundaryDofManList [ localNumber - 1 ].giveNumberOfDofs() != ndofs ) {
169  OOFEM_ERROR("ndofs size mismatch");
170  }
171  }
172 
173  // remember communication map for particular partition
174  commMaps [ 0 ]->at(j) = localNumber;
175  }
176 
177  //
178  // receive data from slave partitions
179  //
180 
181  for ( i = 1; i < size; i++ ) {
182  commBuff.iRecv(MPI_ANY_SOURCE, FETICommunicator :: BoundaryDofManagersRecMsg);
183  while ( !commBuff.testCompletion(source, tag) ) {
184  ;
185  }
186 
187  // unpack data
188 #ifdef __VERBOSE_PARALLEL
189  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received data from partition %3d\n",
190  rank, "FETICommunicator :: setUpCommunicationMaps : received boundary dofMans records", source);
191 #endif
192 
193  // loop over all dofmanager data received
194  for ( j = 1; j <= numberOfPartitionBoundaryDofMans.at(source + 1); j++ ) {
195  commBuff.read(globaldofmannum);
196  commBuff.read(ndofs);
197 
198  // add corresponding entry to master map of boundary dof managers
199  if ( ( localNumber = BoundaryDofManagerMap [ globaldofmannum ] ) == 0 ) { // no local counterpart exist
200  // create it
201  boundaryDofManList.push_back( FETIBoundaryDofManager(globaldofmannum, 0, ndofs) );
202  // remember the local number; actual position in vector is localNumber-1
203  localNumber = BoundaryDofManagerMap [ globaldofmannum ] = ( boundaryDofManList.size() );
204  boundaryDofManList.back().addPartition(source);
205  } else { // update the corresponding record
206  boundaryDofManList [ localNumber - 1 ].addPartition(source);
207  if ( boundaryDofManList [ localNumber - 1 ].giveNumberOfDofs() != ndofs ) {
208  OOFEM_ERROR("ndofs size mismatch");
209  }
210  }
211 
212  // remember communication map for particular partition
213  commMaps [ source ]->at(j) = localNumber;
214  }
215 
216  commBuff.init();
217  }
218 
219  MPI_Barrier(MPI_COMM_WORLD);
220  //
221  // assign code numbers to boundary dofs
222  //
223  numberOfEquations = 0;
224  numberOfBoundaryDofMans = boundaryDofManList.size();
225  for ( i = 1; i <= numberOfBoundaryDofMans; i++ ) {
226  boundaryDofManList [ i - 1 ].setCodeNumbers(numberOfEquations); // updates numberOfEquations
227  }
228 
229  // store the commMaps
230  for ( i = 0; i < size; i++ ) {
231  if ( i != 0 ) {
232  this->giveProcessCommunicator(i)->setToSendArry(engngModel, * commMaps [ i ], 0);
233  this->giveProcessCommunicator(i)->setToRecvArry(engngModel, * commMaps [ i ], 0);
234  } else {
235  masterCommMap = * commMaps [ i ];
236  }
237 
238  delete commMaps [ i ];
239  }
240 
241  delete commMaps;
242 
243  MPI_Barrier(MPI_COMM_WORLD);
244 
245 #ifdef __VERBOSE_PARALLEL
246  VERBOSEPARALLEL_PRINT("FETICommunicator::setUpCommunicationMaps", "communication maps setup finished", rank);
247 #endif
248 }
249 } // end namespace oofem
The representation of EngngModel default unknown numbering.
Class and object Domain.
Definition: domain.h:115
int rank
Rank of process.
Definition: communicator.h:109
IntArray masterCommMap
Master communication map.
int giveGlobalNumber() const
Definition: dofmanager.h:501
int giveNumberOfDofManagers() const
Returns number of dof managers in domain.
Definition: domain.h:432
virtual int iRecv(int source, int tag, int count=0)
Starts standard mode, nonblocking receive.
Definition: combuff.h:346
void setToSendArry(T *emodel, const IntArray &src, int packUnpackType)
Sets receiver toSend array to src.
Definition: processcomm.h:378
#define OOFEM_LOG_DEBUG(...)
Definition: logger.h:128
Class implementing an array of integers.
Definition: intarray.h:61
int & at(int i)
Coefficient access function.
Definition: intarray.h:103
int numberOfEquations
Number of equations at master level (determined form boundary nodes).
virtual int testCompletion()
Tests if the operation identified by this->request is complete.
Definition: combuff.h:348
virtual int givePackSizeOfInt(int count)
Definition: combuff.C:271
#define OOFEM_ERROR(...)
Definition: error.h:61
virtual int resize(int newSize)
Resizes buffer to given size.
Definition: combuff.h:314
FETICommunicator(EngngModel *emodel, CommunicatorBuff *b, int rank, int size)
Creates new communicator.
void setUpCommunicationMaps(EngngModel *pm)
Service for setting up the communication patterns with other remote processes.
#define VERBOSEPARALLEL_PRINT(service, str, rank)
Definition: parallel.h:50
virtual void init()
Initializes buffer to empty state.
Definition: combuff.h:316
virtual int read(int *dest, int n)
Reads count integer values into array pointed by data.
Definition: combuff.h:333
EngngModel * engngModel
Engineering model.
Definition: communicator.h:115
Represent the abstraction for DOF manager.
vector< FETIBoundaryDofManager > boundaryDofManList
List of boundary dof managers records.
void giveCompleteLocationArray(IntArray &locationArray, const UnknownNumberingScheme &s) const
Returns full location array of receiver containing equation numbers of all dofs of receiver...
Definition: dofmanager.C:229
Class representing communicator.
Definition: communicator.h:105
void setToRecvArry(T *emodel, const IntArray &src, int packUnpackType)
Sets receiver toRecv array to src.
Definition: processcomm.h:388
int size
Number of processes.
Definition: communicator.h:111
ProcessCommunicator * giveProcessCommunicator(int i)
Returns i-th problem communicator.
Definition: communicator.h:139
The Communicator and corresponding buffers (represented by this class) are separated in order to allo...
Definition: communicator.h:60
Abstract base class representing the "problem" under consideration.
Definition: engngm.h:181
int giveSize() const
Definition: intarray.h:203
The oofem namespace defines a context or scope in which all OOFEM names are defined.
Domain * giveDomain(int n)
Service for accessing particular problem domain.
Definition: engngm.C:1720
DofManager * giveDofManager(int n)
Service for accessing particular domain dof manager.
Definition: domain.C:314
virtual ~FETICommunicator()
Destructor.
DofManager is shared by neighboring partitions, it is necessary to sum contributions from all contrib...
Definition: dofmanager.h:82
dofManagerParallelMode giveParallelMode() const
Return dofManagerParallelMode of receiver.
Definition: dofmanager.h:512

This page is part of the OOFEM documentation. Copyright (c) 2011 Borek Patzak
Project e-mail: info@oofem.org
Generated at Tue Jan 2 2018 20:07:28 for OOFEM by doxygen 1.8.11 written by Dimitri van Heesch, © 1997-2011