parmetisloadbalancer.C
1 /*
2  *
3  * ##### ##### ###### ###### ### ###
4  * ## ## ## ## ## ## ## ### ##
5  * ## ## ## ## #### #### ## # ##
6  * ## ## ## ## ## ## ## ##
7  * ## ## ## ## ## ## ## ##
8  * ##### ##### ## ###### ## ##
9  *
10  *
11  * OOFEM : Object Oriented Finite Element Code
12  *
13  * Copyright (C) 1993 - 2013 Borek Patzak
14  *
15  *
16  *
17  * Czech Technical University, Faculty of Civil Engineering,
18  * Department of Structural Mechanics, 166 29 Prague, Czech Republic
19  *
20  * This library is free software; you can redistribute it and/or
21  * modify it under the terms of the GNU Lesser General Public
22  * License as published by the Free Software Foundation; either
23  * version 2.1 of the License, or (at your option) any later version.
24  *
25  * This program is distributed in the hope that it will be useful,
26  * but WITHOUT ANY WARRANTY; without even the implied warranty of
27  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28  * Lesser General Public License for more details.
29  *
30  * You should have received a copy of the GNU Lesser General Public
31  * License along with this library; if not, write to the Free Software
32  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
33  */
34 
35 #include "parmetisloadbalancer.h"
36 #include "domain.h"
37 #include "engngm.h"
38 #include "element.h"
39 #include "dofmanager.h"
40 #include "connectivitytable.h"
41 #include "error.h"
42 #include "parallel.h"
43 #include "processcomm.h"
44 #include "communicator.h"
45 #include "classfactory.h"
46 
47 #include <set>
48 #include <stdlib.h>
49 
50 namespace oofem {
51 //#define ParmetisLoadBalancer_DEBUG_PRINT
52 
53 REGISTER_LoadBalancer(ParmetisLoadBalancer);
54 
55 ParmetisLoadBalancer :: ParmetisLoadBalancer(Domain *d) : LoadBalancer(d)
56 {
57  elmdist = NULL;
58  tpwgts = NULL;
59 }
60 
61 ParmetisLoadBalancer :: ~ParmetisLoadBalancer()
62 {
63  if ( elmdist ) {
64  delete[] elmdist;
65  }
66 
67  if ( tpwgts ) {
68  delete[] tpwgts;
69  }
70 }
71 
72 
73 void
74 ParmetisLoadBalancer :: calculateLoadTransfer()
75 {
76  idx_t *eind, *eptr, *xadj, *adjncy, *vwgt, *vsize;
77  idx_t *part;
78  int i, nlocalelems, eind_size, nelem = domain->giveNumberOfElements();
79  int ndofman, idofman, numflag, ncommonnodes, options [ 4 ], ie, nproc;
80  int edgecut, wgtflag, ncon;
81  real_t ubvec [ 1 ], itr;
82  Element *ielem;
83  MPI_Comm communicator = MPI_COMM_WORLD;
84  LoadBalancerMonitor *lbm = domain->giveEngngModel()->giveLoadBalancerMonitor();
85 
86  nproc = domain->giveEngngModel()->giveNumberOfProcesses();
87  // init parmetis element numbering
88  this->initGlobalParmetisElementNumbering();
89  // prepare data structures for ParMETIS_V3_Mesh2Dual
90  // count the size of eind array
91  eind_size = 0;
92  nlocalelems = 0;
93  for ( i = 1; i <= nelem; i++ ) {
94  ielem = domain->giveElement(i);
95  if ( ielem->giveParallelMode() == Element_local ) {
96  nlocalelems++;
97  eind_size += ielem->giveNumberOfDofManagers();
98  }
99  }
100 
101  // allocate eind and eptr arrays
102  eind = new idx_t [ eind_size ];
103  eptr = new idx_t [ nlocalelems + 1 ];
104  if ( ( eind == NULL ) || ( eptr == NULL ) ) {
105  OOFEM_ERROR("failed to allocate eind and eptr arrays");
106  }
107 
108  // fill in the eind and eptr (mesh graph)
109  int eind_pos = 0, eptr_pos = 0;
110  for ( i = 1; i <= nelem; i++ ) {
111  ielem = domain->giveElement(i);
112  if ( ielem->giveParallelMode() == Element_local ) {
113  eptr [ eptr_pos ] = eind_pos;
114  ndofman = ielem->giveNumberOfDofManagers();
115  for ( idofman = 1; idofman <= ndofman; idofman++ ) {
116  eind [ eind_pos++ ] = ielem->giveDofManager(idofman)->giveGlobalNumber() - 1;
117  }
118 
119  eptr_pos++;
120  }
121  }
122 
123  // last record: terminate the eptr array
124  eptr [ nlocalelems ] = eind_pos;
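The eptr/eind pair filled in above is the CSR-style distributed mesh description that ParMETIS_V3_Mesh2Dual consumes: eind concatenates the zero-based global node numbers of every local element, and eptr[k] .. eptr[k+1] - 1 delimits the slice belonging to local element k. A minimal stand-alone sketch (not part of this file; plain int is used instead of ParMETIS's idx_t, and the two quadrilateral elements are made up) shows the layout:

    #include <cstdio>

    int main()
    {
        // two made-up quadrilateral elements sharing an edge (zero-based global node ids):
        //   element 0: nodes 0 1 2 3,   element 1: nodes 1 4 5 2
        int eptr[] = { 0, 4, 8 };                 // element k occupies eind[ eptr[k] .. eptr[k+1]-1 ]
        int eind[] = { 0, 1, 2, 3, 1, 4, 5, 2 };

        for ( int k = 0; k < 2; k++ ) {
            std::printf("element %d:", k);
            for ( int j = eptr [ k ]; j < eptr [ k + 1 ]; j++ ) {
                std::printf(" %d", eind [ j ]);
            }
            std::printf("\n");
        }
        return 0;
    }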
125 
126  // call ParMETIS_V3_Mesh2Dual to construct dual graph (in parallel)
127  // dual graph: elements are vertices; element edges are graph edges
128  // this is necessary, since cut runs through graph edges
129  numflag = 0;
130  ncommonnodes = 2;
131  ParMETIS_V3_Mesh2Dual(elmdist, eptr, eind, & numflag, & ncommonnodes, & xadj, & adjncy, & communicator);
132 
133  #ifdef ParmetisLoadBalancer_DEBUG_PRINT
134  int myrank = domain->giveEngngModel()->giveRank();
135  // DEBUG PRINT
136  fprintf(stderr, "[%d] xadj:", myrank);
137  for ( i = 0; i <= nlocalelems; i++ ) {
138  fprintf(stderr, " %d", xadj [ i ]);
139  }
140 
141  fprintf(stderr, "\n[%d] adjncy:", myrank);
142  for ( i = 0; i < xadj [ nlocalelems ]; i++ ) {
143  fprintf(stderr, " %d", adjncy [ i ]);
144  }
145 
146  fprintf(stderr, "\n");
147  #endif
148 
149 
150  // setup imbalance tolerance for each vertex weight - ubvec param
151  ubvec [ 0 ] = 1.05;
152  // setup options array
153  options [ 0 ] = 1; // set to zero for default
154  options [ 1 ] = 1; // get timings
155  options [ 2 ] = 15; // random seed
156  options [ 3 ] = 1; // sub-domains and processors are coupled
157  // set ratio of inter-proc communication compared to data redistribution time
158  itr = 1000.0;
159  // set partition weights by querying the load balance monitor
160  const FloatArray &_procweights = lbm->giveProcessorWeights();
161  if ( tpwgts == NULL ) {
162  if ( ( tpwgts = new real_t [ nproc ] ) == NULL ) {
163  OOFEM_ERROR("failed to allocate tpwgts");
164  }
165  }
166 
167  for ( i = 0; i < nproc; i++ ) {
168  tpwgts [ i ] = _procweights(i);
169  }
170 
171  /*
172  * // log processor weights
173  * OOFEM_LOG_RELEVANT ("[%d] ParmetisLoadBalancer: proc weights: ", myrank);
174  * for (i=0; i<nproc; i++) OOFEM_LOG_RELEVANT ("%4.3f ",tpwgts[i]);
175  * OOFEM_LOG_RELEVANT ("\n");
176  */
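ParMETIS expects the ncon * nparts entries of tpwgts to be target partition fractions that sum to 1 for each constraint, with ubvec (1.05 above) giving the permitted imbalance per constraint; the copy loop above therefore assumes that the monitor already returns normalized processor weights. A small stand-alone sketch, with made-up per-rank speeds, of how raw weights would be normalized into such an array:

    #include <cstddef>
    #include <vector>

    // Hypothetical helper: turn relative per-rank speeds into ParMETIS target
    // partition fractions (one constraint, so the entries must sum to 1).
    std::vector< double > makeTargetWeights(const std::vector< double > &speeds)
    {
        double sum = 0.0;
        for ( double s : speeds ) {
            sum += s;
        }

        std::vector< double > tpwgts( speeds.size() );
        for ( std::size_t i = 0; i < speeds.size(); i++ ) {
            tpwgts [ i ] = speeds [ i ] / sum;    // e.g. {2, 1, 1} -> {0.5, 0.25, 0.25}
        }
        return tpwgts;
    }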
177 
178  // obtain vertices weights (element weights) representing relative computational cost
179  if ( ( vwgt = new idx_t [ nlocalelems ] ) == NULL ) {
180  OOFEM_ERROR("failed to allocate vwgt");
181  }
182 
183  if ( ( vsize = new idx_t [ nlocalelems ] ) == NULL ) {
184  OOFEM_ERROR("failed to allocate vsize");
185  }
186 
187  for ( ie = 0, i = 0; i < nelem; i++ ) {
188  ielem = domain->giveElement(i + 1);
189  if ( ielem->giveParallelMode() == Element_local ) {
190  vwgt [ ie ] = ( int ) ( ielem->predictRelativeComputationalCost() * 100.0 );
191  vsize [ ie++ ] = 1; //ielem->predictRelativeRedistributionCost();
192  }
193  }
194 
195  wgtflag = 2;
196  numflag = 0;
197  ncon = 1;
198  if ( ( part = new idx_t [ nlocalelems ] ) == NULL ) {
199  OOFEM_ERROR("failed to allocate part");
200  }
201 
202  // call ParMETIS balancing routine ParMETIS_V3_AdaptiveRepart
203  ParMETIS_V3_AdaptiveRepart(elmdist, xadj, adjncy, vwgt, vsize, NULL, & wgtflag, & numflag, & ncon, & nproc,
204  tpwgts, ubvec, & itr, options, & edgecut, part, & communicator);
205 
206  // part contains partition vector for local elements on receiver
207  // we need to map it to domain elements (this is not the same, since
208  // domain may contain not only its local elements but remote elements as well)
209  int loc_num = 0;
210  this->elementPart.resize(nelem);
211  for ( i = 1; i <= nelem; i++ ) {
212  ielem = domain->giveElement(i);
213  if ( ielem->giveParallelMode() == Element_local ) {
214  this->elementPart.at(i) = part [ loc_num++ ];
215  } else {
216  // we cannot say anything about remote elements; this information is available on the partition
217  // that owns the corresponding local element
218  this->elementPart.at(i) = -1;
219  }
220  }
221 
222  if ( part ) {
223  delete[] part;
224  }
225 
226  #ifdef ParmetisLoadBalancer_DEBUG_PRINT
227  // debug
228  fprintf(stderr, "[%d] edgecut: %d elementPart:", myrank, edgecut);
229  for ( i = 1; i <= nelem; i++ ) {
230  fprintf( stderr, " %d", elementPart.at(i) );
231  }
232 
233  fprintf(stderr, "\n");
234  #endif
235 
236  // delete allocated xadj, adjncy arrays by ParMETIS
237  delete[] eind;
238  delete[] eptr;
239  delete[] vwgt;
240  delete[] vsize;
241  free(xadj);
242  free(adjncy);
243 
244  this->labelDofManagers();
245 }
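Once calculateLoadTransfer() has run, the balancer can be queried through giveElementPartition(), giveDofManState() and giveDofManPartitions(), defined further below. A stand-alone sketch (the reporting helper is hypothetical, not OOFEM API) of how a migration driver might consume the element part of that answer:

    #include "loadbalancer.h"
    #include "domain.h"
    #include "element.h"
    #include <cstdio>

    // Hypothetical reporting helper: list local elements that the new partitioning
    // would move away from this rank (remote elements carry no information here).
    void reportMigratingElements(oofem::LoadBalancer &lb, oofem::Domain *d, int myrank)
    {
        for ( int ie = 1; ie <= d->giveNumberOfElements(); ie++ ) {
            if ( d->giveElement(ie)->giveParallelMode() != oofem::Element_local ) {
                continue;
            }
            int newpart = lb.giveElementPartition(ie);   // target rank after repartitioning
            if ( newpart != myrank ) {
                std::fprintf(stderr, "element %d: rank %d -> rank %d\n", ie, myrank, newpart);
            }
        }
    }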
246 
247 void
248 ParmetisLoadBalancer :: initGlobalParmetisElementNumbering()
249 {
250  int nproc = domain->giveEngngModel()->giveNumberOfProcesses();
251  int myrank = domain->giveEngngModel()->giveRank();
252  IntArray procElementCounts(nproc);
253 
254  //if (procElementCounts) delete procElementCounts;
255  if ( elmdist == NULL ) {
256  elmdist = new idx_t [ nproc + 1 ];
257  if ( elmdist == NULL ) {
258  OOFEM_ERROR("failed to allocate elmdist array");
259  }
260  }
261 
262  // determine number of local elements for the receiver
263  int i, nlocelem = 0, nelem = domain->giveNumberOfElements();
264  int globnum;
265 
266  for ( i = 1; i <= nelem; i++ ) {
267  if ( domain->giveElement(i)->giveParallelMode() == Element_local ) {
268  nlocelem++;
269  }
270  }
271 
272  procElementCounts(myrank) = nlocelem;
273 
274  MPI_Allgather(& nlocelem, 1, MPI_INT, procElementCounts.givePointer(), 1, MPI_INT, MPI_COMM_WORLD);
275  elmdist [ 0 ] = 0;
276  for ( i = 0; i < nproc; i++ ) {
277  elmdist [ i + 1 ] = elmdist [ i ] + procElementCounts(i);
278  }
279 
280  // we need to number elements sequentially on each partition (and we start from rank 0)
281  // compute local offset
282  myGlobNumOffset = 0;
283  for ( i = 0; i < myrank; i++ ) {
284  myGlobNumOffset += procElementCounts(i);
285  }
286 
287  /* assemble maps of local numbering
288  * map is necessary since we may have remote elements that are not
289  * part of local domain for load balancing purposes
290  */
291  globnum = myGlobNumOffset + 1;
292  lToGMap.resize(nelem);
293  gToLMap.resize(nelem);
294  for ( i = 1; i <= nelem; i++ ) {
295  if ( domain->giveElement(i)->giveParallelMode() == Element_local ) {
296  lToGMap.at(i) = globnum;
297  gToLMap.at(globnum - myGlobNumOffset) = i;
298  globnum++;
299  } else {
300  lToGMap.at(i) = 0;
301  }
302  }
303 }
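elmdist as assembled above is the usual ParMETIS distribution array: with numflag = 0, rank r owns the contiguous global element numbers elmdist[r] .. elmdist[r+1] - 1, and elmdist[nproc] is the total number of local elements across all ranks. A serial sketch with made-up per-rank counts:

    #include <cstdio>

    int main()
    {
        // made-up local-element counts gathered from three ranks
        int counts [ 3 ] = { 10, 12, 8 };
        int nproc = 3;
        int elmdist [ 4 ];

        elmdist [ 0 ] = 0;
        for ( int r = 0; r < nproc; r++ ) {
            elmdist [ r + 1 ] = elmdist [ r ] + counts [ r ];   // -> { 0, 10, 22, 30 }
        }

        for ( int r = 0; r < nproc; r++ ) {
            std::printf("rank %d owns global elements %d..%d\n", r, elmdist [ r ], elmdist [ r + 1 ] - 1);
        }
        return 0;
    }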
304 
305 void
306 ParmetisLoadBalancer :: labelDofManagers()
307 {
308  int idofman, ndofman = domain->giveNumberOfDofManagers();
309  ConnectivityTable *ct = domain->giveConnectivityTable();
310  const IntArray *dofmanconntable;
311  DofManager *dofman;
312  Element *ielem;
313  dofManagerParallelMode dmode;
314  std :: set< int, std :: less< int > >__dmanpartitions;
315  int myrank = domain->giveEngngModel()->giveRank();
316  int nproc = domain->giveEngngModel()->giveNumberOfProcesses();
317  int ie, npart;
318 
319  // resize label array
320  dofManState.resize(ndofman);
321  dofManState.zero();
322  // resize dof man partitions
323  dofManPartitions.clear();
324  dofManPartitions.resize(ndofman);
325 
326  #ifdef ParmetisLoadBalancer_DEBUG_PRINT
327  int _cols = 0;
328  fprintf(stderr, "[%d] DofManager labels:\n", myrank);
329  #endif
330 
331  // loop over local dof managers
332  for ( idofman = 1; idofman <= ndofman; idofman++ ) {
333  dofman = domain->giveDofManager(idofman);
334  dmode = dofman->giveParallelMode();
335  if ( ( dmode == DofManager_local ) || ( dmode == DofManager_shared ) ) {
336  dofmanconntable = ct->giveDofManConnectivityArray(idofman);
337  __dmanpartitions.clear();
338  for ( ie = 1; ie <= dofmanconntable->giveSize(); ie++ ) {
339  ielem = domain->giveElement( dofmanconntable->at(ie) );
340  // assemble list of partitions sharing idofman dofmanager
341  // set is used to include possibly repeated partition only once
342  if ( ielem->giveParallelMode() == Element_local ) {
343  __dmanpartitions.insert( giveElementPartition( dofmanconntable->at(ie) ) );
344  }
345  }
346 
347  npart = __dmanpartitions.size();
348  dofManPartitions [ idofman - 1 ].resize( __dmanpartitions.size() );
349  int i = 1;
350  for ( auto &dm: __dmanpartitions ) {
351  dofManPartitions [ idofman - 1 ].at(i++) = dm;
352  }
353  }
354  }
355 
356  // handle master slave links between dofmans (master and slave required on same partition)
357  this->handleMasterSlaveDofManLinks();
358 
359 
360  /* Exchange new partitions for shared nodes */
361  CommunicatorBuff cb(nproc, CBT_dynamic);
362  Communicator com(domain->giveEngngModel(), &cb, myrank, nproc, CommMode_Dynamic);
363  com.packAllData(this, & ParmetisLoadBalancer :: packSharedDmanPartitions);
364  com.initExchange(SHARED_DOFMAN_PARTITIONS_TAG);
365  com.unpackAllData(this, & ParmetisLoadBalancer :: unpackSharedDmanPartitions);
366  com.finishExchange();
367 
368  /* label dof managers */
369  for ( idofman = 1; idofman <= ndofman; idofman++ ) {
370  dofman = domain->giveDofManager(idofman);
371  dmode = dofman->giveParallelMode();
372  npart = dofManPartitions [ idofman - 1 ].giveSize();
373  if ( ( dmode == DofManager_local ) || ( dmode == DofManager_shared ) ) {
374  // determine its state after balancing -> label
375  dofManState.at(idofman) = this->determineDofManState(idofman, myrank, npart, & dofManPartitions [ idofman - 1 ]);
376  } else {
377  dofManState.at(idofman) = DM_NULL;
378  }
379  }
380 
381 
382  #ifdef ParmetisLoadBalancer_DEBUG_PRINT
383  for ( idofman = 1; idofman <= ndofman; idofman++ ) {
384  fprintf(stderr, " | %d: ", idofman);
385  if ( dofManState.at(idofman) == DM_NULL ) {
386  fprintf(stderr, "NULL ");
387  } else if ( dofManState.at(idofman) == DM_Local ) {
388  fprintf(stderr, "Local ");
389  } else if ( dofManState.at(idofman) == DM_Shared ) {
390  fprintf(stderr, "Shared");
391  } else if ( dofManState.at(idofman) == DM_Remote ) {
392  fprintf(stderr, "Remote");
393  } else {
394  fprintf(stderr, "Unknown");
395  }
396 
397  //else if (dofManState.at(idofman) == DM_SharedExclude)fprintf (stderr, "ShdExc");
398  //else if (dofManState.at(idofman) == DM_SharedNew) fprintf (stderr, "ShdNew");
399  //else if (dofManState.at(idofman) == DM_SharedUpdate) fprintf (stderr, "ShdUpd");
400 
401  if ( ( ( ++_cols % 4 ) == 0 ) || ( idofman == ndofman ) ) {
402  fprintf(stderr, "\n");
403  }
404  }
405 
406  #endif
407 }
408 
409 int
410 ParmetisLoadBalancer :: determineDofManState(int idofman, int myrank, int npart, IntArray *dofManPartitions)
411 {
412  dofManagerParallelMode dmode = domain->giveDofManager(idofman)->giveParallelMode();
413  int answer = DM_Local;
414 
415  if ( ( dmode == DofManager_local ) || ( dmode == DofManager_shared ) ) {
416  if ( ( npart == 1 ) && ( dofManPartitions->at(1) == myrank ) ) {
417  // local remains local
418  answer = DM_Local;
419  } else if ( npart == 1 ) {
420  // local goes to remote partition
421  answer = DM_Remote;
422  } else { // npart > 1
423  // local becomes newly shared
424  answer = DM_Shared;
425  }
426  } else {
427  answer = DM_NULL;
428  }
429 
430  /*
431  * if (dmode == DofManager_local) {
432  * if ((npart == 1) && (dofManPartitions->at(1) == myrank)) {
433  * // local remains local
434  * answer = DM_Local;
435  * } else if (npart == 1) {
436  * // local goes to remote partition
437  * answer = DM_Remote;
438  * } else { // npart > 1
439  * // local becomes newly shared
440  * answer = DM_SharedNew;
441  * }
442  * } else if (dmode == DofManager_shared) {
443  * // compare old and new partition list
444  * int i, _same = true, containsMyRank = dofManPartitions->findFirstIndexOf (myrank);
445  * const IntArray* oldpart = domain->giveDofManager(idofman)->givePartitionList();
446  * for (i=1; i<=dofManPartitions->giveSize(); i++) {
447  * if ((dofManPartitions->at(i)!= myrank) &&
448  * (!oldpart->findFirstIndexOf(dofManPartitions->at(i)))) {
449  * _same=false; break;
450  * }
451  * }
452  * if (_same && containsMyRank) {
453  * answer = DM_Shared;
454  * } else if (containsMyRank) {
455  * answer = DM_SharedUpdate;
456  * } else { // !containsMyRank
457  * answer = DM_SharedExclude;
458  * }
459  * } else {
460  * answer = DM_NULL;
461  * }
462  */
463  return answer;
464 }
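The labeling above boils down to a simple rule on a node's new partition list: one entry equal to myrank keeps it local, one foreign entry makes it remote, and several entries make it shared. The same rule as a stand-alone sketch (the Label enum is a stand-in, not the LoadBalancer::DofManMode enum):

    #include <cassert>
    #include <vector>

    enum Label { Local, Remote, Shared };   // stand-ins for DM_Local, DM_Remote, DM_Shared

    Label classify(const std::vector< int > &newPartitions, int myrank)
    {
        if ( newPartitions.size() == 1 ) {
            return ( newPartitions [ 0 ] == myrank ) ? Local : Remote;
        }
        return Shared;   // the node ends up on several partitions
    }

    int main()
    {
        assert( classify({ 0 }, 0) == Local );       // stays on rank 0
        assert( classify({ 1 }, 0) == Remote );      // moves to rank 1
        assert( classify({ 0, 1 }, 0) == Shared );   // shared between ranks 0 and 1
        return 0;
    }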
465 
466 
467 LoadBalancer :: DofManMode
468 ParmetisLoadBalancer :: giveDofManState(int idofman)
469 {
470  return ( LoadBalancer :: DofManMode ) dofManState.at(idofman);
471 }
472 
473 
474 IntArray *
475 ParmetisLoadBalancer :: giveDofManPartitions(int idofman)
476 {
477  return & dofManPartitions [ idofman - 1 ];
478 }
479 
480 int
481 ParmetisLoadBalancer :: giveElementPartition(int ielem)
482 {
483  return elementPart.at(ielem);
484 }
485 
486 int
487 ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
488 {
489  int myrank = domain->giveEngngModel()->giveRank();
490  int iproc = pc.giveRank();
491  int ndofman, idofman;
492  DofManager *dofman;
493 
494  if ( iproc == myrank ) {
495  return 1; // skip local partition
496  }
497 
498  // query process communicator to use
499  ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
500  // loop over dofManagers and pack shared dofMan data
501  ndofman = domain->giveNumberOfDofManagers();
502  for ( idofman = 1; idofman <= ndofman; idofman++ ) {
503  dofman = domain->giveDofManager(idofman);
504  // test if iproc is in list of existing shared partitions
505  if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
506  ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
507  // send new partitions to remote representation
508  // fprintf (stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
509  pcbuff->write( dofman->giveGlobalNumber() );
510  this->giveDofManPartitions(idofman)->storeYourself(*pcbuff);
511  }
512  }
513 
514  pcbuff->write((int)PARMETISLB_END_DATA);
515  return 1;
516 }
517 
518 int
519 ParmetisLoadBalancer :: unpackSharedDmanPartitions(ProcessCommunicator &pc)
520 {
521  int myrank = domain->giveEngngModel()->giveRank();
522  int iproc = pc.giveRank();
523  int _globnum, _locnum;
524  IntArray _partitions;
525 
526  if ( iproc == myrank ) {
527  return 1; // skip local partition
528  }
529 
530  // query process communicator to use
531  ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
532  // init domain global2local map
533  domain->initGlobalDofManMap();
534 
535  pcbuff->read(_globnum);
536  // unpack dofman data
537  while ( _globnum != PARMETISLB_END_DATA ) {
538  _partitions.restoreYourself(*pcbuff);
539  if ( ( _locnum = domain->dofmanGlobal2Local(_globnum) ) ) {
540  this->addSharedDofmanPartitions(_locnum, _partitions);
541  } else {
542  OOFEM_ERROR("internal error, unknown global dofman %d", _globnum);
543  }
544 
545  /*
546  * fprintf (stderr,"[%d] Received shared plist of %d ", myrank, _globnum);
547  * for (int _i=1; _i<=dofManPartitions[_locnum-1].giveSize(); _i++)
548  * fprintf (stderr,"%d ", dofManPartitions[_locnum-1].at(_i));
549  * fprintf (stderr,"\n");
550  */
551  pcbuff->read(_globnum);
552  }
553 
554  return 1;
555 }
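The pack/unpack pair above fixes a simple per-neighbour message layout: a sequence of (global dofman number, new partition list) records closed by the PARMETISLB_END_DATA marker that the reading loop uses as its sentinel. A stand-alone sketch of that framing using a plain std::vector<int> in place of the communication buffer (the length-prefixed record is an assumption about what IntArray :: storeYourself writes):

    #include <vector>

    const int END_DATA = -1;   // stand-in for PARMETISLB_END_DATA

    // Append one record: global dofman number, then its new partition list
    // (length-prefixed for illustration).
    void packRecord(std::vector< int > &buf, int globnum, const std::vector< int > &parts)
    {
        buf.push_back(globnum);
        buf.push_back(( int ) parts.size());
        buf.insert(buf.end(), parts.begin(), parts.end());
    }

    // Message for one neighbouring rank: records followed by the end marker.
    std::vector< int > buildMessage()
    {
        std::vector< int > buf;
        packRecord(buf, 17, { 0, 2 });   // hypothetical shared node 17 ends up on ranks 0 and 2
        packRecord(buf, 42, { 1, 2 });
        buf.push_back(END_DATA);
        return buf;
    }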
556 
557 
558 void ParmetisLoadBalancer :: addSharedDofmanPartitions(int _locnum, IntArray _partitions)
559 {
560  for ( int part: _partitions ) {
561  dofManPartitions [ _locnum - 1 ].insertOnce( part );
562  }
563 }
564 
565 void ParmetisLoadBalancer :: handleMasterSlaveDofManLinks()
566 {
567  int idofman, ndofman = domain->giveNumberOfDofManagers();
568  DofManager *dofman;
569  //int myrank = domain->giveEngngModel()->giveRank();
570  int __i, __j, __partition, _master;
571  bool isSlave;
572  IntArray slaveMastersDofMans;
573 
574  /*
575  * We assume that in the old partitioning, the master and slave consistency was assured. This means that master is presented
576  * on the same partition as slave. The master can be local (then all slaves are local) or master is shared (then slaves are on
577  * partitions sharing the master).
578  *
579  * If master was local, then its new partitioning can be locally resolved (as all slaves were local).
580  * If the master was shared, the new partitioning of master has to be communicated between old sharing partitions.
581  */
582  // handle master slave links between dofmans (master and slave required on same partition)
583 
584  for ( idofman = 1; idofman <= ndofman; idofman++ ) {
585  dofman = domain->giveDofManager(idofman);
586  isSlave = dofman->hasAnySlaveDofs();
587 
588  if ( isSlave ) {
589  // ok, have a look at its masters
590  dofman->giveMasterDofMans(slaveMastersDofMans);
591  for ( __i = 1; __i <= slaveMastersDofMans.giveSize(); __i++ ) {
592  // loop over all slave masters
593  _master = slaveMastersDofMans.at(__i);
594 
595  // now loop over all of the slave's new partitions and add them to the master's partitions
596  for ( __j = 1; __j <= dofManPartitions [ idofman - 1 ].giveSize(); __j++ ) {
597  __partition = dofManPartitions [ idofman - 1 ].at(__j);
598  // add slave partition to master
599  dofManPartitions [ _master - 1 ].insertOnce(__partition);
600  }
601  }
602  }
603  }
604 }
605 
606 } // end namespace oofem