35 #include "../sm/FETISolver/feticommunicator.h"
81 if ( dof->giveEquationNumber(dn) != 0 ) {
103 if ( fabs(limit) < 1.e-20 ) {
107 if ( err < 1.e-20 ) {
121 int boundaryDofManNum = 0;
128 for ( int i = 1; i <= nnodes; i++ ) {
136 commBuff.write(boundaryDofManNum);
138 #ifdef __VERBOSE_PARALLEL
139 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Sending data to partition 0 (send %d)\n",
141 "FETISolver :: setUpCommunicationMaps : send number of boundary dofMans", boundaryDofManNum);
145 MPI_Barrier(MPI_COMM_WORLD);
148 #ifdef __VERBOSE_PARALLEL
150 this->giveEngngModel()->giveRank(), "FETISolver :: setUpCommunicationMaps");
156 commMap.resize(boundaryDofManNum);
165 for ( int i = 1; i <= nnodes; i++ ) {
168 commMap.at(indx++) = i;
172 for ( int j = 1; j <= locNum.giveSize(); j++ ) {
173 if ( locNum.at(j) ) {
183 for ( int i = 1; i <= nnodes; i++ ) {
186 commMap.at(indx++) = i;
195 #ifdef __VERBOSE_PARALLEL
197 this->giveEngngModel()->giveRank(), "FETISolver :: setUpCommunicationMaps");
199 MPI_Barrier(MPI_COMM_WORLD);
200 #ifdef __VERBOSE_PARALLEL
202 this->giveEngngModel()->giveRank(), "FETISolver :: setUpCommunicationMaps");
204 MPI_Barrier(MPI_COMM_WORLD);
260 for ( i = 1; i <= size; i++ ) {
263 for ( j = 1; j <= ndofs; j++ ) {
264 if ( ( eqNum = locationArray.at(j) ) ) {
265 for ( ir = 1; ir <= nse; ir++ ) {
266 result &= send_buff->write( rbm.at(eqNum, ir) );
270 result &= send_buff->write(0.0);
284 int size, receivedRank;
285 int nshared, part, eqNum;
291 receivedRank = processComm.giveRank();
293 if ( receivedRank != 0 ) {
295 for ( int i = 1; i <= size; i++ ) {
296 to = toRecvMap->at(i);
300 for ( int irbm = 1; irbm <= nsem.at(receivedRank + 1); irbm++ ) {
302 result &= recv_buff->read(value);
308 for ( int j = 1; j <= nshared; j++ ) {
310 if ( part == processComm.giveRank() ) {
315 l.at(eqNum, rbmAddr.at(receivedRank + 1) + irbm - 1) = value;
319 l.at(eqNum, rbmAddr.at(receivedRank + 1) + irbm - 1) = ( -1.0 ) * value;
332 int to, from, receivedRank = 0, nshared, part, eqNum, result;
340 for ( int irbm = 1; irbm <= nsem.at(1); irbm++ ) {
341 for ( int i = 1; i <= size; i++ ) {
358 while ( locationArray.at(locpos) == 0 ) {
361 if ( locpos > locationArray.giveSize() ) {
366 value = rbm.at(locationArray.at(locpos), irbm);
372 for ( int j = 1; j <= nshared; j++ ) {
379 l.at(eqNum, rbmAddr.at(receivedRank + 1) + irbm - 1) = value;
383 l.at(eqNum, rbmAddr.at(receivedRank + 1) + irbm - 1) = ( -1.0 ) * value;
401 for ( int i = 1; i <= nse; i++ ) {
419 receivedRank = processComm.giveRank();
421 if ( receivedRank != 0 ) {
422 for ( int i = 1; i <= nsem.at(receivedRank + 1); i++ ) {
436 for ( int i = 1; i <= nsem.at(1); i++ ) {
452 int ndofs, eqNum, nshared, part, from;
460 for ( int i = 1; i <= size; i++ ) {
461 from = toSendMap->at(i);
466 for ( int k = 1; k <= ndofs; k++ ) {
468 for ( int j = 1; j <= nshared; j++ ) {
470 if ( part == processComm.giveRank() ) {
478 result &= send_buff->write(val);
482 for ( int j = 1; j <= ndofs; j++ ) {
483 if ( ( eqNum = locationArray.at(j) ) ) {
484 result &= send_buff->write( ( -1.0 ) * w.at(eqNum) );
508 for ( int i = 1; i <= size; i++ ) {
509 to = toRecvMap->at(i);
512 for ( int j = 1; j <= ndofs; j++ ) {
513 if ( ( eqNum = locationArray.at(j) ) ) {
514 result &= recv_buff->read(value);
515 dd.at(eqNum) = value;
517 #ifdef __VERBOSE_PARALLEL
518 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Unpacking solution value %f at %d\n",
519 this->giveEngngModel()->giveRank(), "FETISolver :: solveYourselfAt", value, eqNum);
531 int to, from, receivedRank = 0, nshared, part, eqNum, result, locpos;
537 for ( int i = 1; i <= size; i++ ) {
549 while ( locationArray.at(locpos) == 0 ) {
552 if ( locpos > locationArray.giveSize() ) {
563 for ( int j = 1; j <= nshared; j++ ) {
570 value += w.at(eqNum);
574 value = ( -1.0 ) * w.at(eqNum);
578 dd.at( locationArray.at(locpos) ) = value;
602 for ( int i = 1; i <= size; i++ ) {
605 for ( int j = 1; j <= ndofs; j++ ) {
606 if ( ( eqNum = locationArray.at(j) ) ) {
607 result &= send_buff->write( pp.at(eqNum) );
622 int size, receivedRank, to;
623 int nshared, part, eqNum;
629 receivedRank = processComm.giveRank();
632 if ( receivedRank != 0 ) {
633 for ( int i = 1; i <= size; i++ ) {
634 to = toRecvMap->at(i);
639 result &= recv_buff->read(value);
645 for ( int j = 1; j <= nshared; j++ ) {
647 if ( part == processComm.giveRank() ) {
652 g.at(eqNum) += value;
656 g.at(eqNum) += ( -1.0 ) * value;
668 int to, from, receivedRank = 0, nshared, part, eqNum, result, locpos;
674 for ( int i = 1; i <= size; i++ ) {
691 while ( locationArray.at(locpos) == 0 ) {
694 if ( locpos > locationArray.giveSize() ) {
699 value = pp.at( locationArray.at(locpos) );
705 for ( int j = 1; j <= nshared; j++ ) {
712 g.at(eqNum) += value;
716 g.at(eqNum) += ( -1.0 ) * value;
735 int ndofs, eqNum, nshared, part, from;
743 for ( int i = 1; i <= size; i++ ) {
744 from = toSendMap->at(i);
750 for ( int k = 1; k <= ndofs; k++ ) {
752 for ( int j = 1; j <= nshared; j++ ) {
754 if ( part == processComm.giveRank() ) {
762 result &= send_buff->write(val);
766 for ( int j = 1; j <= ndofs; j++ ) {
767 if ( ( eqNum = locationArray.at(j) ) ) {
768 result &= send_buff->write( ( -1.0 ) * d.at(eqNum) );
793 for ( int i = 1; i <= size; i++ ) {
796 for ( int j = 1; j <= ndofs; j++ ) {
797 if ( ( eqNum = locationArray.at(j) ) ) {
798 result &= recv_buff->read( dd.at(eqNum) );
810 int to, from, receivedRank = 0, nshared, part, eqNum, result, locpos;
816 for ( int i = 1; i <= size; i++ ) {
828 while ( locationArray.at(locpos) == 0 ) {
831 if ( locpos > locationArray.giveSize() ) {
842 for ( int j = 1; j <= nshared; j++ ) {
849 value += d.at(eqNum);
853 value = ( -1.0 ) * d.at(eqNum);
857 dd.at( locationArray.at(locpos) ) = value;
880 for ( int i = 1; i <= size; i++ ) {
883 for ( int j = 1; j <= ndofs; j++ ) {
884 if ( ( eqNum = locationArray.at(j) ) ) {
885 result &= send_buff->write( pp.at(eqNum) );
900 int size, receivedRank, to;
901 int nshared, part, eqNum;
907 receivedRank = processComm.giveRank();
910 if ( receivedRank != 0 ) {
911 for ( int i = 1; i <= size; i++ ) {
912 to = toRecvMap->at(i);
917 result &= recv_buff->read(value);
923 for ( int j = 1; j <= nshared; j++ ) {
925 if ( part == processComm.giveRank() ) {
930 p.at(eqNum) += value;
934 p.at(eqNum) += ( -1.0 ) * value;
946 int to, from, receivedRank = 0, nshared, part, eqNum, result, locpos;
952 for ( int i = 1; i <= size; i++ ) {
969 while ( locationArray.at(locpos) == 0 ) {
972 if ( locpos > locationArray.giveSize() ) {
977 value = pp.at( locationArray.at(locpos) );
983 for ( int j = 1; j <= nshared; j++ ) {
990 p.at(eqNum) += value;
994 p.at(eqNum) += ( -1.0 ) * value;
1013 for ( int irbm = 1; irbm <= nsem.at(rank + 1); irbm++ ) {
1029 for ( int irbm = 1; irbm <= nse; irbm++ ) {
1043 for ( int irbm = 1; irbm <= nse; irbm++ ) {
1056 int masterLoopStatus;
1057 double nom = 0.0, denom, alpha, beta, energyNorm = 0.0;
1061 if ( !partitionStiffness ) {
1082 MPI_Barrier(MPI_COMM_WORLD);
1084 #ifdef __VERBOSE_PARALLEL
1086 this->giveEngngModel()->giveRank(), "FETISolver :: solveYourselfAt");
1099 #ifdef __VERBOSE_PARALLEL
1100 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: rbmodes computation startup, lneq is %d\n",
1101 rank, "FETISolver :: solveYourselfAt", neq);
1106 #ifdef __VERBOSE_PARALLEL
1107 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Number of Rigid body modes %3d\n",
1108 rank, "FETISolver :: solveYourselfAt", nse);
1138 for ( int i = 1; i < size; i++ ) {
1145 #ifdef __VERBOSE_PARALLEL
1146 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received data from partition %3d\n",
1147 rank, "FETICommunicator :: setUpCommunicationMaps : received number of partition rbm", source);
1151 tnse += nsem.at(source + 1);
1157 OOFEM_LOG_INFO("Number of RBM per partition\npart. rbm\n-------------------------------\n");
1158 for ( int i = 1; i <= size; i++ ) {
1167 for ( int i = 2; i <= size; i++ ) {
1171 #ifdef __VERBOSE_PARALLEL
1172 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Sending number of Rigid body modes %3d\n",
1173 rank, "FETISolver :: solveYourselfAt", nse);
1181 MPI_Barrier(MPI_COMM_WORLD);
1205 MPI_Barrier(MPI_COMM_WORLD);
1206 #ifdef __VERBOSE_PARALLEL
1207 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: RBMMessage Barrier finished",
1208 rank, "FETISolver :: solveYourselfAt");
1226 MPI_Barrier(MPI_COMM_WORLD);
1235 if ( ( rank == 0 ) && ( tnse != 0 ) ) {
1290 MPI_Barrier(MPI_COMM_WORLD);
1292 #ifdef __VERBOSE_PARALLEL
1293 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Solution Approx Barrier finished\n",
1294 rank, "FETISolver :: solveYourselfAt");
1308 #ifdef __VERBOSE_PARALLEL
1309 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Residual contribution packing initiated\n",
1310 rank, "FETISolver :: solveYourselfAt");
1313 #ifdef __VERBOSE_PARALLEL
1314 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Residual contribution send initiated\n",
1315 rank, "FETISolver :: solveYourselfAt");
1319 MPI_Barrier(MPI_COMM_WORLD);
1320 #ifdef __VERBOSE_PARALLEL
1321 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Residual contribution Barrier finished\n",
1322 rank, "FETISolver :: solveYourselfAt");
1346 OOFEM_LOG_DEBUG("iteration gradient vector norm energy norm\n=====================================================================\n");
1348 OOFEM_LOG_DEBUG("iteration gradient vector norm\n================================================\n");
1355 for ( int i = 0; i < ni; i++ ) {
1376 MPI_Barrier(MPI_COMM_WORLD);
1394 MPI_Barrier(MPI_COMM_WORLD);
1402 OOFEM_LOG_RELEVANT("FETISolver::solve : zero denominator of coefficient alpha in the modified conjugate gradient method\n");
1413 alpha = nom / denom;
1419 w.at(j) += alpha * d.at(j);
1420 g.at(j) += alpha * p.at(j);
1431 OOFEM_LOG_RELEVANT("FETISolver::solve : zero denominator of coefficient beta in the modified conjugate gradient method\n");
1473 commBuff.read(masterLoopStatus);
1475 #ifdef __VERBOSE_PARALLEL
1476 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received Loop break signal from master\n",
1477 rank, "FETISolver :: solveYourselfAt");
1503 MPI_Barrier(MPI_COMM_WORLD);
1520 MPI_Barrier(MPI_COMM_WORLD);
1538 #ifdef __VERBOSE_PARALLEL
1540 OOFEM_LOG_DEBUG("End of the conjugate gradient method, nite %d, err %e\n", i, nom);
1555 #ifdef __VERBOSE_PARALLEL
1580 MPI_Barrier(MPI_COMM_WORLD);
1583 #ifdef __VERBOSE_PARALLEL
1590 #ifdef __VERBOSE_PARALLEL
1596 pp = partitionSolution;
1598 #ifdef __VERBOSE_PARALLEL
1611 #ifdef __VERBOSE_PARALLEL
1612 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received residuals from slaves\n",
1613 rank, "FETISolver :: solveYourselfAt");
1620 #ifdef __VERBOSE_PARALLEL
1621 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Sending residuals to master\n",
1622 rank, "FETISolver :: solveYourselfAt");
1627 MPI_Barrier(MPI_COMM_WORLD);
1630 #ifdef __VERBOSE_PARALLEL
1663 MPI_Barrier(MPI_COMM_WORLD);
1669 partitionSolution.add(help);
int packData(T *emodel, int(T::*packFunc)(ProcessCommunicator &))
Pack nodal data to send buff.
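A minimal sketch of how this member-function-pointer packing is typically driven from the solver side; the tag constant is purely illustrative and error handling is abbreviated:
    // Hedged sketch: ask the communicator to pack data for every collaborating
    // process; each ProcessCommunicator calls back solver->packRBM(...) to fill
    // its own send buffer, then the non-blocking sends are started.
    int result = 1;
    const int RBM_MESSAGE_TAG = 999;   // illustrative tag only
    result &= masterCommunicator->packAllData(this, & FETISolver :: packRBM);
    result &= masterCommunicator->initSend(RBM_MESSAGE_TAG);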
FloatMatrix l
Rigid body motions of all partitions. On master only.
The representation of EngngModel default unknown numbering.
void subtract(const FloatArray &src)
Subtracts array src from receiver.
#define NM_Success
Numerical method exited with success.
#define _IFT_FETISolver_maxerr
int giveGlobalNumber() const
int giveReferencePratition()
Returns reference partition number of receiver.
int giveNumberOfDofManagers() const
Returns number of dof managers in domain.
Base class for all matrices stored in sparse format.
int packGammas(ProcessCommunicator &processComm)
int packAllData(T *ptr, int(T::*packFunc)(ProcessCommunicator &))
Pack all problemCommunicators data to their send buffers.
The purpose of the DataStream abstract class is to allow storing/restoring context to different streams...
int masterUnpackRBM(ProcessCommunicator &processComm)
void zero()
Sets all components to zero.
double & at(int i)
Coefficient access function.
virtual int iRecv(int source, int tag, int count=0)
Starts standard mode, nonblocking receive.
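A minimal sketch of the nonblocking receive pattern these buffer services support, assuming a ProcessCommunicator named processComm is in scope; the source rank and tag are illustrative:
    // Hedged sketch: post a non-blocking receive, poll for completion, unpack.
    CommunicationBuffer *recvBuff = processComm.giveProcessCommunicatorBuff()->giveRecvBuff();
    int sourceRank = processComm.giveRank();   // partition we expect data from
    int tag = 0;                               // illustrative message tag
    recvBuff->iRecv(sourceRank, tag);
    while ( !recvBuff->testCompletion() ) {
        ;                                      // could overlap useful local work here
    }
    int nrbm;
    recvBuff->read(& nrbm, 1);                 // e.g. unpack a number of rigid body modes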
void setToSendArry(T *emodel, const IntArray &src, int packUnpackType)
Sets receiver toSend array to src.
This base class is an abstraction for all numerical methods solving sparse linear system of equations...
int giveCodeNumber(int partition_num, int dof_num)
Returns code number corresponding to partition number partition_num and to dof_num-th DOF...
ProcessCommunicator processCommunicator
int packQQProducts(ProcessCommunicator &processComm)
int packResiduals(ProcessCommunicator &processComm)
int estimateMaxPackSize(IntArray &, DataStream &, int &)
EngngModel * giveEngngModel()
Returns engineering model to which receiver is associated.
IntArray se
Indices of singular equations.
int unpackGammas(ProcessCommunicator &processComm)
void ldl_feti_sky(FloatArray &x, FloatArray &y, int nse, double limit, IntArray &se)
Solves the singular system of equations; the receiver should be factorized using the rbmodes service...
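A minimal sketch of the intended pairing with the rbmodes service listed further below, assuming partitionStiffness points to a Skyline matrix and reusing the rbm, nse, se and limit members from this glossary (the last rbmodes argument is illustrative):
    // Hedged sketch: factorize the possibly singular partition stiffness,
    // extracting its rigid body modes, then reuse the factored matrix to get a
    // particular solution for a given right-hand side.
    partitionStiffness->rbmodes(rbm, nse, se, limit, 3);   // LDL^T factorization + RBM extraction
    FloatArray u, r;                                       // solution / partition right-hand side
    // ... fill r ...
    partitionStiffness->ldl_feti_sky(u, r, nse, limit, se);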
int giveNumberOfProcesses() const
Returns the number of collaborating processes.
int unpackSolution(ProcessCommunicator &processComm)
void negated()
Changes sign of receiver values.
CommunicationBuffer * giveRecvBuff()
Returns receive buffer of receiver.
IntArray masterCommMap
List of local nodes (at master) participating in communication (list of boundary dof managers)...
int giveRank()
Returns corresponding rank of associated partition.
unsigned long NM_Status
Mask defining NumMethod status, which can be queried after the Numerical Method finishes computation...
#define OOFEM_LOG_DEBUG(...)
int giveCompleteLocationArray(int rank, IntArray &locationArray)
Returns code numbers for all DOFs associated with shared partition.
Class implementing an array of integers.
int & at(int i)
Coefficient access function.
#define OOFEM_LOG_RELEVANT(...)
Class implementing sparse matrix stored in skyline form.
const IntArray * giveToSendMap()
Returns receiver to send map.
Domain * domain
Pointer to domain.
int giveSharedPartition(int i)
Returns number of i-th shared partition of receiver.
int unpackPPVector(ProcessCommunicator &processComm)
IntArray rbmAddr
Addresses of initial partition contribution to rbm matrix.
int masterUnpackQQProduct(ProcessCommunicator &processComm)
virtual int testCompletion()
Tests if the operation identified by this->request is complete.
int giveNumberOfRows() const
Returns number of rows of receiver.
void setUpCommunicationMaps()
Sets up the communication maps.
#define OOFEM_LOG_INFO(...)
int ni
Max number of iterations.
double dotProduct(const FloatArray &x) const
Computes the dot product (or inner product) of receiver and argument.
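This is the service behind the conjugate-gradient coefficients in the listing above (around lines 1402-1420); a minimal sketch of one update step, with g, d, p and w taken to be the FloatArray members named in this glossary:
    // Hedged sketch of one CG step on the master: alpha = (g,g) / (d,p).
    double nom   = g.computeSquaredNorm();          // squared gradient norm
    double denom = d.dotProduct(p);                 // direction vector against assembled product
    if ( fabs(denom) < FETISOLVER_ZERONUM ) {
        OOFEM_LOG_RELEVANT("FETISolver::solve : zero denominator of coefficient alpha\n");
    }
    double alpha = nom / denom;
    for ( int j = 1; j <= w.giveSize(); j++ ) {
        w.at(j) += alpha * d.at(j);                 // advance primary unknowns
        g.at(j) += alpha * p.at(j);                 // update gradient
    }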
Class CommunicationBuffer provides abstraction for communication buffer.
#define _IFT_FETISolver_maxiter
int packDirectionVector(ProcessCommunicator &processComm)
virtual int givePackSizeOfInt(int count)
FETICommunicator * masterCommunicator
virtual int resize(int newSize)
Resizes buffer to given size.
double computeSquaredNorm() const
Computes the square of the norm.
int giveNumberOfDofs()
Returns number of DOFs (with associated equation) of receiver.
int packPPVector(ProcessCommunicator &processComm)
void setUpCommunicationMaps(EngngModel *pm)
Service for setting up the communication patterns with other remote processes.
int unpackResiduals(ProcessCommunicator &processComm)
virtual int iSend(int dest, int tag)
Starts standard mode, nonblocking send.
ProcessCommunicatorBuff * giveProcessCommunicatorBuff()
Returns communication buffer.
int masterMapDirectionVector()
virtual int write(bool data)
Writes a bool value.
const IntArray * giveToRecvMap()
Returns receiver to receive map.
virtual void init()
Initializes buffer to empty state.
Class representing process communicator for engineering model.
void beProductOf(const FloatMatrix &aMatrix, const FloatArray &anArray)
Receiver becomes the result of the product of aMatrix and anArray.
#define _IFT_FETISolver_energynormflag
FloatMatrix rbm
Rigid body motions.
void beTProductOf(const FloatMatrix &aMatrix, const FloatArray &anArray)
Receiver becomes the result of the product of aMatrix^T and anArray.
virtual int read(int *dest, int n)
Reads count integer values into the array pointed to by data.
double at(int i, int j) const
Coefficient access function.
void resize(int n)
Checks size of receiver towards requested bounds.
double limit
Linear dep./indep. trigger.
virtual IRResultType initializeFrom(InputRecord *ir)
int giveNumberOfSharedPartitions()
Returns number of partitions sharing receiver.
void rbmodes(FloatMatrix &r, int &nse, IntArray &se, double limit, int tc)
Splits (factorizes) the receiver into LDL^T form and computes the rigid body motions.
int giveNumberOfDomainEquations()
Returns the number of equations in the associated domain.
int initSend(int tag)
Initialize the send data exchange with associate problem.
virtual NM_Status solve(SparseMtrx &A, FloatArray &b, FloatArray &x)
Solves the given linear system by LDL^T factorization.
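FETISolver is driven through this same interface; a minimal, hypothetical call site (the matrix and vector names are illustrative, and the surrounding engineering-model setup is omitted):
    // Hedged sketch: invoke the FETI solver through the generic
    // SparseLinearSystemNM interface.
    FETISolver solver(domain, engngModel);
    NM_Status status = solver.solve(stiffnessMatrix, loadVector, displacementVector);
    if ( !( status & NM_Success ) ) {
        OOFEM_LOG_RELEVANT("FETISolver did not report success\n");
    }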
Class representing vector of real numbers.
int unpackAllData(T *ptr, int(T::*unpackFunc)(ProcessCommunicator &))
Unpack all problemCommunicators data from recv buffers.
#define FETISOLVER_MAX_RBM
int energyNorm_comput_flag
Flag indicating computation of energy norm.
int unpackDirectionVector(ProcessCommunicator &processComm)
Implementation of matrix containing floating point numbers.
IRResultType
Type defining the return values of InputRecord reading operations.
int packSolution(ProcessCommunicator &processComm)
virtual int givePackSizeOfDouble(int count)=0
FETISolver(Domain *d, EngngModel *m)
void giveCompleteLocationArray(IntArray &locationArray, const UnknownNumberingScheme &s) const
Returns full location array of receiver containing equation numbers of all dofs of receiver...
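This service is what the communication-map setup near lines 121-186 of the listing relies on; a minimal sketch of counting boundary (shared) dof managers, assuming a Domain pointer and an UnknownNumberingScheme s are in scope (loop structure simplified relative to the listing):
    // Hedged sketch: count shared dof managers carrying at least one equation.
    int nnodes = domain->giveNumberOfDofManagers();
    int boundaryDofManNum = 0;
    for ( int i = 1; i <= nnodes; i++ ) {
        DofManager *dman = domain->giveDofManager(i);
        if ( dman->giveParallelMode() != DofManager_shared ) {
            continue;                              // only shared nodes lie on the interface
        }
        IntArray locNum;
        dman->giveCompleteLocationArray(locNum, s);
        for ( int j = 1; j <= locNum.giveSize(); j++ ) {
            if ( locNum.at(j) ) {                  // nonzero entry => supported equation
                boundaryDofManNum++;
                break;
            }
        }
    }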
void resize(int rows, int cols)
Checks size of receiver towards requested bounds.
Class representing communicator for FETI solver.
CommunicationBuffer * giveSendBuff()
Returns send buffer of receiver.
void setToRecvArry(T *emodel, const IntArray &src, int packUnpackType)
Sets receiver toRecv array to src.
void zero()
Zeroes all coefficients of receiver.
REGISTER_SparseLinSolver(IMLSolver, ST_IML)
void beTProductOf(const FloatMatrix &a, const FloatMatrix &b)
Assigns to the receiver the product of a^T and b.
virtual int bcast(int root)
Initializes broadcast over collaborating processes.
int giveRank() const
Returns domain rank in a group of collaborating processes (0..groupSize-1)
virtual int write(const int *src, int n)
Writes count integer values from the array pointed to by data.
FETIBoundaryDofManager * giveDofManager(int i)
Returns reference to i-th boundary dof manager.
#define FETISOLVER_ZERONUM
Computational (machine) zero.
void zero()
Zeroes all coefficients of receiver.
int initSend(int tag)
Initializes data send exchange with all problems.
int packRBM(ProcessCommunicator &processComm)
The Communicator and corresponding buffers (represented by this class) are separated in order to allo...
Abstract base class representing the "problem" under consideration.
int unpackData(T *emodel, int(T::*unpackFunc)(ProcessCommunicator &))
Unpack nodal data from recv buff.
int giveSize() const
Returns the size of receiver.
The oofem namespace defines a context or scope in which all oofem names are defined.
virtual int read(bool &data)
Reads a bool value from data.
Abstract class Dof represents Degree Of Freedom in finite element mesh.
DofManager * giveDofManager(int n)
Service for accessing particular domain dof manager.
void beInverseOf(const FloatMatrix &src)
Modifies receiver to become inverse of given parameter.
void negated()
Switches the sign of every coefficient of receiver.
EngngModel * giveEngngModel()
double err
Max allowed error.
IntArray * giveMasterCommMapPtr()
Returns pointer to master comm map stored in receiver.
EngngModel * engngModel
Pointer to engineering model.
int initReceive(int tag)
Initializes data receive exchange with all problems.
#define _IFT_FETISolver_limit
DofManager is shared by neighboring partitions; it is necessary to sum contributions from all contrib...
dofManagerParallelMode giveParallelMode() const
Return dofManagerParallelMode of receiver.
void projection(FloatArray &v, FloatMatrix &l, FloatMatrix &l1)
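No description is attached to this entry; a minimal sketch of what such a projection can look like when assembled from the FloatMatrix/FloatArray services listed here, assuming l1 already holds the inverse of l^T l (the function name is hypothetical):
    // Hedged sketch: orthogonalize v against the range of l,
    // v := v - l (l^T l)^{-1} l^T v.
    void projectionSketch(FloatArray &v, FloatMatrix &l, FloatMatrix &l1)
    {
        FloatArray help1, help2, help3;
        help1.beTProductOf(l, v);       // help1 = l^T v
        help2.beProductOf(l1, help1);   // help2 = (l^T l)^{-1} l^T v
        help3.beProductOf(l, help2);    // help3 = l (l^T l)^{-1} l^T v
        v.subtract(help3);              // remove the projected component
    }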
void add(const FloatArray &src)
Adds array src to receiver.
int initReceive(int tag)
Initialize the receive data exchange with associate problem.
FloatArray w
Primary unknowns.
CommunicatorBuff * commBuff
Common Communicator buffer.
void resize(int s)
Resizes receiver towards requested size.