AbstractTetrahedralMeshWriter.cpp

/*

Copyright (C) University of Oxford, 2005-2010

University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.

This file is part of Chaste.

Chaste is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 2.1 of the License, or
(at your option) any later version.

Chaste is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details. The offer of Chaste under the terms of the
License is subject to the License being interpreted in accordance with
English Law and subject to any action against the University of Oxford
being under the jurisdiction of the English Courts.

You should have received a copy of the GNU Lesser General Public License
along with Chaste. If not, see <http://www.gnu.org/licenses/>.

*/

// Disable PETSc logging of MPI calls (we don't use this, anyway) to fix
// "right-hand operand of comma has no effect" warnings when building with
// PETSc 2.2.1.
#define PETSC_HAVE_BROKEN_RECURSIVE_MACRO

#include "AbstractTetrahedralMeshWriter.hpp"
#include "AbstractTetrahedralMesh.hpp"
#include "DistributedTetrahedralMesh.hpp"

#include <mpi.h> // For MPI_Send and MPI_Recv

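/**
 * Iterators that the writer keeps alive between successive calls to
 * GetNextNode() and GetNextElement(); allocated on the heap in the
 * constructor and accessed through mpIters.
 */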
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
struct MeshWriterIterators
{
    typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator* pNodeIter;
    typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator* pElemIter;
};

// Implementation

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::AbstractTetrahedralMeshWriter(const std::string& rDirectory,
                   const std::string& rBaseName,
                   const bool clearOutputDir)
    : AbstractMeshWriter<ELEMENT_DIM, SPACE_DIM>(rDirectory, rBaseName, clearOutputDir),
      mpMesh(NULL),
      mpNodeMap(NULL),
      mNodesPerElement(ELEMENT_DIM+1),
      mpParallelMesh(NULL),
      mpIters(new MeshWriterIterators<ELEMENT_DIM,SPACE_DIM>),
      mNodeCounterForParallelMesh(0),
      mElementCounterForParallelMesh(0)
{
    mpIters->pNodeIter = NULL;
    mpIters->pElemIter = NULL;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::~AbstractTetrahedralMeshWriter()
{
    if (mpIters->pNodeIter)
    {
        delete mpIters->pNodeIter;
    }
    if (mpIters->pElemIter)
    {
        delete mpIters->pElemIter;
    }

    delete mpIters;

    if (mpNodeMap)
    {
        delete mpNodeMap;
    }
}

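/**
 * Return the coordinates of the next node to be written.
 *
 * Only the master process should call this. When writing directly from a
 * mesh, the master first walks its own locally owned nodes; once that
 * iterator is exhausted the mesh must be a DistributedTetrahedralMesh, and
 * the remaining coordinates arrive one MPI message per node, tagged with the
 * global node index (the matching sends are in WriteFilesUsingParallelMesh).
 */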
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
std::vector<double> AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextNode()
{
    // If we are writing from a mesh...
    assert(PetscTools::AmMaster());
    if (mpMesh)
    {
        std::vector<double> coords(SPACE_DIM);
        double raw_coords[SPACE_DIM];

        // Iterate over the locally-owned nodes
        if ( (*(mpIters->pNodeIter)) != mpMesh->GetNodeIteratorEnd())
        {
            // Either this is a sequential mesh (and we own it all)
            // or it's parallel (and the master owns the first chunk)
            for (unsigned j=0; j<SPACE_DIM; j++)
            {
                coords[j] = (*(mpIters->pNodeIter))->GetPoint()[j];
            }

            mNodeCounterForParallelMesh = (*(mpIters->pNodeIter))->GetIndex() + 1; // Ready for when we run out of local nodes

            ++(*(mpIters->pNodeIter));
            return coords;
        }

        // If we didn't return then the iterator has reached the end of the local nodes.
        // It must be a parallel mesh and we are expecting messages...

        assert( mpParallelMesh != NULL );

        MPI_Status status;
        // Do the receive, and convert to std::vector on the master
        MPI_Recv(raw_coords, SPACE_DIM, MPI_DOUBLE, MPI_ANY_SOURCE, mNodeCounterForParallelMesh, PETSC_COMM_WORLD, &status);
        assert(status.MPI_ERROR == MPI_SUCCESS);
        for (unsigned j=0; j<coords.size(); j++)
        {
            coords[j] = raw_coords[j];
        }

        mNodeCounterForParallelMesh++;
        return coords;
    }
    else
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextNode();
    }
}

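/**
 * Return the data (node indices and attribute value) for the next element to
 * be written.
 *
 * Only the master process should call this. For a non-distributed mesh the
 * element iterator is used directly. For a DistributedTetrahedralMesh the
 * element counter identifies the next element: if the master is its
 * designated owner the data are read locally, otherwise they are received
 * from the owning process as ELEMENT_DIM+2 unsigneds (node indices plus the
 * attribute), tagged with mNumNodes + element index.
 */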
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
ElementData AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextElement()
{
    assert(PetscTools::AmMaster());
    // If we are writing from a mesh...
    if (mpMesh)
    {
        ElementData elem_data;
        elem_data.NodeIndices.resize(mNodesPerElement);

        if ( mpParallelMesh == NULL ) // Not using a parallel mesh
        {
            // Use the iterator
            assert(this->mNumElements == mpMesh->GetNumElements());

            for (unsigned j=0; j<elem_data.NodeIndices.size(); j++)
            {
                unsigned old_index = (*(mpIters->pElemIter))->GetNodeGlobalIndex(j);
                elem_data.NodeIndices[j] = mpMesh->IsMeshChanging() ? mpNodeMap->GetNewIndex(old_index) : old_index;
            }
            // Set attribute
            elem_data.AttributeValue = (*(mpIters->pElemIter))->GetRegion();

            ++(*(mpIters->pElemIter));

            return elem_data;
        }
        else // Parallel mesh
        {
            // Use the mElementCounterForParallelMesh variable to identify the next element
            if ( mpParallelMesh->CalculateDesignatedOwnershipOfElement( mElementCounterForParallelMesh ) == true )
            {
                // Master owns this element
                Element<ELEMENT_DIM, SPACE_DIM>* p_element = mpParallelMesh->GetElement(mElementCounterForParallelMesh);
                assert(elem_data.NodeIndices.size() == ELEMENT_DIM+1);
                assert( ! p_element->IsDeleted() );
                // Master can use the local data to recover node indices & attribute
                for (unsigned j=0; j<ELEMENT_DIM+1; j++)
                {
                    elem_data.NodeIndices[j] = p_element->GetNodeGlobalIndex(j);
                }
                elem_data.AttributeValue = p_element->GetRegion();
            }
            else
            {
                // Master doesn't own this element.
                // +2 to allow for the attribute value too
                unsigned raw_indices[ELEMENT_DIM+2];
                MPI_Status status;
                // Get it from elsewhere
                MPI_Recv(raw_indices, ELEMENT_DIM+2, MPI_UNSIGNED, MPI_ANY_SOURCE,
                         this->mNumNodes + mElementCounterForParallelMesh,
                         PETSC_COMM_WORLD, &status);
                // Convert to std::vector
                for (unsigned j=0; j<elem_data.NodeIndices.size(); j++)
                {
                    elem_data.NodeIndices[j] = raw_indices[j];
                }
                // Attribute value
                elem_data.AttributeValue = raw_indices[ELEMENT_DIM+1];
            }
            // Increment the element counter
            mElementCounterForParallelMesh++;

            return elem_data; // Non-master processes will return garbage here - but they should never write to file
        }
    }
    else // Not writing from a mesh
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextElement();
    }
}

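/**
 * Write mesh files from a live mesh object rather than from a mesh reader.
 *
 * Records the mesh sizes, sets up the node and element iterators, and then
 * either delegates to WriteFilesUsingParallelMesh() (if the mesh is a
 * DistributedTetrahedralMesh) or lets the master process do all the writing:
 * building a node map if the mesh may contain deleted nodes, caching the
 * boundary faces, and calling WriteFiles(). Non-master processes simply wait
 * at the barrier.
 */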
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingMesh(
      AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>& rMesh,
      bool keepOriginalElementIndexing)
{
    this->mpMeshReader = NULL;
    mpMesh = &rMesh;

    this->mNumNodes = mpMesh->GetNumNodes();
    this->mNumElements = mpMesh->GetNumElements();
    this->mNumBoundaryElements = mpMesh->GetNumBoundaryElements();

    typedef typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator NodeIterType;
    mpIters->pNodeIter = new NodeIterType(mpMesh->GetNodeIteratorBegin());

    typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator ElemIterType;
    mpIters->pElemIter = new ElemIterType(mpMesh->GetElementIteratorBegin());

    // Use this process's first element to gauge the size of all the elements
    if ( (*(mpIters->pElemIter)) != mpMesh->GetElementIteratorEnd())
    {
        mNodesPerElement = (*(mpIters->pElemIter))->GetNumNodes();
    }

    // Have we got a parallel mesh?
    mpParallelMesh = dynamic_cast<DistributedTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* >(&rMesh);

    if (mpParallelMesh != NULL)
    {
        // It's a parallel mesh
        WriteFilesUsingParallelMesh(keepOriginalElementIndexing);
        return;
    }

    if (!PetscTools::AmMaster())
    {
        PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingMesh"); // Paired with the master process writing files
        return;
    }

    // Set up a node map if we might have deleted nodes
    unsigned node_map_index = 0;
    if (mpMesh->IsMeshChanging())
    {
        mpNodeMap = new NodeMap(mpMesh->GetNumAllNodes());
        for (NodeIterType it = mpMesh->GetNodeIteratorBegin(); it != mpMesh->GetNodeIteratorEnd(); ++it)
        {
            mpNodeMap->SetNewIndex(it->GetIndex(), node_map_index++);
        }
    }

    // Cache all of the BoundaryElements
    for (unsigned i=0; i<(unsigned)rMesh.GetNumAllBoundaryElements(); i++)
    {
        BoundaryElement<ELEMENT_DIM-1, SPACE_DIM>* p_boundary_element = rMesh.GetBoundaryElement(i);
        if (p_boundary_element->IsDeleted() == false)
        {
            std::vector<unsigned> indices(p_boundary_element->GetNumNodes());
            for (unsigned j=0; j<p_boundary_element->GetNumNodes(); j++)
            {
                unsigned old_index = p_boundary_element->GetNodeGlobalIndex(j);
                indices[j] = mpMesh->IsMeshChanging() ? mpNodeMap->GetNewIndex(old_index) : old_index;
            }
            this->SetNextBoundaryFace(indices);
        }
    }
    this->WriteFiles();
    PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingMesh"); // Paired with the waiting slave processes
    delete mpIters->pNodeIter;
    mpIters->pNodeIter = NULL;
    delete mpIters->pElemIter;
    mpIters->pElemIter = NULL;
}

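/**
 * Write mesh files from a DistributedTetrahedralMesh.
 *
 * With keepOriginalElementIndexing, every process sends its owned boundary
 * faces, nodes and elements to the master, which assembles them in global
 * index order and writes the files (faces are tagged by face index, nodes by
 * node index, elements by mNumNodes + element index, matching the receives in
 * GetNextNode() and GetNextElement()). Without it, the processes take turns
 * appending their local data to the files: the master writes the headers,
 * each process appends in rank order, and the last process writes the footer,
 * with barriers serialising access to the files.
 */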
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingParallelMesh(bool keepOriginalElementIndexing)
{
    if (keepOriginalElementIndexing)
    {
        // Concentrate node information on the master

        MPI_Status status;
        double raw_coords[SPACE_DIM];
        // Concentrate and cache all of the BoundaryElements
        unsigned raw_face_indices[ELEMENT_DIM]; // Assuming that we don't have parallel quadratic meshes
        for (unsigned index=0; index<(unsigned)mpParallelMesh->GetNumBoundaryElements(); index++)
        {
            try
            {
                if ( mpParallelMesh->CalculateDesignatedOwnershipOfBoundaryElement( index ) == true )
                {
                    BoundaryElement<ELEMENT_DIM-1, SPACE_DIM>* p_boundary_element = mpParallelMesh->GetBoundaryElement(index);
                    assert(p_boundary_element->IsDeleted() == false);
                    for (unsigned j=0; j<ELEMENT_DIM; j++)
                    {
                        raw_face_indices[j] = p_boundary_element->GetNodeGlobalIndex(j);
                    }
                    MPI_Send(raw_face_indices, ELEMENT_DIM, MPI_UNSIGNED, 0, index, PETSC_COMM_WORLD);
                }
            }
            catch (Exception&)
            {
                // This process doesn't own this face; the designated owner will send it
            }
            if (PetscTools::AmMaster())
            {
                MPI_Recv(raw_face_indices, ELEMENT_DIM, MPI_UNSIGNED, MPI_ANY_SOURCE, index, PETSC_COMM_WORLD, &status);
                std::vector<unsigned> indices(ELEMENT_DIM);
                for (unsigned j=0; j<indices.size(); j++)
                {
                    indices[j] = raw_face_indices[j];
                }
                this->SetNextBoundaryFace(indices);
            }
        }
        PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingParallelMesh 1");

        // Master goes on to write as usual
        if (PetscTools::AmMaster())
        {
            this->WriteFiles();
        }
        else
        {
            // Slaves concentrate the Nodes and Elements
            typedef typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator NodeIterType;
            for (NodeIterType it = mpMesh->GetNodeIteratorBegin(); it != mpMesh->GetNodeIteratorEnd(); ++it)
            {
                for (unsigned j=0; j<SPACE_DIM; j++)
                {
                    raw_coords[j] = it->GetPoint()[j];
                }
                MPI_Send(raw_coords, SPACE_DIM, MPI_DOUBLE, 0, it->GetIndex(), PETSC_COMM_WORLD); // Nodes are sent tagged with their global index
            }

            // +2 allows for the attribute value
            unsigned raw_indices[ELEMENT_DIM+2]; // Assuming that we don't have parallel quadratic elements
            typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator ElementIterType;
            for (ElementIterType it = mpMesh->GetElementIteratorBegin(); it != mpMesh->GetElementIteratorEnd(); ++it)
            {
                unsigned index = it->GetIndex();
                if ( mpParallelMesh->CalculateDesignatedOwnershipOfElement( index ) == true )
                {
                    for (unsigned j=0; j<ELEMENT_DIM+1; j++)
                    {
                        raw_indices[j] = it->GetNodeGlobalIndex(j);
                    }
                    // Attribute value
                    raw_indices[ELEMENT_DIM+1] = it->GetRegion();
                    MPI_Send(raw_indices, ELEMENT_DIM+2, MPI_UNSIGNED, 0,
                             this->mNumNodes + index, // Elements are sent with tags offset by the number of nodes
                             PETSC_COMM_WORLD);
                }
            }
        }
        PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingParallelMesh 2");
    }
    else
    {
        for (unsigned writing_process=0; writing_process<PetscTools::GetNumProcs(); writing_process++)
        {
            if (PetscTools::GetMyRank() == writing_process)
            {
                out_stream p_file = out_stream(NULL);
                if (PetscTools::AmMaster())
                {
                    // Make sure headers are written first
                    assert(PetscTools::GetMyRank() == 0);
                    CreateFilesWithHeaders();
                }

                AppendLocalDataToFiles();

                if (PetscTools::AmTopMost())
                {
                    // Make sure footers are written last
                    assert(PetscTools::GetMyRank() == PetscTools::GetNumProcs()-1);
                    WriteFilesFooter();
                }
            }
            // Process i+1 waits for process i to close the file
            PetscTools::Barrier();
        } // Loop over writing_process
    }
}

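/*
 * The three methods below implement the rank-by-rank (non-indexed) parallel
 * write. They are deliberately NEVER_REACHED here: a concrete writer that
 * supports this mode must override all three.
 */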
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::CreateFilesWithHeaders()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::AppendLocalDataToFiles()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesFooter()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

// Explicit instantiation

template class AbstractTetrahedralMeshWriter<1,1>;
template class AbstractTetrahedralMeshWriter<1,2>;
template class AbstractTetrahedralMeshWriter<1,3>;
template class AbstractTetrahedralMeshWriter<2,2>;
template class AbstractTetrahedralMeshWriter<2,3>;
template class AbstractTetrahedralMeshWriter<3,3>;
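
/*
 * Illustrative usage (a sketch, not part of this file): a concrete subclass
 * such as TrianglesMeshWriter is normally driven through WriteFilesUsingMesh().
 * The mesh-construction call below is an assumption made for the example; any
 * AbstractTetrahedralMesh (distributed or not) can be passed in.
 *
 *     #include "TetrahedralMesh.hpp"
 *     #include "TrianglesMeshWriter.hpp"
 *
 *     TetrahedralMesh<2,2> mesh;
 *     mesh.ConstructRectangularMesh(10, 10);            // build a simple 10x10 test mesh
 *     TrianglesMeshWriter<2,2> writer("OutputDir", "mesh_base_name");
 *     writer.WriteFilesUsingMesh(mesh);                 // master gathers and writes node/element/face files
 */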
