AbstractTetrahedralMeshWriter.cpp

/*

Copyright (C) University of Oxford, 2005-2011

University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.

This file is part of Chaste.

Chaste is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 2.1 of the License, or
(at your option) any later version.

Chaste is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details. The offer of Chaste under the terms of the
License is subject to the License being interpreted in accordance with
English Law and subject to any action against the University of Oxford
being under the jurisdiction of the English Courts.

You should have received a copy of the GNU Lesser General Public License
along with Chaste. If not, see <http://www.gnu.org/licenses/>.

*/

// Disable PETSc logging of MPI calls (we don't use this, anyway) to fix
// "right-hand operand of comma has no effect" warnings when building with
// PETSc 2.2.1.
#define PETSC_HAVE_BROKEN_RECURSIVE_MACRO
#include "AbstractTetrahedralMeshWriter.hpp"
#include "AbstractTetrahedralMesh.hpp"
#include "DistributedTetrahedralMesh.hpp"
#include "Version.hpp"

#include <mpi.h> // For MPI_Send, MPI_Recv

// Standard headers used directly in this file
#include <algorithm> // For std::sort
#include <climits>   // For UINT_MAX
#include <set>
#include <string>
#include <vector>

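/**
 * Helper struct holding the node and element iterators that are kept alive
 * between successive calls to GetNextNode() and GetNextElement() when writing
 * directly from a mesh (rather than from a mesh reader).
 */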
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
struct MeshWriterIterators
{
    typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator* pNodeIter;
    typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator* pElemIter;
};

// Implementation

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::AbstractTetrahedralMeshWriter(const std::string& rDirectory,
                                                                                     const std::string& rBaseName,
                                                                                     const bool clearOutputDir)
    : AbstractMeshWriter<ELEMENT_DIM, SPACE_DIM>(rDirectory, rBaseName, clearOutputDir),
      mpMesh(NULL),
      mpNodeMap(NULL),
      mNodesPerElement(ELEMENT_DIM+1),
      mpDistributedMesh(NULL),
      mpIters(new MeshWriterIterators<ELEMENT_DIM,SPACE_DIM>),
      mNodeCounterForParallelMesh(0),
      mElementCounterForParallelMesh(0),
      mFilesAreBinary(false)
{
    mpIters->pNodeIter = NULL;
    mpIters->pElemIter = NULL;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::~AbstractTetrahedralMeshWriter()
{
    if (mpIters->pNodeIter)
    {
        delete mpIters->pNodeIter;
    }
    if (mpIters->pElemIter)
    {
        delete mpIters->pElemIter;
    }

    delete mpIters;

    if (mpNodeMap)
    {
        delete mpNodeMap;
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
std::vector<double> AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextNode()
{
    assert(PetscTools::AmMaster());

    // If we are writing directly from a mesh...
    if (mpMesh)
    {
        std::vector<double> coords(SPACE_DIM);
        double raw_coords[SPACE_DIM];

        // Iterate over the locally-owned nodes
        if ( (*(mpIters->pNodeIter)) != mpMesh->GetNodeIteratorEnd())
        {
            // Either this is a sequential mesh (and we own all of it)
            // or it's parallel (and the master owns the first chunk)
            for (unsigned j=0; j<SPACE_DIM; j++)
            {
                coords[j] = (*(mpIters->pNodeIter))->GetPoint()[j];
            }

            // Ready for when we run out of local nodes
            mNodeCounterForParallelMesh = (*(mpIters->pNodeIter))->GetIndex() + 1;

            ++(*(mpIters->pNodeIter));
            return coords;
        }

        // If we didn't return then the iterator has reached the end of the local nodes.
        // It must be a parallel mesh and we are expecting messages...

        assert( mpDistributedMesh != NULL );

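        // Remote nodes arrive from the owning processes tagged with their global
        // node index (see the matching MPI_Send in WriteFilesUsingParallelMesh),
        // so receiving with tag mNodeCounterForParallelMesh yields them in order.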
        MPI_Status status;
        // Do receive, convert to std::vector on the master
        MPI_Recv(raw_coords, SPACE_DIM, MPI_DOUBLE, MPI_ANY_SOURCE, mNodeCounterForParallelMesh, PETSC_COMM_WORLD, &status);
        assert(status.MPI_ERROR == MPI_SUCCESS);
        for (unsigned j=0; j<coords.size(); j++)
        {
            coords[j] = raw_coords[j];
        }

        mNodeCounterForParallelMesh++;
        return coords;
    }
    else
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextNode();
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
ElementData AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextElement()
{
    assert(PetscTools::AmMaster());

    // If we are writing directly from a mesh...
    if (mpMesh)
    {
        ElementData elem_data;
        elem_data.NodeIndices.resize(mNodesPerElement);

        if ( mpDistributedMesh == NULL ) // Not using a parallel mesh
        {
            // Use the iterator
            assert(this->mNumElements == mpMesh->GetNumElements());

            for (unsigned j=0; j<elem_data.NodeIndices.size(); j++)
            {
                unsigned old_index = (*(mpIters->pElemIter))->GetNodeGlobalIndex(j);
                elem_data.NodeIndices[j] = mpMesh->IsMeshChanging() ? mpNodeMap->GetNewIndex(old_index) : old_index;
            }
            // Set attribute
            elem_data.AttributeValue = (*(mpIters->pElemIter))->GetRegion();

            ++(*(mpIters->pElemIter));

            return elem_data;
        }
        else // Parallel mesh
        {
            // Use the mElementCounterForParallelMesh variable to identify the next element
            if ( mpDistributedMesh->CalculateDesignatedOwnershipOfElement( mElementCounterForParallelMesh ) == true )
            {
                // The master owns this element
                Element<ELEMENT_DIM, SPACE_DIM>* p_element = mpDistributedMesh->GetElement(mElementCounterForParallelMesh);
                assert(elem_data.NodeIndices.size() == ELEMENT_DIM+1);
                assert( ! p_element->IsDeleted() );
                // The master can use the local data to recover node indices and the attribute
                for (unsigned j=0; j<ELEMENT_DIM+1; j++)
                {
                    elem_data.NodeIndices[j] = p_element->GetNodeGlobalIndex(j);
                }
                elem_data.AttributeValue = p_element->GetRegion();
            }
            else
            {
                // The master doesn't own this element.
                // +2 to allow for the attribute value too
                unsigned raw_indices[ELEMENT_DIM+2];
                MPI_Status status;
                // Receive it from the owning process
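                // Element messages are tagged with mNumNodes + element index so that
                // they cannot clash with node messages, which use the (smaller)
                // global node index as the tag.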
                MPI_Recv(raw_indices, ELEMENT_DIM+2, MPI_UNSIGNED, MPI_ANY_SOURCE,
                         this->mNumNodes + mElementCounterForParallelMesh,
                         PETSC_COMM_WORLD, &status);
                // Convert to std::vector
                for (unsigned j=0; j<elem_data.NodeIndices.size(); j++)
                {
                    elem_data.NodeIndices[j] = raw_indices[j];
                }
                // Attribute value
                elem_data.AttributeValue = raw_indices[ELEMENT_DIM+1];
            }
            // Increment the element counter
            mElementCounterForParallelMesh++;

            return elem_data; // Non-master processes will return garbage here - but they should never write to file
        }
    }
    else // Not writing from a mesh
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextElement();
    }
}

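/**
 * Write mesh files from a live mesh rather than a mesh reader.  If binary output
 * is requested and the original element indexing is kept, a node connectivity
 * (.ncl) file is written first; a distributed mesh is then handled by
 * WriteFilesUsingParallelMesh(), otherwise the master process writes everything.
 */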
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingMesh(
      AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>& rMesh,
      bool keepOriginalElementIndexing)
{
    this->mpMeshReader = NULL;
    mpMesh = &rMesh;

    this->mNumNodes = mpMesh->GetNumNodes();
    this->mNumElements = mpMesh->GetNumElements();
    this->mNumBoundaryElements = mpMesh->GetNumBoundaryElements();

    typedef typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator NodeIterType;
    mpIters->pNodeIter = new NodeIterType(mpMesh->GetNodeIteratorBegin());

    typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator ElemIterType;
    mpIters->pElemIter = new ElemIterType(mpMesh->GetElementIteratorBegin());

    // Use this process's first element to gauge the size of all the elements
    if ( (*(mpIters->pElemIter)) != mpMesh->GetElementIteratorEnd())
    {
        mNodesPerElement = (*(mpIters->pElemIter))->GetNumNodes();
    }

    // The connectivity (.ncl) file is only written when the output is binary (currently
    // only available for TrianglesMeshWriter) and the original element order is preserved
    if (this->mFilesAreBinary && keepOriginalElementIndexing)
    {
        unsigned max_elements_all;
        if (PetscTools::IsSequential())
        {
            max_elements_all = mpMesh->CalculateMaximumContainingElementsPerProcess();
        }
        else
        {
            unsigned max_elements_per_process = mpMesh->CalculateMaximumContainingElementsPerProcess();
            MPI_Allreduce(&max_elements_per_process, &max_elements_all, 1, MPI_UNSIGNED, MPI_MAX, PETSC_COMM_WORLD);
        }

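        // Each process in turn appends its locally-owned nodes' containing-element
        // lists to the .ncl file; the barrier at the end of the loop body keeps the
        // chunks in strict rank order.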
        for (unsigned writing_process=0; writing_process<PetscTools::GetNumProcs(); writing_process++)
        {
            if (PetscTools::GetMyRank() == writing_process)
            {
                std::string node_connect_list_file_name = this->mBaseName + ".ncl";
                out_stream p_ncl_file = out_stream(NULL);

                if (PetscTools::AmMaster())
                {
                    assert(writing_process==0);
                    // Open the file for the first time
                    p_ncl_file = this->mpOutputFileHandler->OpenOutputFile(node_connect_list_file_name);

                    // Write the ncl header
                    *p_ncl_file << this->mNumNodes << "\t";
                    *p_ncl_file << max_elements_all << "\t";
                    *p_ncl_file << "\tBIN\n";
                }
                else
                {
                    // Append to the existing file
                    p_ncl_file = this->mpOutputFileHandler->OpenOutputFile(node_connect_list_file_name, std::ios::app);
                }

                // Write each node's data
                unsigned default_marker = UINT_MAX;

                for (NodeIterType iter = mpMesh->GetNodeIteratorBegin();
                     iter != mpMesh->GetNodeIteratorEnd();
                     ++iter)
                {
                    // Get the containing element indices from the node's set and sort them
                    std::set<unsigned>& r_elem_set = iter->rGetContainingElementIndices();
                    std::vector<unsigned> elem_vector(r_elem_set.begin(), r_elem_set.end());
                    std::sort(elem_vector.begin(), elem_vector.end());
                    // Pad the vector with the default marker
                    for (unsigned elem_index=elem_vector.size(); elem_index<max_elements_all; elem_index++)
                    {
                        elem_vector.push_back(default_marker);
                    }
                    assert(elem_vector.size() == max_elements_all);
                    // Write raw data out of the std::vector into the file
                    p_ncl_file->write((char*)&elem_vector[0], elem_vector.size()*sizeof(unsigned));
                }

                if (PetscTools::AmTopMost())
                {
                    *p_ncl_file << "#\n# " + ChasteBuildInfo::GetProvenanceString();
                }

                p_ncl_file->close();
            }

            PetscTools::Barrier("AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingMesh");
        }
    }

    // Have we got a parallel mesh?
    mpDistributedMesh = dynamic_cast<DistributedTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* >(&rMesh);

    if (mpDistributedMesh != NULL)
    {
        // It's a parallel mesh
        WriteFilesUsingParallelMesh(keepOriginalElementIndexing);
        return;
    }

    if (!PetscTools::AmMaster())
    {
        PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingMesh"); // Paired with the master process writing files
        return;
    }

    // Set up a node map if we might have deleted nodes
    unsigned node_map_index = 0;
    if (mpMesh->IsMeshChanging())
    {
        mpNodeMap = new NodeMap(mpMesh->GetNumAllNodes());
        for (NodeIterType it = mpMesh->GetNodeIteratorBegin(); it != mpMesh->GetNodeIteratorEnd(); ++it)
        {
            mpNodeMap->SetNewIndex(it->GetIndex(), node_map_index++);
        }
    }

    // Cache all of the boundary elements
    for (unsigned i=0; i<(unsigned)mpMesh->GetNumAllBoundaryElements(); i++)
    {
        BoundaryElement<ELEMENT_DIM-1, SPACE_DIM>* p_boundary_element = mpMesh->GetBoundaryElement(i);
        if (p_boundary_element->IsDeleted() == false)
        {
            std::vector<unsigned> indices(p_boundary_element->GetNumNodes());
            for (unsigned j=0; j<p_boundary_element->GetNumNodes(); j++)
            {
                unsigned old_index = p_boundary_element->GetNodeGlobalIndex(j);
                indices[j] = mpMesh->IsMeshChanging() ? mpNodeMap->GetNewIndex(old_index) : old_index;
            }
            this->SetNextBoundaryFace(indices);
        }
    }

    this->WriteFiles();
    PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingMesh"); // Paired with waiting slave processes
    delete mpIters->pNodeIter;
    mpIters->pNodeIter = NULL;
    delete mpIters->pElemIter;
    mpIters->pElemIter = NULL;
}

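/**
 * Write files when the mesh is a DistributedTetrahedralMesh.  With
 * keepOriginalElementIndexing=true everything is funnelled through the master
 * via point-to-point MPI messages (nodes tagged with their global index,
 * elements with mNumNodes + index); otherwise each process appends its own
 * chunk of data to the files in rank order.
 */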
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingParallelMesh(bool keepOriginalElementIndexing)
{
    if (keepOriginalElementIndexing)
    {
        // Concentrate the node information on the master

        MPI_Status status;
        double raw_coords[SPACE_DIM];

        // Concentrate and cache all of the boundary elements
        unsigned raw_face_indices[ELEMENT_DIM]; // Assuming that we don't have parallel quadratic meshes
        for (unsigned index=0; index<(unsigned)mpDistributedMesh->GetNumBoundaryElements(); index++)
        {
            try
            {
                if ( mpDistributedMesh->CalculateDesignatedOwnershipOfBoundaryElement( index ) == true )
                {
                    BoundaryElement<ELEMENT_DIM-1, SPACE_DIM>* p_boundary_element = mpDistributedMesh->GetBoundaryElement(index);
                    assert(p_boundary_element->IsDeleted() == false);
                    for (unsigned j=0; j<ELEMENT_DIM; j++)
                    {
                        raw_face_indices[j] = p_boundary_element->GetNodeGlobalIndex(j);
                    }
                    MPI_Send(raw_face_indices, ELEMENT_DIM, MPI_UNSIGNED, 0, index, PETSC_COMM_WORLD);
                }
            }
            catch (Exception& e)
            {
                // The boundary element is not local to this process, so there is nothing to send
            }
            if (PetscTools::AmMaster())
            {
                MPI_Recv(raw_face_indices, ELEMENT_DIM, MPI_UNSIGNED, MPI_ANY_SOURCE, index, PETSC_COMM_WORLD, &status);
                std::vector<unsigned> indices(ELEMENT_DIM);
                for (unsigned j=0; j<indices.size(); j++)
                {
                    indices[j] = raw_face_indices[j];
                }
                this->SetNextBoundaryFace(indices);
            }
        }
        PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingParallelMesh 1");

        // The master goes on to write as usual
        if (PetscTools::AmMaster())
        {
            this->WriteFiles();
        }
        else
        {
            // Slaves send their nodes and elements to the master
            typedef typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator NodeIterType;
            for (NodeIterType it = mpMesh->GetNodeIteratorBegin(); it != mpMesh->GetNodeIteratorEnd(); ++it)
            {
                for (unsigned j=0; j<SPACE_DIM; j++)
                {
                    raw_coords[j] = it->GetPoint()[j];
                }
                MPI_Send(raw_coords, SPACE_DIM, MPI_DOUBLE, 0, it->GetIndex(), PETSC_COMM_WORLD); // Nodes are sent with their global index as the tag
            }

            // +2 allows for the attribute value
            unsigned raw_indices[ELEMENT_DIM+2]; // Assuming that we don't have parallel quadratic elements
            typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator ElementIterType;
            for (ElementIterType it = mpMesh->GetElementIteratorBegin(); it != mpMesh->GetElementIteratorEnd(); ++it)
            {
                unsigned index = it->GetIndex();
                if ( mpDistributedMesh->CalculateDesignatedOwnershipOfElement( index ) == true )
                {
                    for (unsigned j=0; j<ELEMENT_DIM+1; j++)
                    {
                        raw_indices[j] = it->GetNodeGlobalIndex(j);
                    }
                    // Attribute value
                    raw_indices[ELEMENT_DIM+1] = it->GetRegion();
                    MPI_Send(raw_indices, ELEMENT_DIM+2, MPI_UNSIGNED, 0,
                             this->mNumNodes + (it->GetIndex()), // Elements are sent with tags offset by the number of nodes
                             PETSC_COMM_WORLD);
                }
            }
        }
        PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingParallelMesh 2");
    }
    else
    {
        for (unsigned writing_process=0; writing_process<PetscTools::GetNumProcs(); writing_process++)
        {
            if (PetscTools::GetMyRank() == writing_process)
            {
                if (PetscTools::AmMaster())
                {
                    // Make sure headers are written first
                    assert(PetscTools::GetMyRank() == 0);
                    CreateFilesWithHeaders();
                }

                AppendLocalDataToFiles();

                if (PetscTools::AmTopMost())
                {
                    // Make sure footers are written last
                    assert(PetscTools::GetMyRank() == PetscTools::GetNumProcs()-1);
                    WriteFilesFooter();
                }
            }
            // Process i+1 waits for process i to close the file
            PetscTools::Barrier();
        } // Loop over writing_process
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::CreateFilesWithHeaders()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::AppendLocalDataToFiles()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesFooter()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

// Explicit instantiation

template class AbstractTetrahedralMeshWriter<1,1>;
template class AbstractTetrahedralMeshWriter<1,2>;
template class AbstractTetrahedralMeshWriter<1,3>;
template class AbstractTetrahedralMeshWriter<2,2>;
template class AbstractTetrahedralMeshWriter<2,3>;
template class AbstractTetrahedralMeshWriter<3,3>;
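// Usage sketch (illustrative only; the concrete mesh, reader and writer objects
// named below are assumptions based on the wider Chaste library, not defined in
// this file):
//
//     DistributedTetrahedralMesh<3,3> mesh;
//     mesh.ConstructFromMeshReader(mesh_reader);
//     TrianglesMeshWriter<3,3> writer("OutputDirectory", "mesh_base_name");
//     writer.WriteFilesUsingMesh(mesh);   // Collective call: run on every process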
